  * Exported interfaces ---- output
  * ===============================
  *
- * There are three exported interfaces; the first is one designed to
- * be used from within the kernel:
+ * There are four exported interfaces; two for use within the kernel,
+ * and two for use from userspace.
  *
- *	void get_random_bytes(void *buf, int nbytes);
- *
- * This interface will return the requested number of random bytes,
- * and place it in the requested buffer.
+ * Exported interfaces ---- userspace output
+ * -----------------------------------------
  *
- * The two other interfaces are two character devices /dev/random and
+ * The userspace interfaces are two character devices /dev/random and
  * /dev/urandom.  /dev/random is suitable for use when very high
  * quality randomness is desired (for example, for key generation or
  * one-time pads), as it will only return a maximum of the number of
  * bits of randomness (as estimated by the random number generator)
  * contained in the entropy pool.
  *
  * The /dev/urandom device does not have this limit, and will return
  * as many bytes as are requested.  As more and more random bytes are
  * requested without giving time for the entropy pool to recharge,
  * this will result in random numbers that are merely cryptographically
  * strong.  For many applications, however, this is acceptable.
  *
+ * Exported interfaces ---- kernel output
+ * --------------------------------------
+ *
+ * The primary kernel interface is
+ *
+ *	void get_random_bytes(void *buf, int nbytes);
+ *
+ * This interface will return the requested number of random bytes,
+ * and place it in the requested buffer.  This is equivalent to a
+ * read from /dev/urandom.
+ *
+ * For less critical applications, there are the functions:
+ *
+ *	u32 get_random_u32()
+ *	u64 get_random_u64()
+ *	unsigned int get_random_int()
+ *	unsigned long get_random_long()
+ *
+ * These are produced by a cryptographic RNG seeded from get_random_bytes,
+ * and so do not deplete the entropy pool as much.  These are recommended
+ * for most in-kernel operations *if the result is going to be stored in
+ * the kernel*.
+ *
+ * Specifically, the get_random_int() family does not attempt to do
+ * "anti-backtracking".  If you capture the state of the kernel (e.g.
+ * by snapshotting the VM), you can figure out previous get_random_int()
+ * return values.  But if the value is stored in the kernel anyway,
+ * this is not a problem.
+ *
+ * It *is* safe to expose get_random_int() output to attackers (e.g. as
+ * network cookies); given outputs 1..n, it's not feasible to predict
+ * outputs 0 or n+1.  The only concern is an attacker who breaks into
+ * the kernel later; the get_random_int() engine is not reseeded as
+ * often as the get_random_bytes() one.
+ *
+ * get_random_bytes() is needed for keys that need to stay secret after
+ * they are erased from the kernel.  For example, any key that will
+ * be wrapped and stored encrypted.  And session encryption keys: we'd
+ * like to know that after the session is closed and the keys erased,
+ * the plaintext is unrecoverable to someone who recorded the ciphertext.
+ *
+ * But for network ports/cookies, stack canaries, PRNG seeds, address
+ * space layout randomization, session *authentication* keys, or other
+ * applications where the sensitive data is stored in the kernel in
+ * plaintext for as long as it's sensitive, the get_random_int() family
+ * is just fine.
+ *
+ * Consider ASLR.  We want to keep the address space secret from an
+ * outside attacker while the process is running, but once the address
+ * space is torn down, it's of no use to an attacker any more.  And it's
+ * stored in kernel data structures as long as it's alive, so worrying
+ * about an attacker's ability to extrapolate it from the get_random_int()
+ * CRNG is silly.
+ *
+ * Even some cryptographic keys are safe to generate with get_random_int().
+ * In particular, keys for SipHash are generally fine.  Here, knowledge
+ * of the key authorizes you to do something to a kernel object (inject
+ * packets to a network connection, or flood a hash table), and the
+ * key is stored with the object being protected.  Once it goes away,
+ * we no longer care if anyone knows the key.
+ *
+ * prandom_u32()
+ * -------------
+ *
+ * For even weaker applications, see the pseudorandom generator
+ * functions prandom_u32(), prandom_max(), and prandom_bytes().  If the
+ * random numbers aren't security-critical at all, these are *far* cheaper.
+ * Useful for self-tests, random error simulation, randomized backoffs,
+ * and any other application where you trust that nobody is trying to
+ * maliciously mess with you by guessing the "random" numbers.
+ *
  * Exported interfaces ---- input
  * ==============================
  *
@@ -295,7 +364,7 @@
  * To allow fractional bits to be tracked, the entropy_count field is
  * denominated in units of 1/8th bits.
  *
- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
+ * 2*(ENTROPY_SHIFT + poolbitshift) must <= 31, or the multiply in
  * credit_entropy_bits() needs to be 64 bits wide.
  */
 #define ENTROPY_SHIFT 3
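The header comment above sorts in-kernel randomness into three tiers. A minimal usage sketch of how a caller might choose between them (not part of this patch; the example function names and the surrounding driver context are hypothetical):

	#include <linux/random.h>

	/* Tier 1: a key that must stay secret even after it is erased
	 * from kernel memory (e.g. wrapped and stored encrypted), so
	 * anti-backtracking matters -> get_random_bytes(). */
	static void example_make_wrap_key(u8 key[32])
	{
		get_random_bytes(key, 32);
	}

	/* Tier 2: a value stored in kernel data structures in plaintext
	 * for as long as it is sensitive (network cookie, ASLR offset)
	 * -> the cheaper get_random_u32(). */
	static u32 example_make_cookie(void)
	{
		return get_random_u32();
	}

	/* Tier 3: nothing security-critical, e.g. jitter for a retry
	 * backoff -> prandom_u32(), far cheaper still. */
	static unsigned int example_backoff_jitter_ms(unsigned int max_ms)
	{
		return prandom_u32() % max_ms;
	}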
@@ -359,9 +428,9 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
  * polynomial which improves the resulting TGFSR polynomial to be
  * irreducible, which we have made here.
  */
-static struct poolinfo {
-	int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
-#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
+static const struct poolinfo {
+	int poolbitshift, poolwords, poolbytes, poolfracbits;
+#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
 	int tap1, tap2, tap3, tap4, tap5;
 } poolinfo_table[] = {
 	/* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
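To make the fixed-point bookkeeping concrete, here is S(128) (the input pool) expanded by hand as a standalone sketch, not kernel code; the names mirror the struct fields above:

	#include <assert.h>

	#define ENTROPY_SHIFT 3

	int main(void)
	{
		int poolwords    = 128;
		int poolbitshift = 7 + 5;	/* ilog2(128) + 5, i.e. log2(4096 bits) */
		int poolbytes    = poolwords * 4;	/* 512 */
		int poolfracbits = poolwords << (ENTROPY_SHIFT + 5);	/* 32768 */

		/* poolfracbits is the pool's bit capacity scaled into
		 * 1/8th-bit units. */
		assert(poolfracbits == (poolbytes * 8) << ENTROPY_SHIFT);

		/* The overflow rule from the ENTROPY_SHIFT comment:
		 * 2*(3 + 12) = 30 <= 31, so the multiply in
		 * credit_entropy_bits() fits in a signed 32-bit int. */
		assert(2 * (ENTROPY_SHIFT + poolbitshift) <= 31);
		return 0;
	}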
@@ -415,7 +484,7 @@ struct crng_state {
 	spinlock_t	lock;
 };
 
-struct crng_state primary_crng = {
+static struct crng_state primary_crng = {
 	.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
 };
 
@@ -470,7 +539,6 @@ struct entropy_store {
 	unsigned short add_ptr;
 	unsigned short input_rotate;
 	int entropy_count;
-	int entropy_total;
 	unsigned int initialized:1;
 	unsigned int last_data_init:1;
 	__u8 last_data[EXTRACT_SIZE];
@@ -643,7 +711,7 @@ static void process_random_ready_list(void)
  */
 static void credit_entropy_bits(struct entropy_store *r, int nbits)
 {
-	int entropy_count, orig;
+	int entropy_count, orig, has_initialized = 0;
 	const int pool_size = r->poolinfo->poolfracbits;
 	int nfrac = nbits << ENTROPY_SHIFT;
 
@@ -698,23 +766,25 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
 		entropy_count = 0;
 	} else if (entropy_count > pool_size)
 		entropy_count = pool_size;
+	if ((r == &blocking_pool) && !r->initialized &&
+	    (entropy_count >> ENTROPY_SHIFT) > 128)
+		has_initialized = 1;
 	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
 		goto retry;
 
-	r->entropy_total += nbits;
-	if (!r->initialized && r->entropy_total > 128) {
+	if (has_initialized)
 		r->initialized = 1;
-		r->entropy_total = 0;
-	}
 
 	trace_credit_entropy_bits(r->name, nbits,
-				  entropy_count >> ENTROPY_SHIFT,
-				  r->entropy_total, _RET_IP_);
+				  entropy_count >> ENTROPY_SHIFT, _RET_IP_);
 
 	if (r == &input_pool) {
 		int entropy_bits = entropy_count >> ENTROPY_SHIFT;
+		struct entropy_store *other = &blocking_pool;
 
-		if (crng_init < 2 && entropy_bits >= 128) {
+		if (crng_init < 2) {
+			if (entropy_bits < 128)
+				return;
 			crng_reseed(&primary_crng, r);
 			entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
 		}
@@ -725,20 +795,14 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
 			wake_up_interruptible(&random_read_wait);
 			kill_fasync(&fasync, SIGIO, POLL_IN);
 		}
-		/* If the input pool is getting full, send some
-		 * entropy to the blocking pool until it is 75% full.
+		/* If the input pool is getting full, and the blocking
+		 * pool has room, send some entropy to the blocking
+		 * pool.
 		 */
-		if (entropy_bits > random_write_wakeup_bits &&
-		    r->initialized &&
-		    r->entropy_total >= 2*random_read_wakeup_bits) {
-			struct entropy_store *other = &blocking_pool;
-
-			if (other->entropy_count <=
-			    3 * other->poolinfo->poolfracbits / 4) {
-				schedule_work(&other->push_work);
-				r->entropy_total = 0;
-			}
-		}
+		if (!work_pending(&other->push_work) &&
+		    (ENTROPY_BITS(r) > 6 * r->poolinfo->poolbytes) &&
+		    (ENTROPY_BITS(other) <= 6 * other->poolinfo->poolbytes))
+			schedule_work(&other->push_work);
 	}
 }
 
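The replacement transfer test keeps the old 75% threshold even though the new comment no longer spells it out: a pool holding 8 * poolbytes bits is three-quarters full at 6 * poolbytes bits, and the new code simply applies that bound to both pools. A quick standalone check of the arithmetic (plain C, not kernel code):

	#include <stdio.h>

	static void show(int poolwords)
	{
		int poolbytes = poolwords * 4;
		int poolbits  = poolbytes * 8;

		printf("%3d words: threshold %4d of %4d bits (%d%%)\n",
		       poolwords, 6 * poolbytes, poolbits,
		       100 * 6 * poolbytes / poolbits);
	}

	int main(void)
	{
		show(128);	/* input pool:    3072 of 4096 bits (75%) */
		show(32);	/* blocking pool:  768 of 1024 bits (75%) */
		return 0;
	}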
@@ -777,6 +841,7 @@ static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
 static void invalidate_batched_entropy(void);
+static void numa_crng_init(void);
 
 static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
 static int __init parse_trust_cpu(char *arg)
@@ -805,7 +870,9 @@ static void crng_initialize(struct crng_state *crng)
 		}
 		crng->state[i] ^= rv;
 	}
-	if (trust_cpu && arch_init) {
+	if (trust_cpu && arch_init && crng == &primary_crng) {
+		invalidate_batched_entropy();
+		numa_crng_init();
 		crng_init = 2;
 		pr_notice("random: crng done (trusting CPU's manufacturer)\n");
 	}
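For context, the trust_cpu flag tested above defaults to CONFIG_RANDOM_TRUST_CPU and is wired to an early boot parameter (the registration is not shown in this diff). Assuming the standard parameter spelling used by this file, booting with:

	random.trust_cpu=off

prevents crng_init from being promoted to 2 here even on kernels built with the option enabled, forcing the CRNG to wait for genuinely gathered entropy.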
@@ -1553,6 +1620,11 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
 	int large_request = (nbytes > 256);
 
 	trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
+	if (!r->initialized && r->pull) {
+		xfer_secondary_pool(r, ENTROPY_BITS(r->pull)/8);
+		if (!r->initialized)
+			return 0;
+	}
 	xfer_secondary_pool(r, nbytes);
 	nbytes = account(r, nbytes, 0, 0);
 
@@ -1783,7 +1855,7 @@ EXPORT_SYMBOL(get_random_bytes_arch);
  * data into the pool to prepare it for use. The pool is not cleared
  * as that can only decrease the entropy in the pool.
  */
-static void init_std_data(struct entropy_store *r)
+static void __init init_std_data(struct entropy_store *r)
 {
 	int i;
 	ktime_t now = ktime_get_real();
@@ -1810,7 +1882,7 @@ static void init_std_data(struct entropy_store *r)
  * take care not to overwrite the precious per platform data
  * we were given.
  */
-static int rand_initialize(void)
+int __init rand_initialize(void)
 {
 	init_std_data(&input_pool);
 	init_std_data(&blocking_pool);
@@ -1822,7 +1894,6 @@ static int rand_initialize(void)
 	}
 	return 0;
 }
-early_initcall(rand_initialize);
 
 #ifdef CONFIG_BLOCK
 void rand_initialize_disk(struct gendisk *disk)
@@ -2211,8 +2282,8 @@ struct batched_entropy {
 		u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
 	};
 	unsigned int position;
+	spinlock_t batch_lock;
 };
-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2222,12 +2293,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
  * wait_for_random_bytes() should be called and return 0 at least once
  * at any point prior.
  */
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+};
+
 u64 get_random_u64(void)
 {
 	u64 ret;
-	bool use_lock;
-	unsigned long flags = 0;
+	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
 
@@ -2242,28 +2315,25 @@ u64 get_random_u64(void)
 
 	warn_unseeded_randomness(&previous);
 
-	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u64);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+	batch = raw_cpu_ptr(&batched_entropy_u64);
+	spin_lock_irqsave(&batch->batch_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u64);
+	spin_unlock_irqrestore(&batch->batch_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u64);
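Per the comment at the top of this hunk, a caller that needs the returned value to be cryptographically strong must make sure the CRNG is ready first. A hedged usage sketch (the enclosing function is hypothetical, not from this patch):

	#include <linux/random.h>

	static u64 example_seeded_id(void)
	{
		/* Sleep until the CRNG is initialized; returns nonzero
		 * if interrupted by a signal first. */
		if (wait_for_random_bytes())
			return 0;

		return get_random_u64();
	}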
 
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+};
 u32 get_random_u32(void)
 {
 	u32 ret;
-	bool use_lock;
-	unsigned long flags = 0;
+	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
 
@@ -2272,18 +2342,14 @@ u32 get_random_u32(void)
 
 	warn_unseeded_randomness(&previous);
 
-	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u32);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+	batch = raw_cpu_ptr(&batched_entropy_u32);
+	spin_lock_irqsave(&batch->batch_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u32);
+	spin_unlock_irqrestore(&batch->batch_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
@@ -2297,12 +2363,19 @@ static void invalidate_batched_entropy(void)
 	int cpu;
 	unsigned long flags;
 
-	write_lock_irqsave(&batched_entropy_reset_lock, flags);
 	for_each_possible_cpu(cpu) {
-		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
-		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+		struct batched_entropy *batched_entropy;
+
+		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
+		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
+		batched_entropy->position = 0;
+		spin_unlock(&batched_entropy->batch_lock);
+
+		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
+		spin_lock(&batched_entropy->batch_lock);
+		batched_entropy->position = 0;
+		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
 	}
 }
 
 /**
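One subtlety in the loop above: the IRQ flags are saved once when the u32 batch lock is taken and restored only when the u64 batch lock is released, so interrupts stay disabled across both critical sections. Schematically (a sketch, not code from this patch):

	spin_lock_irqsave(&u32_batch->batch_lock, flags);	/* IRQs off, flags saved */
	u32_batch->position = 0;
	spin_unlock(&u32_batch->batch_lock);			/* IRQs remain off */

	spin_lock(&u64_batch->batch_lock);
	u64_batch->position = 0;
	spin_unlock_irqrestore(&u64_batch->batch_lock, flags);	/* IRQs restored */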