LCOV - code coverage report
Current view: top level - drivers/char - random.c
Test: coverage.info    Date: 2023-07-19 18:55:55
Coverage:    Lines: 180 / 442 (40.7 %)    Functions: 19 / 48 (39.6 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
       2             : /*
       3             :  * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
       4             :  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
       5             :  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
       6             :  *
       7             :  * This driver produces cryptographically secure pseudorandom data. It is divided
       8             :  * into roughly six sections, each with a section header:
       9             :  *
      10             :  *   - Initialization and readiness waiting.
      11             :  *   - Fast key erasure RNG, the "crng".
      12             :  *   - Entropy accumulation and extraction routines.
      13             :  *   - Entropy collection routines.
      14             :  *   - Userspace reader/writer interfaces.
      15             :  *   - Sysctl interface.
      16             :  *
      17             :  * The high level overview is that there is one input pool, into which
      18             :  * various pieces of data are hashed. Prior to initialization, some of that
      19             :  * data is then "credited" as having a certain number of bits of entropy.
      20             :  * When enough bits of entropy are available, the hash is finalized and
      21             :  * handed as a key to a stream cipher that expands it indefinitely for
      22             :  * various consumers. This key is periodically refreshed as the various
      23             :  * entropy collectors, described below, add data to the input pool.
      24             :  */
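/*
 * Illustration (not part of random.c, and not secure): a toy model of the
 * data flow described above, using stand-in arithmetic so the sketch stays
 * self-contained. The real driver hashes with BLAKE2s and expands with
 * ChaCha20; each toy function names the real routine it mirrors.
 */
static unsigned long toy_pool;  /* stands in for the BLAKE2s input pool */
static unsigned long toy_key;   /* stands in for the ChaCha key */
static unsigned int toy_bits;   /* stands in for input_pool.init_bits */

static void toy_mix(const void *buf, size_t len)       /* cf. mix_pool_bytes() */
{
        const unsigned char *p = buf;

        while (len--)
                toy_pool = toy_pool * 31 + *p++;       /* stand-in for hashing */
}

static void toy_credit(unsigned int bits)              /* cf. credit_init_bits() */
{
        toy_bits += bits;
        if (toy_bits >= 256)
                toy_key = toy_pool;                    /* "finalize" into a key */
}

static unsigned long toy_expand(void)                  /* cf. the crng below */
{
        toy_key = toy_key * 6364136223846793005UL + 1; /* stand-in for ChaCha20 */
        return toy_key;
}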
      25             : 
      26             : #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      27             : 
      28             : #include <linux/utsname.h>
      29             : #include <linux/module.h>
      30             : #include <linux/kernel.h>
      31             : #include <linux/major.h>
      32             : #include <linux/string.h>
      33             : #include <linux/fcntl.h>
      34             : #include <linux/slab.h>
      35             : #include <linux/random.h>
      36             : #include <linux/poll.h>
      37             : #include <linux/init.h>
      38             : #include <linux/fs.h>
      39             : #include <linux/blkdev.h>
      40             : #include <linux/interrupt.h>
      41             : #include <linux/mm.h>
      42             : #include <linux/nodemask.h>
      43             : #include <linux/spinlock.h>
      44             : #include <linux/kthread.h>
      45             : #include <linux/percpu.h>
      46             : #include <linux/ptrace.h>
      47             : #include <linux/workqueue.h>
      48             : #include <linux/irq.h>
      49             : #include <linux/ratelimit.h>
      50             : #include <linux/syscalls.h>
      51             : #include <linux/completion.h>
      52             : #include <linux/uuid.h>
      53             : #include <linux/uaccess.h>
      54             : #include <linux/suspend.h>
      55             : #include <linux/siphash.h>
      56             : #include <linux/sched/isolation.h>
      57             : #include <crypto/chacha.h>
      58             : #include <crypto/blake2s.h>
      59             : #include <asm/archrandom.h>
      60             : #include <asm/processor.h>
      61             : #include <asm/irq.h>
      62             : #include <asm/irq_regs.h>
      63             : #include <asm/io.h>
      64             : 
      65             : /*********************************************************************
      66             :  *
      67             :  * Initialization and readiness waiting.
      68             :  *
      69             :  * Much of the RNG infrastructure is devoted to various dependencies
      70             :  * being able to wait until the RNG has collected enough entropy and
      71             :  * is ready for safe consumption.
      72             :  *
      73             :  *********************************************************************/
      74             : 
      75             : /*
      76             :  * crng_init is protected by base_crng->lock, and only increases
      77             :  * its value (from empty->early->ready).
      78             :  */
      79             : static enum {
      80             :         CRNG_EMPTY = 0, /* Little to no entropy collected */
      81             :         CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
      82             :         CRNG_READY = 2  /* Fully initialized with POOL_READY_BITS collected */
      83             : } crng_init __read_mostly = CRNG_EMPTY;
      84             : static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
      85             : #define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
      86             : /* Various types of waiters for crng_init->CRNG_READY transition. */
      87             : static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
      88             : static struct fasync_struct *fasync;
      89             : static ATOMIC_NOTIFIER_HEAD(random_ready_notifier);
      90             : 
      91             : /* Control how we warn userspace. */
      92             : static struct ratelimit_state urandom_warning =
      93             :         RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
      94             : static int ratelimit_disable __read_mostly =
      95             :         IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
      96             : module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
      97             : MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
      98             : 
      99             : /*
     100             :  * Returns whether or not the input pool has been seeded and thus guaranteed
     101             :  * to supply cryptographically secure random numbers. This applies to: the
     102             :  * /dev/urandom device, the get_random_bytes function, and the get_random_{u8,
     103             :  * u16,u32,u64,long} family of functions.
     104             :  *
     105             :  * Returns: true if the input pool has been seeded.
     106             :  *          false if the input pool has not been seeded.
     107             :  */
     108           0 : bool rng_is_initialized(void)
     109             : {
     110           0 :         return crng_ready();
     111             : }
     112             : EXPORT_SYMBOL(rng_is_initialized);
     113             : 
     114           0 : static void __cold crng_set_ready(struct work_struct *work)
     115             : {
     116           1 :         static_branch_enable(&crng_is_ready);
     117           0 : }
     118             : 
     119             : /* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
     120             : static void try_to_generate_entropy(void);
     121             : 
     122             : /*
     123             :  * Wait for the input pool to be seeded and thus guaranteed to supply
     124             :  * cryptographically secure random numbers. This applies to: the /dev/urandom
     125             :  * device, the get_random_bytes function, and the get_random_{u8,u16,u32,u64,
     126             :  * long} family of functions. Using any of these functions without first
     127             :  * calling this function forfeits the guarantee of security.
     128             :  *
     129             :  * Returns: 0 if the input pool has been seeded.
     130             :  *          -ERESTARTSYS if the function was interrupted by a signal.
     131             :  */
     132           0 : int wait_for_random_bytes(void)
     133             : {
     134           0 :         while (!crng_ready()) {
     135             :                 int ret;
     136             : 
     137           0 :                 try_to_generate_entropy();
     138           0 :                 ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
     139           0 :                 if (ret)
     140           0 :                         return ret > 0 ? 0 : ret;
     141             :         }
     142             :         return 0;
     143             : }
     144             : EXPORT_SYMBOL(wait_for_random_bytes);
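/*
 * Usage sketch (hypothetical caller, not part of random.c): block until the
 * pool is seeded, then draw key material. The error path propagates
 * -ERESTARTSYS if a signal interrupted the wait.
 */
static int example_gen_session_key(u8 key[32])
{
        int ret = wait_for_random_bytes();

        if (ret)
                return ret;
        get_random_bytes(key, 32);
        return 0;
}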
     145             : 
     146             : /*
     147             :  * Add a callback function that will be invoked when the crng is initialised,
      148             :  * or immediately if it already has been. Only use this if you are absolutely
     149             :  * sure it is required. Most users should instead be able to test
     150             :  * `rng_is_initialized()` on demand, or make use of `get_random_bytes_wait()`.
     151             :  */
     152           1 : int __cold execute_with_initialized_rng(struct notifier_block *nb)
     153             : {
     154             :         unsigned long flags;
     155           1 :         int ret = 0;
     156             : 
     157           1 :         spin_lock_irqsave(&random_ready_notifier.lock, flags);
     158           1 :         if (crng_ready())
     159           1 :                 nb->notifier_call(nb, 0, NULL);
     160             :         else
     161           0 :                 ret = raw_notifier_chain_register((struct raw_notifier_head *)&random_ready_notifier.head, nb);
     162           1 :         spin_unlock_irqrestore(&random_ready_notifier.lock, flags);
     163           1 :         return ret;
     164             : }
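/*
 * Usage sketch (hypothetical, not part of random.c): a notifier that runs
 * once the crng is ready, registered via execute_with_initialized_rng().
 */
static int example_rng_ready(struct notifier_block *nb, unsigned long action, void *data)
{
        pr_info("example: crng is now initialized\n");
        return NOTIFY_DONE;
}
static struct notifier_block example_rng_nb = { .notifier_call = example_rng_ready };
/* ... from some init path: execute_with_initialized_rng(&example_rng_nb); */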
     165             : 
     166             : #define warn_unseeded_randomness() \
     167             :         if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
     168             :                 printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
     169             :                                 __func__, (void *)_RET_IP_, crng_init)
     170             : 
     171             : 
     172             : /*********************************************************************
     173             :  *
     174             :  * Fast key erasure RNG, the "crng".
     175             :  *
     176             :  * These functions expand entropy from the entropy extractor into
     177             :  * long streams for external consumption using the "fast key erasure"
     178             :  * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
     179             :  *
     180             :  * There are a few exported interfaces for use by other drivers:
     181             :  *
     182             :  *      void get_random_bytes(void *buf, size_t len)
     183             :  *      u8 get_random_u8()
     184             :  *      u16 get_random_u16()
     185             :  *      u32 get_random_u32()
     186             :  *      u32 get_random_u32_below(u32 ceil)
     187             :  *      u32 get_random_u32_above(u32 floor)
     188             :  *      u32 get_random_u32_inclusive(u32 floor, u32 ceil)
     189             :  *      u64 get_random_u64()
     190             :  *      unsigned long get_random_long()
     191             :  *
     192             :  * These interfaces will return the requested number of random bytes
     193             :  * into the given buffer or as a return value. This is equivalent to
     194             :  * a read from /dev/urandom. The u8, u16, u32, u64, long family of
     195             :  * functions may be higher performance for one-off random integers,
     196             :  * because they do a bit of buffering and do not invoke reseeding
     197             :  * until the buffer is emptied.
     198             :  *
     199             :  *********************************************************************/
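/*
 * Usage sketch (hypothetical callers, not part of random.c): the bounded
 * integer helpers listed above. get_random_u32_below(ceil) returns values
 * in [0, ceil), get_random_u32_above(floor) in (floor, U32_MAX], and
 * get_random_u32_inclusive(floor, ceil) in [floor, ceil].
 */
static void example_bounded_integers(void)
{
        u32 die  = get_random_u32_inclusive(1, 6);  /* fair six-sided die */
        u32 slot = get_random_u32_below(128);       /* array index in [0, 128) */

        (void)die;
        (void)slot;
}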
     200             : 
     201             : enum {
     202             :         CRNG_RESEED_START_INTERVAL = HZ,
     203             :         CRNG_RESEED_INTERVAL = 60 * HZ
     204             : };
     205             : 
     206             : static struct {
     207             :         u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
     208             :         unsigned long generation;
     209             :         spinlock_t lock;
     210             : } base_crng = {
     211             :         .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
     212             : };
     213             : 
     214             : struct crng {
     215             :         u8 key[CHACHA_KEY_SIZE];
     216             :         unsigned long generation;
     217             :         local_lock_t lock;
     218             : };
     219             : 
     220             : static DEFINE_PER_CPU(struct crng, crngs) = {
     221             :         .generation = ULONG_MAX,
     222             :         .lock = INIT_LOCAL_LOCK(crngs.lock),
     223             : };
     224             : 
     225             : /*
     226             :  * Return the interval until the next reseeding, which is normally
      227             :  * CRNG_RESEED_INTERVAL, but which during early boot is an interval
      228             :  * proportional to the uptime.
     229             :  */
     230          10 : static unsigned int crng_reseed_interval(void)
     231             : {
     232             :         static bool early_boot = true;
     233             : 
     234          10 :         if (unlikely(READ_ONCE(early_boot))) {
     235          10 :                 time64_t uptime = ktime_get_seconds();
     236          10 :                 if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
     237           0 :                         WRITE_ONCE(early_boot, false);
     238             :                 else
     239          10 :                         return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
     240             :                                      (unsigned int)uptime / 2 * HZ);
     241             :         }
     242             :         return CRNG_RESEED_INTERVAL;
     243             : }
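/*
 * Worked example of the early-boot schedule above (illustrative): at 10
 * seconds of uptime the interval is uptime / 2 * HZ = 5 seconds; at 1 second
 * of uptime the max_t() clamp holds it at CRNG_RESEED_START_INTERVAL
 * (1 second); once uptime reaches (CRNG_RESEED_INTERVAL / HZ) * 2 = 120
 * seconds, the early boot phase ends and the interval settles at 60 seconds.
 */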
     244             : 
     245             : /* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
     246             : static void extract_entropy(void *buf, size_t len);
     247             : 
     248             : /* This extracts a new crng key from the input pool. */
     249          12 : static void crng_reseed(struct work_struct *work)
     250             : {
     251             :         static DECLARE_DELAYED_WORK(next_reseed, crng_reseed);
     252             :         unsigned long flags;
     253             :         unsigned long next_gen;
     254             :         u8 key[CHACHA_KEY_SIZE];
     255             : 
     256             :         /* Immediately schedule the next reseeding, so that it fires sooner rather than later. */
     257          12 :         if (likely(system_unbound_wq))
     258          10 :                 queue_delayed_work(system_unbound_wq, &next_reseed, crng_reseed_interval());
     259             : 
     260          12 :         extract_entropy(key, sizeof(key));
     261             : 
     262             :         /*
     263             :          * We copy the new key into the base_crng, overwriting the old one,
     264             :          * and update the generation counter. We avoid hitting ULONG_MAX,
     265             :          * because the per-cpu crngs are initialized to ULONG_MAX, so this
     266             :          * forces new CPUs that come online to always initialize.
     267             :          */
     268          12 :         spin_lock_irqsave(&base_crng.lock, flags);
     269          12 :         memcpy(base_crng.key, key, sizeof(base_crng.key));
     270          12 :         next_gen = base_crng.generation + 1;
     271          12 :         if (next_gen == ULONG_MAX)
     272           0 :                 ++next_gen;
     273          12 :         WRITE_ONCE(base_crng.generation, next_gen);
     274          12 :         if (!static_branch_likely(&crng_is_ready))
     275           2 :                 crng_init = CRNG_READY;
     276          12 :         spin_unlock_irqrestore(&base_crng.lock, flags);
     277          12 :         memzero_explicit(key, sizeof(key));
     278          12 : }
     279             : 
     280             : /*
     281             :  * This generates a ChaCha block using the provided key, and then
     282             :  * immediately overwrites that key with half the block. It returns
     283             :  * the resultant ChaCha state to the user, along with the second
     284             :  * half of the block containing 32 bytes of random data that may
     285             :  * be used; random_data_len may not be greater than 32.
     286             :  *
     287             :  * The returned ChaCha state contains within it a copy of the old
     288             :  * key value, at index 4, so the state should always be zeroed out
     289             :  * immediately after using in order to maintain forward secrecy.
      290             :  * immediately after use in order to maintain forward secrecy.
     291             :  * safer to set the random_data parameter to &chacha_state[4] so
     292             :  * that this function overwrites it before returning.
     293             :  */
     294          69 : static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
     295             :                                   u32 chacha_state[CHACHA_STATE_WORDS],
     296             :                                   u8 *random_data, size_t random_data_len)
     297             : {
     298             :         u8 first_block[CHACHA_BLOCK_SIZE];
     299             : 
     300          69 :         BUG_ON(random_data_len > 32);
     301             : 
     302          69 :         chacha_init_consts(chacha_state);
     303          69 :         memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
     304          69 :         memset(&chacha_state[12], 0, sizeof(u32) * 4);
     305          69 :         chacha20_block(chacha_state, first_block);
     306             : 
     307          69 :         memcpy(key, first_block, CHACHA_KEY_SIZE);
     308          69 :         memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
     309          69 :         memzero_explicit(first_block, sizeof(first_block));
     310          69 : }
     311             : 
     312             : /*
     313             :  * This function returns a ChaCha state that you may use for generating
     314             :  * random data. It also returns up to 32 bytes on its own of random data
     315             :  * that may be used; random_data_len may not be greater than 32.
     316             :  */
     317          68 : static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
     318             :                             u8 *random_data, size_t random_data_len)
     319             : {
     320             :         unsigned long flags;
     321             :         struct crng *crng;
     322             : 
     323          68 :         BUG_ON(random_data_len > 32);
     324             : 
     325             :         /*
     326             :          * For the fast path, we check whether we're ready, unlocked first, and
     327             :          * then re-check once locked later. In the case where we're really not
     328             :          * ready, we do fast key erasure with the base_crng directly, extracting
     329             :          * when crng_init is CRNG_EMPTY.
     330             :          */
     331          68 :         if (!crng_ready()) {
     332             :                 bool ready;
     333             : 
     334           0 :                 spin_lock_irqsave(&base_crng.lock, flags);
     335           0 :                 ready = crng_ready();
     336           0 :                 if (!ready) {
     337           0 :                         if (crng_init == CRNG_EMPTY)
     338           0 :                                 extract_entropy(base_crng.key, sizeof(base_crng.key));
     339           0 :                         crng_fast_key_erasure(base_crng.key, chacha_state,
     340             :                                               random_data, random_data_len);
     341             :                 }
     342           0 :                 spin_unlock_irqrestore(&base_crng.lock, flags);
     343           0 :                 if (!ready)
     344             :                         return;
     345             :         }
     346             : 
     347          68 :         local_lock_irqsave(&crngs.lock, flags);
     348          68 :         crng = raw_cpu_ptr(&crngs);
     349             : 
     350             :         /*
     351             :          * If our per-cpu crng is older than the base_crng, then it means
     352             :          * somebody reseeded the base_crng. In that case, we do fast key
     353             :          * erasure on the base_crng, and use its output as the new key
     354             :          * for our per-cpu crng. This brings us up to date with base_crng.
     355             :          */
     356          68 :         if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
     357           1 :                 spin_lock(&base_crng.lock);
     358           1 :                 crng_fast_key_erasure(base_crng.key, chacha_state,
     359             :                                       crng->key, sizeof(crng->key));
     360           1 :                 crng->generation = base_crng.generation;
     361             :                 spin_unlock(&base_crng.lock);
     362             :         }
     363             : 
     364             :         /*
     365             :          * Finally, when we've made it this far, our per-cpu crng has an up
     366             :          * to date key, and we can do fast key erasure with it to produce
     367             :          * some random data and a ChaCha state for the caller. All other
     368             :          * branches of this function are "unlikely", so most of the time we
     369             :          * should wind up here immediately.
     370             :          */
     371          68 :         crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
     372          68 :         local_unlock_irqrestore(&crngs.lock, flags);
     373             : }
     374             : 
     375          68 : static void _get_random_bytes(void *buf, size_t len)
     376             : {
     377             :         u32 chacha_state[CHACHA_STATE_WORDS];
     378             :         u8 tmp[CHACHA_BLOCK_SIZE];
     379             :         size_t first_block_len;
     380             : 
     381          68 :         if (!len)
     382           0 :                 return;
     383             : 
     384          68 :         first_block_len = min_t(size_t, 32, len);
     385          68 :         crng_make_state(chacha_state, buf, first_block_len);
     386          68 :         len -= first_block_len;
     387          68 :         buf += first_block_len;
     388             : 
     389         202 :         while (len) {
     390         131 :                 if (len < CHACHA_BLOCK_SIZE) {
     391          65 :                         chacha20_block(chacha_state, tmp);
     392          65 :                         memcpy(buf, tmp, len);
     393             :                         memzero_explicit(tmp, sizeof(tmp));
     394             :                         break;
     395             :                 }
     396             : 
     397          66 :                 chacha20_block(chacha_state, buf);
     398          66 :                 if (unlikely(chacha_state[12] == 0))
     399           0 :                         ++chacha_state[13];
     400          66 :                 len -= CHACHA_BLOCK_SIZE;
     401          66 :                 buf += CHACHA_BLOCK_SIZE;
     402             :         }
     403             : 
     404          68 :         memzero_explicit(chacha_state, sizeof(chacha_state));
     405             : }
     406             : 
     407             : /*
     408             :  * This returns random bytes in arbitrary quantities. The quality of the
      409             :  * random bytes is as good as that of /dev/urandom. In order to ensure that
      410             :  * the randomness provided by this function is okay, the function
     411             :  * wait_for_random_bytes() should be called and return 0 at least once
     412             :  * at any point prior.
     413             :  */
     414          67 : void get_random_bytes(void *buf, size_t len)
     415             : {
     416             :         warn_unseeded_randomness();
     417          67 :         _get_random_bytes(buf, len);
     418          67 : }
     419             : EXPORT_SYMBOL(get_random_bytes);
     420             : 
     421           0 : static ssize_t get_random_bytes_user(struct iov_iter *iter)
     422             : {
     423             :         u32 chacha_state[CHACHA_STATE_WORDS];
     424             :         u8 block[CHACHA_BLOCK_SIZE];
     425           0 :         size_t ret = 0, copied;
     426             : 
     427           0 :         if (unlikely(!iov_iter_count(iter)))
     428             :                 return 0;
     429             : 
     430             :         /*
     431             :          * Immediately overwrite the ChaCha key at index 4 with random
     432             :          * bytes, in case userspace causes copy_to_iter() below to sleep
     433             :          * forever, so that we still retain forward secrecy in that case.
     434             :          */
     435           0 :         crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
     436             :         /*
     437             :          * However, if we're doing a read of len <= 32, we don't need to
     438             :          * use chacha_state after, so we can simply return those bytes to
     439             :          * the user directly.
     440             :          */
     441           0 :         if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
     442           0 :                 ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
     443           0 :                 goto out_zero_chacha;
     444             :         }
     445             : 
     446             :         for (;;) {
     447           0 :                 chacha20_block(chacha_state, block);
     448           0 :                 if (unlikely(chacha_state[12] == 0))
     449           0 :                         ++chacha_state[13];
     450             : 
     451           0 :                 copied = copy_to_iter(block, sizeof(block), iter);
     452           0 :                 ret += copied;
     453           0 :                 if (!iov_iter_count(iter) || copied != sizeof(block))
     454             :                         break;
     455             : 
     456             :                 BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
     457           0 :                 if (ret % PAGE_SIZE == 0) {
     458           0 :                         if (signal_pending(current))
     459             :                                 break;
     460           0 :                         cond_resched();
     461             :                 }
     462             :         }
     463             : 
     464             :         memzero_explicit(block, sizeof(block));
     465             : out_zero_chacha:
     466           0 :         memzero_explicit(chacha_state, sizeof(chacha_state));
     467           0 :         return ret ? ret : -EFAULT;
     468             : }
     469             : 
     470             : /*
      471             :  * Batched entropy returns random integers. The quality of these random
      472             :  * numbers is as good as that of /dev/urandom. To ensure that the randomness
      473             :  * provided by this function is okay, the function wait_for_random_bytes()
      474             :  * should be called and return 0 at least once at any point prior.
     475             :  */
     476             : 
     477             : #define DEFINE_BATCHED_ENTROPY(type)                                            \
     478             : struct batch_ ##type {                                                          \
     479             :         /*                                                                      \
     480             :          * We make this 1.5x a ChaCha block, so that we get the                 \
     481             :          * remaining 32 bytes from fast key erasure, plus one full              \
     482             :          * block from the detached ChaCha state. We can increase                \
     483             :          * the size of this later if needed so long as we keep the              \
     484             :          * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.               \
     485             :          */                                                                     \
     486             :         type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))];               \
     487             :         local_lock_t lock;                                                      \
     488             :         unsigned long generation;                                               \
     489             :         unsigned int position;                                                  \
     490             : };                                                                              \
     491             :                                                                                 \
     492             : static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = {        \
     493             :         .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock),                  \
     494             :         .position = UINT_MAX                                                    \
     495             : };                                                                              \
     496             :                                                                                 \
     497             : type get_random_ ##type(void)                                                   \
     498             : {                                                                               \
     499             :         type ret;                                                               \
     500             :         unsigned long flags;                                                    \
     501             :         struct batch_ ##type *batch;                                            \
     502             :         unsigned long next_gen;                                                 \
     503             :                                                                                 \
     504             :         warn_unseeded_randomness();                                             \
     505             :                                                                                 \
     506             :         if  (!crng_ready()) {                                                   \
     507             :                 _get_random_bytes(&ret, sizeof(ret));                               \
     508             :                 return ret;                                                     \
     509             :         }                                                                       \
     510             :                                                                                 \
     511             :         local_lock_irqsave(&batched_entropy_ ##type.lock, flags);           \
     512             :         batch = raw_cpu_ptr(&batched_entropy_##type);                               \
     513             :                                                                                 \
     514             :         next_gen = READ_ONCE(base_crng.generation);                             \
     515             :         if (batch->position >= ARRAY_SIZE(batch->entropy) ||                   \
     516             :             next_gen != batch->generation) {                                 \
     517             :                 _get_random_bytes(batch->entropy, sizeof(batch->entropy));        \
     518             :                 batch->position = 0;                                         \
     519             :                 batch->generation = next_gen;                                        \
     520             :         }                                                                       \
     521             :                                                                                 \
     522             :         ret = batch->entropy[batch->position];                                    \
     523             :         batch->entropy[batch->position] = 0;                                      \
     524             :         ++batch->position;                                                   \
     525             :         local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags);              \
     526             :         return ret;                                                             \
     527             : }                                                                               \
     528             : EXPORT_SYMBOL(get_random_ ##type);
     529             : 
     530           0 : DEFINE_BATCHED_ENTROPY(u8)
     531           0 : DEFINE_BATCHED_ENTROPY(u16)
     532          12 : DEFINE_BATCHED_ENTROPY(u32)
     533           0 : DEFINE_BATCHED_ENTROPY(u64)
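/*
 * Worked sizing of the batches above (illustrative): CHACHA_BLOCK_SIZE is 64
 * bytes, so each per-cpu batch holds 64 * 3 / 2 = 96 bytes of entropy, i.e.
 * 96 u8s, 48 u16s, 24 u32s, or 12 u64s per refill.
 */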
     534             : 
     535           0 : u32 __get_random_u32_below(u32 ceil)
     536             : {
     537             :         /*
     538             :          * This is the slow path for variable ceil. It is still fast, most of
     539             :          * the time, by doing traditional reciprocal multiplication and
     540             :          * opportunistically comparing the lower half to ceil itself, before
     541             :          * falling back to computing a larger bound, and then rejecting samples
     542             :          * whose lower half would indicate a range indivisible by ceil. The use
     543             :          * of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable
     544             :          * in 32-bits.
     545             :          */
     546           0 :         u32 rand = get_random_u32();
     547             :         u64 mult;
     548             : 
     549             :         /*
     550             :          * This function is technically undefined for ceil == 0, and in fact
     551             :          * for the non-underscored constant version in the header, we build bug
     552             :          * on that. But for the non-constant case, it's convenient to have that
     553             :          * evaluate to being a straight call to get_random_u32(), so that
     554             :          * get_random_u32_inclusive() can work over its whole range without
     555             :          * undefined behavior.
     556             :          */
     557           0 :         if (unlikely(!ceil))
     558             :                 return rand;
     559             : 
     560           0 :         mult = (u64)ceil * rand;
     561           0 :         if (unlikely((u32)mult < ceil)) {
     562           0 :                 u32 bound = -ceil % ceil;
     563           0 :                 while (unlikely((u32)mult < bound))
     564           0 :                         mult = (u64)ceil * get_random_u32();
     565             :         }
     566           0 :         return mult >> 32;
     567             : }
     568             : EXPORT_SYMBOL(__get_random_u32_below);
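/*
 * Worked example of the rejection bound above (illustrative): for ceil == 6,
 * -ceil % ceil evaluates in u32 arithmetic to (2^32 - 6) % 6 = 4, which
 * equals 2^32 % 6. A sample is redrawn only when the low half of mult falls
 * below 4, which happens with probability 4 / 2^32; every surviving sample
 * makes mult >> 32 uniform over [0, 6).
 */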
     569             : 
     570             : #ifdef CONFIG_SMP
     571             : /*
     572             :  * This function is called when the CPU is coming up, with entry
     573             :  * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
     574             :  */
     575             : int __cold random_prepare_cpu(unsigned int cpu)
     576             : {
     577             :         /*
     578             :          * When the cpu comes back online, immediately invalidate both
     579             :          * the per-cpu crng and all batches, so that we serve fresh
     580             :          * randomness.
     581             :          */
     582             :         per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
     583             :         per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX;
     584             :         per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX;
     585             :         per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
     586             :         per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
     587             :         return 0;
     588             : }
     589             : #endif
     590             : 
     591             : 
     592             : /**********************************************************************
     593             :  *
     594             :  * Entropy accumulation and extraction routines.
     595             :  *
     596             :  * Callers may add entropy via:
     597             :  *
     598             :  *     static void mix_pool_bytes(const void *buf, size_t len)
     599             :  *
     600             :  * After which, if added entropy should be credited:
     601             :  *
     602             :  *     static void credit_init_bits(size_t bits)
     603             :  *
     604             :  * Finally, extract entropy via:
     605             :  *
     606             :  *     static void extract_entropy(void *buf, size_t len)
     607             :  *
     608             :  **********************************************************************/
     609             : 
     610             : enum {
     611             :         POOL_BITS = BLAKE2S_HASH_SIZE * 8,
     612             :         POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
     613             :         POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
     614             : };
     615             : 
     616             : static struct {
     617             :         struct blake2s_state hash;
     618             :         spinlock_t lock;
     619             :         unsigned int init_bits;
     620             : } input_pool = {
     621             :         .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
     622             :                     BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
     623             :                     BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
     624             :         .hash.outlen = BLAKE2S_HASH_SIZE,
     625             :         .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
     626             : };
     627             : 
     628             : static void _mix_pool_bytes(const void *buf, size_t len)
     629             : {
     630        1891 :         blake2s_update(&input_pool.hash, buf, len);
     631             : }
     632             : 
     633             : /*
     634             :  * This function adds bytes into the input pool. It does not
     635             :  * update the initialization bit counter; the caller should call
     636             :  * credit_init_bits if this is appropriate.
     637             :  */
     638          30 : static void mix_pool_bytes(const void *buf, size_t len)
     639             : {
     640             :         unsigned long flags;
     641             : 
     642          30 :         spin_lock_irqsave(&input_pool.lock, flags);
     643          30 :         _mix_pool_bytes(buf, len);
     644          30 :         spin_unlock_irqrestore(&input_pool.lock, flags);
     645          30 : }
     646             : 
     647             : /*
     648             :  * This is an HKDF-like construction for using the hashed collected entropy
     649             :  * as a PRF key, that's then expanded block-by-block.
     650             :  */
     651          12 : static void extract_entropy(void *buf, size_t len)
     652             : {
     653             :         unsigned long flags;
     654             :         u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
     655             :         struct {
     656             :                 unsigned long rdseed[32 / sizeof(long)];
     657             :                 size_t counter;
     658             :         } block;
     659             :         size_t i, longs;
     660             : 
     661          36 :         for (i = 0; i < ARRAY_SIZE(block.rdseed);) {
     662          12 :                 longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
     663             :                 if (longs) {
     664             :                         i += longs;
     665             :                         continue;
     666             :                 }
     667          24 :                 longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
     668          12 :                 if (longs) {
     669          12 :                         i += longs;
     670          12 :                         continue;
     671             :                 }
     672           0 :                 block.rdseed[i++] = random_get_entropy();
     673             :         }
     674             : 
     675          12 :         spin_lock_irqsave(&input_pool.lock, flags);
     676             : 
     677             :         /* seed = HASHPRF(last_key, entropy_input) */
     678          12 :         blake2s_final(&input_pool.hash, seed);
     679             : 
     680             :         /* next_key = HASHPRF(seed, RDSEED || 0) */
     681          12 :         block.counter = 0;
     682          12 :         blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
     683          12 :         blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
     684             : 
     685          12 :         spin_unlock_irqrestore(&input_pool.lock, flags);
     686             :         memzero_explicit(next_key, sizeof(next_key));
     687             : 
     688          24 :         while (len) {
     689          12 :                 i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
     690             :                 /* output = HASHPRF(seed, RDSEED || ++counter) */
     691          12 :                 ++block.counter;
     692          12 :                 blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
     693          12 :                 len -= i;
     694          12 :                 buf += i;
     695             :         }
     696             : 
     697          12 :         memzero_explicit(seed, sizeof(seed));
     698          12 :         memzero_explicit(&block, sizeof(block));
     699          12 : }
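/*
 * Schematically, the construction above (H = keyed BLAKE2s, per the inline
 * comments in extract_entropy()):
 *
 *      seed     = H(last_key, entropy_input)
 *      next_key = H_seed(RDSEED || 0)           re-keys the input pool
 *      output_n = H_seed(RDSEED || n), n >= 1   fills the caller's buffer
 *
 * Since BLAKE2S_HASH_SIZE is 32 bytes, the CHACHA_KEY_SIZE (32-byte) seed
 * requested by crng_reseed() is produced in a single expansion block.
 */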
     700             : 
     701             : #define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
     702             : 
     703           1 : static void __cold _credit_init_bits(size_t bits)
     704             : {
     705             :         static struct execute_work set_ready;
     706             :         unsigned int new, orig, add;
     707             :         unsigned long flags;
     708             : 
     709           1 :         if (!bits)
     710             :                 return;
     711             : 
     712           1 :         add = min_t(size_t, bits, POOL_BITS);
     713             : 
     714           1 :         orig = READ_ONCE(input_pool.init_bits);
     715             :         do {
     716           1 :                 new = min_t(unsigned int, POOL_BITS, orig + add);
     717           3 :         } while (!try_cmpxchg(&input_pool.init_bits, &orig, new));
     718             : 
     719           1 :         if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
     720           1 :                 crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
     721           1 :                 if (static_key_initialized)
     722           0 :                         execute_in_process_context(crng_set_ready, &set_ready);
     723           1 :                 atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
     724           1 :                 wake_up_interruptible(&crng_init_wait);
     725           1 :                 kill_fasync(&fasync, SIGIO, POLL_IN);
     726           1 :                 pr_notice("crng init done\n");
     727           1 :                 if (urandom_warning.missed)
     728           0 :                         pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
     729             :                                   urandom_warning.missed);
     730           0 :         } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
     731           0 :                 spin_lock_irqsave(&base_crng.lock, flags);
     732             :                 /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
     733           0 :                 if (crng_init == CRNG_EMPTY) {
     734           0 :                         extract_entropy(base_crng.key, sizeof(base_crng.key));
     735           0 :                         crng_init = CRNG_EARLY;
     736             :                 }
     737             :                 spin_unlock_irqrestore(&base_crng.lock, flags);
     738             :         }
     739             : }
     740             : 
     741             : 
     742             : /**********************************************************************
     743             :  *
     744             :  * Entropy collection routines.
     745             :  *
     746             :  * The following exported functions are used for pushing entropy into
     747             :  * the above entropy accumulation routines:
     748             :  *
     749             :  *      void add_device_randomness(const void *buf, size_t len);
     750             :  *      void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);
     751             :  *      void add_bootloader_randomness(const void *buf, size_t len);
     752             :  *      void add_vmfork_randomness(const void *unique_vm_id, size_t len);
     753             :  *      void add_interrupt_randomness(int irq);
     754             :  *      void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
     755             :  *      void add_disk_randomness(struct gendisk *disk);
     756             :  *
     757             :  * add_device_randomness() adds data to the input pool that
     758             :  * is likely to differ between two devices (or possibly even per boot).
     759             :  * This would be things like MAC addresses or serial numbers, or the
     760             :  * read-out of the RTC. This does *not* credit any actual entropy to
     761             :  * the pool, but it initializes the pool to different values for devices
     762             :  * that might otherwise be identical and have very little entropy
     763             :  * available to them (particularly common in the embedded world).
     764             :  *
     765             :  * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
     766             :  * entropy as specified by the caller. If the entropy pool is full it will
     767             :  * block until more entropy is needed.
     768             :  *
     769             :  * add_bootloader_randomness() is called by bootloader drivers, such as EFI
     770             :  * and device tree, and credits its input depending on whether or not the
      771             :  * command line option 'random.trust_bootloader' is set.
     772             :  *
     773             :  * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
     774             :  * representing the current instance of a VM to the pool, without crediting,
     775             :  * and then force-reseeds the crng so that it takes effect immediately.
     776             :  *
     777             :  * add_interrupt_randomness() uses the interrupt timing as random
     778             :  * inputs to the entropy pool. Using the cycle counters and the irq source
     779             :  * as inputs, it feeds the input pool roughly once a second or after 64
     780             :  * interrupts, crediting 1 bit of entropy for whichever comes first.
     781             :  *
     782             :  * add_input_randomness() uses the input layer interrupt timing, as well
     783             :  * as the event type information from the hardware.
     784             :  *
     785             :  * add_disk_randomness() uses what amounts to the seek time of block
     786             :  * layer request events, on a per-disk_devt basis, as input to the
     787             :  * entropy pool. Note that high-speed solid state drives with very low
     788             :  * seek times do not make for good sources of entropy, as their seek
     789             :  * times are usually fairly consistent.
     790             :  *
     791             :  * The last two routines try to estimate how many bits of entropy
     792             :  * to credit. They do this by keeping track of the first and second
     793             :  * order deltas of the event timings.
     794             :  *
     795             :  **********************************************************************/
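/*
 * Usage sketch (hypothetical driver probe, not part of random.c): mix a
 * board serial number into the pool. This personalizes the pool across
 * otherwise-identical devices but credits no entropy.
 */
static void example_probe(const char *serial)
{
        add_device_randomness(serial, strlen(serial));
}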
     796             : 
     797             : static bool trust_cpu __initdata = true;
     798             : static bool trust_bootloader __initdata = true;
     799           0 : static int __init parse_trust_cpu(char *arg)
     800             : {
     801           0 :         return kstrtobool(arg, &trust_cpu);
     802             : }
     803           0 : static int __init parse_trust_bootloader(char *arg)
     804             : {
     805           0 :         return kstrtobool(arg, &trust_bootloader);
     806             : }
     807             : early_param("random.trust_cpu", parse_trust_cpu);
     808             : early_param("random.trust_bootloader", parse_trust_bootloader);
     809             : 
     810           0 : static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
     811             : {
     812           0 :         unsigned long flags, entropy = random_get_entropy();
     813             : 
     814             :         /*
     815             :          * Encode a representation of how long the system has been suspended,
     816             :          * in a way that is distinct from prior system suspends.
     817             :          */
     818           0 :         ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() };
     819             : 
     820           0 :         spin_lock_irqsave(&input_pool.lock, flags);
     821           0 :         _mix_pool_bytes(&action, sizeof(action));
     822           0 :         _mix_pool_bytes(stamps, sizeof(stamps));
     823           0 :         _mix_pool_bytes(&entropy, sizeof(entropy));
     824           0 :         spin_unlock_irqrestore(&input_pool.lock, flags);
     825             : 
     826           0 :         if (crng_ready() && (action == PM_RESTORE_PREPARE ||
     827             :             (action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) &&
     828             :              !IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) {
     829           0 :                 crng_reseed(NULL);
     830           0 :                 pr_notice("crng reseeded on system resumption\n");
     831             :         }
     832           0 :         return 0;
     833             : }
     834             : 
     835             : static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };
     836             : 
     837             : /*
     838             :  * This is called extremely early, before time keeping functionality is
     839             :  * available, but arch randomness is. Interrupts are not yet enabled.
     840             :  */
     841           1 : void __init random_init_early(const char *command_line)
     842             : {
     843             :         unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];
     844             :         size_t i, longs, arch_bits;
     845             : 
     846             : #if defined(LATENT_ENTROPY_PLUGIN)
     847             :         static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
     848             :         _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
     849             : #endif
     850             : 
     851           3 :         for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
     852           1 :                 longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
     853             :                 if (longs) {
     854             :                         _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
     855             :                         i += longs;
     856             :                         continue;
     857             :                 }
     858           2 :                 longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
     859           1 :                 if (longs) {
     860           2 :                         _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
     861           1 :                         i += longs;
     862           1 :                         continue;
     863             :                 }
     864           0 :                 arch_bits -= sizeof(*entropy) * 8;
     865           0 :                 ++i;
     866             :         }
     867             : 
     868           2 :         _mix_pool_bytes(init_utsname(), sizeof(*(init_utsname())));
     869           2 :         _mix_pool_bytes(command_line, strlen(command_line));
     870             : 
     871             :         /* Reseed if already seeded by earlier phases. */
     872           1 :         if (crng_ready())
     873           1 :                 crng_reseed(NULL);
     874           0 :         else if (trust_cpu)
     875           0 :                 _credit_init_bits(arch_bits);
     876           1 : }
     877             : 
     878             : /*
     879             :  * This is called a little bit after the prior function, and now there is
      880             :  * access to timestamp counters. Interrupts are not yet enabled.
     881             :  */
     882           1 : void __init random_init(void)
     883             : {
     884           1 :         unsigned long entropy = random_get_entropy();
     885           1 :         ktime_t now = ktime_get_real();
     886             : 
     887           1 :         _mix_pool_bytes(&now, sizeof(now));
     888           1 :         _mix_pool_bytes(&entropy, sizeof(entropy));
     889           1 :         add_latent_entropy();
     890             : 
     891             :         /*
     892             :          * If we were initialized by the cpu or bootloader before jump labels
     893             :          * are initialized, then we should enable the static branch here, where
     894             :          * it's guaranteed that jump labels have been initialized.
     895             :          */
     896           1 :         if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
     897             :                 crng_set_ready(NULL);
     898             : 
     899             :         /* Reseed if already seeded by earlier phases. */
     900           1 :         if (crng_ready())
     901           1 :                 crng_reseed(NULL);
     902             : 
     903           1 :         WARN_ON(register_pm_notifier(&pm_notifier));
     904             : 
     905           1 :         WARN(!entropy, "Missing cycle counter and fallback timer; RNG "
     906             :                        "entropy collection will consequently suffer.");
     907           1 : }
     908             : 
     909             : /*
     910             :  * Add device- or boot-specific data to the input pool to help
     911             :  * initialize it.
     912             :  *
     913             :  * None of this adds any entropy; it is meant to avoid the problem of
     914             :  * the entropy pool having similar initial state across largely
     915             :  * identical devices.
     916             :  */
     917         928 : void add_device_randomness(const void *buf, size_t len)
     918             : {
     919         928 :         unsigned long entropy = random_get_entropy();
     920             :         unsigned long flags;
     921             : 
     922         928 :         spin_lock_irqsave(&input_pool.lock, flags);
     923         928 :         _mix_pool_bytes(&entropy, sizeof(entropy));
     924         928 :         _mix_pool_bytes(buf, len);
     925         928 :         spin_unlock_irqrestore(&input_pool.lock, flags);
     926         928 : }
     927             : EXPORT_SYMBOL(add_device_randomness);
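/*
 * A minimal sketch of a typical caller, assuming a hypothetical driver
 * with a per-device serial number; the networking core makes a similar
 * call with MAC addresses. Since nothing is credited, feeding untrusted
 * or attacker-visible data here is harmless.
 */
static void example_probe(const u8 *serial, size_t serial_len)
{
        /* Perturbs the input pool state; never credits entropy. */
        add_device_randomness(serial, serial_len);
}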
     928             : 
     929             : /*
     930             :  * Interface for in-kernel drivers of true hardware RNGs. Those devices
     931             :  * may produce endless random bits, so this function will sleep for
      932             :  * some amount of time afterwards, if the sleep_after parameter is true.
     933             :  */
     934           0 : void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after)
     935             : {
     936           0 :         mix_pool_bytes(buf, len);
     937           0 :         credit_init_bits(entropy);
     938             : 
     939             :         /*
     940             :          * Throttle writing to once every reseed interval, unless we're not yet
     941             :          * initialized or no entropy is credited.
     942             :          */
     943           0 :         if (sleep_after && !kthread_should_stop() && (crng_ready() || !entropy))
     944           0 :                 schedule_timeout_interruptible(crng_reseed_interval());
     945           0 : }
     946             : EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
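/*
 * A hedged sketch of a hardware-RNG feeder thread; read_hw_bytes() is a
 * hypothetical device read, and crediting 8 bits per byte is appropriate
 * only if the hardware's entropy claims justify it.
 */
static int example_hwrng_thread(void *unused)
{
        u8 buf[32];

        while (!kthread_should_stop()) {
                read_hw_bytes(buf, sizeof(buf));        /* hypothetical */
                /* With sleep_after=true, throttled to one reseed interval per pass. */
                add_hwgenerator_randomness(buf, sizeof(buf), sizeof(buf) * 8, true);
        }
        return 0;
}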
     947             : 
     948             : /*
      949             :  * Handle a random seed passed by the bootloader, and credit it depending
     950             :  * on the command line option 'random.trust_bootloader'.
     951             :  */
     952           1 : void __init add_bootloader_randomness(const void *buf, size_t len)
     953             : {
     954           1 :         mix_pool_bytes(buf, len);
     955           1 :         if (trust_bootloader)
     956           1 :                 credit_init_bits(len * 8);
     957           1 : }
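/*
 * E.g. a 32-byte seed handed over by EFI with random.trust_bootloader=1
 * credits 32 * 8 = 256 bits, which by itself already meets the 256-bit
 * POOL_READY_BITS threshold defined earlier in this file.
 */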
     958             : 
     959             : #if IS_ENABLED(CONFIG_VMGENID)
     960             : static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
     961             : 
     962             : /*
      963             :  * Handle a new unique VM ID, which is unique but not secret, so we
     964             :  * don't credit it, but we do immediately force a reseed after so
     965             :  * that it's used by the crng posthaste.
     966             :  */
     967             : void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
     968             : {
     969             :         add_device_randomness(unique_vm_id, len);
     970             :         if (crng_ready()) {
     971             :                 crng_reseed(NULL);
     972             :                 pr_notice("crng reseeded due to virtual machine fork\n");
     973             :         }
     974             :         blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
     975             : }
     976             : #if IS_MODULE(CONFIG_VMGENID)
     977             : EXPORT_SYMBOL_GPL(add_vmfork_randomness);
     978             : #endif
     979             : 
     980             : int __cold register_random_vmfork_notifier(struct notifier_block *nb)
     981             : {
     982             :         return blocking_notifier_chain_register(&vmfork_chain, nb);
     983             : }
     984             : EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
     985             : 
     986             : int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
     987             : {
     988             :         return blocking_notifier_chain_unregister(&vmfork_chain, nb);
     989             : }
     990             : EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
     991             : #endif
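/*
 * A hedged sketch of a vmfork consumer, e.g. a driver caching session
 * keys that must be discarded when a VM image is cloned; all names here
 * are hypothetical.
 */
static int example_vmfork_notify(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        /* Runs from the blocking chain above, after the crng has reseeded. */
        return NOTIFY_DONE;
}

static struct notifier_block example_vmfork_nb = {
        .notifier_call = example_vmfork_notify,
};
/* Registered with register_random_vmfork_notifier(&example_vmfork_nb). */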
     992             : 
     993             : struct fast_pool {
     994             :         unsigned long pool[4];
     995             :         unsigned long last;
     996             :         unsigned int count;
     997             :         struct timer_list mix;
     998             : };
     999             : 
    1000             : static void mix_interrupt_randomness(struct timer_list *work);
    1001             : 
    1002             : static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
    1003             : #ifdef CONFIG_64BIT
    1004             : #define FASTMIX_PERM SIPHASH_PERMUTATION
    1005             :         .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
    1006             : #else
    1007             : #define FASTMIX_PERM HSIPHASH_PERMUTATION
    1008             :         .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
    1009             : #endif
    1010             :         .mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
    1011             : };
    1012             : 
    1013             : /*
    1014             :  * This is [Half]SipHash-1-x, starting from an empty key. Because
    1015             :  * the key is fixed, it assumes that its inputs are non-malicious,
    1016             :  * and therefore this has no security on its own. s represents the
    1017             :  * four-word SipHash state, while v represents a two-word input.
    1018             :  */
    1019        2943 : static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
    1020             : {
    1021        2943 :         s[3] ^= v1;
    1022       20601 :         FASTMIX_PERM(s[0], s[1], s[2], s[3]);
    1023        2943 :         s[0] ^= v1;
    1024        2943 :         s[3] ^= v2;
    1025       20601 :         FASTMIX_PERM(s[0], s[1], s[2], s[3]);
    1026        2943 :         s[0] ^= v2;
    1027        2943 : }
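/*
 * A minimal illustration of the dataflow above, assuming a 64-bit build:
 * each input word is folded in around exactly one permutation round,
 * hence the "1" in SipHash-1-x. The input values are arbitrary.
 */
static void example_fast_mix(void)
{
        unsigned long s[4] = { SIPHASH_CONST_0, SIPHASH_CONST_1,
                               SIPHASH_CONST_2, SIPHASH_CONST_3 };

        /* e.g. a cycle counter, and the instruction pointer ^ swab(irq) */
        fast_mix(s, 0x0123456789abcdefUL, 0xfedcba9876543210UL);
}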
    1028             : 
    1029             : #ifdef CONFIG_SMP
    1030             : /*
    1031             :  * This function is called when the CPU has just come online, with
    1032             :  * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
    1033             :  */
    1034             : int __cold random_online_cpu(unsigned int cpu)
    1035             : {
    1036             :         /*
    1037             :          * During CPU shutdown and before CPU onlining, add_interrupt_
    1038             :          * randomness() may schedule mix_interrupt_randomness(), and
    1039             :          * set the MIX_INFLIGHT flag. However, because the worker can
    1040             :          * be scheduled on a different CPU during this period, that
    1041             :          * flag will never be cleared. For that reason, we zero out
    1042             :          * the flag here, which runs just after workqueues are onlined
    1043             :          * for the CPU again. This also has the effect of setting the
    1044             :          * irq randomness count to zero so that new accumulated irqs
    1045             :          * are fresh.
    1046             :          */
    1047             :         per_cpu_ptr(&irq_randomness, cpu)->count = 0;
    1048             :         return 0;
    1049             : }
    1050             : #endif
    1051             : 
    1052          29 : static void mix_interrupt_randomness(struct timer_list *work)
    1053             : {
    1054          29 :         struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
    1055             :         /*
    1056             :          * The size of the copied stack pool is explicitly 2 longs so that we
    1057             :          * only ever ingest half of the siphash output each time, retaining
    1058             :          * the other half as the next "key" that carries over. The entropy is
     1059             :  * supposed to be sufficiently dispersed between bits so that, on
     1060             :  * average, we don't wind up "losing" some.
    1061             :          */
    1062             :         unsigned long pool[2];
    1063             :         unsigned int count;
    1064             : 
    1065             :         /* Check to see if we're running on the wrong CPU due to hotplug. */
    1066             :         local_irq_disable();
    1067          29 :         if (fast_pool != this_cpu_ptr(&irq_randomness)) {
    1068             :                 local_irq_enable();
    1069           0 :                 return;
    1070             :         }
    1071             : 
    1072             :         /*
    1073             :          * Copy the pool to the stack so that the mixer always has a
    1074             :          * consistent view, before we reenable irqs again.
    1075             :          */
    1076          29 :         memcpy(pool, fast_pool->pool, sizeof(pool));
    1077          29 :         count = fast_pool->count;
    1078          29 :         fast_pool->count = 0;
    1079          29 :         fast_pool->last = jiffies;
    1080             :         local_irq_enable();
    1081             : 
    1082          29 :         mix_pool_bytes(pool, sizeof(pool));
    1083          29 :         credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));
    1084             : 
    1085          29 :         memzero_explicit(pool, sizeof(pool));
    1086             : }
    1087             : 
    1088        2943 : void add_interrupt_randomness(int irq)
    1089             : {
    1090             :         enum { MIX_INFLIGHT = 1U << 31 };
    1091        2943 :         unsigned long entropy = random_get_entropy();
    1092        2943 :         struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
    1093        2943 :         struct pt_regs *regs = get_irq_regs();
    1094             :         unsigned int new_count;
    1095             : 
    1096        2943 :         fast_mix(fast_pool->pool, entropy,
    1097        5886 :                  (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
    1098        2943 :         new_count = ++fast_pool->count;
    1099             : 
    1100        2943 :         if (new_count & MIX_INFLIGHT)
    1101             :                 return;
    1102             : 
    1103        2914 :         if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
    1104             :                 return;
    1105             : 
    1106          29 :         fast_pool->count |= MIX_INFLIGHT;
    1107          58 :         if (!timer_pending(&fast_pool->mix)) {
    1108          29 :                 fast_pool->mix.expires = jiffies;
    1109          29 :                 add_timer_on(&fast_pool->mix, raw_smp_processor_id());
    1110             :         }
    1111             : }
    1112             : EXPORT_SYMBOL_GPL(add_interrupt_randomness);
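/*
 * The crediting in mix_interrupt_randomness() works out to roughly one
 * bit per 64 interrupts: a typical batch of 1024 interrupts credits
 * clamp(1024 / 64, 1, 128) = 16 bits, with the upper clamp being
 * sizeof(pool) * 8 = 128 bits for the two-long stack pool.
 */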
    1113             : 
    1114             : /* There is one of these per entropy source */
    1115             : struct timer_rand_state {
    1116             :         unsigned long last_time;
    1117             :         long last_delta, last_delta2;
    1118             : };
    1119             : 
    1120             : /*
    1121             :  * This function adds entropy to the entropy "pool" by using timing
    1122             :  * delays. It uses the timer_rand_state structure to make an estimate
    1123             :  * of how many bits of entropy this call has added to the pool. The
    1124             :  * value "num" is also added to the pool; it should somehow describe
    1125             :  * the type of event that just happened.
    1126             :  */
    1127           0 : static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
    1128             : {
    1129           0 :         unsigned long entropy = random_get_entropy(), now = jiffies, flags;
    1130             :         long delta, delta2, delta3;
    1131             :         unsigned int bits;
    1132             : 
    1133             :         /*
    1134             :          * If we're in a hard IRQ, add_interrupt_randomness() will be called
    1135             :          * sometime after, so mix into the fast pool.
    1136             :          */
    1137           0 :         if (in_hardirq()) {
    1138           0 :                 fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
    1139             :         } else {
    1140           0 :                 spin_lock_irqsave(&input_pool.lock, flags);
    1141           0 :                 _mix_pool_bytes(&entropy, sizeof(entropy));
    1142           0 :                 _mix_pool_bytes(&num, sizeof(num));
    1143             :                 spin_unlock_irqrestore(&input_pool.lock, flags);
    1144             :         }
    1145             : 
    1146           0 :         if (crng_ready())
    1147           0 :                 return;
    1148             : 
    1149             :         /*
    1150             :          * Calculate number of bits of randomness we probably added.
    1151             :          * We take into account the first, second and third-order deltas
    1152             :          * in order to make our estimate.
    1153             :          */
    1154           0 :         delta = now - READ_ONCE(state->last_time);
    1155           0 :         WRITE_ONCE(state->last_time, now);
    1156             : 
    1157           0 :         delta2 = delta - READ_ONCE(state->last_delta);
    1158           0 :         WRITE_ONCE(state->last_delta, delta);
    1159             : 
    1160           0 :         delta3 = delta2 - READ_ONCE(state->last_delta2);
    1161           0 :         WRITE_ONCE(state->last_delta2, delta2);
    1162             : 
    1163           0 :         if (delta < 0)
    1164           0 :                 delta = -delta;
    1165           0 :         if (delta2 < 0)
    1166           0 :                 delta2 = -delta2;
    1167           0 :         if (delta3 < 0)
    1168           0 :                 delta3 = -delta3;
    1169           0 :         if (delta > delta2)
    1170           0 :                 delta = delta2;
    1171           0 :         if (delta > delta3)
    1172           0 :                 delta = delta3;
    1173             : 
    1174             :         /*
    1175             :          * delta is now minimum absolute delta. Round down by 1 bit
    1176             :          * on general principles, and limit entropy estimate to 11 bits.
    1177             :          */
    1178           0 :         bits = min(fls(delta >> 1), 11);
    1179             : 
    1180             :         /*
    1181             :          * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
    1182             :          * will run after this, which uses a different crediting scheme of 1 bit
    1183             :          * per every 64 interrupts. In order to let that function do accounting
    1184             :          * close to the one in this function, we credit a full 64/64 bit per bit,
    1185             :          * and then subtract one to account for the extra one added.
    1186             :          */
    1187           0 :         if (in_hardirq())
    1188           0 :                 this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
    1189             :         else
    1190           0 :                 _credit_init_bits(bits);
    1191             : }
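/*
 * A worked example of the estimate above: with delta = 100 jiffies,
 * last_delta = 90 and last_delta2 = 5, we get delta2 = 10 and
 * delta3 = 5. The minimum absolute delta is therefore 5, and
 * bits = min(fls(5 >> 1), 11) = fls(2) = 2 bits are credited.
 */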
    1192             : 
    1193           0 : void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
    1194             : {
    1195             :         static unsigned char last_value;
    1196             :         static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
    1197             : 
    1198             :         /* Ignore autorepeat and the like. */
    1199           0 :         if (value == last_value)
    1200             :                 return;
    1201             : 
    1202           0 :         last_value = value;
    1203           0 :         add_timer_randomness(&input_timer_state,
    1204           0 :                              (type << 4) ^ code ^ (code >> 4) ^ value);
    1205             : }
    1206             : EXPORT_SYMBOL_GPL(add_input_randomness);
    1207             : 
    1208             : #ifdef CONFIG_BLOCK
    1209           0 : void add_disk_randomness(struct gendisk *disk)
    1210             : {
    1211           0 :         if (!disk || !disk->random)
    1212             :                 return;
    1213             :         /* First major is 1, so we get >= 0x200 here. */
    1214           0 :         add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
    1215             : }
    1216             : EXPORT_SYMBOL_GPL(add_disk_randomness);
    1217             : 
    1218           0 : void __cold rand_initialize_disk(struct gendisk *disk)
    1219             : {
    1220             :         struct timer_rand_state *state;
    1221             : 
    1222             :         /*
    1223             :          * If kzalloc returns null, we just won't use that entropy
    1224             :          * source.
    1225             :          */
    1226           0 :         state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
    1227           0 :         if (state) {
    1228           0 :                 state->last_time = INITIAL_JIFFIES;
    1229           0 :                 disk->random = state;
    1230             :         }
    1231           0 : }
    1232             : #endif
    1233             : 
    1234             : struct entropy_timer_state {
    1235             :         unsigned long entropy;
    1236             :         struct timer_list timer;
    1237             :         atomic_t samples;
    1238             :         unsigned int samples_per_bit;
    1239             : };
    1240             : 
    1241             : /*
    1242             :  * Each time the timer fires, we expect that we got an unpredictable jump in
    1243             :  * the cycle counter. Even if the timer is running on another CPU, the timer
    1244             :  * activity will be touching the stack of the CPU that is generating entropy.
    1245             :  *
    1246             :  * Note that we don't re-arm the timer in the timer itself - we are happy to be
    1247             :  * scheduled away, since that just makes the load more complex, but we do not
    1248             :  * want the timer to keep ticking unless the entropy loop is running.
    1249             :  *
    1250             :  * So the re-arming always happens in the entropy loop itself.
    1251             :  */
    1252           0 : static void __cold entropy_timer(struct timer_list *timer)
    1253             : {
    1254           0 :         struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);
    1255           0 :         unsigned long entropy = random_get_entropy();
    1256             : 
    1257           0 :         mix_pool_bytes(&entropy, sizeof(entropy));
    1258           0 :         if (atomic_inc_return(&state->samples) % state->samples_per_bit == 0)
    1259           0 :                 credit_init_bits(1);
    1260           0 : }
    1261             : 
    1262             : /*
    1263             :  * If we have an actual cycle counter, see if we can generate enough entropy
    1264             :  * with timing noise.
    1265             :  */
    1266           0 : static void __cold try_to_generate_entropy(void)
    1267             : {
    1268             :         enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 };
    1269             :         u8 stack_bytes[sizeof(struct entropy_timer_state) + SMP_CACHE_BYTES - 1];
    1270           0 :         struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
    1271           0 :         unsigned int i, num_different = 0;
    1272           0 :         unsigned long last = random_get_entropy();
    1273           0 :         int cpu = -1;
    1274             : 
    1275           0 :         for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
    1276           0 :                 stack->entropy = random_get_entropy();
    1277           0 :                 if (stack->entropy != last)
    1278           0 :                         ++num_different;
    1279           0 :                 last = stack->entropy;
    1280             :         }
    1281           0 :         stack->samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
    1282           0 :         if (stack->samples_per_bit > MAX_SAMPLES_PER_BIT)
    1283           0 :                 return;
    1284             : 
    1285           0 :         atomic_set(&stack->samples, 0);
    1286           0 :         timer_setup_on_stack(&stack->timer, entropy_timer, 0);
    1287           0 :         while (!crng_ready() && !signal_pending(current)) {
    1288             :                 /*
    1289             :                  * Check !timer_pending() and then ensure that any previous callback has finished
    1290             :                  * executing by checking try_to_del_timer_sync(), before queueing the next one.
    1291             :                  */
    1292           0 :                 if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) {
    1293             :                         struct cpumask timer_cpus;
    1294             :                         unsigned int num_cpus;
    1295             : 
    1296             :                         /*
    1297             :                          * Preemption must be disabled here, both to read the current CPU number
    1298             :                          * and to avoid scheduling a timer on a dead CPU.
    1299             :                          */
    1300           0 :                         preempt_disable();
    1301             : 
    1302             :                         /* Only schedule callbacks on timer CPUs that are online. */
    1303           0 :                         cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
    1304           0 :                         num_cpus = cpumask_weight(&timer_cpus);
     1305             :                         /* In the very bizarre case of misconfiguration, fall back to all online CPUs. */
    1306           0 :                         if (unlikely(num_cpus == 0)) {
    1307           0 :                                 timer_cpus = *cpu_online_mask;
    1308           0 :                                 num_cpus = cpumask_weight(&timer_cpus);
    1309             :                         }
    1310             : 
    1311             :                         /* Basic CPU round-robin, which avoids the current CPU. */
    1312             :                         do {
    1313           0 :                                 cpu = cpumask_next(cpu, &timer_cpus);
    1314           0 :                                 if (cpu >= nr_cpu_ids)
    1315           0 :                                         cpu = cpumask_first(&timer_cpus);
    1316           0 :                         } while (cpu == smp_processor_id() && num_cpus > 1);
    1317             : 
    1318             :                         /* Expiring the timer at `jiffies` means it's the next tick. */
    1319           0 :                         stack->timer.expires = jiffies;
    1320             : 
    1321           0 :                         add_timer_on(&stack->timer, cpu);
    1322             : 
    1323           0 :                         preempt_enable();
    1324             :                 }
    1325           0 :                 mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
    1326           0 :                 schedule();
    1327           0 :                 stack->entropy = random_get_entropy();
    1328             :         }
    1329           0 :         mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
    1330             : 
    1331           0 :         del_timer_sync(&stack->timer);
    1332           0 :         destroy_timer_on_stack(&stack->timer);
    1333             : }
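/*
 * A worked example of the trial above: if 2047 of the 8191 warm-up reads
 * saw the cycle counter change, then samples_per_bit =
 * DIV_ROUND_UP(8192, 2048) = 4. With HZ = 250, MAX_SAMPLES_PER_BIT is
 * 250 / 15 = 16, so the loop proceeds, crediting one bit on every
 * fourth firing of entropy_timer().
 */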
    1334             : 
    1335             : 
    1336             : /**********************************************************************
    1337             :  *
    1338             :  * Userspace reader/writer interfaces.
    1339             :  *
    1340             :  * getrandom(2) is the primary modern interface into the RNG and should
    1341             :  * be used in preference to anything else.
    1342             :  *
    1343             :  * Reading from /dev/random has the same functionality as calling
     1344             :  * getrandom(2) with flags=0. In earlier kernel versions, however, it
     1345             :  * had vastly different semantics, so it should be avoided by code that
     1346             :  * must remain compatible with those older kernels.
    1347             :  *
    1348             :  * Reading from /dev/urandom has the same functionality as calling
    1349             :  * getrandom(2) with flags=GRND_INSECURE. Because it does not block
    1350             :  * waiting for the RNG to be ready, it should not be used.
    1351             :  *
    1352             :  * Writing to either /dev/random or /dev/urandom adds entropy to
    1353             :  * the input pool but does not credit it.
    1354             :  *
    1355             :  * Polling on /dev/random indicates when the RNG is initialized, on
    1356             :  * the read side, and when it wants new entropy, on the write side.
    1357             :  *
    1358             :  * Both /dev/random and /dev/urandom have the same set of ioctls for
    1359             :  * adding entropy, getting the entropy count, zeroing the count, and
    1360             :  * reseeding the crng.
    1361             :  *
    1362             :  **********************************************************************/
    1363             : 
    1364           0 : SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
    1365             : {
    1366             :         struct iov_iter iter;
    1367             :         struct iovec iov;
    1368             :         int ret;
    1369             : 
    1370           0 :         if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
    1371             :                 return -EINVAL;
    1372             : 
    1373             :         /*
    1374             :          * Requesting insecure and blocking randomness at the same time makes
    1375             :          * no sense.
    1376             :          */
    1377           0 :         if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
    1378             :                 return -EINVAL;
    1379             : 
    1380           0 :         if (!crng_ready() && !(flags & GRND_INSECURE)) {
    1381           0 :                 if (flags & GRND_NONBLOCK)
    1382             :                         return -EAGAIN;
    1383           0 :                 ret = wait_for_random_bytes();
    1384           0 :                 if (unlikely(ret))
    1385           0 :                         return ret;
    1386             :         }
    1387             : 
    1388           0 :         ret = import_single_range(ITER_DEST, ubuf, len, &iov, &iter);
    1389           0 :         if (unlikely(ret))
    1390           0 :                 return ret;
    1391           0 :         return get_random_bytes_user(&iter);
    1392             : }
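/*
 * A hedged userspace sketch of the common case; glibc >= 2.25 exposes
 * this syscall via <sys/random.h>:
 *
 *      #include <sys/random.h>
 *
 *      unsigned char key[32];
 *      if (getrandom(key, sizeof(key), 0) != sizeof(key))
 *              abort();        // flags=0 blocks only until the crng is ready
 */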
    1393             : 
    1394           0 : static __poll_t random_poll(struct file *file, poll_table *wait)
    1395             : {
    1396           0 :         poll_wait(file, &crng_init_wait, wait);
    1397           0 :         return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
    1398             : }
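/*
 * A hedged userspace sketch: polling /dev/random for readability waits
 * for crng initialization without consuming any bytes:
 *
 *      struct pollfd pfd = { .fd = open("/dev/random", O_RDONLY), .events = POLLIN };
 *      poll(&pfd, 1, -1);      // returns once the RNG is initialized
 */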
    1399             : 
    1400           0 : static ssize_t write_pool_user(struct iov_iter *iter)
    1401             : {
    1402             :         u8 block[BLAKE2S_BLOCK_SIZE];
    1403           0 :         ssize_t ret = 0;
    1404             :         size_t copied;
    1405             : 
    1406           0 :         if (unlikely(!iov_iter_count(iter)))
    1407             :                 return 0;
    1408             : 
    1409             :         for (;;) {
    1410           0 :                 copied = copy_from_iter(block, sizeof(block), iter);
    1411           0 :                 ret += copied;
    1412           0 :                 mix_pool_bytes(block, copied);
    1413           0 :                 if (!iov_iter_count(iter) || copied != sizeof(block))
    1414             :                         break;
    1415             : 
    1416             :                 BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
    1417           0 :                 if (ret % PAGE_SIZE == 0) {
    1418           0 :                         if (signal_pending(current))
    1419             :                                 break;
    1420           0 :                         cond_resched();
    1421             :                 }
    1422             :         }
    1423             : 
    1424           0 :         memzero_explicit(block, sizeof(block));
    1425           0 :         return ret ? ret : -EFAULT;
    1426             : }
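/*
 * A hedged userspace sketch of feeding (but not crediting) saved seed
 * material at boot; the buffer name is illustrative:
 *
 *      int fd = open("/dev/urandom", O_WRONLY);
 *      write(fd, seed_buf, seed_len);  // mixed in 64-byte blocks above
 */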
    1427             : 
    1428           0 : static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
    1429             : {
    1430           0 :         return write_pool_user(iter);
    1431             : }
    1432             : 
    1433           0 : static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
    1434             : {
    1435             :         static int maxwarn = 10;
    1436             : 
    1437             :         /*
    1438             :          * Opportunistically attempt to initialize the RNG on platforms that
    1439             :          * have fast cycle counters, but don't (for now) require it to succeed.
    1440             :          */
    1441           0 :         if (!crng_ready())
    1442           0 :                 try_to_generate_entropy();
    1443             : 
    1444           0 :         if (!crng_ready()) {
    1445           0 :                 if (!ratelimit_disable && maxwarn <= 0)
    1446           0 :                         ++urandom_warning.missed;
    1447           0 :                 else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
    1448           0 :                         --maxwarn;
    1449           0 :                         pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
    1450             :                                   current->comm, iov_iter_count(iter));
    1451             :                 }
    1452             :         }
    1453             : 
    1454           0 :         return get_random_bytes_user(iter);
    1455             : }
    1456             : 
    1457           0 : static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
    1458             : {
    1459             :         int ret;
    1460             : 
    1461           0 :         if (!crng_ready() &&
    1462           0 :             ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
    1463           0 :              (kiocb->ki_filp->f_flags & O_NONBLOCK)))
    1464             :                 return -EAGAIN;
    1465             : 
    1466           0 :         ret = wait_for_random_bytes();
    1467           0 :         if (ret != 0)
    1468           0 :                 return ret;
    1469           0 :         return get_random_bytes_user(iter);
    1470             : }
    1471             : 
    1472           0 : static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
    1473             : {
    1474           0 :         int __user *p = (int __user *)arg;
    1475             :         int ent_count;
    1476             : 
    1477           0 :         switch (cmd) {
    1478             :         case RNDGETENTCNT:
    1479             :                 /* Inherently racy, no point locking. */
    1480           0 :                 if (put_user(input_pool.init_bits, p))
    1481             :                         return -EFAULT;
    1482           0 :                 return 0;
    1483             :         case RNDADDTOENTCNT:
    1484           0 :                 if (!capable(CAP_SYS_ADMIN))
    1485             :                         return -EPERM;
    1486           0 :                 if (get_user(ent_count, p))
    1487             :                         return -EFAULT;
    1488           0 :                 if (ent_count < 0)
    1489             :                         return -EINVAL;
    1490           0 :                 credit_init_bits(ent_count);
    1491             :                 return 0;
    1492             :         case RNDADDENTROPY: {
    1493             :                 struct iov_iter iter;
    1494             :                 struct iovec iov;
    1495             :                 ssize_t ret;
    1496             :                 int len;
    1497             : 
    1498           0 :                 if (!capable(CAP_SYS_ADMIN))
    1499             :                         return -EPERM;
    1500           0 :                 if (get_user(ent_count, p++))
    1501             :                         return -EFAULT;
    1502           0 :                 if (ent_count < 0)
    1503             :                         return -EINVAL;
    1504           0 :                 if (get_user(len, p++))
    1505             :                         return -EFAULT;
    1506           0 :                 ret = import_single_range(ITER_SOURCE, p, len, &iov, &iter);
    1507           0 :                 if (unlikely(ret))
    1508             :                         return ret;
    1509           0 :                 ret = write_pool_user(&iter);
    1510           0 :                 if (unlikely(ret < 0))
    1511             :                         return ret;
    1512             :                 /* Since we're crediting, enforce that it was all written into the pool. */
    1513           0 :                 if (unlikely(ret != len))
    1514             :                         return -EFAULT;
    1515           0 :                 credit_init_bits(ent_count);
    1516             :                 return 0;
    1517             :         }
    1518             :         case RNDZAPENTCNT:
    1519             :         case RNDCLEARPOOL:
    1520             :                 /* No longer has any effect. */
    1521           0 :                 if (!capable(CAP_SYS_ADMIN))
    1522             :                         return -EPERM;
    1523           0 :                 return 0;
    1524             :         case RNDRESEEDCRNG:
    1525           0 :                 if (!capable(CAP_SYS_ADMIN))
    1526             :                         return -EPERM;
    1527           0 :                 if (!crng_ready())
    1528             :                         return -ENODATA;
    1529           0 :                 crng_reseed(NULL);
    1530           0 :                 return 0;
    1531             :         default:
    1532             :                 return -EINVAL;
    1533             :         }
    1534             : }
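/*
 * A hedged userspace sketch of RNDADDENTROPY, the only write-side
 * interface that credits as well as mixes (CAP_SYS_ADMIN required);
 * struct rand_pool_info comes from <linux/random.h>:
 *
 *      struct {
 *              struct rand_pool_info info;
 *              unsigned char data[64];
 *      } e = { .info = { .entropy_count = 512, .buf_size = 64 } };
 *
 *      // fill e.data from a trusted source, then:
 *      int fd = open("/dev/urandom", O_RDWR);
 *      ioctl(fd, RNDADDENTROPY, &e);
 */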
    1535             : 
    1536           0 : static int random_fasync(int fd, struct file *filp, int on)
    1537             : {
    1538           0 :         return fasync_helper(fd, filp, on, &fasync);
    1539             : }
    1540             : 
    1541             : const struct file_operations random_fops = {
    1542             :         .read_iter = random_read_iter,
    1543             :         .write_iter = random_write_iter,
    1544             :         .poll = random_poll,
    1545             :         .unlocked_ioctl = random_ioctl,
    1546             :         .compat_ioctl = compat_ptr_ioctl,
    1547             :         .fasync = random_fasync,
    1548             :         .llseek = noop_llseek,
    1549             :         .splice_read = generic_file_splice_read,
    1550             :         .splice_write = iter_file_splice_write,
    1551             : };
    1552             : 
    1553             : const struct file_operations urandom_fops = {
    1554             :         .read_iter = urandom_read_iter,
    1555             :         .write_iter = random_write_iter,
    1556             :         .unlocked_ioctl = random_ioctl,
    1557             :         .compat_ioctl = compat_ptr_ioctl,
    1558             :         .fasync = random_fasync,
    1559             :         .llseek = noop_llseek,
    1560             :         .splice_read = generic_file_splice_read,
    1561             :         .splice_write = iter_file_splice_write,
    1562             : };
    1563             : 
    1564             : 
    1565             : /********************************************************************
    1566             :  *
    1567             :  * Sysctl interface.
    1568             :  *
     1569             :  * These are partly unused legacy knobs, kept with dummy values so as
     1570             :  * not to break userspace, and partly still useful things. They are
     1571             :  * usually accessible in /proc/sys/kernel/random/ and are as follows:
    1572             :  *
    1573             :  * - boot_id - a UUID representing the current boot.
    1574             :  *
    1575             :  * - uuid - a random UUID, different each time the file is read.
    1576             :  *
    1577             :  * - poolsize - the number of bits of entropy that the input pool can
    1578             :  *   hold, tied to the POOL_BITS constant.
    1579             :  *
    1580             :  * - entropy_avail - the number of bits of entropy currently in the
    1581             :  *   input pool. Always <= poolsize.
    1582             :  *
    1583             :  * - write_wakeup_threshold - the amount of entropy in the input pool
    1584             :  *   below which write polls to /dev/random will unblock, requesting
    1585             :  *   more entropy, tied to the POOL_READY_BITS constant. It is writable
    1586             :  *   to avoid breaking old userspaces, but writing to it does not
    1587             :  *   change any behavior of the RNG.
    1588             :  *
     1589             :  * - urandom_min_reseed_secs - fixed to CRNG_RESEED_INTERVAL, expressed in seconds.
    1590             :  *   It is writable to avoid breaking old userspaces, but writing
    1591             :  *   to it does not change any behavior of the RNG.
    1592             :  *
    1593             :  ********************************************************************/
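/*
 * A hedged shell sketch of the knobs above; values are illustrative:
 *
 *      $ cat /proc/sys/kernel/random/entropy_avail
 *      256
 *      $ cat /proc/sys/kernel/random/boot_id        # stable for this boot
 *      $ cat /proc/sys/kernel/random/uuid           # fresh on every read
 */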
    1594             : 
    1595             : #ifdef CONFIG_SYSCTL
    1596             : 
    1597             : #include <linux/sysctl.h>
    1598             : 
    1599             : static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
    1600             : static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
    1601             : static int sysctl_poolsize = POOL_BITS;
    1602             : static u8 sysctl_bootid[UUID_SIZE];
    1603             : 
    1604             : /*
     1605             :  * This function is used to return both the bootid UUID and a random
    1606             :  * UUID. The difference is in whether table->data is NULL; if it is,
    1607             :  * then a new UUID is generated and returned to the user.
    1608             :  */
    1609           0 : static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
    1610             :                         size_t *lenp, loff_t *ppos)
    1611             : {
    1612             :         u8 tmp_uuid[UUID_SIZE], *uuid;
    1613             :         char uuid_string[UUID_STRING_LEN + 1];
    1614           0 :         struct ctl_table fake_table = {
    1615             :                 .data = uuid_string,
    1616             :                 .maxlen = UUID_STRING_LEN
    1617             :         };
    1618             : 
    1619           0 :         if (write)
    1620             :                 return -EPERM;
    1621             : 
    1622           0 :         uuid = table->data;
    1623           0 :         if (!uuid) {
    1624           0 :                 uuid = tmp_uuid;
    1625           0 :                 generate_random_uuid(uuid);
    1626             :         } else {
    1627             :                 static DEFINE_SPINLOCK(bootid_spinlock);
    1628             : 
    1629           0 :                 spin_lock(&bootid_spinlock);
    1630           0 :                 if (!uuid[8])
    1631           0 :                         generate_random_uuid(uuid);
    1632             :                 spin_unlock(&bootid_spinlock);
    1633             :         }
    1634             : 
    1635           0 :         snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
    1636           0 :         return proc_dostring(&fake_table, 0, buf, lenp, ppos);
    1637             : }
    1638             : 
    1639             : /* The same as proc_dointvec, but writes don't change anything. */
    1640           0 : static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
    1641             :                             size_t *lenp, loff_t *ppos)
    1642             : {
    1643           0 :         return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
    1644             : }
    1645             : 
    1646             : static struct ctl_table random_table[] = {
    1647             :         {
    1648             :                 .procname       = "poolsize",
    1649             :                 .data           = &sysctl_poolsize,
    1650             :                 .maxlen         = sizeof(int),
    1651             :                 .mode           = 0444,
    1652             :                 .proc_handler   = proc_dointvec,
    1653             :         },
    1654             :         {
    1655             :                 .procname       = "entropy_avail",
    1656             :                 .data           = &input_pool.init_bits,
    1657             :                 .maxlen         = sizeof(int),
    1658             :                 .mode           = 0444,
    1659             :                 .proc_handler   = proc_dointvec,
    1660             :         },
    1661             :         {
    1662             :                 .procname       = "write_wakeup_threshold",
    1663             :                 .data           = &sysctl_random_write_wakeup_bits,
    1664             :                 .maxlen         = sizeof(int),
    1665             :                 .mode           = 0644,
    1666             :                 .proc_handler   = proc_do_rointvec,
    1667             :         },
    1668             :         {
    1669             :                 .procname       = "urandom_min_reseed_secs",
    1670             :                 .data           = &sysctl_random_min_urandom_seed,
    1671             :                 .maxlen         = sizeof(int),
    1672             :                 .mode           = 0644,
    1673             :                 .proc_handler   = proc_do_rointvec,
    1674             :         },
    1675             :         {
    1676             :                 .procname       = "boot_id",
    1677             :                 .data           = &sysctl_bootid,
    1678             :                 .mode           = 0444,
    1679             :                 .proc_handler   = proc_do_uuid,
    1680             :         },
    1681             :         {
    1682             :                 .procname       = "uuid",
    1683             :                 .mode           = 0444,
    1684             :                 .proc_handler   = proc_do_uuid,
    1685             :         },
    1686             :         { }
    1687             : };
    1688             : 
    1689             : /*
    1690             :  * random_init() is called before sysctl_init(),
    1691             :  * so we cannot call register_sysctl_init() in random_init()
    1692             :  */
    1693           1 : static int __init random_sysctls_init(void)
    1694             : {
    1695           1 :         register_sysctl_init("kernel/random", random_table);
    1696           1 :         return 0;
    1697             : }
    1698             : device_initcall(random_sysctls_init);
    1699             : #endif

Generated by: LCOV version 1.14