LCOV - code coverage report
File: include/linux/random.h
Test: coverage.info
Date: 2023-08-24 13:40:31
Line coverage: 1 of 21 lines hit (4.8 %)
Function coverage: 0 of 2 functions hit (0.0 %)

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/list.h>

#include <uapi/linux/random.h>

struct notifier_block;

void add_device_randomness(const void *buf, size_t len);
void __init add_bootloader_randomness(const void *buf, size_t len);
void add_input_randomness(unsigned int type, unsigned int code,
                          unsigned int value) __latent_entropy;
void add_interrupt_randomness(int irq) __latent_entropy;
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);
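
/*
 * Illustrative sketch (not part of the original header): a hypothetical driver
 * feeding device-specific data into the input pool at probe time. The function
 * name and the 'chip->serial' field are invented for the example; only
 * add_device_randomness() and its signature come from this header.
 *
 *      static int example_probe(struct example_chip *chip)
 *      {
 *              // A serial number is not secret, but it differs per device,
 *              // so it still helps differentiate this pool's initial state.
 *              add_device_randomness(&chip->serial, sizeof(chip->serial));
 *              return 0;
 *      }
 */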

static inline void add_latent_entropy(void)
{
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
        add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
#else
        add_device_randomness(NULL, 0);
#endif
}
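
/*
 * Background note (best-effort summary, hedged): CONFIG_GCC_PLUGIN_LATENT_ENTROPY
 * builds with -DLATENT_ENTROPY_PLUGIN and has the compiler emit the
 * 'latent_entropy' variable referenced above, which __latent_entropy-annotated
 * functions keep perturbing; this helper then folds the accumulated value into
 * the pool. Without the plugin, the fallback add_device_randomness(NULL, 0)
 * supplies no payload bytes but still lets the pool mix in a timestamp.
 */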

#if IS_ENABLED(CONFIG_VMGENID)
void add_vmfork_randomness(const void *unique_vm_id, size_t len);
int register_random_vmfork_notifier(struct notifier_block *nb);
int unregister_random_vmfork_notifier(struct notifier_block *nb);
#else
static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
#endif
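
/*
 * Illustrative sketch (not part of the original header): registering for
 * VM-fork notifications, e.g. to drop cached session keys after a VM clone.
 * The callback name and body are hypothetical; the notifier_block usage and
 * NOTIFY_OK assume <linux/notifier.h>.
 *
 *      static int example_vmfork_cb(struct notifier_block *nb,
 *                                   unsigned long action, void *data)
 *      {
 *              // invalidate anything derived from pre-fork randomness
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block example_vmfork_nb = {
 *              .notifier_call = example_vmfork_cb,
 *      };
 *
 *      register_random_vmfork_notifier(&example_vmfork_nb);
 */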

void get_random_bytes(void *buf, size_t len);
u8 get_random_u8(void);
u16 get_random_u16(void);
u32 get_random_u32(void);
u64 get_random_u64(void);
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
        return get_random_u64();
#else
        return get_random_u32();
#endif
}

u32 __get_random_u32_below(u32 ceil);

/*
 * Returns a random integer in the interval [0, ceil), with uniform
 * distribution, suitable for all uses. Fastest when ceil is a constant, but
 * still fast for variable ceil as well.
 */
static inline u32 get_random_u32_below(u32 ceil)
{
        if (!__builtin_constant_p(ceil))
                return __get_random_u32_below(ceil);

        /*
         * For the fast path, below, all operations on ceil are precomputed by
         * the compiler, so this incurs no overhead for checking pow2, doing
         * divisions, or branching based on integer size. The resultant
         * algorithm does traditional reciprocal multiplication (typically
         * optimized by the compiler into shifts and adds), rejecting samples
         * whose lower half would indicate a range indivisible by ceil.
         */
        BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0");
        if (ceil <= 1)
                return 0;
        for (;;) {
                if (ceil <= 1U << 8) {
                        u32 mult = ceil * get_random_u8();
                        if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil))
                                return mult >> 8;
                } else if (ceil <= 1U << 16) {
                        u32 mult = ceil * get_random_u16();
                        if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil))
                                return mult >> 16;
                } else {
                        u64 mult = (u64)ceil * get_random_u32();
                        if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil))
                                return mult >> 32;
                }
        }
}
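
/*
 * Worked example of the constant-ceil fast path above (illustrative only):
 * with ceil == 100 the u8 branch is taken, so mult = 100 * get_random_u8()
 * lands on multiples of 100 in [0, 25500] and mult >> 8 falls in [0, 99].
 * Since (1U << 8) % 100 == 56, a draw is rejected and retried whenever the
 * low byte of mult is below 56; that discards exactly the surplus multiples
 * that would otherwise make some outputs more likely than others, leaving
 * mult >> 8 uniform over [0, 99]. Power-of-two ceils skip the rejection test.
 */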

/*
 * Returns a random integer in the interval (floor, U32_MAX], with uniform
 * distribution, suitable for all uses. Fastest when floor is a constant, but
 * still fast for variable floor as well.
 */
static inline u32 get_random_u32_above(u32 floor)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && floor == U32_MAX,
                         "get_random_u32_above() must take floor < U32_MAX");
        return floor + 1 + get_random_u32_below(U32_MAX - floor);
}
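
/*
 * Checking the arithmetic above: get_random_u32_below(U32_MAX - floor) yields
 * a value in [0, U32_MAX - floor - 1], so adding floor + 1 shifts the result
 * into [floor + 1, U32_MAX], i.e. the documented interval (floor, U32_MAX].
 * For example, floor == 9 produces a uniform value in [10, U32_MAX].
 */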

/*
 * Returns a random integer in the interval [floor, ceil], with uniform
 * distribution, suitable for all uses. Fastest when floor and ceil are
 * constant, but still fast for variable floor and ceil as well.
 */
static inline u32 get_random_u32_inclusive(u32 floor, u32 ceil)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && __builtin_constant_p(ceil) &&
                         (floor > ceil || ceil - floor == U32_MAX),
                         "get_random_u32_inclusive() must take floor <= ceil");
        return floor + get_random_u32_below(ceil - floor + 1);
}
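
/*
 * Checking the arithmetic above: get_random_u32_inclusive(1, 6) computes
 * 1 + get_random_u32_below(6), i.e. a uniform value in [1, 6], like a die
 * roll. The ceil - floor == U32_MAX case is rejected at build time because
 * ceil - floor + 1 would wrap to 0, which get_random_u32_below() cannot take.
 */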

void __init random_init_early(const char *command_line);
void __init random_init(void);
bool rng_is_initialized(void);
int wait_for_random_bytes(void);
int execute_with_initialized_rng(struct notifier_block *nb);

/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
 * Returns the result of the call to wait_for_random_bytes. */
static inline int get_random_bytes_wait(void *buf, size_t nbytes)
{
        int ret = wait_for_random_bytes();
        get_random_bytes(buf, nbytes);
        return ret;
}
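
/*
 * Illustrative sketch (not part of the original header): typical use in a
 * sleepable context where the caller wants to know whether the RNG was ready.
 * The key buffer and error handling are hypothetical.
 *
 *      u8 key[32];
 *      int err = get_random_bytes_wait(key, sizeof(key));
 *      if (err)        // interrupted by a signal before the RNG was ready
 *              return err;
 */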

#define declare_get_random_var_wait(name, ret_type) \
        static inline int get_random_ ## name ## _wait(ret_type *out) { \
                int ret = wait_for_random_bytes(); \
                if (unlikely(ret)) \
                        return ret; \
                *out = get_random_ ## name(); \
                return 0; \
        }
declare_get_random_var_wait(u8, u8)
declare_get_random_var_wait(u16, u16)
declare_get_random_var_wait(u32, u32)
declare_get_random_var_wait(u64, u64)
declare_get_random_var_wait(long, unsigned long)
#undef declare_get_random_var_wait
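
/*
 * For reference, declare_get_random_var_wait(u32, u32) above expands to
 * (roughly, reformatted for readability):
 *
 *      static inline int get_random_u32_wait(u32 *out)
 *      {
 *              int ret = wait_for_random_bytes();
 *              if (unlikely(ret))
 *                      return ret;
 *              *out = get_random_u32();
 *              return 0;
 *      }
 */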

/*
 * This is designed to be standalone for just prandom
 * users, but for now we include it from <linux/random.h>
 * for legacy reasons.
 */
#include <linux/prandom.h>

#ifdef CONFIG_SMP
int random_prepare_cpu(unsigned int cpu);
int random_online_cpu(unsigned int cpu);
#endif

#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
#endif

#endif /* _LINUX_RANDOM_H */

Generated by: LCOV version 1.14