LCOV - code coverage report
Current view: top level - arch/x86/include/asm - word-at-a-time.h (source / functions) Hit Total Coverage
Test: coverage.info Lines: 7 7 100.0 %
Date: 2023-04-06 08:38:28 Functions: 0 0 -

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef _ASM_WORD_AT_A_TIME_H
       3             : #define _ASM_WORD_AT_A_TIME_H
       4             : 
       5             : #include <linux/kernel.h>
       6             : 
       7             : /*
       8             :  * This is largely generic for little-endian machines, but the
       9             :  * optimal byte mask counting is probably going to be something
      10             :  * that is architecture-specific. If you have a reliably fast
      11             :  * bit count instruction, that might be better than the multiply
      12             :  * and shift, for example.
      13             :  */
/*
 * Per-word constants used by the zero-byte detection below:
 * one_bits has 0x01 in every byte, high_bits has 0x80 in every
 * byte. They are passed around by pointer so callers materialize
 * them once (see WORD_AT_A_TIME_CONSTANTS).
 */
struct word_at_a_time {
	const unsigned long one_bits, high_bits;
};

/* Initializer: REPEAT_BYTE() broadcasts the given byte across the word. */
#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
      19             : 
      20             : #ifdef CONFIG_64BIT
      21             : 
      22             : /*
      23             :  * Jan Achrenius on G+: microoptimized version of
      24             :  * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
      25             :  * that works for the bytemasks without having to
      26             :  * mask them first.
      27             :  */
      28             : static inline long count_masked_bytes(unsigned long mask)
      29             : {
      30         371 :         return mask*0x0001020304050608ul >> 56;
      31             : }
      32             : 
      33             : #else   /* 32-bit case */
      34             : 
      35             : /* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
      36             : static inline long count_masked_bytes(long mask)
      37             : {
      38             :         /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
      39             :         long a = (0x0ff0001+mask) >> 23;
      40             :         /* Fix the 1 for 00 case */
      41             :         return a & mask;
      42             : }
      43             : 
      44             : #endif
      45             : 
      46             : /* Return nonzero if it has a zero */
      47             : static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
      48             : {
      49         750 :         unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
      50         750 :         *bits = mask;
      51             :         return mask;
      52             : }
      53             : 
      54             : static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
      55             : {
      56             :         return bits;
      57             : }
      58             : 
      59             : static inline unsigned long create_zero_mask(unsigned long bits)
      60             : {
      61         371 :         bits = (bits - 1) & ~bits;
      62         371 :         return bits >> 7;
      63             : }
      64             : 
/* The mask we created is directly usable as a bytemask */
#define zero_bytemask(mask) (mask)

/*
 * Return the byte index of the first zero byte, given the bytemask
 * produced by create_zero_mask() (thin wrapper around the
 * word-size-specific count_masked_bytes() above).
 */
static inline unsigned long find_zero(unsigned long mask)
{
	return count_masked_bytes(mask);
}
      72             : 
/*
 * Load an unaligned word from kernel space.
 *
 * In the (very unlikely) case of the word being a page-crosser
 * and the next page not being mapped, take the exception and
 * return zeroes in the non-existing part.
 */
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
	unsigned long ret;

	/*
	 * A plain word load with an exception-table entry attached:
	 * if the mov at label 1 faults, the EX_TYPE_ZEROPAD handler
	 * (implemented in the x86 fault code, not visible here)
	 * resumes at label 2 with the missing bytes zero-filled.
	 */
	asm volatile(
		"1:	mov %[mem], %[ret]\n"
		"2:\n"
		_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_ZEROPAD)
		: [ret] "=r" (ret)
		: [mem] "m" (*(unsigned long *)addr));

	return ret;
}
      93             : 
      94             : #endif /* _ASM_WORD_AT_A_TIME_H */

Generated by: LCOV version 1.14