LCOV - code coverage report
Current view: top level - arch/x86/lib - csum-partial_64.c (source / functions)
Test: coverage.info
Date: 2023-07-19 18:55:55

               Hit    Total    Coverage
Lines:           0       34      0.0 %
Functions:       0        2      0.0 %

// SPDX-License-Identifier: GPL-2.0
/*
 * arch/x86_64/lib/csum-partial.c
 *
 * This file contains network checksum routines that are better done
 * in an architecture-specific manner due to speed.
 */

#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>

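/*
 * Fold a 32-bit value into 16 bits: add the high and low 16-bit halves
 * and add the resulting carry back in (ones'-complement end-around carry).
 */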
static inline unsigned short from32to16(unsigned a)
{
        unsigned short b = a >> 16;
        asm("addw %w2,%w0\n\t"
            "adcw $0,%w0\n"
            : "=r" (b)
            : "0" (b), "r" (a));
        return b;
}

/*
 * Do a checksum on an arbitrary memory area.
 * Returns a 32-bit checksum.
 *
 * This isn't as time-critical as it used to be because many NICs
 * do hardware checksumming these days.
 *
 * Still, with CHECKSUM_COMPLETE this is called to compute
 * checksums on IPv6 headers (40 bytes) and other small parts.
 * It's best to have buff aligned on a 64-bit boundary.
 */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
        u64 temp64 = (__force u64)sum;
        unsigned odd, result;

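        /*
         * An odd start address is handled by folding in the first byte
         * up front so the rest of the buffer is 2-byte aligned; the
         * running sum is kept byte-rotated (ror32 by 8) and the bytes
         * are swapped back after the final fold below.
         */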
        odd = 1 & (unsigned long) buff;
        if (unlikely(odd)) {
                if (unlikely(len == 0))
                        return sum;
                temp64 = ror32((__force u32)sum, 8);
                temp64 += (*(unsigned char *)buff << 8);
                len--;
                buff++;
        }

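        /*
         * Main loop: consume 64 bytes per iteration with an unrolled
         * add/add-with-carry chain into a 64-bit accumulator; the final
         * "adcq $0" folds the carry of the last addition back in.
         */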
        while (unlikely(len >= 64)) {
                asm("addq 0*8(%[src]),%[res]\n\t"
                    "adcq 1*8(%[src]),%[res]\n\t"
                    "adcq 2*8(%[src]),%[res]\n\t"
                    "adcq 3*8(%[src]),%[res]\n\t"
                    "adcq 4*8(%[src]),%[res]\n\t"
                    "adcq 5*8(%[src]),%[res]\n\t"
                    "adcq 6*8(%[src]),%[res]\n\t"
                    "adcq 7*8(%[src]),%[res]\n\t"
                    "adcq $0,%[res]"
                    : [res] "+r" (temp64)
                    : [src] "r" (buff)
                    : "memory");
                buff += 64;
                len -= 64;
        }

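        /*
         * The remaining length is handled in descending powers of two:
         * bits 5, 4 and 3 of len each select one fixed-size adcq block;
         * the last 0..7 bytes are dealt with below.
         */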
        if (len & 32) {
                asm("addq 0*8(%[src]),%[res]\n\t"
                    "adcq 1*8(%[src]),%[res]\n\t"
                    "adcq 2*8(%[src]),%[res]\n\t"
                    "adcq 3*8(%[src]),%[res]\n\t"
                    "adcq $0,%[res]"
                        : [res] "+r" (temp64)
                        : [src] "r" (buff)
                        : "memory");
                buff += 32;
        }
        if (len & 16) {
                asm("addq 0*8(%[src]),%[res]\n\t"
                    "adcq 1*8(%[src]),%[res]\n\t"
                    "adcq $0,%[res]"
                        : [res] "+r" (temp64)
                        : [src] "r" (buff)
                        : "memory");
                buff += 16;
        }
        if (len & 8) {
                asm("addq 0*8(%[src]),%[res]\n\t"
                    "adcq $0,%[res]"
                        : [res] "+r" (temp64)
                        : [src] "r" (buff)
                        : "memory");
                buff += 8;
        }
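        /*
         * Final 1..7 bytes: load_unaligned_zeropad() reads a full word
         * (zero-padding if the access crosses into an unmapped page) and
         * the shift pair clears the bytes beyond len before adding.
         */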
        if (len & 7) {
                unsigned int shift = (8 - (len & 7)) * 8;
                unsigned long trail;

                trail = (load_unaligned_zeropad(buff) << shift) >> shift;

                asm("addq %[trail],%[res]\n\t"
                    "adcq $0,%[res]"
                        : [res] "+r" (temp64)
                        : [trail] "r" (trail));
        }
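        /*
         * Fold the 64-bit accumulator into 32 bits. If the start address
         * was odd, the sum was accumulated with the byte lanes swapped,
         * so fold it to 16 bits and swap the bytes back.
         */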
        result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
        if (unlikely(odd)) {
                result = from32to16(result);
                result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
        }
        return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);

/*
 * This routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c.
 */
__sum16 ip_compute_csum(const void *buff, int len)
{
        return csum_fold(csum_partial(buff, len, 0));
}
EXPORT_SYMBOL(ip_compute_csum);

Generated by: LCOV version 1.14
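
As a rough cross-check of what these routines compute: csum_partial() accumulates a 16-bit ones'-complement sum (carried as 32/64 bits), and csum_fold() in ip_compute_csum() folds and complements it into the final Internet-checksum value. A minimal userspace sketch of that arithmetic, assuming an aligned buffer and a zero initial sum and ignoring the kernel's __wsum/__sum16 typing, might look like this (ip_checksum_ref is a hypothetical helper, not part of the kernel):

#include <stddef.h>
#include <stdint.h>

/* RFC 1071-style reference: sum 16-bit words (little-endian load order on
 * x86), fold carries back in, and return the ones'-complement of the sum.
 */
static uint16_t ip_checksum_ref(const void *buff, size_t len)
{
        const uint8_t *p = buff;
        uint32_t sum = 0;

        while (len > 1) {
                sum += (uint32_t)p[0] | ((uint32_t)p[1] << 8);
                p += 2;
                len -= 2;
        }
        if (len)                /* trailing odd byte, implicitly zero-padded */
                sum += p[0];

        while (sum >> 16)       /* end-around carry fold down to 16 bits */
                sum = (sum & 0xffff) + (sum >> 16);

        return (uint16_t)~sum;  /* same complement csum_fold() applies */
}

Because ones'-complement addition does not depend on how the bytes are grouped, summing 64-bit chunks with adcq (as csum_partial does) and summing 16-bit words as above should agree, up to the two representations of zero.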