/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* percpu_counter batch for local add or sub */
#define PERCPU_COUNTER_LOCAL_BATCH      INT_MAX

#ifdef CONFIG_SMP

struct percpu_counter {
        raw_spinlock_t lock;
        s64 count;
#ifdef CONFIG_HOTPLUG_CPU
        struct list_head list;  /* All percpu_counters are on a list */
#endif
        s32 __percpu *counters;
};

extern int percpu_counter_batch;
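
/*
 * Rough accuracy sketch (an aside, not from this header): each per-CPU
 * delta is folded into fbc->count once its magnitude reaches the batch
 * size, so percpu_counter_read() can lag the true value by up to about
 * batch * num_online_cpus(). For example, with batch = 32 on 8 CPUs the
 * approximate read may be off by roughly 256 in either direction; use
 * percpu_counter_sum() when that error matters.
 */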

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
                          struct lock_class_key *key);

#define percpu_counter_init(fbc, value, gfp)                            \
        ({                                                              \
                static struct lock_class_key __key;                     \
                                                                        \
                __percpu_counter_init(fbc, value, gfp, &__key);         \
        })
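
/*
 * Minimal usage sketch (illustrative caller, not part of this header):
 * the gfp argument covers the per-CPU allocation, and init can fail on
 * SMP, so the return value must be checked.
 *
 *      struct percpu_counter nr_items;
 *
 *      if (percpu_counter_init(&nr_items, 0, GFP_KERNEL))
 *              return -ENOMEM;
 *      percpu_counter_add(&nr_items, 1);
 *      ...
 *      percpu_counter_destroy(&nr_items);
 */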

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
                              s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
s64 percpu_counter_sum_all(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
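
/*
 * Example (hypothetical limit check, not part of this header): the
 * compare helpers consult the cheap approximate count first and fall
 * back to an exact sum only when the result lies within the batch error
 * margin, so limit enforcement stays cheap in the common case.
 *
 *      if (percpu_counter_compare(&nr_items, max_items) >= 0)
 *              return -ENOSPC;
 */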

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}

/*
 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
 * are accumulated in the local per-CPU counter and are not transferred to
 * fbc->count until the local count overflows PERCPU_COUNTER_LOCAL_BATCH.
 * This makes counter writes efficient.
 * But percpu_counter_sum(), instead of percpu_counter_read(), needs to be
 * used to add up the counts from each CPU to account for all the local
 * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
 * should be used when a counter is updated frequently and read rarely.
 */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
        percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
}
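
/*
 * Write-mostly usage sketch (illustrative): with the local variants the
 * shared fbc->count stays stale, so the rare reader must pay for a full
 * sum, as the comment above describes.
 *
 *      percpu_counter_add_local(&nr_events, 1);        // hot path
 *      ...
 *      total = percpu_counter_sum(&nr_events);         // rare read
 */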

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
        s64 ret = __percpu_counter_sum(fbc);
        return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        /* Prevent reloads of fbc->count */
        s64 ret = READ_ONCE(fbc->count);

        if (ret >= 0)
                return ret;
        return 0;
}
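
/*
 * Read-side sketch (illustrative): a cheap, possibly stale read versus
 * an exact but more expensive sum across all CPUs.
 *
 *      s64 approx = percpu_counter_read_positive(&nr_items);  // O(1)
 *      s64 exact  = percpu_counter_sum(&nr_items);            // O(nr_cpus)
 */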

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
        return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
        s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                                      gfp_t gfp)
{
        fbc->count = amount;
        return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        if (fbc->count > rhs)
                return 1;
        else if (fbc->count < rhs)
                return -1;
        else
                return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
        return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        unsigned long flags;

        local_irq_save(flags);
        fbc->count += amount;
        local_irq_restore(flags);
}
/* The non-SMP percpu_counter_add_local() is the same as percpu_counter_add() */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
        percpu_counter_add(fbc, amount);
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
        return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return percpu_counter_read(fbc);
}

static inline s64 percpu_counter_sum_all(struct percpu_counter *fbc)
{
        return percpu_counter_read(fbc);
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
        return true;
}

static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
#endif  /* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
        percpu_counter_add(fbc, -amount);
}

static inline void
percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
{
        percpu_counter_add_local(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */
