/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

DECLARE_TRACEPOINT(page_ref_set);
DECLARE_TRACEPOINT(page_ref_mod);
DECLARE_TRACEPOINT(page_ref_mod_and_test);
DECLARE_TRACEPOINT(page_ref_mod_and_return);
DECLARE_TRACEPOINT(page_ref_mod_unless);
DECLARE_TRACEPOINT(page_ref_freeze);
DECLARE_TRACEPOINT(page_ref_unfreeze);

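/*
 * When CONFIG_DEBUG_PAGE_REF is enabled, the __page_ref_*() hooks below
 * fire the tracepoints declared above.  A sketch of how they might be
 * enabled at runtime (tracefs mount point and event group assumed to be
 * the usual ones):
 *
 *      echo 1 > /sys/kernel/tracing/events/page_ref/page_ref_mod/enable
 */
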
#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) tracepoint_enabled(t)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

static inline int page_ref_count(const struct page *page)
{
        return atomic_read(&page->_refcount);
}

/**
 * folio_ref_count - The reference count on this folio.
 * @folio: The folio.
 *
 * The refcount is usually incremented by calls to folio_get() and
 * decremented by calls to folio_put().  Some typical users of the
 * folio refcount:
 *
 * - Each reference from a page table
 * - The page cache
 * - Filesystem private data
 * - The LRU list
 * - Pipes
 * - Direct IO which references this page in the process address space
 *
 * Return: The number of references to this folio.
 */
static inline int folio_ref_count(const struct folio *folio)
{
        return page_ref_count(&folio->page);
}

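/*
 * Illustrative sketch only (not taken from this header): a raw count
 * read is racy unless the folio cannot gain or lose references
 * concurrently, e.g. because the caller holds the sole remaining
 * reference.
 */
static inline bool example_folio_is_unused(struct folio *folio)
{
        /* 1 == the caller's own reference; no page tables, cache or LRU */
        return folio_ref_count(folio) == 1;
}
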
static inline int page_count(const struct page *page)
{
        return folio_ref_count(page_folio(page));
}

static inline void set_page_count(struct page *page, int v)
{
        atomic_set(&page->_refcount, v);
        if (page_ref_tracepoint_active(page_ref_set))
                __page_ref_set(page, v);
}

static inline void folio_set_count(struct folio *folio, int v)
{
        set_page_count(&folio->page, v);
}

/*
 * Set up the page count before the page is freed into the page
 * allocator for the first time (boot or memory hotplug).
 */
static inline void init_page_count(struct page *page)
{
        set_page_count(page, 1);
}

static inline void page_ref_add(struct page *page, int nr)
{
        atomic_add(nr, &page->_refcount);
        if (page_ref_tracepoint_active(page_ref_mod))
                __page_ref_mod(page, nr);
}

static inline void folio_ref_add(struct folio *folio, int nr)
{
        page_ref_add(&folio->page, nr);
}

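/*
 * Illustrative sketch only: a batched get, taking one reference per
 * sub-page of a large folio.  folio_nr_pages() is assumed to come from
 * <linux/mm.h>, which users of this pattern would already include.
 */
static inline void example_ref_each_subpage(struct folio *folio)
{
        folio_ref_add(folio, folio_nr_pages(folio));
}
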
static inline void page_ref_sub(struct page *page, int nr)
{
        atomic_sub(nr, &page->_refcount);
        if (page_ref_tracepoint_active(page_ref_mod))
                __page_ref_mod(page, -nr);
}

static inline void folio_ref_sub(struct folio *folio, int nr)
{
        page_ref_sub(&folio->page, nr);
}

static inline int page_ref_sub_return(struct page *page, int nr)
{
        int ret = atomic_sub_return(nr, &page->_refcount);

        if (page_ref_tracepoint_active(page_ref_mod_and_return))
                __page_ref_mod_and_return(page, -nr, ret);
        return ret;
}

static inline int folio_ref_sub_return(struct folio *folio, int nr)
{
        return page_ref_sub_return(&folio->page, nr);
}

static inline void page_ref_inc(struct page *page)
{
        atomic_inc(&page->_refcount);
        if (page_ref_tracepoint_active(page_ref_mod))
                __page_ref_mod(page, 1);
}

static inline void folio_ref_inc(struct folio *folio)
{
        page_ref_inc(&folio->page);
}

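/*
 * Illustrative sketch only: an unconditional get on a folio that is
 * already known to be held.  The sanity check approximates the one in
 * folio_get() from <linux/mm.h>; it is an assumption here, not a copy.
 */
static inline void example_folio_get(struct folio *folio)
{
        /* Taking a reference on a folio with refcount 0 is a bug. */
        VM_BUG_ON_FOLIO(folio_ref_count(folio) <= 0, folio);
        folio_ref_inc(folio);
}
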
static inline void page_ref_dec(struct page *page)
{
        atomic_dec(&page->_refcount);
        if (page_ref_tracepoint_active(page_ref_mod))
                __page_ref_mod(page, -1);
}

static inline void folio_ref_dec(struct folio *folio)
{
        page_ref_dec(&folio->page);
}

static inline int page_ref_sub_and_test(struct page *page, int nr)
{
        int ret = atomic_sub_and_test(nr, &page->_refcount);

        if (page_ref_tracepoint_active(page_ref_mod_and_test))
                __page_ref_mod_and_test(page, -nr, ret);
        return ret;
}

static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
{
        return page_ref_sub_and_test(&folio->page, nr);
}

static inline int page_ref_inc_return(struct page *page)
{
        int ret = atomic_inc_return(&page->_refcount);

        if (page_ref_tracepoint_active(page_ref_mod_and_return))
                __page_ref_mod_and_return(page, 1, ret);
        return ret;
}

static inline int folio_ref_inc_return(struct folio *folio)
{
        return page_ref_inc_return(&folio->page);
}

static inline int page_ref_dec_and_test(struct page *page)
{
        int ret = atomic_dec_and_test(&page->_refcount);

        if (page_ref_tracepoint_active(page_ref_mod_and_test))
                __page_ref_mod_and_test(page, -1, ret);
        return ret;
}

static inline int folio_ref_dec_and_test(struct folio *folio)
{
        return page_ref_dec_and_test(&folio->page);
}

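/*
 * Illustrative sketch only: the canonical "put" pattern that pairs with
 * a get.  example_release_folio() is a hypothetical release hook; in
 * the real kernel, folio_put() plays this role.
 */
extern void example_release_folio(struct folio *folio);        /* hypothetical */

static inline void example_folio_put(struct folio *folio)
{
        if (folio_ref_dec_and_test(folio))
                example_release_folio(folio);   /* last ref dropped: free it */
}
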
static inline int page_ref_dec_return(struct page *page)
{
        int ret = atomic_dec_return(&page->_refcount);

        if (page_ref_tracepoint_active(page_ref_mod_and_return))
                __page_ref_mod_and_return(page, -1, ret);
        return ret;
}

static inline int folio_ref_dec_return(struct folio *folio)
{
        return page_ref_dec_return(&folio->page);
}

static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
        bool ret = atomic_add_unless(&page->_refcount, nr, u);

        if (page_ref_tracepoint_active(page_ref_mod_unless))
                __page_ref_mod_unless(page, nr, ret);
        return ret;
}

static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
{
        return page_ref_add_unless(&folio->page, nr, u);
}

/**
 * folio_try_get - Attempt to increase the refcount on a folio.
 * @folio: The folio.
 *
 * If you do not already have a reference to a folio, you can attempt to
 * get one using this function.  It may fail if, for example, the folio
 * has been freed since you found a pointer to it, or it is frozen for
 * the purposes of splitting or migration.
 *
 * Return: True if the reference count was successfully incremented.
 */
static inline bool folio_try_get(struct folio *folio)
{
        return folio_ref_add_unless(folio, 1, 0);
}

static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
{
#ifdef CONFIG_TINY_RCU
        /*
         * The caller guarantees the folio will not be freed from interrupt
         * context, so (on !SMP) we only need preemption to be disabled
         * and TINY_RCU does that for us.
         */
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
        folio_ref_add(folio, count);
#else
        if (unlikely(!folio_ref_add_unless(folio, count, 0))) {
                /* Either the folio has been freed, or will be freed. */
                return false;
        }
#endif
        return true;
}

/**
 * folio_try_get_rcu - Attempt to increase the refcount on a folio.
 * @folio: The folio.
 *
 * This is a version of folio_try_get() optimised for non-SMP kernels.
 * If you are still holding the rcu_read_lock() after looking up the
 * page and know that the page cannot have its refcount decreased to
 * zero in interrupt context, you can use this instead of folio_try_get().
 *
 * Example users include get_user_pages_fast() (as pages are not unmapped
 * from interrupt context) and page cache lookups (as pages are not
 * truncated from interrupt context).  We also know that pages are not
 * frozen in interrupt context for the purposes of splitting or migration.
 *
 * You can also use this function if you're holding a lock that prevents
 * pages being frozen & removed; e.g. the i_pages lock for the page cache
 * or the mmap_lock or page table lock for page tables.  In this case,
 * it will always succeed, and you could have used a plain folio_get(),
 * but it's sometimes more convenient to have a common function called
 * from both locked and RCU-protected contexts.
 *
 * Return: True if the reference count was successfully incremented.
 */
static inline bool folio_try_get_rcu(struct folio *folio)
{
        return folio_ref_try_add_rcu(folio, 1);
}

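/*
 * Illustrative sketch only: an RCU-protected, page-cache-style lookup.
 * The xarray handling is deliberately simplified (shadow/value entries
 * and the post-get re-check that real lookups perform are elided), and
 * struct address_space is assumed to be visible via <linux/fs.h>.
 */
static inline struct folio *example_lookup_folio(struct address_space *mapping,
                                                 pgoff_t index)
{
        struct folio *folio;

        rcu_read_lock();
        folio = xa_load(&mapping->i_pages, index);
        if (folio && !folio_try_get_rcu(folio))
                folio = NULL;   /* freed or frozen under us; caller retries */
        rcu_read_unlock();

        return folio;
}
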
static inline int page_ref_freeze(struct page *page, int count)
{
        int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

        if (page_ref_tracepoint_active(page_ref_freeze))
                __page_ref_freeze(page, count, ret);
        return ret;
}

static inline int folio_ref_freeze(struct folio *folio, int count)
{
        return page_ref_freeze(&folio->page, count);
}

static inline void page_ref_unfreeze(struct page *page, int count)
{
        VM_BUG_ON_PAGE(page_count(page) != 0, page);
        VM_BUG_ON(count == 0);

        atomic_set_release(&page->_refcount, count);
        if (page_ref_tracepoint_active(page_ref_unfreeze))
                __page_ref_unfreeze(page, count);
}

static inline void folio_ref_unfreeze(struct folio *folio, int count)
{
        page_ref_unfreeze(&folio->page, count);
}
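
/*
 * Illustrative sketch only: freezing the refcount at its expected value
 * gives the caller exclusive access, since folio_try_get() fails while
 * the count is 0.  How "expected" is computed (page cache, private
 * data, ...) is assumed here, not taken from this header.
 */
static inline bool example_exclusive_access(struct folio *folio, int expected)
{
        if (!folio_ref_freeze(folio, expected))
                return false;   /* an unexpected reference exists; back off */

        /* ... split, migrate or otherwise rework the frozen folio ... */

        folio_ref_unfreeze(folio, expected);
        return true;
}
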
#endif /* _LINUX_PAGE_REF_H */
