LCOV coverage report for include/linux/mm_inline.h (test: coverage.info, 2023-08-24 13:40:31): 0 of 36 instrumented lines hit (0.0%), no function data recorded. The source is reproduced below.

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular, filesystem-backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio, or any other
 * RAM- or swap-backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
        return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
        return folio_is_file_lru(page_folio(page));
}

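/*
 * Illustrative sketch (not part of the original header): because
 * folio_is_file_lru()/page_is_file_lru() return 0 or 1 rather than a bool,
 * callers can use the result directly as an index or offset, as
 * lru_gen_update_size() below does with "type * LRU_INACTIVE_FILE".
 * The helper name here is hypothetical; WORKINGSET_REFAULT_BASE and the
 * adjacent anon/file counters are the real node_stat_item pair.
 */
static inline enum node_stat_item folio_refault_stat(struct folio *folio)
{
        /* WORKINGSET_REFAULT_ANON and WORKINGSET_REFAULT_FILE are adjacent */
        return WORKINGSET_REFAULT_BASE + folio_is_file_lru(folio);
}
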
static __always_inline void __update_lru_size(struct lruvec *lruvec,
                                enum lru_list lru, enum zone_type zid,
                                long nr_pages)
{
        struct pglist_data *pgdat = lruvec_pgdat(lruvec);

        lockdep_assert_held(&lruvec->lru_lock);
        WARN_ON_ONCE(nr_pages != (int)nr_pages);

        __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
        __mod_zone_page_state(&pgdat->node_zones[zid],
                                NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
                                enum lru_list lru, enum zone_type zid,
                                long nr_pages)
{
        __update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
        mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear the lru flags before releasing a folio.
 * @folio: The folio that was on an lru and now has a zero reference count.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
        VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

        __folio_clear_lru(folio);

        /* this shouldn't happen, so leave the flags to bad_page() */
        if (folio_test_active(folio) && folio_test_unevictable(folio))
                return;

        __folio_clear_active(folio);
        __folio_clear_unevictable(folio);
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
        enum lru_list lru;

        VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

        if (folio_test_unevictable(folio))
                return LRU_UNEVICTABLE;

        lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
        if (folio_test_active(folio))
                lru += LRU_ACTIVE;

        return lru;
}

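/*
 * Worked example (the numeric values come from enum lru_list in
 * <linux/mmzone.h> and are stated here as an assumption, not quoted from
 * this header): LRU_INACTIVE_ANON == 0, LRU_ACTIVE_ANON == 1,
 * LRU_INACTIVE_FILE == 2, LRU_ACTIVE_FILE == 3, LRU_UNEVICTABLE == 4.
 * So an active file folio yields 2 + LRU_ACTIVE == 3, which is exactly the
 * index lruvec_add_folio() below uses into lruvec->lists[].
 */
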
#ifdef CONFIG_LRU_GEN

#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
        DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);

        return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
#else
static inline bool lru_gen_enabled(void)
{
        DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);

        return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
}
#endif

static inline bool lru_gen_in_fault(void)
{
        return current->in_lru_fault;
}

static inline int lru_gen_from_seq(unsigned long seq)
{
        return seq % MAX_NR_GENS;
}

static inline int lru_hist_from_seq(unsigned long seq)
{
        return seq % NR_HIST_GENS;
}

static inline int lru_tier_from_refs(int refs)
{
        VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

        /* see the comment in folio_lru_refs() */
        return order_base_2(refs + 1);
}

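/*
 * Illustrative mapping for lru_tier_from_refs() above (an assumption based
 * on order_base_2() rounding up to the next power of two; it is not spelled
 * out in this header): refs 0 -> tier 0, refs 1 -> tier 1, refs 2-3 ->
 * tier 2, refs 4-7 -> tier 3, and so on, i.e. each successive tier covers
 * twice as many accesses as the previous one.
 */
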
static inline int folio_lru_refs(struct folio *folio)
{
        unsigned long flags = READ_ONCE(folio->flags);
        bool workingset = flags & BIT(PG_workingset);

        /*
         * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
         * total number of accesses is N>1, since N=0,1 both map to the first
         * tier. lru_tier_from_refs() will account for this off-by-one. Also see
         * the comment on MAX_NR_TIERS.
         */
        return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
}

static inline int folio_lru_gen(struct folio *folio)
{
        unsigned long flags = READ_ONCE(folio->flags);

        return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}

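/*
 * Descriptive note (inferred from the code above and from
 * lru_gen_add_folio()/lru_gen_del_folio() below, not a comment from the
 * original header): the generation number is stored in folio->flags offset
 * by one, so a stored value of 0 means "not on a multi-gen LRU list" and
 * folio_lru_gen() returns -1 in that case. lru_gen_add_folio() writes
 * (gen + 1) << LRU_GEN_PGOFF and lru_gen_del_folio() clears the field back
 * to 0.
 */
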
static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
        unsigned long max_seq = lruvec->lrugen.max_seq;

        VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

        /* see the comment on MIN_NR_GENS */
        return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}

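/*
 * Descriptive note (an interpretation, not original text): the two youngest
 * generations (max_seq and max_seq - 1) are treated as "active" so that
 * lru_gen_update_size() below can keep the classic active/inactive LRU
 * statistics roughly in sync with the multi-gen state.
 */
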
static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
                                       int old_gen, int new_gen)
{
        int type = folio_is_file_lru(folio);
        int zone = folio_zonenum(folio);
        int delta = folio_nr_pages(folio);
        enum lru_list lru = type * LRU_INACTIVE_FILE;
        struct lru_gen_folio *lrugen = &lruvec->lrugen;

        VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
        VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
        VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

        if (old_gen >= 0)
                WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
                           lrugen->nr_pages[old_gen][type][zone] - delta);
        if (new_gen >= 0)
                WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
                           lrugen->nr_pages[new_gen][type][zone] + delta);

        /* addition */
        if (old_gen < 0) {
                if (lru_gen_is_active(lruvec, new_gen))
                        lru += LRU_ACTIVE;
                __update_lru_size(lruvec, lru, zone, delta);
                return;
        }

        /* deletion */
        if (new_gen < 0) {
                if (lru_gen_is_active(lruvec, old_gen))
                        lru += LRU_ACTIVE;
                __update_lru_size(lruvec, lru, zone, -delta);
                return;
        }

        /* promotion */
        if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
                __update_lru_size(lruvec, lru, zone, -delta);
                __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
        }

        /* demotion requires isolation, e.g., lru_deactivate_fn() */
        VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
        unsigned long seq;
        unsigned long flags;
        int gen = folio_lru_gen(folio);
        int type = folio_is_file_lru(folio);
        int zone = folio_zonenum(folio);
        struct lru_gen_folio *lrugen = &lruvec->lrugen;

        VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

        if (folio_test_unevictable(folio) || !lrugen->enabled)
                return false;
        /*
         * There are three common cases for this page:
         * 1. If it's hot, e.g., freshly faulted in or previously hot and
         *    migrated, add it to the youngest generation.
         * 2. If it's cold but can't be evicted immediately, i.e., an anon page
         *    not in swapcache or a dirty page pending writeback, add it to the
         *    second oldest generation.
         * 3. Everything else (clean, cold) is added to the oldest generation.
         */
        if (folio_test_active(folio))
                seq = lrugen->max_seq;
        else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
                 (folio_test_reclaim(folio) &&
                  (folio_test_dirty(folio) || folio_test_writeback(folio))))
                seq = lrugen->min_seq[type] + 1;
        else
                seq = lrugen->min_seq[type];

        gen = lru_gen_from_seq(seq);
        flags = (gen + 1UL) << LRU_GEN_PGOFF;
        /* see the comment on MIN_NR_GENS about PG_active */
        set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);

        lru_gen_update_size(lruvec, folio, -1, gen);
        /* for folio_rotate_reclaimable() */
        if (reclaiming)
                list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
        else
                list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

        return true;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
        unsigned long flags;
        int gen = folio_lru_gen(folio);

        if (gen < 0)
                return false;

        VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
        VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);

        /* for folio_migrate_flags() */
        flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
        flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
        gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

        lru_gen_update_size(lruvec, folio, gen, -1);
        list_del(&folio->lru);

        return true;
}

#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
        return false;
}

static inline bool lru_gen_in_fault(void)
{
        return false;
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
        return false;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
        return false;
}

#endif /* CONFIG_LRU_GEN */

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        if (lru_gen_add_folio(lruvec, folio, false))
                return;

        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        folio_nr_pages(folio));
        if (lru != LRU_UNEVICTABLE)
                list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        if (lru_gen_add_folio(lruvec, folio, true))
                return;

        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        folio_nr_pages(folio));
        /* This is not expected to be used on LRU_UNEVICTABLE */
        list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
        enum lru_list lru = folio_lru_list(folio);

        if (lru_gen_del_folio(lruvec, folio, false))
                return;

        if (lru != LRU_UNEVICTABLE)
                list_del(&folio->lru);
        update_lru_size(lruvec, lru, folio_zonenum(folio),
                        -folio_nr_pages(folio));
}

#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling anon_vma_name(). The caller
 * should either keep holding the lock while using the returned pointer or
 * raise the anon_vma_name refcount before releasing the lock.
 */
extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
extern void anon_vma_name_free(struct kref *kref);

/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
        if (anon_name)
                kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
        if (anon_name)
                kref_put(&anon_name->kref, anon_vma_name_free);
}

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
        /* Prevent anon_name refcount saturation early on */
        if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
                anon_vma_name_get(anon_name);
                return anon_name;
        }
        return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
                                     struct vm_area_struct *new_vma)
{
        struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

        if (anon_name)
                new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
        /*
         * Not using anon_vma_name because it generates a warning if mmap_lock
         * is not held, which might be the case here.
         */
        anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
                                    struct anon_vma_name *anon_name2)
{
        if (anon_name1 == anon_name2)
                return true;

        return anon_name1 && anon_name2 &&
                !strcmp(anon_name1->name, anon_name2->name);
}

#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
        return NULL;
}

static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
                                     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
                                    struct anon_vma_name *anon_name2)
{
        return true;
}

#endif  /* CONFIG_ANON_VMA_NAME */

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_inc(&mm->tlb_flush_pending);
        /*
         * The only time this value is relevant is when there are indeed pages
         * to flush. And we'll only flush pages after changing them, which
         * requires the PTL.
         *
         * So the ordering here is:
         *
         *      atomic_inc(&mm->tlb_flush_pending);
         *      spin_lock(&ptl);
         *      ...
         *      set_pte_at();
         *      spin_unlock(&ptl);
         *
         *                              spin_lock(&ptl)
         *                              mm_tlb_flush_pending();
         *                              ....
         *                              spin_unlock(&ptl);
         *
         *      flush_tlb_range();
         *      atomic_dec(&mm->tlb_flush_pending);
         *
         * Because the increment is constrained by the PTL unlock, it ensures
         * that the increment is visible if the PTE modification is visible.
         * After all, if there is no PTE modification, nobody cares about TLB
         * flushes either.
         *
         * This very much relies on users (mm_tlb_flush_pending() and
         * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
         * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
         * locks (PPC) the unlock of one doesn't order against the lock of
         * another PTL.
         *
         * The decrement is ordered by the flush_tlb_range(), such that
         * mm_tlb_flush_pending() will not return false unless all flushes have
         * completed.
         */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * See inc_tlb_flush_pending().
         *
         * This cannot be smp_mb__before_atomic() because smp_mb() simply does
         * not order against TLB invalidate completion, which is what we need.
         *
         * Therefore we must rely on tlb_flush_*() to guarantee order.
         */
        atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * Must be called after having acquired the PTL; orders against that
         * PTL's release and therefore ensures that if we observe the modified
         * PTE we must also observe the increment from inc_tlb_flush_pending().
         *
         * That is, it only guarantees to return true if there is a flush
         * pending for _this_ PTL.
         */
        return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
        /*
         * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
         * for which there is a TLB flush pending in order to guarantee
         * we've seen both that PTE modification and the increment.
         *
         * (no requirement on actually still holding the PTL, that is irrelevant)
         */
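        /*
         * Descriptive note (inferred, not original text): a count of 1 is
         * the caller's own pending flush, so a value greater than 1 means
         * at least one other context also has a flush pending, i.e. the
         * flushes are "nested".
         */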
        return atomic_read(&mm->tlb_flush_pending) > 1;
}

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte.  NOTE!  This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable.  Meanwhile
 * a none pte also means we are not demoting the pte, so a tlb flush is not
 * needed.  E.g., when the pte was cleared, the caller should have taken care
 * of the tlb flush.
 *
 * Must be called with pgtable lock held so that no thread will see the none
 * pte, and if they see it, they'll fault and serialize at the pgtable lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
                              pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
        bool arm_uffd_pte = false;

        /* The current status of the pte should be "cleared" before calling */
        WARN_ON_ONCE(!pte_none(ptep_get(pte)));

        /*
         * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
         * thing, because when zapping either it means it's dropping the
         * page, or in TTU where the present pte will be quickly replaced
         * with a swap pte.  There's no way of leaking the bit.
         */
        if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
                return;

        /* A uffd-wp wr-protected normal pte */
        if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
                arm_uffd_pte = true;

        /*
         * A uffd-wp wr-protected swap pte.  Note: this should even cover an
         * existing pte marker with uffd-wp bit set.
         */
        if (unlikely(pte_swp_uffd_wp_any(pteval)))
                arm_uffd_pte = true;

        if (unlikely(arm_uffd_pte))
                set_pte_at(vma->vm_mm, addr, pte,
                           make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}

static inline bool vma_has_recency(struct vm_area_struct *vma)
{
        if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
                return false;

        if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
                return false;

        return true;
}

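/*
 * Usage note (an assumption about the callers, not part of the original
 * header): VM_SEQ_READ and VM_RAND_READ are typically set via
 * madvise(MADV_SEQUENTIAL) and madvise(MADV_RANDOM), and FMODE_NOREUSE via
 * posix_fadvise(POSIX_FADV_NOREUSE), so vma_has_recency() is how those
 * hints opt a mapping out of access-recency tracking during reclaim.
 */
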
#endif
