LCOV - code coverage report
Current view: top level - mm - internal.h (source / functions)
Test: coverage.info
Date: 2023-07-19 18:55:55

                 Hit    Total   Coverage
Lines:            23      106     21.7 %
Functions:         1       10     10.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0-or-later */
       2             : /* internal.h: mm/ internal definitions
       3             :  *
       4             :  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
       5             :  * Written by David Howells (dhowells@redhat.com)
       6             :  */
       7             : #ifndef __MM_INTERNAL_H
       8             : #define __MM_INTERNAL_H
       9             : 
      10             : #include <linux/fs.h>
      11             : #include <linux/mm.h>
      12             : #include <linux/pagemap.h>
      13             : #include <linux/rmap.h>
      14             : #include <linux/tracepoint-defs.h>
      15             : 
      16             : struct folio_batch;
      17             : 
      18             : /*
      19             :  * The set of flags that only affect watermark checking and reclaim
      20             :  * behaviour. This is used by the MM to obey the caller constraints
      21             :  * about IO, FS and watermark checking while ignoring placement
      22             :  * hints such as HIGHMEM usage.
      23             :  */
      24             : #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
      25             :                         __GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
      26             :                         __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
      27             :                         __GFP_NOLOCKDEP)
      28             : 
      29             : /* The GFP flags allowed during early boot */
      30             : #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
      31             : 
      32             : /* Control allocation cpuset and node placement constraints */
      33             : #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
      34             : 
      35             : /* Do not use these with a slab allocator */
      36             : #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
      37             : 
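
A minimal sketch of how GFP_SLAB_BUG_MASK is meant to be used, loosely modelled
on the slab allocators' flag fix-up (the helper name and message wording are
assumptions, not the real mm/slab_common.c code):

/* Hypothetical sketch: strip flags that must never reach a slab
 * allocation, warning about what the caller passed in. */
static inline gfp_t sketch_fix_slab_flags(gfp_t flags)
{
        gfp_t invalid = flags & GFP_SLAB_BUG_MASK;

        if (unlikely(invalid)) {
                pr_warn("unexpected slab gfp: %#x\n", (unsigned int)invalid);
                flags &= ~GFP_SLAB_BUG_MASK;
        }
        return flags;
}
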
      38             : /*
       39             :  * Unlike WARN_ON_ONCE(), no warning will be issued
       40             :  * when the caller specifies __GFP_NOWARN.
      41             :  */
      42             : #define WARN_ON_ONCE_GFP(cond, gfp)     ({                              \
      43             :         static bool __section(".data.once") __warned;                 \
      44             :         int __ret_warn_once = !!(cond);                                 \
      45             :                                                                         \
      46             :         if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
      47             :                 __warned = true;                                        \
      48             :                 WARN_ON(1);                                             \
      49             :         }                                                               \
      50             :         unlikely(__ret_warn_once);                                      \
      51             : })
      52             : 
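
A minimal usage sketch, loosely modelled on the page allocator's order check
(the wrapper function below is hypothetical, not part of this header):

/* Warn once about an oversized request, but stay silent if the caller
 * passed __GFP_NOWARN; refuse the allocation either way. */
static inline struct page *sketch_try_alloc(gfp_t gfp, unsigned int order)
{
        if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
                return NULL;
        return alloc_pages(gfp, order);
}
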
      53             : void page_writeback_init(void);
      54             : 
      55             : /*
      56             :  * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
      57             :  * its nr_pages_mapped would be 0x400000: choose the COMPOUND_MAPPED bit
      58             :  * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE).  Hugetlb currently
      59             :  * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
      60             :  */
      61             : #define COMPOUND_MAPPED         0x800000
      62             : #define FOLIO_PAGES_MAPPED      (COMPOUND_MAPPED - 1)
      63             : 
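
The arithmetic behind the chosen bit, as a sketch (the static_asserts are
illustrative additions, assuming 4kB pages as in the comment above):

/* 16GB / 4kB = 2^34 / 2^12 = 2^22 = 0x400000 PTE mappings at most, so the
 * COMPOUND_MAPPED flag lives one bit above that range, and the bits below
 * it are free to count individual page mappings. */
static_assert(COMPOUND_MAPPED == 2 * 0x400000);
static_assert(FOLIO_PAGES_MAPPED == COMPOUND_MAPPED - 1);
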
      64             : /*
      65             :  * How many individual pages have an elevated _mapcount.  Excludes
      66             :  * the folio's entire_mapcount.
      67             :  */
      68             : static inline int folio_nr_pages_mapped(struct folio *folio)
      69             : {
      70           0 :         return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
      71             : }
      72             : 
      73             : static inline void *folio_raw_mapping(struct folio *folio)
      74             : {
      75           0 :         unsigned long mapping = (unsigned long)folio->mapping;
      76             : 
      77           0 :         return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
      78             : }
      79             : 
      80             : void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
      81             :                                                 int nr_throttled);
      82             : static inline void acct_reclaim_writeback(struct folio *folio)
      83             : {
      84           0 :         pg_data_t *pgdat = folio_pgdat(folio);
      85           0 :         int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
      86             : 
      87           0 :         if (nr_throttled)
      88           0 :                 __acct_reclaim_writeback(pgdat, folio, nr_throttled);
      89             : }
      90             : 
      91             : static inline void wake_throttle_isolated(pg_data_t *pgdat)
      92             : {
      93             :         wait_queue_head_t *wqh;
      94             : 
      95           0 :         wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
      96           0 :         if (waitqueue_active(wqh))
      97           0 :                 wake_up(wqh);
      98             : }
      99             : 
     100             : vm_fault_t do_swap_page(struct vm_fault *vmf);
     101             : void folio_rotate_reclaimable(struct folio *folio);
     102             : bool __folio_end_writeback(struct folio *folio);
     103             : void deactivate_file_folio(struct folio *folio);
     104             : void folio_activate(struct folio *folio);
     105             : 
     106             : void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
     107             :                    struct vm_area_struct *start_vma, unsigned long floor,
     108             :                    unsigned long ceiling, bool mm_wr_locked);
     109             : void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
     110             : 
     111             : struct zap_details;
     112             : void unmap_page_range(struct mmu_gather *tlb,
     113             :                              struct vm_area_struct *vma,
     114             :                              unsigned long addr, unsigned long end,
     115             :                              struct zap_details *details);
     116             : 
     117             : void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
     118             :                 unsigned int order);
     119             : void force_page_cache_ra(struct readahead_control *, unsigned long nr);
     120             : static inline void force_page_cache_readahead(struct address_space *mapping,
     121             :                 struct file *file, pgoff_t index, unsigned long nr_to_read)
     122             : {
     123           0 :         DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
     124           0 :         force_page_cache_ra(&ractl, nr_to_read);
     125             : }
     126             : 
     127             : unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
     128             :                 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
     129             : unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
     130             :                 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
     131             : void filemap_free_folio(struct address_space *mapping, struct folio *folio);
     132             : int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
     133             : bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
     134             :                 loff_t end);
     135             : long invalidate_inode_page(struct page *page);
     136             : unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
     137             :                 pgoff_t start, pgoff_t end, unsigned long *nr_pagevec);
     138             : 
     139             : /**
     140             :  * folio_evictable - Test whether a folio is evictable.
     141             :  * @folio: The folio to test.
     142             :  *
     143             :  * Test whether @folio is evictable -- i.e., should be placed on
     144             :  * active/inactive lists vs unevictable list.
     145             :  *
     146             :  * Reasons folio might not be evictable:
     147             :  * 1. folio's mapping marked unevictable
     148             :  * 2. One of the pages in the folio is part of an mlocked VMA
     149             :  */
     150           0 : static inline bool folio_evictable(struct folio *folio)
     151             : {
     152             :         bool ret;
     153             : 
     154             :         /* Prevent address_space of inode and swap cache from being freed */
     155             :         rcu_read_lock();
     156           0 :         ret = !mapping_unevictable(folio_mapping(folio)) &&
     157           0 :                         !folio_test_mlocked(folio);
     158             :         rcu_read_unlock();
     159           0 :         return ret;
     160             : }
     161             : 
     162             : /*
     163             :  * Turn a non-refcounted page (->_refcount == 0) into refcounted with
     164             :  * a count of one.
     165             :  */
     166             : static inline void set_page_refcounted(struct page *page)
     167             : {
     168             :         VM_BUG_ON_PAGE(PageTail(page), page);
     169             :         VM_BUG_ON_PAGE(page_ref_count(page), page);
     170       44778 :         set_page_count(page, 1);
     171             : }
     172             : 
     173             : extern unsigned long highest_memmap_pfn;
     174             : 
     175             : /*
     176             :  * Maximum number of reclaim retries without progress before the OOM
      177             :  * killer is considered the only way forward.
     178             :  */
     179             : #define MAX_RECLAIM_RETRIES 16
     180             : 
     181             : /*
     182             :  * in mm/early_ioremap.c
     183             :  */
     184             : pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
     185             :                                         unsigned long size, pgprot_t prot);
     186             : 
     187             : /*
     188             :  * in mm/vmscan.c:
     189             :  */
     190             : bool isolate_lru_page(struct page *page);
     191             : bool folio_isolate_lru(struct folio *folio);
     192             : void putback_lru_page(struct page *page);
     193             : void folio_putback_lru(struct folio *folio);
     194             : extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
     195             : 
     196             : /*
     197             :  * in mm/rmap.c:
     198             :  */
     199             : pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
     200             : 
     201             : /*
     202             :  * in mm/page_alloc.c
     203             :  */
     204             : #define K(x) ((x) << (PAGE_SHIFT-10))
     205             : 
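
For illustration, assuming PAGE_SHIFT == 12 (4kB pages): K(x) shifts a page
count left by 2, i.e. multiplies by 4 to convert pages to kilobytes, so
K(256) == 1024 kB for the 1MB covered by 256 pages.
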
     206             : extern char * const zone_names[MAX_NR_ZONES];
     207             : 
     208             : /* perform sanity checks on struct pages being allocated or freed */
     209             : DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
     210             : 
     211       44278 : static inline bool is_check_pages_enabled(void)
     212             : {
     213      343510 :         return static_branch_unlikely(&check_pages_enabled);
     214             : }
     215             : 
     216             : /*
     217             :  * Structure for holding the mostly immutable allocation parameters passed
     218             :  * between functions involved in allocations, including the alloc_pages*
     219             :  * family of functions.
     220             :  *
     221             :  * nodemask, migratetype and highest_zoneidx are initialized only once in
     222             :  * __alloc_pages() and then never change.
     223             :  *
     224             :  * zonelist, preferred_zone and highest_zoneidx are set first in
     225             :  * __alloc_pages() for the fast path, and might be later changed
     226             :  * in __alloc_pages_slowpath(). All other functions pass the whole structure
     227             :  * by a const pointer.
     228             :  */
     229             : struct alloc_context {
     230             :         struct zonelist *zonelist;
     231             :         nodemask_t *nodemask;
     232             :         struct zoneref *preferred_zoneref;
     233             :         int migratetype;
     234             : 
     235             :         /*
      236             :          * highest_zoneidx represents the highest usable zone index of
      237             :          * the allocation request. Due to the nature of the zones,
      238             :          * memory in zones lower than highest_zoneidx will be
      239             :          * protected by lowmem_reserve[highest_zoneidx].
      240             :          *
      241             :          * highest_zoneidx is also used by reclaim/compaction to limit
      242             :          * the target zone, since zones higher than this index cannot
      243             :          * be used for this allocation request.
     244             :          */
     245             :         enum zone_type highest_zoneidx;
     246             :         bool spread_dirty_pages;
     247             : };
     248             : 
     249             : /*
     250             :  * This function returns the order of a free page in the buddy system. In
     251             :  * general, page_zone(page)->lock must be held by the caller to prevent the
     252             :  * page from being allocated in parallel and returning garbage as the order.
     253             :  * If a caller does not hold page_zone(page)->lock, it must guarantee that the
     254             :  * page cannot be allocated or merged in parallel. Alternatively, it must
     255             :  * handle invalid values gracefully, and use buddy_order_unsafe() below.
     256             :  */
     257             : static inline unsigned int buddy_order(struct page *page)
     258             : {
     259             :         /* PageBuddy() must be checked by the caller */
     260        1388 :         return page_private(page);
     261             : }
     262             : 
     263             : /*
     264             :  * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
      265             :  * PageBuddy() should be checked first by the caller to minimize the race window,
     266             :  * and invalid values must be handled gracefully.
     267             :  *
     268             :  * READ_ONCE is used so that if the caller assigns the result into a local
     269             :  * variable and e.g. tests it for valid range before using, the compiler cannot
     270             :  * decide to remove the variable and inline the page_private(page) multiple
     271             :  * times, potentially observing different values in the tests and the actual
     272             :  * use of the result.
     273             :  */
     274             : #define buddy_order_unsafe(page)        READ_ONCE(page_private(page))
     275             : 
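
A minimal sketch of the intended calling pattern (the helper below is
hypothetical, and the MAX_ORDER bound is an assumption that varies by
kernel version):

/* Lockless reader: check PageBuddy() first to shrink the race window,
 * take one READ_ONCE snapshot of the order, and range-check it before
 * trusting it, since the page may be allocated or merged concurrently. */
static inline bool sketch_read_buddy_order(struct page *page, unsigned int *order)
{
        unsigned int o;

        if (!PageBuddy(page))
                return false;

        o = buddy_order_unsafe(page);
        if (o > MAX_ORDER)              /* may legitimately be garbage */
                return false;

        *order = o;
        return true;
}
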
     276             : /*
      277             :  * This function checks whether a page is free && is the buddy.
      278             :  * We can coalesce a page and its buddy if
     279             :  * (a) the buddy is not in a hole (check before calling!) &&
     280             :  * (b) the buddy is in the buddy system &&
     281             :  * (c) a page and its buddy have the same order &&
     282             :  * (d) a page and its buddy are in the same zone.
     283             :  *
     284             :  * For recording whether a page is in the buddy system, we set PageBuddy.
     285             :  * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
     286             :  *
     287             :  * For recording page's order, we use page_private(page).
     288             :  */
     289             : static inline bool page_is_buddy(struct page *page, struct page *buddy,
     290             :                                  unsigned int order)
     291             : {
     292        5638 :         if (!page_is_guard(buddy) && !PageBuddy(buddy))
     293             :                 return false;
     294             : 
     295        2768 :         if (buddy_order(buddy) != order)
     296             :                 return false;
     297             : 
     298             :         /*
     299             :          * zone check is done late to avoid uselessly calculating
     300             :          * zone/node ids for pages that could never merge.
     301             :          */
     302        4113 :         if (page_zone_id(page) != page_zone_id(buddy))
     303             :                 return false;
     304             : 
     305             :         VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
     306             : 
     307             :         return true;
     308             : }
     309             : 
     310             : /*
     311             :  * Locate the struct page for both the matching buddy in our
     312             :  * pair (buddy1) and the combined O(n+1) page they form (page).
     313             :  *
     314             :  * 1) Any buddy B1 will have an order O twin B2 which satisfies
     315             :  * the following equation:
     316             :  *     B2 = B1 ^ (1 << O)
     317             :  * For example, if the starting buddy (buddy2) is #8 its order
     318             :  * 1 buddy is #10:
     319             :  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
     320             :  *
     321             :  * 2) Any buddy B will have an order O+1 parent P which
     322             :  * satisfies the following equation:
     323             :  *     P = B & ~(1 << O)
     324             :  *
     325             :  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
     326             :  */
     327             : static inline unsigned long
     328             : __find_buddy_pfn(unsigned long page_pfn, unsigned int order)
     329             : {
     330        2819 :         return page_pfn ^ (1 << order);
     331             : }
     332             : 
     333             : /*
     334             :  * Find the buddy of @page and validate it.
     335             :  * @page: The input page
      336             :  * @pfn: The pfn of the page; it saves a call to page_to_pfn() when the
      337             :  *       function is used in the performance-critical __free_one_page().
     338             :  * @order: The order of the page
      339             :  * @buddy_pfn: The output pointer to the buddy pfn; it also saves a call to
      340             :  *             page_to_pfn().
     341             :  *
      342             :  * The found buddy may not be PageBuddy, may lie outside @page's zone, or may
      343             :  * have a different order than @page: validate it before use.
     344             :  *
     345             :  * Return: the found buddy page or NULL if not found.
     346             :  */
     347             : static inline struct page *find_buddy_page_pfn(struct page *page,
     348             :                         unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
     349             : {
     350        2819 :         unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
     351             :         struct page *buddy;
     352             : 
     353        2819 :         buddy = page + (__buddy_pfn - pfn);
     354             :         if (buddy_pfn)
     355        1874 :                 *buddy_pfn = __buddy_pfn;
     356             : 
     357        2819 :         if (page_is_buddy(page, buddy, order))
     358             :                 return buddy;
     359             :         return NULL;
     360             : }
     361             : 
     362             : extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
     363             :                                 unsigned long end_pfn, struct zone *zone);
     364             : 
     365             : static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
     366             :                                 unsigned long end_pfn, struct zone *zone)
     367             : {
     368           0 :         if (zone->contiguous)
     369           0 :                 return pfn_to_page(start_pfn);
     370             : 
     371           0 :         return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
     372             : }
     373             : 
     374             : extern int __isolate_free_page(struct page *page, unsigned int order);
     375             : extern void __putback_isolated_page(struct page *page, unsigned int order,
     376             :                                     int mt);
     377             : extern void memblock_free_pages(struct page *page, unsigned long pfn,
     378             :                                         unsigned int order);
     379             : extern void __free_pages_core(struct page *page, unsigned int order);
     380             : 
     381             : static inline void prep_compound_head(struct page *page, unsigned int order)
     382             : {
     383         102 :         struct folio *folio = (struct folio *)page;
     384             : 
     385         102 :         set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
     386         102 :         set_compound_order(page, order);
     387         204 :         atomic_set(&folio->_entire_mapcount, -1);
     388         204 :         atomic_set(&folio->_nr_pages_mapped, 0);
     389         204 :         atomic_set(&folio->_pincount, 0);
     390             : }
     391             : 
     392             : static inline void prep_compound_tail(struct page *head, int tail_idx)
     393             : {
     394         184 :         struct page *p = head + tail_idx;
     395             : 
     396         184 :         p->mapping = TAIL_MAPPING;
     397         184 :         set_compound_head(p, head);
     398         368 :         set_page_private(p, 0);
     399             : }
     400             : 
     401             : extern void prep_compound_page(struct page *page, unsigned int order);
     402             : 
     403             : extern void post_alloc_hook(struct page *page, unsigned int order,
     404             :                                         gfp_t gfp_flags);
     405             : extern int user_min_free_kbytes;
     406             : 
     407             : extern void free_unref_page(struct page *page, unsigned int order);
     408             : extern void free_unref_page_list(struct list_head *list);
     409             : 
     410             : extern void zone_pcp_reset(struct zone *zone);
     411             : extern void zone_pcp_disable(struct zone *zone);
     412             : extern void zone_pcp_enable(struct zone *zone);
     413             : extern void zone_pcp_init(struct zone *zone);
     414             : 
     415             : extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
     416             :                           phys_addr_t min_addr,
     417             :                           int nid, bool exact_nid);
     418             : 
     419             : int split_free_page(struct page *free_page,
     420             :                         unsigned int order, unsigned long split_pfn_offset);
     421             : 
     422             : /*
     423             :  * This will have no effect, other than possibly generating a warning, if the
     424             :  * caller passes in a non-large folio.
     425             :  */
     426             : static inline void folio_set_order(struct folio *folio, unsigned int order)
     427             : {
     428             :         if (WARN_ON_ONCE(!folio_test_large(folio)))
     429             :                 return;
     430             : 
     431             :         folio->_folio_order = order;
     432             : #ifdef CONFIG_64BIT
     433             :         /*
     434             :          * When hugetlb dissolves a folio, we need to clear the tail
     435             :          * page, rather than setting nr_pages to 1.
     436             :          */
     437             :         folio->_folio_nr_pages = order ? 1U << order : 0;
     438             : #endif
     439             : }
     440             : 
     441             : #if defined CONFIG_COMPACTION || defined CONFIG_CMA
     442             : 
     443             : /*
     444             :  * in mm/compaction.c
     445             :  */
     446             : /*
     447             :  * compact_control is used to track pages being migrated and the free pages
     448             :  * they are being migrated to during memory compaction. The free_pfn starts
     449             :  * at the end of a zone and migrate_pfn begins at the start. Movable pages
     450             :  * are moved to the end of a zone during a compaction run and the run
     451             :  * completes when free_pfn <= migrate_pfn
     452             :  */
     453             : struct compact_control {
     454             :         struct list_head freepages;     /* List of free pages to migrate to */
     455             :         struct list_head migratepages;  /* List of pages being migrated */
     456             :         unsigned int nr_freepages;      /* Number of isolated free pages */
     457             :         unsigned int nr_migratepages;   /* Number of pages to migrate */
     458             :         unsigned long free_pfn;         /* isolate_freepages search base */
     459             :         /*
     460             :          * Acts as an in/out parameter to page isolation for migration.
     461             :          * isolate_migratepages uses it as a search base.
     462             :          * isolate_migratepages_block will update the value to the next pfn
     463             :          * after the last isolated one.
     464             :          */
     465             :         unsigned long migrate_pfn;
     466             :         unsigned long fast_start_pfn;   /* a pfn to start linear scan from */
     467             :         struct zone *zone;
     468             :         unsigned long total_migrate_scanned;
     469             :         unsigned long total_free_scanned;
     470             :         unsigned short fast_search_fail;/* failures to use free list searches */
     471             :         short search_order;             /* order to start a fast search at */
     472             :         const gfp_t gfp_mask;           /* gfp mask of a direct compactor */
     473             :         int order;                      /* order a direct compactor needs */
     474             :         int migratetype;                /* migratetype of direct compactor */
     475             :         const unsigned int alloc_flags; /* alloc flags of a direct compactor */
     476             :         const int highest_zoneidx;      /* zone index of a direct compactor */
     477             :         enum migrate_mode mode;         /* Async or sync migration mode */
     478             :         bool ignore_skip_hint;          /* Scan blocks even if marked skip */
     479             :         bool no_set_skip_hint;          /* Don't mark blocks for skipping */
     480             :         bool ignore_block_suitable;     /* Scan blocks considered unsuitable */
     481             :         bool direct_compaction;         /* False from kcompactd or /proc/... */
     482             :         bool proactive_compaction;      /* kcompactd proactive compaction */
     483             :         bool whole_zone;                /* Whole zone should/has been scanned */
     484             :         bool contended;                 /* Signal lock contention */
     485             :         bool finish_pageblock;          /* Scan the remainder of a pageblock. Used
     486             :                                          * when there are potentially transient
     487             :                                          * isolation or migration failures to
     488             :                                          * ensure forward progress.
     489             :                                          */
     490             :         bool alloc_contig;              /* alloc_contig_range allocation */
     491             : };
     492             : 
     493             : /*
      494             :  * Used in direct compaction when a page should be taken from the freelists
      495             :  * immediately after one is created during the free path.
     496             :  */
     497             : struct capture_control {
     498             :         struct compact_control *cc;
     499             :         struct page *page;
     500             : };
     501             : 
     502             : unsigned long
     503             : isolate_freepages_range(struct compact_control *cc,
     504             :                         unsigned long start_pfn, unsigned long end_pfn);
     505             : int
     506             : isolate_migratepages_range(struct compact_control *cc,
     507             :                            unsigned long low_pfn, unsigned long end_pfn);
     508             : 
     509             : int __alloc_contig_migrate_range(struct compact_control *cc,
     510             :                                         unsigned long start, unsigned long end);
     511             : 
     512             : /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
     513             : void init_cma_reserved_pageblock(struct page *page);
     514             : 
     515             : #endif /* CONFIG_COMPACTION || CONFIG_CMA */
     516             : 
     517             : int find_suitable_fallback(struct free_area *area, unsigned int order,
     518             :                         int migratetype, bool only_stealable, bool *can_steal);
     519             : 
     520             : static inline bool free_area_empty(struct free_area *area, int migratetype)
     521             : {
     522         348 :         return list_empty(&area->free_list[migratetype]);
     523             : }
     524             : 
     525             : /*
      526             :  * These three helpers classify VMAs for virtual memory accounting.
     527             :  */
     528             : 
     529             : /*
     530             :  * Executable code area - executable, not writable, not stack
     531             :  */
     532             : static inline bool is_exec_mapping(vm_flags_t flags)
     533             : {
     534           0 :         return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
     535             : }
     536             : 
     537             : /*
     538             :  * Stack area - automatically grows in one direction
     539             :  *
     540             :  * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
     541             :  * do_mmap() forbids all other combinations.
     542             :  */
     543             : static inline bool is_stack_mapping(vm_flags_t flags)
     544             : {
     545           0 :         return (flags & VM_STACK) == VM_STACK;
     546             : }
     547             : 
     548             : /*
     549             :  * Data area - private, writable, not stack
     550             :  */
     551             : static inline bool is_data_mapping(vm_flags_t flags)
     552             : {
     553           0 :         return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
     554             : }
     555             : 
     556             : /* mm/util.c */
     557             : struct anon_vma *folio_anon_vma(struct folio *folio);
     558             : 
     559             : #ifdef CONFIG_MMU
     560             : void unmap_mapping_folio(struct folio *folio);
     561             : extern long populate_vma_page_range(struct vm_area_struct *vma,
     562             :                 unsigned long start, unsigned long end, int *locked);
     563             : extern long faultin_vma_page_range(struct vm_area_struct *vma,
     564             :                                    unsigned long start, unsigned long end,
     565             :                                    bool write, int *locked);
     566             : extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
     567             :                               unsigned long len);
     568             : /*
     569             :  * mlock_vma_folio() and munlock_vma_folio():
     570             :  * should be called with vma's mmap_lock held for read or write,
     571             :  * under page table lock for the pte/pmd being added or removed.
     572             :  *
     573             :  * mlock is usually called at the end of page_add_*_rmap(), munlock at
     574             :  * the end of page_remove_rmap(); but new anon folios are managed by
     575             :  * folio_add_lru_vma() calling mlock_new_folio().
     576             :  *
     577             :  * @compound is used to include pmd mappings of THPs, but filter out
     578             :  * pte mappings of THPs, which cannot be consistently counted: a pte
     579             :  * mapping of the THP head cannot be distinguished by the page alone.
     580             :  */
     581             : void mlock_folio(struct folio *folio);
     582           0 : static inline void mlock_vma_folio(struct folio *folio,
     583             :                         struct vm_area_struct *vma, bool compound)
     584             : {
     585             :         /*
     586             :          * The VM_SPECIAL check here serves two purposes.
     587             :          * 1) VM_IO check prevents migration from double-counting during mlock.
     588             :          * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
     589             :          *    is never left set on a VM_SPECIAL vma, there is an interval while
     590             :          *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
     591             :          *    still be set while VM_SPECIAL bits are added: so ignore it then.
     592             :          */
     593           0 :         if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
     594           0 :             (compound || !folio_test_large(folio)))
     595           0 :                 mlock_folio(folio);
     596           0 : }
     597             : 
     598             : void munlock_folio(struct folio *folio);
     599           0 : static inline void munlock_vma_folio(struct folio *folio,
     600             :                         struct vm_area_struct *vma, bool compound)
     601             : {
     602           0 :         if (unlikely(vma->vm_flags & VM_LOCKED) &&
     603           0 :             (compound || !folio_test_large(folio)))
     604           0 :                 munlock_folio(folio);
     605           0 : }
     606             : 
     607             : void mlock_new_folio(struct folio *folio);
     608             : bool need_mlock_drain(int cpu);
     609             : void mlock_drain_local(void);
     610             : void mlock_drain_remote(int cpu);
     611             : 
     612             : extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
     613             : 
     614             : /*
     615             :  * Return the start of user virtual address at the specific offset within
     616             :  * a vma.
     617             :  */
     618             : static inline unsigned long
     619             : vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
     620             :                   struct vm_area_struct *vma)
     621             : {
     622             :         unsigned long address;
     623             : 
     624           0 :         if (pgoff >= vma->vm_pgoff) {
     625           0 :                 address = vma->vm_start +
     626           0 :                         ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
     627             :                 /* Check for address beyond vma (or wrapped through 0?) */
     628           0 :                 if (address < vma->vm_start || address >= vma->vm_end)
     629           0 :                         address = -EFAULT;
     630           0 :         } else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
     631             :                 /* Test above avoids possibility of wrap to 0 on 32-bit */
     632           0 :                 address = vma->vm_start;
     633             :         } else {
     634             :                 address = -EFAULT;
     635             :         }
     636             :         return address;
     637             : }
     638             : 
     639             : /*
     640             :  * Return the start of user virtual address of a page within a vma.
     641             :  * Returns -EFAULT if all of the page is outside the range of vma.
     642             :  * If page is a compound head, the entire compound page is considered.
     643             :  */
     644             : static inline unsigned long
     645           0 : vma_address(struct page *page, struct vm_area_struct *vma)
     646             : {
     647             :         VM_BUG_ON_PAGE(PageKsm(page), page);    /* KSM page->index unusable */
     648           0 :         return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
     649             : }
     650             : 
     651             : /*
     652             :  * Then at what user virtual address will none of the range be found in vma?
     653             :  * Assumes that vma_address() already returned a good starting address.
     654             :  */
     655             : static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
     656             : {
     657           0 :         struct vm_area_struct *vma = pvmw->vma;
     658             :         pgoff_t pgoff;
     659             :         unsigned long address;
     660             : 
     661             :         /* Common case, plus ->pgoff is invalid for KSM */
     662           0 :         if (pvmw->nr_pages == 1)
     663           0 :                 return pvmw->address + PAGE_SIZE;
     664             : 
     665           0 :         pgoff = pvmw->pgoff + pvmw->nr_pages;
     666           0 :         address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
     667             :         /* Check for address beyond vma (or wrapped through 0?) */
     668           0 :         if (address < vma->vm_start || address > vma->vm_end)
     669           0 :                 address = vma->vm_end;
     670             :         return address;
     671             : }
     672             : 
     673           0 : static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
     674             :                                                     struct file *fpin)
     675             : {
     676           0 :         int flags = vmf->flags;
     677             : 
     678           0 :         if (fpin)
     679             :                 return fpin;
     680             : 
     681             :         /*
     682             :          * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
     683             :          * anything, so we only pin the file and drop the mmap_lock if only
     684             :          * FAULT_FLAG_ALLOW_RETRY is set, while this is the first attempt.
     685             :          */
     686           0 :         if (fault_flag_allow_retry_first(flags) &&
     687           0 :             !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
     688           0 :                 fpin = get_file(vmf->vma->vm_file);
     689           0 :                 mmap_read_unlock(vmf->vma->vm_mm);
     690             :         }
     691             :         return fpin;
     692             : }
     693             : #else /* !CONFIG_MMU */
     694             : static inline void unmap_mapping_folio(struct folio *folio) { }
     695             : static inline void mlock_new_folio(struct folio *folio) { }
     696             : static inline bool need_mlock_drain(int cpu) { return false; }
     697             : static inline void mlock_drain_local(void) { }
     698             : static inline void mlock_drain_remote(int cpu) { }
     699             : static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
     700             : {
     701             : }
     702             : #endif /* !CONFIG_MMU */
     703             : 
     704             : /* Memory initialisation debug and verification */
     705             : #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
     706             : DECLARE_STATIC_KEY_TRUE(deferred_pages);
     707             : 
     708             : bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
     709             : #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
     710             : 
     711             : enum mminit_level {
     712             :         MMINIT_WARNING,
     713             :         MMINIT_VERIFY,
     714             :         MMINIT_TRACE
     715             : };
     716             : 
     717             : #ifdef CONFIG_DEBUG_MEMORY_INIT
     718             : 
     719             : extern int mminit_loglevel;
     720             : 
     721             : #define mminit_dprintk(level, prefix, fmt, arg...) \
     722             : do { \
     723             :         if (level < mminit_loglevel) { \
     724             :                 if (level <= MMINIT_WARNING) \
     725             :                         pr_warn("mminit::" prefix " " fmt, ##arg);  \
     726             :                 else \
     727             :                         printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
     728             :         } \
     729             : } while (0)
     730             : 
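
A hypothetical call, to show the intended style (the prefix and message are
assumptions; see the real call sites under mm/ for actual usage). The message
is printed only when mminit_loglevel has been raised above MMINIT_VERIFY:

static void sketch_report_zonelist(int nid)
{
        mminit_dprintk(MMINIT_VERIFY, "zonelist",
                       "built zonelist for node %d\n", nid);
}
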
     731             : extern void mminit_verify_pageflags_layout(void);
     732             : extern void mminit_verify_zonelist(void);
     733             : #else
     734             : 
     735             : static inline void mminit_dprintk(enum mminit_level level,
     736             :                                 const char *prefix, const char *fmt, ...)
     737             : {
     738             : }
     739             : 
     740             : static inline void mminit_verify_pageflags_layout(void)
     741             : {
     742             : }
     743             : 
     744             : static inline void mminit_verify_zonelist(void)
     745             : {
     746             : }
     747             : #endif /* CONFIG_DEBUG_MEMORY_INIT */
     748             : 
     749             : #define NODE_RECLAIM_NOSCAN     -2
     750             : #define NODE_RECLAIM_FULL       -1
     751             : #define NODE_RECLAIM_SOME       0
     752             : #define NODE_RECLAIM_SUCCESS    1
     753             : 
     754             : #ifdef CONFIG_NUMA
     755             : extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
     756             : extern int find_next_best_node(int node, nodemask_t *used_node_mask);
     757             : #else
     758             : static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
     759             :                                 unsigned int order)
     760             : {
     761             :         return NODE_RECLAIM_NOSCAN;
     762             : }
     763             : static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
     764             : {
     765             :         return NUMA_NO_NODE;
     766             : }
     767             : #endif
     768             : 
     769             : /*
     770             :  * mm/memory-failure.c
     771             :  */
     772             : extern int hwpoison_filter(struct page *p);
     773             : 
     774             : extern u32 hwpoison_filter_dev_major;
     775             : extern u32 hwpoison_filter_dev_minor;
     776             : extern u64 hwpoison_filter_flags_mask;
     777             : extern u64 hwpoison_filter_flags_value;
     778             : extern u64 hwpoison_filter_memcg;
     779             : extern u32 hwpoison_filter_enable;
     780             : 
     781             : extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
     782             :         unsigned long, unsigned long,
     783             :         unsigned long, unsigned long);
     784             : 
     785             : extern void set_pageblock_order(void);
     786             : unsigned long reclaim_pages(struct list_head *folio_list);
     787             : unsigned int reclaim_clean_pages_from_list(struct zone *zone,
     788             :                                             struct list_head *folio_list);
     789             : /* The ALLOC_WMARK bits are used as an index to zone->watermark */
     790             : #define ALLOC_WMARK_MIN         WMARK_MIN
     791             : #define ALLOC_WMARK_LOW         WMARK_LOW
     792             : #define ALLOC_WMARK_HIGH        WMARK_HIGH
     793             : #define ALLOC_NO_WATERMARKS     0x04 /* don't check watermarks at all */
     794             : 
     795             : /* Mask to get the watermark bits */
     796             : #define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS-1)
     797             : 
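
A sketch of how the low bits select a watermark, loosely modelled on the
allocator fast path (the helper is hypothetical; wmark_pages() and
zone_watermark_ok() come from linux/mmzone.h):

/* Pick the watermark encoded in the ALLOC_WMARK bits and test it,
 * unless the caller asked to skip watermark checks entirely. */
static inline bool sketch_watermark_ok(struct zone *zone, unsigned int order,
                                       unsigned int alloc_flags,
                                       int highest_zoneidx)
{
        unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);

        if (alloc_flags & ALLOC_NO_WATERMARKS)
                return true;
        return zone_watermark_ok(zone, order, mark, highest_zoneidx, alloc_flags);
}
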
     798             : /*
      799             :  * Only MMU archs have async OOM victim reclaim (the oom_reaper), so we
      800             :  * cannot assume that reduced access to memory reserves is sufficient for
      801             :  * !MMU.
     802             :  */
     803             : #ifdef CONFIG_MMU
     804             : #define ALLOC_OOM               0x08
     805             : #else
     806             : #define ALLOC_OOM               ALLOC_NO_WATERMARKS
     807             : #endif
     808             : 
     809             : #define ALLOC_NON_BLOCK          0x10 /* Caller cannot block. Allow access
     810             :                                        * to 25% of the min watermark or
     811             :                                        * 62.5% if __GFP_HIGH is set.
     812             :                                        */
     813             : #define ALLOC_MIN_RESERVE        0x20 /* __GFP_HIGH set. Allow access to 50%
     814             :                                        * of the min watermark.
     815             :                                        */
     816             : #define ALLOC_CPUSET             0x40 /* check for correct cpuset */
     817             : #define ALLOC_CMA                0x80 /* allow allocations from CMA areas */
     818             : #ifdef CONFIG_ZONE_DMA32
     819             : #define ALLOC_NOFRAGMENT        0x100 /* avoid mixing pageblock types */
     820             : #else
     821             : #define ALLOC_NOFRAGMENT          0x0
     822             : #endif
     823             : #define ALLOC_HIGHATOMIC        0x200 /* Allows access to MIGRATE_HIGHATOMIC */
     824             : #define ALLOC_KSWAPD            0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
     825             : 
     826             : /* Flags that allow allocations below the min watermark. */
     827             : #define ALLOC_RESERVES (ALLOC_NON_BLOCK|ALLOC_MIN_RESERVE|ALLOC_HIGHATOMIC|ALLOC_OOM)
     828             : 
     829             : enum ttu_flags;
     830             : struct tlbflush_unmap_batch;
     831             : 
     832             : 
     833             : /*
     834             :  * only for MM internal work items which do not depend on
     835             :  * any allocations or locks which might depend on allocations
     836             :  */
     837             : extern struct workqueue_struct *mm_percpu_wq;
     838             : 
     839             : #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
     840             : void try_to_unmap_flush(void);
     841             : void try_to_unmap_flush_dirty(void);
     842             : void flush_tlb_batched_pending(struct mm_struct *mm);
     843             : #else
     844             : static inline void try_to_unmap_flush(void)
     845             : {
     846             : }
     847             : static inline void try_to_unmap_flush_dirty(void)
     848             : {
     849             : }
     850             : static inline void flush_tlb_batched_pending(struct mm_struct *mm)
     851             : {
     852             : }
     853             : #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
     854             : 
     855             : extern const struct trace_print_flags pageflag_names[];
     856             : extern const struct trace_print_flags pagetype_names[];
     857             : extern const struct trace_print_flags vmaflag_names[];
     858             : extern const struct trace_print_flags gfpflag_names[];
     859             : 
     860             : static inline bool is_migrate_highatomic(enum migratetype migratetype)
     861             : {
     862             :         return migratetype == MIGRATE_HIGHATOMIC;
     863             : }
     864             : 
     865           0 : static inline bool is_migrate_highatomic_page(struct page *page)
     866             : {
     867           0 :         return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
     868             : }
     869             : 
     870             : void setup_zone_pageset(struct zone *zone);
     871             : 
     872             : struct migration_target_control {
     873             :         int nid;                /* preferred node id */
     874             :         nodemask_t *nmask;
     875             :         gfp_t gfp_mask;
     876             : };
     877             : 
     878             : /*
     879             :  * mm/filemap.c
     880             :  */
     881             : size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
     882             :                               struct folio *folio, loff_t fpos, size_t size);
     883             : 
     884             : /*
     885             :  * mm/vmalloc.c
     886             :  */
     887             : #ifdef CONFIG_MMU
     888             : void __init vmalloc_init(void);
     889             : int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
     890             :                 pgprot_t prot, struct page **pages, unsigned int page_shift);
     891             : #else
     892             : static inline void vmalloc_init(void)
     893             : {
     894             : }
     895             : 
     896             : static inline
     897             : int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
     898             :                 pgprot_t prot, struct page **pages, unsigned int page_shift)
     899             : {
     900             :         return -EINVAL;
     901             : }
     902             : #endif
     903             : 
     904             : int __must_check __vmap_pages_range_noflush(unsigned long addr,
     905             :                                unsigned long end, pgprot_t prot,
     906             :                                struct page **pages, unsigned int page_shift);
     907             : 
     908             : void vunmap_range_noflush(unsigned long start, unsigned long end);
     909             : 
     910             : void __vunmap_range_noflush(unsigned long start, unsigned long end);
     911             : 
     912             : int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
     913             :                       unsigned long addr, int page_nid, int *flags);
     914             : 
     915             : void free_zone_device_page(struct page *page);
     916             : int migrate_device_coherent_page(struct page *page);
     917             : 
     918             : /*
     919             :  * mm/gup.c
     920             :  */
     921             : struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
     922             : int __must_check try_grab_page(struct page *page, unsigned int flags);
     923             : 
     924             : enum {
     925             :         /* mark page accessed */
     926             :         FOLL_TOUCH = 1 << 16,
     927             :         /* a retry, previous pass started an IO */
     928             :         FOLL_TRIED = 1 << 17,
     929             :         /* we are working on non-current tsk/mm */
     930             :         FOLL_REMOTE = 1 << 18,
     931             :         /* pages must be released via unpin_user_page */
     932             :         FOLL_PIN = 1 << 19,
     933             :         /* gup_fast: prevent fall-back to slow gup */
     934             :         FOLL_FAST_ONLY = 1 << 20,
     935             :         /* allow unlocking the mmap lock */
     936             :         FOLL_UNLOCKABLE = 1 << 21,
     937             : };
     938             : 
     939             : /*
      940             :  * Indicates, for pages that are write-protected in the page table,
      941             :  * whether GUP has to trigger unsharing via FAULT_FLAG_UNSHARE so that the
      942             :  * GUP pin remains consistent with the pages mapped into the page tables
      943             :  * of the MM.
     944             :  *
     945             :  * Temporary unmapping of PageAnonExclusive() pages or clearing of
     946             :  * PageAnonExclusive() has to protect against concurrent GUP:
     947             :  * * Ordinary GUP: Using the PT lock
     948             :  * * GUP-fast and fork(): mm->write_protect_seq
     949             :  * * GUP-fast and KSM or temporary unmapping (swap, migration): see
     950             :  *    page_try_share_anon_rmap()
     951             :  *
     952             :  * Must be called with the (sub)page that's actually referenced via the
     953             :  * page table entry, which might not necessarily be the head page for a
     954             :  * PTE-mapped THP.
     955             :  *
     956             :  * If the vma is NULL, we're coming from the GUP-fast path and might have
      957             :  * to fall back to the slow path just to look up the vma.
     958             :  */
     959           0 : static inline bool gup_must_unshare(struct vm_area_struct *vma,
     960             :                                     unsigned int flags, struct page *page)
     961             : {
     962             :         /*
     963             :          * FOLL_WRITE is implicitly handled correctly as the page table entry
     964             :          * has to be writable -- and if it references (part of) an anonymous
     965             :          * folio, that part is required to be marked exclusive.
     966             :          */
     967           0 :         if ((flags & (FOLL_WRITE | FOLL_PIN)) != FOLL_PIN)
     968             :                 return false;
     969             :         /*
     970             :          * Note: PageAnon(page) is stable until the page is actually getting
     971             :          * freed.
     972             :          */
     973           0 :         if (!PageAnon(page)) {
     974             :                 /*
      975             :                  * We only care about R/O long-term pinning: R/O short-term
     976             :                  * pinning does not have the semantics to observe successive
     977             :                  * changes through the process page tables.
     978             :                  */
     979           0 :                 if (!(flags & FOLL_LONGTERM))
     980             :                         return false;
     981             : 
     982             :                 /* We really need the vma ... */
     983           0 :                 if (!vma)
     984             :                         return true;
     985             : 
     986             :                 /*
     987             :                  * ... because we only care about writable private ("COW")
     988             :                  * mappings where we have to break COW early.
     989             :                  */
     990           0 :                 return is_cow_mapping(vma->vm_flags);
     991             :         }
     992             : 
     993             :         /* Paired with a memory barrier in page_try_share_anon_rmap(). */
     994             :         if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
     995             :                 smp_rmb();
     996             : 
     997             :         /*
     998             :          * Note that PageKsm() pages cannot be exclusive, and consequently,
     999             :          * cannot get pinned.
    1000             :          */
    1001           0 :         return !PageAnonExclusive(page);
    1002             : }
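
/*
 * Illustrative sketch only -- not the real follow_page_pte() logic.  It
 * shows the intended use of gup_must_unshare(): a walker that sees a
 * write-protected entry refuses to take a FOLL_PIN reference and instead
 * lets the caller fall back to a FAULT_FLAG_UNSHARE fault.  The helper
 * name is hypothetical.
 */
static inline bool example_may_pin_wp_page(struct vm_area_struct *vma,
					   unsigned int flags,
					   struct page *page)
{
	if (gup_must_unshare(vma, flags, page))
		return false;	/* caller must unshare via the fault path */
	return true;
}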
    1003             : 
    1004             : extern bool mirrored_kernelcore;
    1005             : 
    1006             : static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
    1007             : {
    1008             :         /*
     1009             :          * NOTE: we must check CONFIG_MEM_SOFT_DIRTY before looking at
     1010             :          * VM_SOFTDIRTY, because without soft-dirty compiled in,
     1011             :          * VM_SOFTDIRTY is defined as 0x0, so !(vm_flags & VM_SOFTDIRTY)
     1012             :          * would always be true.
    1013             :          */
    1014             :         if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
    1015             :                 return false;
    1016             : 
    1017             :         /*
     1018             :          * Soft-dirty is kind of special: its tracking is enabled when
     1019             :          * the VM_SOFTDIRTY vma flag is *not* set.
    1020             :          */
    1021             :         return !(vma->vm_flags & VM_SOFTDIRTY);
    1022             : }
    1023             : 
    1024             : /*
    1025             :  * VMA Iterator functions shared between nommu and mmap
    1026             :  */
    1027             : static inline int vma_iter_prealloc(struct vma_iterator *vmi)
    1028             : {
    1029           0 :         return mas_preallocate(&vmi->mas, GFP_KERNEL);
    1030             : }
    1031             : 
    1032             : static inline void vma_iter_clear(struct vma_iterator *vmi,
    1033             :                                   unsigned long start, unsigned long end)
    1034             : {
    1035           0 :         mas_set_range(&vmi->mas, start, end - 1);
    1036           0 :         mas_store_prealloc(&vmi->mas, NULL);
    1037             : }
    1038             : 
    1039             : static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
    1040             : {
    1041           0 :         return mas_walk(&vmi->mas);
    1042             : }
    1043             : 
    1044             : /* Store a VMA with preallocated memory */
    1045           0 : static inline void vma_iter_store(struct vma_iterator *vmi,
    1046             :                                   struct vm_area_struct *vma)
    1047             : {
    1048             : 
    1049             : #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
    1050             :         if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.index > vma->vm_start)) {
    1051             :                 printk("%lu > %lu\n", vmi->mas.index, vma->vm_start);
     1052             :                 printk("store of vma %lu-%lu\n", vma->vm_start, vma->vm_end);
     1053             :                 printk("into slot    %lu-%lu\n", vmi->mas.index, vmi->mas.last);
    1054             :                 mt_dump(vmi->mas.tree);
    1055             :         }
    1056             :         if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last <  vma->vm_start)) {
    1057             :                 printk("%lu < %lu\n", vmi->mas.last, vma->vm_start);
     1058             :                 printk("store of vma %lu-%lu\n", vma->vm_start, vma->vm_end);
     1059             :                 printk("into slot    %lu-%lu\n", vmi->mas.index, vmi->mas.last);
    1060             :                 mt_dump(vmi->mas.tree);
    1061             :         }
    1062             : #endif
    1063             : 
    1064           0 :         if (vmi->mas.node != MAS_START &&
    1065           0 :             ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
    1066             :                 vma_iter_invalidate(vmi);
    1067             : 
    1068           0 :         vmi->mas.index = vma->vm_start;
    1069           0 :         vmi->mas.last = vma->vm_end - 1;
    1070           0 :         mas_store_prealloc(&vmi->mas, vma);
    1071           0 : }
    1072             : 
    1073           0 : static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
    1074             :                         struct vm_area_struct *vma, gfp_t gfp)
    1075             : {
    1076           0 :         if (vmi->mas.node != MAS_START &&
    1077           0 :             ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
    1078             :                 vma_iter_invalidate(vmi);
    1079             : 
    1080           0 :         vmi->mas.index = vma->vm_start;
    1081           0 :         vmi->mas.last = vma->vm_end - 1;
    1082           0 :         mas_store_gfp(&vmi->mas, vma, gfp);
    1083           0 :         if (unlikely(mas_is_err(&vmi->mas)))
    1084             :                 return -ENOMEM;
    1085             : 
    1086           0 :         return 0;
    1087             : }
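
/*
 * Illustrative sketch only: the prealloc-then-store pattern the helpers
 * above are meant for, roughly what the mmap paths do when inserting a
 * VMA.  The function name is hypothetical and error handling is trimmed.
 */
static inline int example_vma_iter_insert(struct vma_iterator *vmi,
					  struct vm_area_struct *vma)
{
	if (vma_iter_prealloc(vmi))
		return -ENOMEM;		/* no maple tree nodes available */

	/* Consumes the preallocation; cannot fail at this point. */
	vma_iter_store(vmi, vma);
	return 0;
}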
    1088             : 
    1089             : /*
    1090             :  * VMA lock generalization
    1091             :  */
    1092             : struct vma_prepare {
    1093             :         struct vm_area_struct *vma;
    1094             :         struct vm_area_struct *adj_next;
    1095             :         struct file *file;
    1096             :         struct address_space *mapping;
    1097             :         struct anon_vma *anon_vma;
    1098             :         struct vm_area_struct *insert;
    1099             :         struct vm_area_struct *remove;
    1100             :         struct vm_area_struct *remove2;
    1101             : };
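
/*
 * Illustrative sketch only, in the spirit of mmap.c's init_vma_prep():
 * filling a vma_prepare descriptor for a simple change to a single VMA.
 * The helper name is an assumption; the locking code that consumes the
 * descriptor lives in mm/mmap.c.
 */
static inline void example_init_vma_prepare(struct vma_prepare *vp,
					    struct vm_area_struct *vma)
{
	*vp = (struct vma_prepare) {
		.vma		= vma,
		.file		= vma->vm_file,
		.anon_vma	= vma->anon_vma,
	};
	if (vp->file)
		vp->mapping = vp->file->f_mapping;
}
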
    1102             : #endif  /* __MM_INTERNAL_H */

Generated by: LCOV version 1.14