LCOV - code coverage report
Current view: top level - include/linux - mm_types.h (source / functions)
Test: coverage.info
Date: 2023-03-27 20:00:47
Coverage: Lines: 1 of 6 hit (16.7 %); Functions: 0 of 0 (-)

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : #ifndef _LINUX_MM_TYPES_H
       3             : #define _LINUX_MM_TYPES_H
       4             : 
       5             : #include <linux/mm_types_task.h>
       6             : 
       7             : #include <linux/auxvec.h>
       8             : #include <linux/kref.h>
       9             : #include <linux/list.h>
      10             : #include <linux/spinlock.h>
      11             : #include <linux/rbtree.h>
      12             : #include <linux/maple_tree.h>
      13             : #include <linux/rwsem.h>
      14             : #include <linux/completion.h>
      15             : #include <linux/cpumask.h>
      16             : #include <linux/uprobes.h>
      17             : #include <linux/rcupdate.h>
      18             : #include <linux/page-flags-layout.h>
      19             : #include <linux/workqueue.h>
      20             : #include <linux/seqlock.h>
      21             : #include <linux/percpu_counter.h>
      22             : 
      23             : #include <asm/mmu.h>
      24             : 
      25             : #ifndef AT_VECTOR_SIZE_ARCH
      26             : #define AT_VECTOR_SIZE_ARCH 0
      27             : #endif
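/* Note (editorial): each auxv entry is an (id, value) pair of unsigned longs
 * and the vector ends with an AT_NULL pair -- hence the factor of two and
 * the +1 in the define below. */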
      28             : #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
      29             : 
      30             : #define INIT_PASID      0
      31             : 
      32             : struct address_space;
      33             : struct mem_cgroup;
      34             : 
      35             : /*
      36             :  * Each physical page in the system has a struct page associated with
      37             :  * it to keep track of whatever it is we are using the page for at the
      38             :  * moment. Note that we have no way to track which tasks are using
      39             :  * a page, though if it is a pagecache page, rmap structures can tell us
      40             :  * who is mapping it.
      41             :  *
      42             :  * If you allocate the page using alloc_pages(), you can use some of the
      43             :  * space in struct page for your own purposes.  The five words in the main
      44             :  * union are available, except for bit 0 of the first word which must be
      45             :  * kept clear.  Many users use this word to store a pointer to an object
      46             :  * which is guaranteed to be aligned.  If you use the same storage as
      47             :  * page->mapping, you must restore it to NULL before freeing the page.
      48             :  *
      49             :  * If your page will not be mapped to userspace, you can also use the four
      50             :  * bytes in the mapcount union, but you must call page_mapcount_reset()
      51             :  * before freeing it.
      52             :  *
      53             :  * If you want to use the refcount field, it must be used in such a way
      54             :  * that other CPUs temporarily incrementing and then decrementing the
      55             :  * refcount does not cause problems.  On receiving the page from
      56             :  * alloc_pages(), the refcount will be positive.
      57             :  *
      58             :  * If you allocate pages of order > 0, you can use some of the fields
      59             :  * in each subpage, but you may need to restore some of their values
      60             :  * afterwards.
      61             :  *
      62             :  * SLUB uses cmpxchg_double() to atomically update its freelist and counters.
      63             :  * That requires that freelist & counters in struct slab be adjacent and
      64             :  * double-word aligned. Because struct slab currently just reinterprets the
      65             :  * bits of struct page, we align all struct pages to double-word boundaries,
      66             :  * and ensure that 'freelist' is aligned within struct slab.
      67             :  */
      68             : #ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
      69             : #define _struct_page_alignment  __aligned(2 * sizeof(unsigned long))
      70             : #else
      71             : #define _struct_page_alignment  __aligned(sizeof(unsigned long))
      72             : #endif
      73             : 
      74             : struct page {
      75             :         unsigned long flags;            /* Atomic flags, some possibly
      76             :                                          * updated asynchronously */
      77             :         /*
      78             :          * Five words (20/40 bytes) are available in this union.
      79             :          * WARNING: bit 0 of the first word is used for PageTail(). That
      80             :          * means the other users of this union MUST NOT use the bit to
      81             :          * avoid collision and false-positive PageTail().
      82             :          */
      83             :         union {
      84             :                 struct {        /* Page cache and anonymous pages */
      85             :                         /**
      86             :                          * @lru: Pageout list, e.g. active_list, protected by
      87             :                          * lruvec->lru_lock.  Sometimes used as a generic list
      88             :                          * by the page owner.
      89             :                          */
      90             :                         union {
      91             :                                 struct list_head lru;
      92             : 
      93             :                                 /* Or, for the Unevictable "LRU list" slot */
      94             :                                 struct {
      95             :                                         /* Always even, to negate PageTail */
      96             :                                         void *__filler;
      97             :                                         /* Count page's or folio's mlocks */
      98             :                                         unsigned int mlock_count;
      99             :                                 };
     100             : 
     101             :                                 /* Or, free page */
     102             :                                 struct list_head buddy_list;
     103             :                                 struct list_head pcp_list;
     104             :                         };
     105             :                         /* See page-flags.h for PAGE_MAPPING_FLAGS */
     106             :                         struct address_space *mapping;
     107             :                         union {
     108             :                                 pgoff_t index;          /* Our offset within mapping. */
     109             :                                 unsigned long share;    /* share count for fsdax */
     110             :                         };
     111             :                         /**
     112             :                          * @private: Mapping-private opaque data.
     113             :                          * Usually used for buffer_heads if PagePrivate.
     114             :                          * Used for swp_entry_t if PageSwapCache.
     115             :                          * Indicates order in the buddy system if PageBuddy.
     116             :                          */
     117             :                         unsigned long private;
     118             :                 };
     119             :                 struct {        /* page_pool used by netstack */
     120             :                         /**
      121             :                          * @pp_magic: magic value to avoid recycling
      122             :                          * pages that were not allocated by page_pool.
     123             :                          */
     124             :                         unsigned long pp_magic;
     125             :                         struct page_pool *pp;
     126             :                         unsigned long _pp_mapping_pad;
     127             :                         unsigned long dma_addr;
     128             :                         union {
     129             :                                 /**
     130             :                                  * dma_addr_upper: might require a 64-bit
     131             :                                  * value on 32-bit architectures.
     132             :                                  */
     133             :                                 unsigned long dma_addr_upper;
     134             :                                 /**
      135             :                                  * For frag page support; not supported on
      136             :                                  * 32-bit architectures with 64-bit DMA.
     137             :                                  */
     138             :                                 atomic_long_t pp_frag_count;
     139             :                         };
     140             :                 };
     141             :                 struct {        /* Tail pages of compound page */
     142             :                         unsigned long compound_head;    /* Bit zero is set */
     143             :                 };
     144             :                 struct {        /* Page table pages */
     145             :                         unsigned long _pt_pad_1;        /* compound_head */
     146             :                         pgtable_t pmd_huge_pte; /* protected by page->ptl */
     147             :                         unsigned long _pt_pad_2;        /* mapping */
     148             :                         union {
     149             :                                 struct mm_struct *pt_mm; /* x86 pgds only */
     150             :                                 atomic_t pt_frag_refcount; /* powerpc */
     151             :                         };
     152             : #if ALLOC_SPLIT_PTLOCKS
     153             :                         spinlock_t *ptl;
     154             : #else
     155             :                         spinlock_t ptl;
     156             : #endif
     157             :                 };
     158             :                 struct {        /* ZONE_DEVICE pages */
     159             :                         /** @pgmap: Points to the hosting device page map. */
     160             :                         struct dev_pagemap *pgmap;
     161             :                         void *zone_device_data;
     162             :                         /*
     163             :                          * ZONE_DEVICE private pages are counted as being
     164             :                          * mapped so the next 3 words hold the mapping, index,
     165             :                          * and private fields from the source anonymous or
     166             :                          * page cache page while the page is migrated to device
     167             :                          * private memory.
     168             :                          * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
     169             :                          * use the mapping, index, and private fields when
     170             :                          * pmem backed DAX files are mapped.
     171             :                          */
     172             :                 };
     173             : 
     174             :                 /** @rcu_head: You can use this to free a page by RCU. */
     175             :                 struct rcu_head rcu_head;
     176             :         };
     177             : 
     178             :         union {         /* This union is 4 bytes in size. */
     179             :                 /*
     180             :                  * If the page can be mapped to userspace, encodes the number
     181             :                  * of times this page is referenced by a page table.
     182             :                  */
     183             :                 atomic_t _mapcount;
     184             : 
     185             :                 /*
     186             :                  * If the page is neither PageSlab nor mappable to userspace,
     187             :                  * the value stored here may help determine what this page
     188             :                  * is used for.  See page-flags.h for a list of page types
     189             :                  * which are currently stored here.
     190             :                  */
     191             :                 unsigned int page_type;
     192             :         };
     193             : 
     194             :         /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
     195             :         atomic_t _refcount;
     196             : 
     197             : #ifdef CONFIG_MEMCG
     198             :         unsigned long memcg_data;
     199             : #endif
     200             : 
     201             :         /*
     202             :          * On machines where all RAM is mapped into kernel address space,
     203             :          * we can simply calculate the virtual address. On machines with
     204             :          * highmem some memory is mapped into kernel virtual memory
     205             :          * dynamically, so we need a place to store that address.
     206             :          * Note that this field could be 16 bits on x86 ... ;)
     207             :          *
     208             :          * Architectures with slow multiplication can define
     209             :          * WANT_PAGE_VIRTUAL in asm/page.h
     210             :          */
     211             : #if defined(WANT_PAGE_VIRTUAL)
     212             :         void *virtual;                  /* Kernel virtual address (NULL if
      213             :                                            not kmapped, i.e. highmem) */
     214             : #endif /* WANT_PAGE_VIRTUAL */
     215             : 
     216             : #ifdef CONFIG_KMSAN
     217             :         /*
     218             :          * KMSAN metadata for this page:
     219             :          *  - shadow page: every bit indicates whether the corresponding
     220             :          *    bit of the original page is initialized (0) or not (1);
     221             :          *  - origin page: every 4 bytes contain an id of the stack trace
     222             :          *    where the uninitialized value was created.
     223             :          */
     224             :         struct page *kmsan_shadow;
     225             :         struct page *kmsan_origin;
     226             : #endif
     227             : 
     228             : #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
     229             :         int _last_cpupid;
     230             : #endif
     231             : } _struct_page_alignment;
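The bit-0 rule called out in the union comment above is what makes compound
pages decodable: a tail page stores the address of its head page with bit 0
set, and because every struct page is at least word-aligned, that bit can
never be set in a real pointer. A minimal sketch of the decode, modeled on
the real PageTail()/compound_head() helpers in include/linux/page-flags.h:

	static inline struct page *sketch_compound_head(struct page *page)
	{
		unsigned long head = READ_ONCE(page->compound_head);

		/* Bit 0 set: this is a tail page; the rest is the head. */
		if (head & 1)
			return (struct page *)(head - 1);
		return page;
	}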
     232             : 
     233             : /*
     234             :  * struct encoded_page - a nonexistent type marking this pointer
     235             :  *
     236             :  * An 'encoded_page' pointer is a pointer to a regular 'struct page', but
     237             :  * with the low bits of the pointer indicating extra context-dependent
     238             :  * information. Not super-common, but happens in mmu_gather and mlock
     239             :  * handling, and this acts as a type system check on that use.
     240             :  *
     241             :  * We only really have two guaranteed bits in general, although you could
     242             :  * play with 'struct page' alignment (see CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
     243             :  * for more.
     244             :  *
      245             :  * Use the supplied helper functions to encode/decode the pointer and bits.
     246             :  */
     247             : struct encoded_page;
     248             : #define ENCODE_PAGE_BITS 3ul
     249             : static __always_inline struct encoded_page *encode_page(struct page *page, unsigned long flags)
     250             : {
     251             :         BUILD_BUG_ON(flags > ENCODE_PAGE_BITS);
     252             :         return (struct encoded_page *)(flags | (unsigned long)page);
     253             : }
     254             : 
     255             : static inline unsigned long encoded_page_flags(struct encoded_page *page)
     256             : {
     257             :         return ENCODE_PAGE_BITS & (unsigned long)page;
     258             : }
     259             : 
     260             : static inline struct page *encoded_page_ptr(struct encoded_page *page)
     261             : {
     262           0 :         return (struct page *)(~ENCODE_PAGE_BITS & (unsigned long)page);
     263             : }
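A round-trip sketch of the three helpers above; the flag value 1 is arbitrary
(anything up to ENCODE_PAGE_BITS passes the BUILD_BUG_ON):

	static inline bool sketch_encoded_page_roundtrip(struct page *page)
	{
		struct encoded_page *ep = encode_page(page, 1);

		/* Both the pointer and the flag bits come back unchanged. */
		return encoded_page_ptr(ep) == page &&
		       encoded_page_flags(ep) == 1;
	}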
     264             : 
     265             : /**
     266             :  * struct folio - Represents a contiguous set of bytes.
     267             :  * @flags: Identical to the page flags.
     268             :  * @lru: Least Recently Used list; tracks how recently this folio was used.
     269             :  * @mlock_count: Number of times this folio has been pinned by mlock().
     270             :  * @mapping: The file this page belongs to, or refers to the anon_vma for
     271             :  *    anonymous memory.
     272             :  * @index: Offset within the file, in units of pages.  For anonymous memory,
     273             :  *    this is the index from the beginning of the mmap.
     274             :  * @private: Filesystem per-folio data (see folio_attach_private()).
     275             :  *    Used for swp_entry_t if folio_test_swapcache().
     276             :  * @_mapcount: Do not access this member directly.  Use folio_mapcount() to
     277             :  *    find out how many times this folio is mapped by userspace.
     278             :  * @_refcount: Do not access this member directly.  Use folio_ref_count()
     279             :  *    to find how many references there are to this folio.
     280             :  * @memcg_data: Memory Control Group data.
     281             :  * @_folio_dtor: Which destructor to use for this folio.
     282             :  * @_folio_order: Do not use directly, call folio_order().
     283             :  * @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
     284             :  * @_nr_pages_mapped: Do not use directly, call folio_mapcount().
     285             :  * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
     286             :  * @_folio_nr_pages: Do not use directly, call folio_nr_pages().
     287             :  * @_hugetlb_subpool: Do not use directly, use accessor in hugetlb.h.
     288             :  * @_hugetlb_cgroup: Do not use directly, use accessor in hugetlb_cgroup.h.
     289             :  * @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
     290             :  * @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
     291             :  * @_deferred_list: Folios to be split under memory pressure.
     292             :  *
     293             :  * A folio is a physically, virtually and logically contiguous set
     294             :  * of bytes.  It is a power-of-two in size, and it is aligned to that
     295             :  * same power-of-two.  It is at least as large as %PAGE_SIZE.  If it is
     296             :  * in the page cache, it is at a file offset which is a multiple of that
     297             :  * power-of-two.  It may be mapped into userspace at an address which is
     298             :  * at an arbitrary page offset, but its kernel virtual address is aligned
     299             :  * to its size.
     300             :  */
     301             : struct folio {
     302             :         /* private: don't document the anon union */
     303             :         union {
     304             :                 struct {
     305             :         /* public: */
     306             :                         unsigned long flags;
     307             :                         union {
     308             :                                 struct list_head lru;
     309             :         /* private: avoid cluttering the output */
     310             :                                 struct {
     311             :                                         void *__filler;
     312             :         /* public: */
     313             :                                         unsigned int mlock_count;
     314             :         /* private: */
     315             :                                 };
     316             :         /* public: */
     317             :                         };
     318             :                         struct address_space *mapping;
     319             :                         pgoff_t index;
     320             :                         void *private;
     321             :                         atomic_t _mapcount;
     322             :                         atomic_t _refcount;
     323             : #ifdef CONFIG_MEMCG
     324             :                         unsigned long memcg_data;
     325             : #endif
     326             :         /* private: the union with struct page is transitional */
     327             :                 };
     328             :                 struct page page;
     329             :         };
     330             :         union {
     331             :                 struct {
     332             :                         unsigned long _flags_1;
     333             :                         unsigned long _head_1;
     334             :         /* public: */
     335             :                         unsigned char _folio_dtor;
     336             :                         unsigned char _folio_order;
     337             :                         atomic_t _entire_mapcount;
     338             :                         atomic_t _nr_pages_mapped;
     339             :                         atomic_t _pincount;
     340             : #ifdef CONFIG_64BIT
     341             :                         unsigned int _folio_nr_pages;
     342             : #endif
     343             :         /* private: the union with struct page is transitional */
     344             :                 };
     345             :                 struct page __page_1;
     346             :         };
     347             :         union {
     348             :                 struct {
     349             :                         unsigned long _flags_2;
     350             :                         unsigned long _head_2;
     351             :         /* public: */
     352             :                         void *_hugetlb_subpool;
     353             :                         void *_hugetlb_cgroup;
     354             :                         void *_hugetlb_cgroup_rsvd;
     355             :                         void *_hugetlb_hwpoison;
     356             :         /* private: the union with struct page is transitional */
     357             :                 };
     358             :                 struct {
     359             :                         unsigned long _flags_2a;
     360             :                         unsigned long _head_2a;
     361             :         /* public: */
     362             :                         struct list_head _deferred_list;
     363             :         /* private: the union with struct page is transitional */
     364             :                 };
     365             :                 struct page __page_2;
     366             :         };
     367             : };
     368             : 
     369             : #define FOLIO_MATCH(pg, fl)                                             \
     370             :         static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
     371             : FOLIO_MATCH(flags, flags);
     372             : FOLIO_MATCH(lru, lru);
     373             : FOLIO_MATCH(mapping, mapping);
     374             : FOLIO_MATCH(compound_head, lru);
     375             : FOLIO_MATCH(index, index);
     376             : FOLIO_MATCH(private, private);
     377             : FOLIO_MATCH(_mapcount, _mapcount);
     378             : FOLIO_MATCH(_refcount, _refcount);
     379             : #ifdef CONFIG_MEMCG
     380             : FOLIO_MATCH(memcg_data, memcg_data);
     381             : #endif
     382             : #undef FOLIO_MATCH
     383             : #define FOLIO_MATCH(pg, fl)                                             \
     384             :         static_assert(offsetof(struct folio, fl) ==                     \
     385             :                         offsetof(struct page, pg) + sizeof(struct page))
     386             : FOLIO_MATCH(flags, _flags_1);
     387             : FOLIO_MATCH(compound_head, _head_1);
     388             : #undef FOLIO_MATCH
     389             : #define FOLIO_MATCH(pg, fl)                                             \
     390             :         static_assert(offsetof(struct folio, fl) ==                     \
     391             :                         offsetof(struct page, pg) + 2 * sizeof(struct page))
     392             : FOLIO_MATCH(flags, _flags_2);
     393             : FOLIO_MATCH(compound_head, _head_2);
     394             : #undef FOLIO_MATCH
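These asserts are what make the transitional unions safe: a folio and its
first struct page share a layout, so converting between the two is a cast
plus a head-page check. A sketch of the idea only -- the real page_folio()
and folio_page() helpers live in page-flags.h and mm.h:

	static inline struct folio *sketch_page_to_folio(struct page *page)
	{
		/* Assumes @page is a head or order-0 page; the real helper
		 * resolves tail pages through compound_head() first. */
		return (struct folio *)page;
	}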
     395             : 
     396             : /*
     397             :  * Used for sizing the vmemmap region on some architectures
     398             :  */
     399             : #define STRUCT_PAGE_MAX_SHIFT   (order_base_2(sizeof(struct page)))
     400             : 
     401             : #define PAGE_FRAG_CACHE_MAX_SIZE        __ALIGN_MASK(32768, ~PAGE_MASK)
     402             : #define PAGE_FRAG_CACHE_MAX_ORDER       get_order(PAGE_FRAG_CACHE_MAX_SIZE)
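(Worked out: with 4 KiB pages, __ALIGN_MASK(32768, ~PAGE_MASK) is already
page-aligned at 32768, so PAGE_FRAG_CACHE_MAX_ORDER is get_order(32768) = 3,
an eight-page allocation; with 64 KiB pages the size rounds up to a single
order-0 page.)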
     403             : 
     404             : /*
     405             :  * page_private can be used on tail pages.  However, PagePrivate is only
     406             :  * checked by the VM on the head page.  So page_private on the tail pages
     407             :  * should be used for data that's ancillary to the head page (eg attaching
      408             :  * should be used for data that's ancillary to the head page (e.g. attaching
     409             :  */
     410             : #define page_private(page)              ((page)->private)
     411             : 
     412             : static inline void set_page_private(struct page *page, unsigned long private)
     413             : {
     414       52003 :         page->private = private;
     415             : }
     416             : 
     417             : static inline void *folio_get_private(struct folio *folio)
     418             : {
     419             :         return folio->private;
     420             : }
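A trivial round trip through the accessors above. Note that these raw helpers
take no reference and set no flag; filesystems normally go through
folio_attach_private()/folio_detach_private() (linux/pagemap.h), which also
manage the folio reference and the private flag:

	static inline unsigned long sketch_private_roundtrip(struct page *page,
							     unsigned long val)
	{
		set_page_private(page, val);
		return page_private(page);	/* == val */
	}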
     421             : 
     422             : struct page_frag_cache {
     423             :         void * va;
     424             : #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
     425             :         __u16 offset;
     426             :         __u16 size;
     427             : #else
     428             :         __u32 offset;
     429             : #endif
      430             :         /* We maintain a pagecount bias so that we don't dirty the cache line
     431             :          * containing page->_refcount every time we allocate a fragment.
     432             :          */
     433             :         unsigned int            pagecnt_bias;
     434             :         bool pfmemalloc;
     435             : };
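Roughly how the pagecnt_bias trick works: on refill the allocator bumps
page->_refcount by a large bias once and mirrors the value in pagecnt_bias;
each fragment handed out then only decrements the local pagecnt_bias, so the
cacheline holding _refcount stays clean until the bias is exhausted or the
page is retired (see page_frag_alloc_align() in mm/page_alloc.c).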
     436             : 
     437             : typedef unsigned long vm_flags_t;
     438             : 
     439             : /*
     440             :  * A region containing a mapping of a non-memory backed file under NOMMU
     441             :  * conditions.  These are held in a global tree and are pinned by the VMAs that
     442             :  * map parts of them.
     443             :  */
     444             : struct vm_region {
     445             :         struct rb_node  vm_rb;          /* link in global region tree */
     446             :         vm_flags_t      vm_flags;       /* VMA vm_flags */
     447             :         unsigned long   vm_start;       /* start address of region */
     448             :         unsigned long   vm_end;         /* region initialised to here */
     449             :         unsigned long   vm_top;         /* region allocated to here */
     450             :         unsigned long   vm_pgoff;       /* the offset in vm_file corresponding to vm_start */
     451             :         struct file     *vm_file;       /* the backing file or NULL */
     452             : 
     453             :         int             vm_usage;       /* region usage count (access under nommu_region_sem) */
     454             :         bool            vm_icache_flushed : 1; /* true if the icache has been flushed for
     455             :                                                 * this region */
     456             : };
     457             : 
     458             : #ifdef CONFIG_USERFAULTFD
     459             : #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
     460             : struct vm_userfaultfd_ctx {
     461             :         struct userfaultfd_ctx *ctx;
     462             : };
     463             : #else /* CONFIG_USERFAULTFD */
     464             : #define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
     465             : struct vm_userfaultfd_ctx {};
     466             : #endif /* CONFIG_USERFAULTFD */
     467             : 
     468             : struct anon_vma_name {
     469             :         struct kref kref;
     470             :         /* The name needs to be at the end because it is dynamically sized. */
     471             :         char name[];
     472             : };
     473             : 
     474             : /*
     475             :  * This struct describes a virtual memory area. There is one of these
     476             :  * per VM-area/task. A VM area is any part of the process virtual memory
      477             :  * space that has a special rule for the page-fault handlers (i.e. a shared
      478             :  * library, the executable area, etc).
     479             :  */
     480             : struct vm_area_struct {
     481             :         /* The first cache line has the info for VMA tree walking. */
     482             : 
     483             :         unsigned long vm_start;         /* Our start address within vm_mm. */
     484             :         unsigned long vm_end;           /* The first byte after our end address
     485             :                                            within vm_mm. */
     486             : 
     487             :         struct mm_struct *vm_mm;        /* The address space we belong to. */
     488             : 
     489             :         /*
     490             :          * Access permissions of this VMA.
     491             :          * See vmf_insert_mixed_prot() for discussion.
     492             :          */
     493             :         pgprot_t vm_page_prot;
     494             : 
     495             :         /*
     496             :          * Flags, see mm.h.
     497             :          * To modify use vm_flags_{init|reset|set|clear|mod} functions.
     498             :          */
     499             :         union {
     500             :                 const vm_flags_t vm_flags;
     501             :                 vm_flags_t __private __vm_flags;
     502             :         };
     503             : 
     504             :         /*
     505             :          * For areas with an address space and backing store,
     506             :          * linkage into the address_space->i_mmap interval tree.
     507             :          *
     508             :          */
     509             :         struct {
     510             :                 struct rb_node rb;
     511             :                 unsigned long rb_subtree_last;
     512             :         } shared;
     513             : 
     514             :         /*
     515             :          * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
     516             :          * list, after a COW of one of the file pages.  A MAP_SHARED vma
     517             :          * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
     518             :          * or brk vma (with NULL file) can only be in an anon_vma list.
     519             :          */
     520             :         struct list_head anon_vma_chain; /* Serialized by mmap_lock &
     521             :                                           * page_table_lock */
     522             :         struct anon_vma *anon_vma;      /* Serialized by page_table_lock */
     523             : 
     524             :         /* Function pointers to deal with this struct. */
     525             :         const struct vm_operations_struct *vm_ops;
     526             : 
     527             :         /* Information about our backing store: */
     528             :         unsigned long vm_pgoff;         /* Offset (within vm_file) in PAGE_SIZE
     529             :                                            units */
     530             :         struct file * vm_file;          /* File we map to (can be NULL). */
     531             :         void * vm_private_data;         /* was vm_pte (shared mem) */
     532             : 
     533             : #ifdef CONFIG_ANON_VMA_NAME
     534             :         /*
     535             :          * For private and shared anonymous mappings, a pointer to a null
     536             :          * terminated string containing the name given to the vma, or NULL if
     537             :          * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
     538             :          */
     539             :         struct anon_vma_name *anon_name;
     540             : #endif
     541             : #ifdef CONFIG_SWAP
     542             :         atomic_long_t swap_readahead_info;
     543             : #endif
     544             : #ifndef CONFIG_MMU
     545             :         struct vm_region *vm_region;    /* NOMMU mapping region */
     546             : #endif
     547             : #ifdef CONFIG_NUMA
     548             :         struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
     549             : #endif
     550             :         struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
     551             : } __randomize_layout;
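The const/__private union for vm_flags above turns stray direct writes into
compile errors; legitimate writers go through the vm_flags_*() helpers in
linux/mm.h. A sketch of the write side, modeled on those helpers:

	static inline void sketch_vm_flags_set(struct vm_area_struct *vma,
					       vm_flags_t flags)
	{
		/* Modifications require the mmap write lock... */
		mmap_assert_write_locked(vma->vm_mm);
		/* ...and ACCESS_PRIVATE() to reach the writable member. */
		ACCESS_PRIVATE(vma, __vm_flags) |= flags;
	}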
     552             : 
     553             : struct kioctx_table;
     554             : struct mm_struct {
     555             :         struct {
     556             :                 struct maple_tree mm_mt;
     557             : #ifdef CONFIG_MMU
     558             :                 unsigned long (*get_unmapped_area) (struct file *filp,
     559             :                                 unsigned long addr, unsigned long len,
     560             :                                 unsigned long pgoff, unsigned long flags);
     561             : #endif
     562             :                 unsigned long mmap_base;        /* base of mmap area */
     563             :                 unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
     564             : #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
     565             :                 /* Base addresses for compatible mmap() */
     566             :                 unsigned long mmap_compat_base;
     567             :                 unsigned long mmap_compat_legacy_base;
     568             : #endif
     569             :                 unsigned long task_size;        /* size of task vm space */
     570             :                 pgd_t * pgd;
     571             : 
     572             : #ifdef CONFIG_MEMBARRIER
     573             :                 /**
     574             :                  * @membarrier_state: Flags controlling membarrier behavior.
     575             :                  *
     576             :                  * This field is close to @pgd to hopefully fit in the same
     577             :                  * cache-line, which needs to be touched by switch_mm().
     578             :                  */
     579             :                 atomic_t membarrier_state;
     580             : #endif
     581             : 
     582             :                 /**
     583             :                  * @mm_users: The number of users including userspace.
     584             :                  *
     585             :                  * Use mmget()/mmget_not_zero()/mmput() to modify. When this
     586             :                  * drops to 0 (i.e. when the task exits and there are no other
     587             :                  * temporary reference holders), we also release a reference on
     588             :                  * @mm_count (which may then free the &struct mm_struct if
     589             :                  * @mm_count also drops to 0).
     590             :                  */
     591             :                 atomic_t mm_users;
     592             : 
     593             :                 /**
     594             :                  * @mm_count: The number of references to &struct mm_struct
     595             :                  * (@mm_users count as 1).
     596             :                  *
     597             :                  * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
     598             :                  * &struct mm_struct is freed.
     599             :                  */
     600             :                 atomic_t mm_count;
     601             : #ifdef CONFIG_SCHED_MM_CID
     602             :                 /**
     603             :                  * @cid_lock: Protect cid bitmap updates vs lookups.
     604             :                  *
     605             :                  * Prevent situations where updates to the cid bitmap happen
     606             :                  * concurrently with lookups. Those can lead to situations
     607             :                  * where a lookup cannot find a free bit simply because it was
     608             :                  * unlucky enough to load, non-atomically, bitmap words as they
     609             :                  * were being concurrently updated by the updaters.
     610             :                  */
     611             :                 raw_spinlock_t cid_lock;
     612             : #endif
     613             : #ifdef CONFIG_MMU
     614             :                 atomic_long_t pgtables_bytes;   /* size of all page tables */
     615             : #endif
     616             :                 int map_count;                  /* number of VMAs */
     617             : 
     618             :                 spinlock_t page_table_lock; /* Protects page tables and some
     619             :                                              * counters
     620             :                                              */
     621             :                 /*
      622             :                  * With some kernel configs, mmap_lock's current offset
      623             :                  * inside 'mm_struct' is 0x120, which is close to optimal:
      624             :                  * its two hot fields, 'count' and 'owner', sit in two
      625             :                  * different cachelines. When mmap_lock is highly contended,
      626             :                  * both fields are accessed frequently, and the current
      627             :                  * layout helps reduce cache bouncing.
      628             :                  *
      629             :                  * So please be careful when adding new fields before
      630             :                  * mmap_lock: they can easily push the two fields into the
      631             :                  * same cacheline.
     632             :                  */
     633             :                 struct rw_semaphore mmap_lock;
     634             : 
     635             :                 struct list_head mmlist; /* List of maybe swapped mm's. These
     636             :                                           * are globally strung together off
     637             :                                           * init_mm.mmlist, and are protected
     638             :                                           * by mmlist_lock
     639             :                                           */
     640             : 
     641             : 
     642             :                 unsigned long hiwater_rss; /* High-watermark of RSS usage */
     643             :                 unsigned long hiwater_vm;  /* High-water virtual memory usage */
     644             : 
     645             :                 unsigned long total_vm;    /* Total pages mapped */
     646             :                 unsigned long locked_vm;   /* Pages that have PG_mlocked set */
     647             :                 atomic64_t    pinned_vm;   /* Refcount permanently increased */
     648             :                 unsigned long data_vm;     /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
     649             :                 unsigned long exec_vm;     /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
     650             :                 unsigned long stack_vm;    /* VM_STACK */
     651             :                 unsigned long def_flags;
     652             : 
     653             :                 /**
     654             :                  * @write_protect_seq: Locked when any thread is write
     655             :                  * protecting pages mapped by this mm to enforce a later COW,
     656             :                  * for instance during page table copying for fork().
     657             :                  */
     658             :                 seqcount_t write_protect_seq;
     659             : 
     660             :                 spinlock_t arg_lock; /* protect the below fields */
     661             : 
     662             :                 unsigned long start_code, end_code, start_data, end_data;
     663             :                 unsigned long start_brk, brk, start_stack;
     664             :                 unsigned long arg_start, arg_end, env_start, env_end;
     665             : 
     666             :                 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
     667             : 
     668             :                 struct percpu_counter rss_stat[NR_MM_COUNTERS];
     669             : 
     670             :                 struct linux_binfmt *binfmt;
     671             : 
     672             :                 /* Architecture-specific MM context */
     673             :                 mm_context_t context;
     674             : 
     675             :                 unsigned long flags; /* Must use atomic bitops to access */
     676             : 
     677             : #ifdef CONFIG_AIO
     678             :                 spinlock_t                      ioctx_lock;
     679             :                 struct kioctx_table __rcu       *ioctx_table;
     680             : #endif
     681             : #ifdef CONFIG_MEMCG
     682             :                 /*
     683             :                  * "owner" points to a task that is regarded as the canonical
     684             :                  * user/owner of this mm. All of the following must be true in
     685             :                  * order for it to be changed:
     686             :                  *
     687             :                  * current == mm->owner
     688             :                  * current->mm != mm
     689             :                  * new_owner->mm == mm
     690             :                  * new_owner->alloc_lock is held
     691             :                  */
     692             :                 struct task_struct __rcu *owner;
     693             : #endif
     694             :                 struct user_namespace *user_ns;
     695             : 
     696             :                 /* store ref to file /proc/<pid>/exe symlink points to */
     697             :                 struct file __rcu *exe_file;
     698             : #ifdef CONFIG_MMU_NOTIFIER
     699             :                 struct mmu_notifier_subscriptions *notifier_subscriptions;
     700             : #endif
     701             : #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
     702             :                 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
     703             : #endif
     704             : #ifdef CONFIG_NUMA_BALANCING
     705             :                 /*
     706             :                  * numa_next_scan is the next time that PTEs will be remapped
     707             :                  * PROT_NONE to trigger NUMA hinting faults; such faults gather
     708             :                  * statistics and migrate pages to new nodes if necessary.
     709             :                  */
     710             :                 unsigned long numa_next_scan;
     711             : 
     712             :                 /* Restart point for scanning and remapping PTEs. */
     713             :                 unsigned long numa_scan_offset;
     714             : 
     715             :                 /* numa_scan_seq prevents two threads remapping PTEs. */
     716             :                 int numa_scan_seq;
     717             : #endif
     718             :                 /*
     719             :                  * An operation with batched TLB flushing is going on. Anything
     720             :                  * that can move process memory needs to flush the TLB when
     721             :                  * moving a PROT_NONE mapped page.
     722             :                  */
     723             :                 atomic_t tlb_flush_pending;
     724             : #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
     725             :                 /* See flush_tlb_batched_pending() */
     726             :                 atomic_t tlb_flush_batched;
     727             : #endif
     728             :                 struct uprobes_state uprobes_state;
     729             : #ifdef CONFIG_PREEMPT_RT
     730             :                 struct rcu_head delayed_drop;
     731             : #endif
     732             : #ifdef CONFIG_HUGETLB_PAGE
     733             :                 atomic_long_t hugetlb_usage;
     734             : #endif
     735             :                 struct work_struct async_put_work;
     736             : 
     737             : #ifdef CONFIG_IOMMU_SVA
     738             :                 u32 pasid;
     739             : #endif
     740             : #ifdef CONFIG_KSM
     741             :                 /*
      742             :                  * Represents how many pages of this process are involved in KSM
     743             :                  * merging.
     744             :                  */
     745             :                 unsigned long ksm_merging_pages;
     746             :                 /*
      747             :                  * Represents how many pages have been checked for KSM
      748             :                  * merging, whether or not they were merged.
     749             :                  */
     750             :                 unsigned long ksm_rmap_items;
     751             : #endif
     752             : #ifdef CONFIG_LRU_GEN
     753             :                 struct {
     754             :                         /* this mm_struct is on lru_gen_mm_list */
     755             :                         struct list_head list;
     756             :                         /*
     757             :                          * Set when switching to this mm_struct, as a hint of
     758             :                          * whether it has been used since the last time per-node
     759             :                          * page table walkers cleared the corresponding bits.
     760             :                          */
     761             :                         unsigned long bitmap;
     762             : #ifdef CONFIG_MEMCG
     763             :                         /* points to the memcg of "owner" above */
     764             :                         struct mem_cgroup *memcg;
     765             : #endif
     766             :                 } lru_gen;
     767             : #endif /* CONFIG_LRU_GEN */
     768             :         } __randomize_layout;
     769             : 
     770             :         /*
     771             :          * The mm_cpumask needs to be at the end of mm_struct, because it
     772             :          * is dynamically sized based on nr_cpu_ids.
     773             :          */
     774             :         unsigned long cpu_bitmap[];
     775             : };
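A sketch of the two-counter lifetime rule documented at @mm_users and
@mm_count above: code that may run after the last user exits first pins the
structure itself, then checks whether the address space is still live
(helpers from linux/sched/mm.h):

	static inline void sketch_borrow_mm(struct mm_struct *mm)
	{
		mmgrab(mm);			/* pin the struct mm_struct */
		if (mmget_not_zero(mm)) {	/* address space still alive? */
			/* ... safe to use mm's VMAs and page tables ... */
			mmput(mm);
		}
		mmdrop(mm);
	}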
     776             : 
     777             : #define MM_MT_FLAGS     (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN)
     778             : extern struct mm_struct init_mm;
     779             : 
     780             : /* Pointer magic because the dynamic array size confuses some compilers. */
     781             : static inline void mm_init_cpumask(struct mm_struct *mm)
     782             : {
     783           0 :         unsigned long cpu_bitmap = (unsigned long)mm;
     784             : 
     785           0 :         cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
     786           0 :         cpumask_clear((struct cpumask *)cpu_bitmap);
     787             : }
     788             : 
     789             : /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
     790             : static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
     791             : {
     792             :         return (struct cpumask *)&mm->cpu_bitmap;
     793             : }
     794             : 
     795             : #ifdef CONFIG_LRU_GEN
     796             : 
     797             : struct lru_gen_mm_list {
     798             :         /* mm_struct list for page table walkers */
     799             :         struct list_head fifo;
     800             :         /* protects the list above */
     801             :         spinlock_t lock;
     802             : };
     803             : 
     804             : void lru_gen_add_mm(struct mm_struct *mm);
     805             : void lru_gen_del_mm(struct mm_struct *mm);
     806             : #ifdef CONFIG_MEMCG
     807             : void lru_gen_migrate_mm(struct mm_struct *mm);
     808             : #endif
     809             : 
     810             : static inline void lru_gen_init_mm(struct mm_struct *mm)
     811             : {
     812             :         INIT_LIST_HEAD(&mm->lru_gen.list);
     813             :         mm->lru_gen.bitmap = 0;
     814             : #ifdef CONFIG_MEMCG
     815             :         mm->lru_gen.memcg = NULL;
     816             : #endif
     817             : }
     818             : 
     819             : static inline void lru_gen_use_mm(struct mm_struct *mm)
     820             : {
     821             :         /*
     822             :          * When the bitmap is set, page reclaim knows this mm_struct has been
     823             :          * used since the last time it cleared the bitmap. So it might be worth
     824             :          * walking the page tables of this mm_struct to clear the accessed bit.
     825             :          */
     826             :         WRITE_ONCE(mm->lru_gen.bitmap, -1);
     827             : }
     828             : 
     829             : #else /* !CONFIG_LRU_GEN */
     830             : 
     831             : static inline void lru_gen_add_mm(struct mm_struct *mm)
     832             : {
     833             : }
     834             : 
     835             : static inline void lru_gen_del_mm(struct mm_struct *mm)
     836             : {
     837             : }
     838             : 
     839             : #ifdef CONFIG_MEMCG
     840             : static inline void lru_gen_migrate_mm(struct mm_struct *mm)
     841             : {
     842             : }
     843             : #endif
     844             : 
     845             : static inline void lru_gen_init_mm(struct mm_struct *mm)
     846             : {
     847             : }
     848             : 
     849             : static inline void lru_gen_use_mm(struct mm_struct *mm)
     850             : {
     851             : }
     852             : 
     853             : #endif /* CONFIG_LRU_GEN */
     854             : 
     855             : struct vma_iterator {
     856             :         struct ma_state mas;
     857             : };
     858             : 
     859             : #define VMA_ITERATOR(name, __mm, __addr)                                \
     860             :         struct vma_iterator name = {                                    \
     861             :                 .mas = {                                                \
     862             :                         .tree = &(__mm)->mm_mt,                          \
     863             :                         .index = __addr,                                \
     864             :                         .node = MAS_START,                              \
     865             :                 },                                                      \
     866             :         }
     867             : 
     868             : static inline void vma_iter_init(struct vma_iterator *vmi,
     869             :                 struct mm_struct *mm, unsigned long addr)
     870             : {
     871           0 :         mas_init(&vmi->mas, &mm->mm_mt, addr);
     872             : }
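A usage sketch for the iterator: walking every VMA of an mm under the read
lock. for_each_vma() and the mmap_read_*() helpers come from linux/mm.h and
linux/mmap_lock.h:

	static inline int sketch_count_vmas(struct mm_struct *mm)
	{
		VMA_ITERATOR(vmi, mm, 0);
		struct vm_area_struct *vma;
		int count = 0;

		mmap_read_lock(mm);
		for_each_vma(vmi, vma)
			count++;
		mmap_read_unlock(mm);

		return count;
	}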
     873             : 
     874             : #ifdef CONFIG_SCHED_MM_CID
     875             : /* Accessor for struct mm_struct's cidmask. */
     876             : static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
     877             : {
     878             :         unsigned long cid_bitmap = (unsigned long)mm;
     879             : 
     880             :         cid_bitmap += offsetof(struct mm_struct, cpu_bitmap);
     881             :         /* Skip cpu_bitmap */
     882             :         cid_bitmap += cpumask_size();
     883             :         return (struct cpumask *)cid_bitmap;
     884             : }
     885             : 
     886             : static inline void mm_init_cid(struct mm_struct *mm)
     887             : {
     888             :         raw_spin_lock_init(&mm->cid_lock);
     889             :         cpumask_clear(mm_cidmask(mm));
     890             : }
     891             : 
     892             : static inline unsigned int mm_cid_size(void)
     893             : {
     894             :         return cpumask_size();
     895             : }
     896             : #else /* CONFIG_SCHED_MM_CID */
     897             : static inline void mm_init_cid(struct mm_struct *mm) { }
     898             : static inline unsigned int mm_cid_size(void)
     899             : {
     900             :         return 0;
     901             : }
     902             : #endif /* CONFIG_SCHED_MM_CID */
     903             : 
     904             : struct mmu_gather;
     905             : extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
     906             : extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
     907             : extern void tlb_finish_mmu(struct mmu_gather *tlb);
     908             : 
     909             : struct vm_fault;
     910             : 
     911             : /**
     912             :  * typedef vm_fault_t - Return type for page fault handlers.
     913             :  *
     914             :  * Page fault handlers return a bitmask of %VM_FAULT values.
     915             :  */
     916             : typedef __bitwise unsigned int vm_fault_t;
     917             : 
     918             : /**
     919             :  * enum vm_fault_reason - Page fault handlers return a bitmask of
     920             :  * these values to tell the core VM what happened when handling the
     921             :  * fault. Used to decide whether a process gets delivered SIGBUS or
     922             :  * just gets major/minor fault counters bumped up.
     923             :  *
     924             :  * @VM_FAULT_OOM:               Out Of Memory
     925             :  * @VM_FAULT_SIGBUS:            Bad access
     926             :  * @VM_FAULT_MAJOR:             Page read from storage
     927             :  * @VM_FAULT_HWPOISON:          Hit poisoned small page
     928             :  * @VM_FAULT_HWPOISON_LARGE:    Hit poisoned large page. Index encoded
     929             :  *                              in upper bits
      930             :  * @VM_FAULT_SIGSEGV:           Segmentation fault
      931             :  * @VM_FAULT_NOPAGE:            ->fault installed the pte; no page returned
     932             :  * @VM_FAULT_LOCKED:            ->fault locked the returned page
     933             :  * @VM_FAULT_RETRY:             ->fault blocked, must retry
     934             :  * @VM_FAULT_FALLBACK:          huge page fault failed, fall back to small
     935             :  * @VM_FAULT_DONE_COW:          ->fault has fully handled COW
     936             :  * @VM_FAULT_NEEDDSYNC:         ->fault did not modify page tables and needs
     937             :  *                              fsync() to complete (for synchronous page faults
     938             :  *                              in DAX)
     939             :  * @VM_FAULT_COMPLETED:         ->fault completed, meanwhile mmap lock released
     940             :  * @VM_FAULT_HINDEX_MASK:       mask HINDEX value
     941             :  *
     942             :  */
     943             : enum vm_fault_reason {
     944             :         VM_FAULT_OOM            = (__force vm_fault_t)0x000001,
     945             :         VM_FAULT_SIGBUS         = (__force vm_fault_t)0x000002,
     946             :         VM_FAULT_MAJOR          = (__force vm_fault_t)0x000004,
     947             :         VM_FAULT_HWPOISON       = (__force vm_fault_t)0x000010,
     948             :         VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
     949             :         VM_FAULT_SIGSEGV        = (__force vm_fault_t)0x000040,
     950             :         VM_FAULT_NOPAGE         = (__force vm_fault_t)0x000100,
     951             :         VM_FAULT_LOCKED         = (__force vm_fault_t)0x000200,
     952             :         VM_FAULT_RETRY          = (__force vm_fault_t)0x000400,
     953             :         VM_FAULT_FALLBACK       = (__force vm_fault_t)0x000800,
     954             :         VM_FAULT_DONE_COW       = (__force vm_fault_t)0x001000,
     955             :         VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
     956             :         VM_FAULT_COMPLETED      = (__force vm_fault_t)0x004000,
     957             :         VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
     958             : };
     959             : 
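/*
 * Handler sketch (a hypothetical ->fault implementation; the lookup
 * helper is assumed, not a real API): outcomes are reported by
 * combining the bits above.
 */
static vm_fault_t example_fault(struct vm_fault *vmf)
{
        struct page *page = example_lookup_page(vmf);   /* hypothetical */

        if (!page)
                return VM_FAULT_SIGBUS;         /* bad access */
        lock_page(page);
        vmf->page = page;
        return VM_FAULT_LOCKED;                 /* page returned locked */
}
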
     960             : /* Encode hstate index for a hwpoisoned large page */
     961             : #define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
     962             : #define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
     963             : 
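/*
 * Worked example (index value assumed): for a poisoned huge page whose
 * hstate index is 2, a handler returns
 *
 *      VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(2)
 *
 * i.e. 0x020020, and VM_FAULT_GET_HINDEX() recovers the 2 from bits 16-19.
 */
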
     964             : #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |        \
     965             :                         VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |  \
     966             :                         VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)
     967             : 
     968             : #define VM_FAULT_RESULT_TRACE \
     969             :         { VM_FAULT_OOM,                 "OOM" },      \
     970             :         { VM_FAULT_SIGBUS,              "SIGBUS" },   \
     971             :         { VM_FAULT_MAJOR,               "MAJOR" },    \
     972             :         { VM_FAULT_HWPOISON,            "HWPOISON" }, \
     973             :         { VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" },   \
     974             :         { VM_FAULT_SIGSEGV,             "SIGSEGV" },  \
     975             :         { VM_FAULT_NOPAGE,              "NOPAGE" },   \
     976             :         { VM_FAULT_LOCKED,              "LOCKED" },   \
     977             :         { VM_FAULT_RETRY,               "RETRY" },    \
     978             :         { VM_FAULT_FALLBACK,            "FALLBACK" }, \
     979             :         { VM_FAULT_DONE_COW,            "DONE_COW" }, \
     980             :         { VM_FAULT_NEEDDSYNC,           "NEEDDSYNC" }
     981             : 
     982             : struct vm_special_mapping {
     983             :         const char *name;       /* The name, e.g. "[vdso]". */
     984             : 
     985             :         /*
     986             :          * If .fault is not provided, this points to a
     987             :          * NULL-terminated array of pages that back the special mapping.
     988             :          *
     989             :          * This must not be NULL unless .fault is provided.
     990             :          */
     991             :         struct page **pages;
     992             : 
     993             :         /*
     994             :          * If non-NULL, then this is called to resolve page faults
     995             :          * on the special mapping.  If used, .pages is not checked.
     996             :          */
     997             :         vm_fault_t (*fault)(const struct vm_special_mapping *sm,
     998             :                                 struct vm_area_struct *vma,
     999             :                                 struct vm_fault *vmf);
    1000             : 
    1001             :         int (*mremap)(const struct vm_special_mapping *sm,
    1002             :                      struct vm_area_struct *new_vma);
    1003             : };
    1004             : 
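/*
 * Definition sketch (names assumed): a minimal special mapping backed by
 * a static, NULL-terminated page array, in the style used for the vdso;
 * it would be installed with _install_special_mapping() from <linux/mm.h>.
 */
static struct page *example_pages[2];   /* [1] stays NULL as terminator */

static const struct vm_special_mapping example_mapping = {
        .name  = "[example]",
        .pages = example_pages,         /* no .fault, so .pages is used */
};
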
    1005             : enum tlb_flush_reason {
    1006             :         TLB_FLUSH_ON_TASK_SWITCH,
    1007             :         TLB_REMOTE_SHOOTDOWN,
    1008             :         TLB_LOCAL_SHOOTDOWN,
    1009             :         TLB_LOCAL_MM_SHOOTDOWN,
    1010             :         TLB_REMOTE_SEND_IPI,
    1011             :         NR_TLB_FLUSH_REASONS,
    1012             : };
    1013             : 
     1014             : /*
     1015             :  * A swap entry has to fit into an "unsigned long", as the entry is hidden
     1016             :  * in the "index" field of the swapper address space.
     1017             :  */
    1018             : typedef struct {
    1019             :         unsigned long val;
    1020             : } swp_entry_t;
    1021             : 
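/*
 * Usage sketch: the pack/unpack helpers for swp_entry_t live in
 * <linux/swapops.h>:
 *
 *      swp_entry_t ent = swp_entry(type, offset);
 *      unsigned type   = swp_type(ent);
 *      pgoff_t offset  = swp_offset(ent);
 */
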
    1022             : /**
    1023             :  * enum fault_flag - Fault flag definitions.
    1024             :  * @FAULT_FLAG_WRITE: Fault was a write fault.
    1025             :  * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
     1026             :  * @FAULT_FLAG_ALLOW_RETRY: Allow retrying the fault if blocked.
    1027             :  * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
    1028             :  * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
    1029             :  * @FAULT_FLAG_TRIED: The fault has been tried once.
    1030             :  * @FAULT_FLAG_USER: The fault originated in userspace.
    1031             :  * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
    1032             :  * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
    1033             :  * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
    1034             :  * @FAULT_FLAG_UNSHARE: The fault is an unsharing request to break COW in a
    1035             :  *                      COW mapping, making sure that an exclusive anon page is
    1036             :  *                      mapped after the fault.
    1037             :  * @FAULT_FLAG_ORIG_PTE_VALID: whether the fault has vmf->orig_pte cached.
     1038             :  *                        We should only access orig_pte if this flag is set.
    1039             :  *
    1040             :  * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
    1041             :  * whether we would allow page faults to retry by specifying these two
    1042             :  * fault flags correctly.  Currently there can be three legal combinations:
    1043             :  *
    1044             :  * (a) ALLOW_RETRY and !TRIED:  this means the page fault allows retry, and
    1045             :  *                              this is the first try
    1046             :  *
    1047             :  * (b) ALLOW_RETRY and TRIED:   this means the page fault allows retry, and
    1048             :  *                              we've already tried at least once
    1049             :  *
    1050             :  * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
    1051             :  *
    1052             :  * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
     1053             :  * be used; a validity sketch follows the enum below.  Page faults may be
     1054             :  * allowed to retry multiple times, in which case we'll have an initial fault
     1055             :  * with flags (a) and then later continuous faults with flags (b).  We should
     1056             :  * always try to detect pending signals before a retry so that the continuous
     1057             :  * page faults can still be interrupted if necessary.
    1058             :  *
    1059             :  * The combination FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE is illegal.
    1060             :  * FAULT_FLAG_UNSHARE is ignored and treated like an ordinary read fault when
    1061             :  * applied to mappings that are not COW mappings.
    1062             :  */
    1063             : enum fault_flag {
    1064             :         FAULT_FLAG_WRITE =              1 << 0,
    1065             :         FAULT_FLAG_MKWRITE =            1 << 1,
    1066             :         FAULT_FLAG_ALLOW_RETRY =        1 << 2,
    1067             :         FAULT_FLAG_RETRY_NOWAIT =       1 << 3,
    1068             :         FAULT_FLAG_KILLABLE =           1 << 4,
    1069             :         FAULT_FLAG_TRIED =              1 << 5,
    1070             :         FAULT_FLAG_USER =               1 << 6,
    1071             :         FAULT_FLAG_REMOTE =             1 << 7,
    1072             :         FAULT_FLAG_INSTRUCTION =        1 << 8,
    1073             :         FAULT_FLAG_INTERRUPTIBLE =      1 << 9,
    1074             :         FAULT_FLAG_UNSHARE =            1 << 10,
    1075             :         FAULT_FLAG_ORIG_PTE_VALID =     1 << 11,
    1076             : };
    1077             : 
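/*
 * Validity sketch (hypothetical helper, not a kernel API): of the four
 * ALLOW_RETRY/TRIED combinations described above, only
 * !ALLOW_RETRY && TRIED is illegal.
 */
static inline bool example_fault_flags_valid(enum fault_flag flags)
{
        if (flags & FAULT_FLAG_ALLOW_RETRY)
                return true;                    /* cases (a) and (b) */
        return !(flags & FAULT_FLAG_TRIED);     /* accepts only case (c) */
}
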
    1078             : typedef unsigned int __bitwise zap_flags_t;
    1079             : 
    1080             : /*
    1081             :  * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
    1082             :  * other. Here is what they mean, and how to use them:
    1083             :  *
    1084             :  *
    1085             :  * FIXME: For pages which are part of a filesystem, mappings are subject to the
    1086             :  * lifetime enforced by the filesystem and we need guarantees that longterm
    1087             :  * users like RDMA and V4L2 only establish mappings which coordinate usage with
    1088             :  * the filesystem.  Ideas for this coordination include revoking the longterm
     1089             :  * pin, delaying writeback, bounce buffer page writeback, etc.  Because FS DAX
     1090             :  * was added after the problem with filesystems was found, FS DAX VMAs are
     1091             :  * specifically failed.  Filesystem pages are still subject to bugs, and use of
     1092             :  * FOLL_LONGTERM should be avoided on those pages.
    1093             :  *
    1094             :  * In the CMA case: long term pins in a CMA region would unnecessarily fragment
    1095             :  * that region.  And so, CMA attempts to migrate the page before pinning, when
    1096             :  * FOLL_LONGTERM is specified.
    1097             :  *
    1098             :  * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
    1099             :  * but an additional pin counting system) will be invoked. This is intended for
    1100             :  * anything that gets a page reference and then touches page data (for example,
    1101             :  * Direct IO). This lets the filesystem know that some non-file-system entity is
    1102             :  * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
    1103             :  * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
    1104             :  * a call to unpin_user_page().
    1105             :  *
    1106             :  * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
    1107             :  * and separate refcounting mechanisms, however, and that means that each has
    1108             :  * its own acquire and release mechanisms:
    1109             :  *
    1110             :  *     FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
    1111             :  *
     1112             :  *     FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_page*() to release.
    1113             :  *
    1114             :  * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
    1115             :  * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
    1116             :  * calls applied to them, and that's perfectly OK. This is a constraint on the
    1117             :  * callers, not on the pages.)
    1118             :  *
    1119             :  * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
    1120             :  * directly by the caller. That's in order to help avoid mismatches when
    1121             :  * releasing pages: get_user_pages*() pages must be released via put_page(),
    1122             :  * while pin_user_pages*() pages must be released via unpin_user_page().
    1123             :  *
    1124             :  * Please see Documentation/core-api/pin_user_pages.rst for more information.
    1125             :  */
    1126             : 
    1127             : enum {
    1128             :         /* check pte is writable */
    1129             :         FOLL_WRITE = 1 << 0,
    1130             :         /* do get_page on page */
    1131             :         FOLL_GET = 1 << 1,
     1132             :         /* return an error on a hole instead of the zero page */
    1133             :         FOLL_DUMP = 1 << 2,
     1134             :         /* get_user_pages read/write w/o the usual permission checks */
    1135             :         FOLL_FORCE = 1 << 3,
    1136             :         /*
    1137             :          * if a disk transfer is needed, start the IO and return without waiting
    1138             :          * upon it
    1139             :          */
    1140             :         FOLL_NOWAIT = 1 << 4,
    1141             :         /* do not fault in pages */
    1142             :         FOLL_NOFAULT = 1 << 5,
    1143             :         /* check page is hwpoisoned */
    1144             :         FOLL_HWPOISON = 1 << 6,
    1145             :         /* don't do file mappings */
    1146             :         FOLL_ANON = 1 << 7,
    1147             :         /*
    1148             :          * FOLL_LONGTERM indicates that the page will be held for an indefinite
     1149             :          * time period, _often_ under userspace control.  This is in contrast
     1150             :          * to iov_iter_get_pages(), whose uses are transient.
    1151             :          */
    1152             :         FOLL_LONGTERM = 1 << 8,
    1153             :         /* split huge pmd before returning */
    1154             :         FOLL_SPLIT_PMD = 1 << 9,
    1155             :         /* allow returning PCI P2PDMA pages */
    1156             :         FOLL_PCI_P2PDMA = 1 << 10,
    1157             :         /* allow interrupts from generic signals */
    1158             :         FOLL_INTERRUPTIBLE = 1 << 11,
    1159             : 
    1160             :         /* See also internal only FOLL flags in mm/internal.h */
    1161             : };
    1162             : 
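/*
 * Pairing sketch (simplified: partial-pin handling is omitted, and the
 * trailing vmas argument of pin_user_pages() varies across kernel
 * versions): FOLL_PIN pages acquired via pin_user_pages*() must be
 * released with unpin_user_page()/unpin_user_pages(), never put_page().
 */
static long example_pin_and_use(unsigned long uaddr, struct page **pages,
                                unsigned long nr)
{
        long pinned = pin_user_pages(uaddr, nr, FOLL_WRITE | FOLL_LONGTERM,
                                     pages, NULL);
        if (pinned > 0) {
                /* ... touch page data, e.g. for Direct IO ... */
                unpin_user_pages(pages, pinned);
        }
        return pinned;
}
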
    1163             : #endif /* _LINUX_MM_TYPES_H */

Generated by: LCOV version 1.14