LCOV - code coverage report
Current view: top level - mm - gup.c (source / functions)
Test: coverage.info
Date: 2023-08-24 13:40:31
Coverage: Lines: 0 / 576 (0.0 %)    Functions: 0 / 47 (0.0 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : #include <linux/kernel.h>
       3             : #include <linux/errno.h>
       4             : #include <linux/err.h>
       5             : #include <linux/spinlock.h>
       6             : 
       7             : #include <linux/mm.h>
       8             : #include <linux/memremap.h>
       9             : #include <linux/pagemap.h>
      10             : #include <linux/rmap.h>
      11             : #include <linux/swap.h>
      12             : #include <linux/swapops.h>
      13             : #include <linux/secretmem.h>
      14             : 
      15             : #include <linux/sched/signal.h>
      16             : #include <linux/rwsem.h>
      17             : #include <linux/hugetlb.h>
      18             : #include <linux/migrate.h>
      19             : #include <linux/mm_inline.h>
      20             : #include <linux/sched/mm.h>
      21             : #include <linux/shmem_fs.h>
      22             : 
      23             : #include <asm/mmu_context.h>
      24             : #include <asm/tlbflush.h>
      25             : 
      26             : #include "internal.h"
      27             : 
      28             : struct follow_page_context {
      29             :         struct dev_pagemap *pgmap;
      30             :         unsigned int page_mask;
      31             : };
      32             : 
      33             : static inline void sanity_check_pinned_pages(struct page **pages,
      34             :                                              unsigned long npages)
      35             : {
      36             :         if (!IS_ENABLED(CONFIG_DEBUG_VM))
      37             :                 return;
      38             : 
      39             :         /*
      40             :          * We only pin anonymous pages if they are exclusive. Once pinned, we
      41             :          * can no longer turn them possibly shared and PageAnonExclusive() will
      42             :          * stick around until the page is freed.
      43             :          *
      44             :          * We'd like to verify that our pinned anonymous pages are still mapped
      45             :          * exclusively. The issue with anon THP is that we don't know how
      46             :          * they are/were mapped when pinning them. However, for anon
      47             :          * THP we can assume that either the given page (PTE-mapped THP) or
      48             :          * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
      49             :          * neither is the case, there is certainly something wrong.
      50             :          */
      51             :         for (; npages; npages--, pages++) {
      52             :                 struct page *page = *pages;
      53             :                 struct folio *folio = page_folio(page);
      54             : 
      55             :                 if (is_zero_page(page) ||
      56             :                     !folio_test_anon(folio))
      57             :                         continue;
      58             :                 if (!folio_test_large(folio) || folio_test_hugetlb(folio))
      59             :                         VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
      60             :                 else
      61             :                         /* Either a PTE-mapped or a PMD-mapped THP. */
      62             :                         VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
      63             :                                        !PageAnonExclusive(page), page);
      64             :         }
      65             : }
      66             : 
      67             : /*
      68             :  * Return the folio with ref appropriately incremented,
      69             :  * or NULL if that failed.
      70             :  */
      71           0 : static inline struct folio *try_get_folio(struct page *page, int refs)
      72             : {
      73             :         struct folio *folio;
      74             : 
      75             : retry:
      76           0 :         folio = page_folio(page);
      77           0 :         if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
      78             :                 return NULL;
      79           0 :         if (unlikely(!folio_ref_try_add_rcu(folio, refs)))
      80             :                 return NULL;
      81             : 
      82             :         /*
      83             :          * At this point we have a stable reference to the folio; but it
      84             :          * could be that between calling page_folio() and the refcount
      85             :          * increment, the folio was split, in which case we'd end up
      86             :          * holding a reference on a folio that has nothing to do with the page
      87             :          * we were given anymore.
      88             :          * So now that the folio is stable, recheck that the page still
      89             :          * belongs to this folio.
      90             :          */
      91           0 :         if (unlikely(page_folio(page) != folio)) {
      92           0 :                 if (!put_devmap_managed_page_refs(&folio->page, refs))
      93             :                         folio_put_refs(folio, refs);
      94             :                 goto retry;
      95             :         }
      96             : 
      97             :         return folio;
      98             : }
      99             : 
     100             : /**
     101             :  * try_grab_folio() - Attempt to get or pin a folio.
     102             :  * @page:  pointer to page to be grabbed
     103             :  * @refs:  the value to (effectively) add to the folio's refcount
     104             :  * @flags: gup flags: these are the FOLL_* flag values.
     105             :  *
      106             :  * "grab" names in this file mean "look at flags to decide whether to use
      107             :  * FOLL_PIN or FOLL_GET behavior when incrementing the folio's refcount".
     108             :  *
     109             :  * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
     110             :  * same time. (That's true throughout the get_user_pages*() and
     111             :  * pin_user_pages*() APIs.) Cases:
     112             :  *
     113             :  *    FOLL_GET: folio's refcount will be incremented by @refs.
     114             :  *
     115             :  *    FOLL_PIN on large folios: folio's refcount will be incremented by
     116             :  *    @refs, and its pincount will be incremented by @refs.
     117             :  *
     118             :  *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
     119             :  *    @refs * GUP_PIN_COUNTING_BIAS.
     120             :  *
     121             :  * Return: The folio containing @page (with refcount appropriately
     122             :  * incremented) for success, or NULL upon failure. If neither FOLL_GET
     123             :  * nor FOLL_PIN was set, that's considered failure, and furthermore,
     124             :  * a likely bug in the caller, so a warning is also emitted.
     125             :  */
     126           0 : struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
     127             : {
     128             :         struct folio *folio;
     129             : 
     130           0 :         if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
     131             :                 return NULL;
     132             : 
     133             :         if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
     134             :                 return NULL;
     135             : 
     136           0 :         if (flags & FOLL_GET)
     137           0 :                 return try_get_folio(page, refs);
     138             : 
     139             :         /* FOLL_PIN is set */
     140             : 
     141             :         /*
     142             :          * Don't take a pin on the zero page - it's not going anywhere
     143             :          * and it is used in a *lot* of places.
     144             :          */
     145           0 :         if (is_zero_page(page))
     146           0 :                 return page_folio(page);
     147             : 
     148           0 :         folio = try_get_folio(page, refs);
     149           0 :         if (!folio)
     150             :                 return NULL;
     151             : 
     152             :         /*
     153             :          * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
     154             :          * right zone, so fail and let the caller fall back to the slow
     155             :          * path.
     156             :          */
     157           0 :         if (unlikely((flags & FOLL_LONGTERM) &&
     158             :                      !folio_is_longterm_pinnable(folio))) {
     159           0 :                 if (!put_devmap_managed_page_refs(&folio->page, refs))
     160             :                         folio_put_refs(folio, refs);
     161             :                 return NULL;
     162             :         }
     163             : 
     164             :         /*
     165             :          * When pinning a large folio, use an exact count to track it.
     166             :          *
     167             :          * However, be sure to *also* increment the normal folio
     168             :          * refcount field at least once, so that the folio really
     169             :          * is pinned.  That's why the refcount from the earlier
     170             :          * try_get_folio() is left intact.
     171             :          */
     172           0 :         if (folio_test_large(folio))
     173           0 :                 atomic_add(refs, &folio->_pincount);
     174             :         else
     175           0 :                 folio_ref_add(folio,
     176           0 :                                 refs * (GUP_PIN_COUNTING_BIAS - 1));
     177             :         /*
     178             :          * Adjust the pincount before re-checking the PTE for changes.
     179             :          * This is essentially a smp_mb() and is paired with a memory
     180             :          * barrier in page_try_share_anon_rmap().
     181             :          */
     182           0 :         smp_mb__after_atomic();
     183             : 
     184           0 :         node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
     185             : 
     186           0 :         return folio;
     187             : }
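
To make the FOLL_PIN accounting described above concrete, here is a minimal sketch (not part of gup.c itself) of the counter state after a single FOLL_PIN reference (refs == 1) has been taken on a folio. The helper name is hypothetical; the checks mirror the WARN_ON_ONCE()s that folio_add_pin() below performs.

/* Hypothetical helper: what one FOLL_PIN reference looks like. */
static void assert_one_pin_accounted(struct folio *folio)
{
        if (folio_test_large(folio)) {
                /* Large folio: refcount went up by 1, _pincount by 1. */
                WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1);
        } else {
                /*
                 * Single-page folio: refcount went up by GUP_PIN_COUNTING_BIAS
                 * (1 from try_get_folio() plus GUP_PIN_COUNTING_BIAS - 1 from
                 * folio_ref_add()).
                 */
                WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS);
        }
}
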
     188             : 
     189           0 : static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
     190             : {
     191           0 :         if (flags & FOLL_PIN) {
     192           0 :                 if (is_zero_folio(folio))
     193             :                         return;
     194           0 :                 node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
     195           0 :                 if (folio_test_large(folio))
     196           0 :                         atomic_sub(refs, &folio->_pincount);
     197             :                 else
     198           0 :                         refs *= GUP_PIN_COUNTING_BIAS;
     199             :         }
     200             : 
     201           0 :         if (!put_devmap_managed_page_refs(&folio->page, refs))
     202             :                 folio_put_refs(folio, refs);
     203             : }
     204             : 
     205             : /**
     206             :  * try_grab_page() - elevate a page's refcount by a flag-dependent amount
     207             :  * @page:    pointer to page to be grabbed
     208             :  * @flags:   gup flags: these are the FOLL_* flag values.
     209             :  *
     210             :  * This might not do anything at all, depending on the flags argument.
     211             :  *
      212             :  * "grab" names in this file mean "look at flags to decide whether to use
      213             :  * FOLL_PIN or FOLL_GET behavior when incrementing the page's refcount".
     214             :  *
     215             :  * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
     216             :  * time. Cases: please see the try_grab_folio() documentation, with
     217             :  * "refs=1".
     218             :  *
     219             :  * Return: 0 for success, or if no action was required (if neither FOLL_PIN
     220             :  * nor FOLL_GET was set, nothing is done). A negative error code for failure:
     221             :  *
     222             :  *   -ENOMEM            FOLL_GET or FOLL_PIN was set, but the page could not
     223             :  *                      be grabbed.
     224             :  */
     225           0 : int __must_check try_grab_page(struct page *page, unsigned int flags)
     226             : {
     227           0 :         struct folio *folio = page_folio(page);
     228             : 
     229           0 :         if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
     230             :                 return -ENOMEM;
     231             : 
     232             :         if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
     233             :                 return -EREMOTEIO;
     234             : 
     235           0 :         if (flags & FOLL_GET)
     236             :                 folio_ref_inc(folio);
     237           0 :         else if (flags & FOLL_PIN) {
     238             :                 /*
     239             :                  * Don't take a pin on the zero page - it's not going anywhere
     240             :                  * and it is used in a *lot* of places.
     241             :                  */
     242           0 :                 if (is_zero_page(page))
     243             :                         return 0;
     244             : 
     245             :                 /*
     246             :                  * Similar to try_grab_folio(): be sure to *also*
     247             :                  * increment the normal page refcount field at least once,
     248             :                  * so that the page really is pinned.
     249             :                  */
     250           0 :                 if (folio_test_large(folio)) {
     251           0 :                         folio_ref_add(folio, 1);
     252           0 :                         atomic_add(1, &folio->_pincount);
     253             :                 } else {
     254             :                         folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
     255             :                 }
     256             : 
     257           0 :                 node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, 1);
     258             :         }
     259             : 
     260             :         return 0;
     261             : }
     262             : 
     263             : /**
     264             :  * unpin_user_page() - release a dma-pinned page
     265             :  * @page:            pointer to page to be released
     266             :  *
     267             :  * Pages that were pinned via pin_user_pages*() must be released via either
     268             :  * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
     269             :  * that such pages can be separately tracked and uniquely handled. In
     270             :  * particular, interactions with RDMA and filesystems need special handling.
     271             :  */
     272           0 : void unpin_user_page(struct page *page)
     273             : {
     274           0 :         sanity_check_pinned_pages(&page, 1);
     275           0 :         gup_put_folio(page_folio(page), 1, FOLL_PIN);
     276           0 : }
     277             : EXPORT_SYMBOL(unpin_user_page);
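
A minimal usage sketch of the pin/unpin pairing described above, assuming a kernel-module context; the function name and the buf/nr_pages values are hypothetical. Pages pinned with pin_user_pages_fast() are released one by one with unpin_user_page():

/* Sketch: pin user pages for write access, use them, then unpin each one. */
static int example_pin_then_unpin(unsigned long buf, int nr_pages)
{
        struct page **pages;
        int i, pinned;

        pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        pinned = pin_user_pages_fast(buf, nr_pages, FOLL_WRITE, pages);
        if (pinned < 0) {
                kfree(pages);
                return pinned;
        }

        /* ... hand the pinned pages to a device, or access them directly ... */

        for (i = 0; i < pinned; i++)
                unpin_user_page(pages[i]);
        kfree(pages);
        return 0;
}
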
     278             : 
     279             : /**
     280             :  * folio_add_pin - Try to get an additional pin on a pinned folio
     281             :  * @folio: The folio to be pinned
     282             :  *
     283             :  * Get an additional pin on a folio we already have a pin on.  Makes no change
     284             :  * if the folio is a zero_page.
     285             :  */
     286           0 : void folio_add_pin(struct folio *folio)
     287             : {
     288           0 :         if (is_zero_folio(folio))
     289             :                 return;
     290             : 
     291             :         /*
     292             :          * Similar to try_grab_folio(): be sure to *also* increment the normal
     293             :          * page refcount field at least once, so that the page really is
     294             :          * pinned.
     295             :          */
     296           0 :         if (folio_test_large(folio)) {
     297           0 :                 WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1);
     298           0 :                 folio_ref_inc(folio);
     299           0 :                 atomic_inc(&folio->_pincount);
     300             :         } else {
     301           0 :                 WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS);
     302             :                 folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
     303             :         }
     304             : }
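
As a sketch of how the extra pin taken by folio_add_pin() is paid back (hypothetical helper, assuming the caller already holds one pin on @folio): each pin is eventually dropped through unpin_user_page() on a page of the folio.

/* Sketch: duplicate an existing pin, then drop both pins again. */
static void example_duplicate_pin(struct folio *folio)
{
        folio_add_pin(folio);                   /* now holding two pins */

        /* ... the extra pin can be handed off to another user ... */

        unpin_user_page(folio_page(folio, 0));  /* drop the extra pin */
        unpin_user_page(folio_page(folio, 0));  /* drop the original pin */
}
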
     305             : 
     306           0 : static inline struct folio *gup_folio_range_next(struct page *start,
     307             :                 unsigned long npages, unsigned long i, unsigned int *ntails)
     308             : {
     309           0 :         struct page *next = nth_page(start, i);
     310           0 :         struct folio *folio = page_folio(next);
     311           0 :         unsigned int nr = 1;
     312             : 
     313           0 :         if (folio_test_large(folio))
     314           0 :                 nr = min_t(unsigned int, npages - i,
     315             :                            folio_nr_pages(folio) - folio_page_idx(folio, next));
     316             : 
     317           0 :         *ntails = nr;
     318           0 :         return folio;
     319             : }
     320             : 
     321           0 : static inline struct folio *gup_folio_next(struct page **list,
     322             :                 unsigned long npages, unsigned long i, unsigned int *ntails)
     323             : {
     324           0 :         struct folio *folio = page_folio(list[i]);
     325             :         unsigned int nr;
     326             : 
     327           0 :         for (nr = i + 1; nr < npages; nr++) {
     328           0 :                 if (page_folio(list[nr]) != folio)
     329             :                         break;
     330             :         }
     331             : 
     332           0 :         *ntails = nr - i;
     333           0 :         return folio;
     334             : }
     335             : 
     336             : /**
     337             :  * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
     338             :  * @pages:  array of pages to be maybe marked dirty, and definitely released.
     339             :  * @npages: number of pages in the @pages array.
     340             :  * @make_dirty: whether to mark the pages dirty
     341             :  *
      342             :  * "gup-pinned page" refers to a page that has had one of the pin_user_pages()
      343             :  * variants called on that page.
     344             :  *
     345             :  * For each page in the @pages array, make that page (or its head page, if a
     346             :  * compound page) dirty, if @make_dirty is true, and if the page was previously
     347             :  * listed as clean. In any case, releases all pages using unpin_user_page(),
     348             :  * possibly via unpin_user_pages(), for the non-dirty case.
     349             :  *
     350             :  * Please see the unpin_user_page() documentation for details.
     351             :  *
     352             :  * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
     353             :  * required, then the caller should a) verify that this is really correct,
     354             :  * because _lock() is usually required, and b) hand code it:
      355             :  * set_page_dirty(), unpin_user_page().
     356             :  *
     357             :  */
     358           0 : void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
     359             :                                  bool make_dirty)
     360             : {
     361             :         unsigned long i;
     362             :         struct folio *folio;
     363             :         unsigned int nr;
     364             : 
     365           0 :         if (!make_dirty) {
     366           0 :                 unpin_user_pages(pages, npages);
     367           0 :                 return;
     368             :         }
     369             : 
     370             :         sanity_check_pinned_pages(pages, npages);
     371           0 :         for (i = 0; i < npages; i += nr) {
     372           0 :                 folio = gup_folio_next(pages, npages, i, &nr);
     373             :                 /*
     374             :                  * Checking PageDirty at this point may race with
     375             :                  * clear_page_dirty_for_io(), but that's OK. Two key
     376             :                  * cases:
     377             :                  *
     378             :                  * 1) This code sees the page as already dirty, so it
     379             :                  * skips the call to set_page_dirty(). That could happen
     380             :                  * because clear_page_dirty_for_io() called
     381             :                  * page_mkclean(), followed by set_page_dirty().
     382             :                  * However, now the page is going to get written back,
     383             :                  * which meets the original intention of setting it
     384             :                  * dirty, so all is well: clear_page_dirty_for_io() goes
     385             :                  * on to call TestClearPageDirty(), and write the page
     386             :                  * back.
     387             :                  *
     388             :                  * 2) This code sees the page as clean, so it calls
     389             :                  * set_page_dirty(). The page stays dirty, despite being
     390             :                  * written back, so it gets written back again in the
     391             :                  * next writeback cycle. This is harmless.
     392             :                  */
     393           0 :                 if (!folio_test_dirty(folio)) {
     394           0 :                         folio_lock(folio);
     395           0 :                         folio_mark_dirty(folio);
     396           0 :                         folio_unlock(folio);
     397             :                 }
     398           0 :                 gup_put_folio(folio, nr, FOLL_PIN);
     399             :         }
     400             : }
     401             : EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
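
A sketch of the pattern this helper is meant for, assuming a kernel context and hypothetical names: pin user pages so a device can write into them, then release the pins and dirty the pages in one call once the device is done.

/* Sketch: device DMA writes into pinned user pages; release and dirty them. */
static int example_pin_for_device_write(unsigned long buf, int nr_pages,
                                        struct page **pages)
{
        int pinned = pin_user_pages_fast(buf, nr_pages, FOLL_WRITE, pages);

        if (pinned < 0)
                return pinned;
        /* A short pin (pinned < nr_pages) is possible; a real caller handles it. */

        /* ... device writes into the pinned pages here ... */

        unpin_user_pages_dirty_lock(pages, pinned, true);
        return 0;
}
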
     402             : 
     403             : /**
     404             :  * unpin_user_page_range_dirty_lock() - release and optionally dirty
     405             :  * gup-pinned page range
     406             :  *
     407             :  * @page:  the starting page of a range maybe marked dirty, and definitely released.
     408             :  * @npages: number of consecutive pages to release.
     409             :  * @make_dirty: whether to mark the pages dirty
     410             :  *
     411             :  * "gup-pinned page range" refers to a range of pages that has had one of the
     412             :  * pin_user_pages() variants called on that page.
     413             :  *
      414             :  * For the page range defined by [page .. page+npages-1], make that range (or
     415             :  * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
     416             :  * page range was previously listed as clean.
     417             :  *
     418             :  * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
     419             :  * required, then the caller should a) verify that this is really correct,
     420             :  * because _lock() is usually required, and b) hand code it:
      421             :  * set_page_dirty(), unpin_user_page().
     422             :  *
     423             :  */
     424           0 : void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
     425             :                                       bool make_dirty)
     426             : {
     427             :         unsigned long i;
     428             :         struct folio *folio;
     429             :         unsigned int nr;
     430             : 
     431           0 :         for (i = 0; i < npages; i += nr) {
     432           0 :                 folio = gup_folio_range_next(page, npages, i, &nr);
     433           0 :                 if (make_dirty && !folio_test_dirty(folio)) {
     434           0 :                         folio_lock(folio);
     435           0 :                         folio_mark_dirty(folio);
     436           0 :                         folio_unlock(folio);
     437             :                 }
     438           0 :                 gup_put_folio(folio, nr, FOLL_PIN);
     439             :         }
     440           0 : }
     441             : EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
     442             : 
     443             : static void unpin_user_pages_lockless(struct page **pages, unsigned long npages)
     444             : {
     445             :         unsigned long i;
     446             :         struct folio *folio;
     447             :         unsigned int nr;
     448             : 
     449             :         /*
     450             :          * Don't perform any sanity checks because we might have raced with
     451             :          * fork() and some anonymous pages might now actually be shared --
     452             :          * which is why we're unpinning after all.
     453             :          */
     454             :         for (i = 0; i < npages; i += nr) {
     455             :                 folio = gup_folio_next(pages, npages, i, &nr);
     456             :                 gup_put_folio(folio, nr, FOLL_PIN);
     457             :         }
     458             : }
     459             : 
     460             : /**
     461             :  * unpin_user_pages() - release an array of gup-pinned pages.
      462             :  * @pages:  array of pages to be released.
     463             :  * @npages: number of pages in the @pages array.
     464             :  *
     465             :  * For each page in the @pages array, release the page using unpin_user_page().
     466             :  *
     467             :  * Please see the unpin_user_page() documentation for details.
     468             :  */
     469           0 : void unpin_user_pages(struct page **pages, unsigned long npages)
     470             : {
     471             :         unsigned long i;
     472             :         struct folio *folio;
     473             :         unsigned int nr;
     474             : 
     475             :         /*
     476             :          * If this WARN_ON() fires, then the system *might* be leaking pages (by
     477             :          * leaving them pinned), but probably not. More likely, gup/pup returned
     478             :          * a hard -ERRNO error to the caller, who erroneously passed it here.
     479             :          */
     480           0 :         if (WARN_ON(IS_ERR_VALUE(npages)))
     481           0 :                 return;
     482             : 
     483             :         sanity_check_pinned_pages(pages, npages);
     484           0 :         for (i = 0; i < npages; i += nr) {
     485           0 :                 folio = gup_folio_next(pages, npages, i, &nr);
     486           0 :                 gup_put_folio(folio, nr, FOLL_PIN);
     487             :         }
     488             : }
     489             : EXPORT_SYMBOL(unpin_user_pages);
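
The non-dirtying counterpart of the example above, again a hedged sketch with hypothetical names: when the pinned pages were only read (no FOLL_WRITE, nothing modified), a plain unpin_user_pages() is enough.

/* Sketch: device only reads from the pinned pages; plain release. */
static int example_pin_for_device_read(unsigned long buf, int nr_pages,
                                       struct page **pages)
{
        int pinned = pin_user_pages_fast(buf, nr_pages, 0, pages);

        if (pinned < 0)
                return pinned;

        /* ... device reads from the pinned pages here ... */

        unpin_user_pages(pages, pinned);
        return 0;
}
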
     490             : 
     491             : /*
      492             :  * Set MMF_HAS_PINNED if it is not set yet; once set, it stays set for the mm's
      493             :  * lifetime.  Avoid setting the bit unless necessary, or it might cause write
      494             :  * cache-line bouncing on large SMP machines for concurrent pinned gups.
     495             :  */
     496           0 : static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
     497             : {
     498           0 :         if (!test_bit(MMF_HAS_PINNED, mm_flags))
     499             :                 set_bit(MMF_HAS_PINNED, mm_flags);
     500           0 : }
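
On the reader side, the flag allows a cheap mm-wide test before paying for a per-folio heuristic; fork's COW handling performs this kind of check. A minimal sketch with a hypothetical helper name, assuming folio_maybe_dma_pinned() is available:

/* Sketch: cheap mm-wide test first, then the per-folio pin heuristic. */
static bool example_folio_may_be_pinned(struct mm_struct *mm,
                                        struct folio *folio)
{
        if (!test_bit(MMF_HAS_PINNED, &mm->flags))
                return false;
        return folio_maybe_dma_pinned(folio);
}
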
     501             : 
     502             : #ifdef CONFIG_MMU
     503             : static struct page *no_page_table(struct vm_area_struct *vma,
     504             :                 unsigned int flags)
     505             : {
     506             :         /*
     507             :          * When core dumping an enormous anonymous area that nobody
     508             :          * has touched so far, we don't want to allocate unnecessary pages or
     509             :          * page tables.  Return error instead of NULL to skip handle_mm_fault,
     510             :          * then get_dump_page() will return NULL to leave a hole in the dump.
     511             :          * But we can only make this optimization where a hole would surely
     512             :          * be zero-filled if handle_mm_fault() actually did handle it.
     513             :          */
     514           0 :         if ((flags & FOLL_DUMP) &&
     515           0 :                         (vma_is_anonymous(vma) || !vma->vm_ops->fault))
     516             :                 return ERR_PTR(-EFAULT);
     517             :         return NULL;
     518             : }
     519             : 
     520           0 : static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
     521             :                 pte_t *pte, unsigned int flags)
     522             : {
     523           0 :         if (flags & FOLL_TOUCH) {
     524           0 :                 pte_t orig_entry = ptep_get(pte);
     525           0 :                 pte_t entry = orig_entry;
     526             : 
     527           0 :                 if (flags & FOLL_WRITE)
     528             :                         entry = pte_mkdirty(entry);
     529           0 :                 entry = pte_mkyoung(entry);
     530             : 
     531           0 :                 if (!pte_same(orig_entry, entry)) {
     532           0 :                         set_pte_at(vma->vm_mm, address, pte, entry);
     533             :                         update_mmu_cache(vma, address, pte);
     534             :                 }
     535             :         }
     536             : 
     537             :         /* Proper page table entry exists, but no corresponding struct page */
     538           0 :         return -EEXIST;
     539             : }
     540             : 
     541             : /* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
     542           0 : static inline bool can_follow_write_pte(pte_t pte, struct page *page,
     543             :                                         struct vm_area_struct *vma,
     544             :                                         unsigned int flags)
     545             : {
     546             :         /* If the pte is writable, we can write to the page. */
     547           0 :         if (pte_write(pte))
     548             :                 return true;
     549             : 
     550             :         /* Maybe FOLL_FORCE is set to override it? */
     551           0 :         if (!(flags & FOLL_FORCE))
     552             :                 return false;
     553             : 
     554             :         /* But FOLL_FORCE has no effect on shared mappings */
     555           0 :         if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
     556             :                 return false;
     557             : 
     558             :         /* ... or read-only private ones */
     559           0 :         if (!(vma->vm_flags & VM_MAYWRITE))
     560             :                 return false;
     561             : 
     562             :         /* ... or already writable ones that just need to take a write fault */
     563           0 :         if (vma->vm_flags & VM_WRITE)
     564             :                 return false;
     565             : 
     566             :         /*
     567             :          * See can_change_pte_writable(): we broke COW and could map the page
     568             :          * writable if we have an exclusive anonymous page ...
     569             :          */
     570           0 :         if (!page || !PageAnon(page) || !PageAnonExclusive(page))
     571             :                 return false;
     572             : 
     573             :         /* ... and a write-fault isn't required for other reasons. */
     574             :         if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
     575             :                 return false;
     576             :         return !userfaultfd_pte_wp(vma, pte);
     577             : }
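
The classic consumer of this FOLL_FORCE path is a debugger poking a read-only private mapping (for example to plant a breakpoint). A hedged sketch using the exported access_process_vm() helper, which passes the flags down to GUP; the task and address are hypothetical:

/* Sketch: write one byte into another task's read-only private mapping. */
static int example_poke_byte(struct task_struct *tsk, unsigned long addr, u8 val)
{
        int copied = access_process_vm(tsk, addr, &val, sizeof(val),
                                       FOLL_FORCE | FOLL_WRITE);

        return copied == sizeof(val) ? 0 : -EFAULT;
}
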
     578             : 
     579           0 : static struct page *follow_page_pte(struct vm_area_struct *vma,
     580             :                 unsigned long address, pmd_t *pmd, unsigned int flags,
     581             :                 struct dev_pagemap **pgmap)
     582             : {
     583           0 :         struct mm_struct *mm = vma->vm_mm;
     584             :         struct page *page;
     585             :         spinlock_t *ptl;
     586             :         pte_t *ptep, pte;
     587             :         int ret;
     588             : 
     589             :         /* FOLL_GET and FOLL_PIN are mutually exclusive. */
     590           0 :         if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
     591             :                          (FOLL_PIN | FOLL_GET)))
     592             :                 return ERR_PTR(-EINVAL);
     593             : 
     594           0 :         ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
     595           0 :         if (!ptep)
     596             :                 return no_page_table(vma, flags);
     597           0 :         pte = ptep_get(ptep);
     598           0 :         if (!pte_present(pte))
     599             :                 goto no_page;
     600           0 :         if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
     601             :                 goto no_page;
     602             : 
     603           0 :         page = vm_normal_page(vma, address, pte);
     604             : 
     605             :         /*
     606             :          * We only care about anon pages in can_follow_write_pte() and don't
     607             :          * have to worry about pte_devmap() because they are never anon.
     608             :          */
     609           0 :         if ((flags & FOLL_WRITE) &&
     610           0 :             !can_follow_write_pte(pte, page, vma, flags)) {
     611             :                 page = NULL;
     612             :                 goto out;
     613             :         }
     614             : 
     615             :         if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
     616             :                 /*
     617             :                  * Only return device mapping pages in the FOLL_GET or FOLL_PIN
     618             :                  * case since they are only valid while holding the pgmap
     619             :                  * reference.
     620             :                  */
     621             :                 *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
     622             :                 if (*pgmap)
     623             :                         page = pte_page(pte);
     624             :                 else
     625             :                         goto no_page;
     626           0 :         } else if (unlikely(!page)) {
     627           0 :                 if (flags & FOLL_DUMP) {
     628             :                         /* Avoid special (like zero) pages in core dumps */
     629             :                         page = ERR_PTR(-EFAULT);
     630             :                         goto out;
     631             :                 }
     632             : 
     633           0 :                 if (is_zero_pfn(pte_pfn(pte))) {
     634           0 :                         page = pte_page(pte);
     635             :                 } else {
     636           0 :                         ret = follow_pfn_pte(vma, address, ptep, flags);
     637           0 :                         page = ERR_PTR(ret);
     638             :                         goto out;
     639             :                 }
     640             :         }
     641             : 
     642           0 :         if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
     643             :                 page = ERR_PTR(-EMLINK);
     644             :                 goto out;
     645             :         }
     646             : 
     647             :         VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
     648             :                        !PageAnonExclusive(page), page);
     649             : 
     650             :         /* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
     651           0 :         ret = try_grab_page(page, flags);
     652           0 :         if (unlikely(ret)) {
     653           0 :                 page = ERR_PTR(ret);
     654             :                 goto out;
     655             :         }
     656             : 
     657             :         /*
     658             :          * We need to make the page accessible if and only if we are going
     659             :          * to access its content (the FOLL_PIN case).  Please see
     660             :          * Documentation/core-api/pin_user_pages.rst for details.
     661             :          */
     662             :         if (flags & FOLL_PIN) {
     663             :                 ret = arch_make_page_accessible(page);
     664             :                 if (ret) {
     665             :                         unpin_user_page(page);
     666             :                         page = ERR_PTR(ret);
     667             :                         goto out;
     668             :                 }
     669             :         }
     670           0 :         if (flags & FOLL_TOUCH) {
     671           0 :                 if ((flags & FOLL_WRITE) &&
     672           0 :                     !pte_dirty(pte) && !PageDirty(page))
     673           0 :                         set_page_dirty(page);
     674             :                 /*
     675             :                  * pte_mkyoung() would be more correct here, but atomic care
     676             :                  * is needed to avoid losing the dirty bit: it is easier to use
     677             :                  * mark_page_accessed().
     678             :                  */
     679           0 :                 mark_page_accessed(page);
     680             :         }
     681             : out:
     682           0 :         pte_unmap_unlock(ptep, ptl);
     683             :         return page;
     684             : no_page:
     685           0 :         pte_unmap_unlock(ptep, ptl);
     686           0 :         if (!pte_none(pte))
     687             :                 return NULL;
     688             :         return no_page_table(vma, flags);
     689             : }
     690             : 
     691           0 : static struct page *follow_pmd_mask(struct vm_area_struct *vma,
     692             :                                     unsigned long address, pud_t *pudp,
     693             :                                     unsigned int flags,
     694             :                                     struct follow_page_context *ctx)
     695             : {
     696             :         pmd_t *pmd, pmdval;
     697             :         spinlock_t *ptl;
     698             :         struct page *page;
     699           0 :         struct mm_struct *mm = vma->vm_mm;
     700             : 
     701           0 :         pmd = pmd_offset(pudp, address);
     702           0 :         pmdval = pmdp_get_lockless(pmd);
     703           0 :         if (pmd_none(pmdval))
     704             :                 return no_page_table(vma, flags);
     705           0 :         if (!pmd_present(pmdval))
     706             :                 return no_page_table(vma, flags);
     707           0 :         if (pmd_devmap(pmdval)) {
     708             :                 ptl = pmd_lock(mm, pmd);
     709             :                 page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
     710             :                 spin_unlock(ptl);
     711             :                 if (page)
     712             :                         return page;
     713             :         }
     714           0 :         if (likely(!pmd_trans_huge(pmdval)))
     715           0 :                 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
     716             : 
     717             :         if (pmd_protnone(pmdval) && !gup_can_follow_protnone(flags))
     718             :                 return no_page_table(vma, flags);
     719             : 
     720             :         ptl = pmd_lock(mm, pmd);
     721             :         if (unlikely(!pmd_present(*pmd))) {
     722             :                 spin_unlock(ptl);
     723             :                 return no_page_table(vma, flags);
     724             :         }
     725             :         if (unlikely(!pmd_trans_huge(*pmd))) {
     726             :                 spin_unlock(ptl);
     727             :                 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
     728             :         }
     729             :         if (flags & FOLL_SPLIT_PMD) {
     730             :                 spin_unlock(ptl);
     731             :                 split_huge_pmd(vma, pmd, address);
     732             :                 /* If pmd was left empty, stuff a page table in there quickly */
     733             :                 return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
     734             :                         follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
     735             :         }
     736             :         page = follow_trans_huge_pmd(vma, address, pmd, flags);
     737             :         spin_unlock(ptl);
     738             :         ctx->page_mask = HPAGE_PMD_NR - 1;
     739             :         return page;
     740             : }
     741             : 
     742           0 : static struct page *follow_pud_mask(struct vm_area_struct *vma,
     743             :                                     unsigned long address, p4d_t *p4dp,
     744             :                                     unsigned int flags,
     745             :                                     struct follow_page_context *ctx)
     746             : {
     747             :         pud_t *pud;
     748             :         spinlock_t *ptl;
     749             :         struct page *page;
     750           0 :         struct mm_struct *mm = vma->vm_mm;
     751             : 
     752           0 :         pud = pud_offset(p4dp, address);
     753           0 :         if (pud_none(*pud))
     754             :                 return no_page_table(vma, flags);
     755             :         if (pud_devmap(*pud)) {
     756             :                 ptl = pud_lock(mm, pud);
     757             :                 page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
     758             :                 spin_unlock(ptl);
     759             :                 if (page)
     760             :                         return page;
     761             :         }
     762           0 :         if (unlikely(pud_bad(*pud)))
     763             :                 return no_page_table(vma, flags);
     764             : 
     765           0 :         return follow_pmd_mask(vma, address, pud, flags, ctx);
     766             : }
     767             : 
     768             : static struct page *follow_p4d_mask(struct vm_area_struct *vma,
     769             :                                     unsigned long address, pgd_t *pgdp,
     770             :                                     unsigned int flags,
     771             :                                     struct follow_page_context *ctx)
     772             : {
     773             :         p4d_t *p4d;
     774             : 
     775           0 :         p4d = p4d_offset(pgdp, address);
     776             :         if (p4d_none(*p4d))
     777             :                 return no_page_table(vma, flags);
     778             :         BUILD_BUG_ON(p4d_huge(*p4d));
     779             :         if (unlikely(p4d_bad(*p4d)))
     780             :                 return no_page_table(vma, flags);
     781             : 
     782           0 :         return follow_pud_mask(vma, address, p4d, flags, ctx);
     783             : }
     784             : 
     785             : /**
     786             :  * follow_page_mask - look up a page descriptor from a user-virtual address
     787             :  * @vma: vm_area_struct mapping @address
     788             :  * @address: virtual address to look up
     789             :  * @flags: flags modifying lookup behaviour
     790             :  * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
     791             :  *       pointer to output page_mask
     792             :  *
     793             :  * @flags can have FOLL_ flags set, defined in <linux/mm.h>
     794             :  *
     795             :  * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
     796             :  * the device's dev_pagemap metadata to avoid repeating expensive lookups.
     797             :  *
     798             :  * When getting an anonymous page and the caller has to trigger unsharing
     799             :  * of a shared anonymous page first, -EMLINK is returned. The caller should
     800             :  * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
     801             :  * relevant with FOLL_PIN and !FOLL_WRITE.
     802             :  *
     803             :  * On output, the @ctx->page_mask is set according to the size of the page.
     804             :  *
     805             :  * Return: the mapped (struct page *), %NULL if no mapping exists, or
     806             :  * an error pointer if there is a mapping to something not represented
     807             :  * by a page descriptor (see also vm_normal_page()).
     808             :  */
     809             : static struct page *follow_page_mask(struct vm_area_struct *vma,
     810             :                               unsigned long address, unsigned int flags,
     811             :                               struct follow_page_context *ctx)
     812             : {
     813             :         pgd_t *pgd;
     814             :         struct page *page;
     815           0 :         struct mm_struct *mm = vma->vm_mm;
     816             : 
     817           0 :         ctx->page_mask = 0;
     818             : 
     819             :         /*
     820             :          * Call hugetlb_follow_page_mask for hugetlb vmas as it will use
     821             :          * special hugetlb page table walking code.  This eliminates the
     822             :          * need to check for hugetlb entries in the general walking code.
     823             :          *
     824             :          * hugetlb_follow_page_mask is only for follow_page() handling here.
     825             :          * Ordinary GUP uses follow_hugetlb_page for hugetlb processing.
     826             :          */
     827           0 :         if (is_vm_hugetlb_page(vma)) {
     828             :                 page = hugetlb_follow_page_mask(vma, address, flags);
     829             :                 if (!page)
     830             :                         page = no_page_table(vma, flags);
     831             :                 return page;
     832             :         }
     833             : 
     834           0 :         pgd = pgd_offset(mm, address);
     835             : 
     836             :         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
     837             :                 return no_page_table(vma, flags);
     838             : 
     839           0 :         return follow_p4d_mask(vma, address, pgd, flags, ctx);
     840             : }
     841             : 
     842           0 : struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
     843             :                          unsigned int foll_flags)
     844             : {
     845           0 :         struct follow_page_context ctx = { NULL };
     846             :         struct page *page;
     847             : 
     848           0 :         if (vma_is_secretmem(vma))
     849             :                 return NULL;
     850             : 
     851           0 :         if (WARN_ON_ONCE(foll_flags & FOLL_PIN))
     852             :                 return NULL;
     853             : 
     854           0 :         page = follow_page_mask(vma, address, foll_flags, &ctx);
     855             :         if (ctx.pgmap)
     856             :                 put_dev_pagemap(ctx.pgmap);
     857           0 :         return page;
     858             : }
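
follow_page() can return a real page, NULL, or an ERR_PTR() (for example -EFAULT when FOLL_DUMP hits a special page, as described above), so callers must distinguish all three. A minimal sketch, assuming mm-internal context (FOLL_GET is an internal flag) and that the mmap lock is held; the -ENOENT choice for the NULL case is hypothetical:

/* Sketch: look up one page and handle the three possible outcomes. */
static long example_lookup_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

        if (IS_ERR(page))
                return PTR_ERR(page);   /* e.g. -EFAULT for special pages */
        if (!page)
                return -ENOENT;         /* nothing mapped at this address */

        /* ... inspect the page ... */

        put_page(page);                 /* drop the FOLL_GET reference */
        return 0;
}
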
     859             : 
     860             : static int get_gate_page(struct mm_struct *mm, unsigned long address,
     861             :                 unsigned int gup_flags, struct vm_area_struct **vma,
     862             :                 struct page **page)
     863             : {
     864             :         pgd_t *pgd;
     865             :         p4d_t *p4d;
     866             :         pud_t *pud;
     867             :         pmd_t *pmd;
     868             :         pte_t *pte;
     869             :         pte_t entry;
     870             :         int ret = -EFAULT;
     871             : 
     872             :         /* user gate pages are read-only */
     873             :         if (gup_flags & FOLL_WRITE)
     874             :                 return -EFAULT;
     875             :         if (address > TASK_SIZE)
     876             :                 pgd = pgd_offset_k(address);
     877             :         else
     878             :                 pgd = pgd_offset_gate(mm, address);
     879             :         if (pgd_none(*pgd))
     880             :                 return -EFAULT;
     881             :         p4d = p4d_offset(pgd, address);
     882             :         if (p4d_none(*p4d))
     883             :                 return -EFAULT;
     884             :         pud = pud_offset(p4d, address);
     885             :         if (pud_none(*pud))
     886             :                 return -EFAULT;
     887             :         pmd = pmd_offset(pud, address);
     888             :         if (!pmd_present(*pmd))
     889             :                 return -EFAULT;
     890             :         pte = pte_offset_map(pmd, address);
     891             :         if (!pte)
     892             :                 return -EFAULT;
     893             :         entry = ptep_get(pte);
     894             :         if (pte_none(entry))
     895             :                 goto unmap;
     896             :         *vma = get_gate_vma(mm);
     897             :         if (!page)
     898             :                 goto out;
     899             :         *page = vm_normal_page(*vma, address, entry);
     900             :         if (!*page) {
     901             :                 if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry)))
     902             :                         goto unmap;
     903             :                 *page = pte_page(entry);
     904             :         }
     905             :         ret = try_grab_page(*page, gup_flags);
     906             :         if (unlikely(ret))
     907             :                 goto unmap;
     908             : out:
     909             :         ret = 0;
     910             : unmap:
     911             :         pte_unmap(pte);
     912             :         return ret;
     913             : }
     914             : 
     915             : /*
     916             :  * mmap_lock must be held on entry.  If @flags has FOLL_UNLOCKABLE but not
     917             :  * FOLL_NOWAIT, the mmap_lock may be released.  If it is, *@locked will be set
     918             :  * to 0 and -EBUSY returned.
     919             :  */
     920           0 : static int faultin_page(struct vm_area_struct *vma,
     921             :                 unsigned long address, unsigned int *flags, bool unshare,
     922             :                 int *locked)
     923             : {
     924           0 :         unsigned int fault_flags = 0;
     925             :         vm_fault_t ret;
     926             : 
     927           0 :         if (*flags & FOLL_NOFAULT)
     928             :                 return -EFAULT;
     929           0 :         if (*flags & FOLL_WRITE)
     930           0 :                 fault_flags |= FAULT_FLAG_WRITE;
     931           0 :         if (*flags & FOLL_REMOTE)
     932           0 :                 fault_flags |= FAULT_FLAG_REMOTE;
     933           0 :         if (*flags & FOLL_UNLOCKABLE) {
     934           0 :                 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
     935             :                 /*
     936             :                  * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
     937             :                  * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE.
     938             :                  * That's because some callers may not be prepared to
     939             :                  * handle early exits caused by non-fatal signals.
     940             :                  */
     941           0 :                 if (*flags & FOLL_INTERRUPTIBLE)
     942           0 :                         fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
     943             :         }
     944           0 :         if (*flags & FOLL_NOWAIT)
     945           0 :                 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
     946           0 :         if (*flags & FOLL_TRIED) {
     947             :                 /*
     948             :                  * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
     949             :                  * can co-exist
     950             :                  */
     951           0 :                 fault_flags |= FAULT_FLAG_TRIED;
     952             :         }
     953           0 :         if (unshare) {
     954           0 :                 fault_flags |= FAULT_FLAG_UNSHARE;
     955             :                 /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
     956             :                 VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE);
     957             :         }
     958             : 
     959           0 :         ret = handle_mm_fault(vma, address, fault_flags, NULL);
     960             : 
     961           0 :         if (ret & VM_FAULT_COMPLETED) {
     962             :                 /*
     963             :                  * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
     964             :                  * mmap lock in the page fault handler. Sanity check this.
     965             :                  */
     966           0 :                 WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
     967           0 :                 *locked = 0;
     968             : 
     969             :                 /*
     970             :                  * We should do the same as VM_FAULT_RETRY, but let's not
     971             :                  * return -EBUSY since that's not reflecting the reality of
     972             :                  * what has happened - we've just fully completed a page
     973             :                  * fault, with the mmap lock released.  Use -EAGAIN to show
     974             :                  * that we want to take the mmap lock _again_.
     975             :                  */
     976           0 :                 return -EAGAIN;
     977             :         }
     978             : 
     979           0 :         if (ret & VM_FAULT_ERROR) {
     980           0 :                 int err = vm_fault_to_errno(ret, *flags);
     981             : 
     982           0 :                 if (err)
     983             :                         return err;
     984           0 :                 BUG();
     985             :         }
     986             : 
     987           0 :         if (ret & VM_FAULT_RETRY) {
     988           0 :                 if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
     989           0 :                         *locked = 0;
     990             :                 return -EBUSY;
     991             :         }
     992             : 
     993             :         return 0;
     994             : }
     995             : 
     996             : /*
     997             :  * Writing to file-backed mappings which require folio dirty tracking using GUP
     998             :  * is a fundamentally broken operation, as kernel write access to GUP mappings
      999             :  * does not adhere to the semantics expected by a file system.
    1000             :  *
    1001             :  * Consider the following scenario:-
    1002             :  *
    1003             :  * 1. A folio is written to via GUP which write-faults the memory, notifying
    1004             :  *    the file system and dirtying the folio.
    1005             :  * 2. Later, writeback is triggered, resulting in the folio being cleaned and
    1006             :  *    the PTE being marked read-only.
    1007             :  * 3. The GUP caller writes to the folio, as it is mapped read/write via the
    1008             :  *    direct mapping.
    1009             :  * 4. The GUP caller, now done with the page, unpins it and sets it dirty
    1010             :  *    (though it does not have to).
    1011             :  *
    1012             :  * This results in both data being written to a folio without writenotify, and
    1013             :  * the folio being dirtied unexpectedly (if the caller decides to do so).
    1014             :  */
    1015             : static bool writable_file_mapping_allowed(struct vm_area_struct *vma,
    1016             :                                           unsigned long gup_flags)
    1017             : {
    1018             :         /*
    1019             :          * If we aren't pinning then no problematic write can occur. A long term
    1020             :          * pin is the most egregious case so this is the case we disallow.
    1021             :          */
    1022           0 :         if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) !=
    1023             :             (FOLL_PIN | FOLL_LONGTERM))
    1024             :                 return true;
    1025             : 
    1026             :         /*
    1027             :          * If the VMA does not require dirty tracking then no problematic write
    1028             :          * can occur either.
    1029             :          */
    1030           0 :         return !vma_needs_dirty_tracking(vma);
    1031             : }
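The check above is easiest to see from the caller's side. Below is a minimal, hypothetical driver-style sketch (demo_longterm_pin() and its parameters are illustrative, not part of this file) of the long-term pin case that writable_file_mapping_allowed() guards against: a writable FOLL_LONGTERM pin of a dirty-tracked file mapping (e.g. ext4) is expected to be refused with -EFAULT, while anonymous or shmem/hugetlbfs memory is still allowed.

        #include <linux/mm.h>

        /* Hypothetical helper: long-term pin @nr_pages user pages for DMA-like use. */
        static int demo_longterm_pin(unsigned long uaddr, int nr_pages,
                                     struct page **pages)
        {
                int pinned;

                /*
                 * FOLL_LONGTERM with FOLL_WRITE on a file-backed VMA that needs
                 * dirty tracking is rejected by check_vma_flags(); the failure is
                 * surfaced to the caller as -EFAULT.
                 */
                pinned = pin_user_pages_fast(uaddr, nr_pages,
                                             FOLL_WRITE | FOLL_LONGTERM, pages);
                if (pinned < 0)
                        return pinned;

                /* ... hand the pages to the device here ... */

                unpin_user_pages(pages, pinned);
                return 0;
        }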
    1032             : 
    1033           0 : static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
    1034             : {
    1035           0 :         vm_flags_t vm_flags = vma->vm_flags;
    1036           0 :         int write = (gup_flags & FOLL_WRITE);
    1037           0 :         int foreign = (gup_flags & FOLL_REMOTE);
    1038           0 :         bool vma_anon = vma_is_anonymous(vma);
    1039             : 
    1040           0 :         if (vm_flags & (VM_IO | VM_PFNMAP))
    1041             :                 return -EFAULT;
    1042             : 
    1043           0 :         if ((gup_flags & FOLL_ANON) && !vma_anon)
    1044             :                 return -EFAULT;
    1045             : 
    1046             :         if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
    1047             :                 return -EOPNOTSUPP;
    1048             : 
    1049           0 :         if (vma_is_secretmem(vma))
    1050             :                 return -EFAULT;
    1051             : 
    1052           0 :         if (write) {
    1053           0 :                 if (!vma_anon &&
    1054           0 :                     !writable_file_mapping_allowed(vma, gup_flags))
    1055             :                         return -EFAULT;
    1056             : 
    1057           0 :                 if (!(vm_flags & VM_WRITE)) {
    1058           0 :                         if (!(gup_flags & FOLL_FORCE))
    1059             :                                 return -EFAULT;
    1060             :                         /* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */
    1061           0 :                         if (is_vm_hugetlb_page(vma))
    1062             :                                 return -EFAULT;
    1063             :                         /*
    1064             :                          * We used to let the write,force case do COW in a
    1065             :                          * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
    1066             :                          * set a breakpoint in a read-only mapping of an
    1067             :                          * executable, without corrupting the file (yet only
    1068             :                          * when that file had been opened for writing!).
    1069             :                          * Anon pages in shared mappings are surprising: now
    1070             :                          * just reject it.
    1071             :                          */
    1072           0 :                         if (!is_cow_mapping(vm_flags))
    1073             :                                 return -EFAULT;
    1074             :                 }
    1075           0 :         } else if (!(vm_flags & VM_READ)) {
    1076           0 :                 if (!(gup_flags & FOLL_FORCE))
    1077             :                         return -EFAULT;
    1078             :                 /*
    1079             :                  * Is there actually any vma we can reach here which does not
    1080             :                  * have VM_MAYREAD set?
    1081             :                  */
    1082           0 :                 if (!(vm_flags & VM_MAYREAD))
    1083             :                         return -EFAULT;
    1084             :         }
    1085             :         /*
    1086             :          * gups are always data accesses, not instruction
    1087             :          * fetches, so execute=false here
    1088             :          */
    1089           0 :         if (!arch_vma_access_permitted(vma, write, false, foreign))
    1090             :                 return -EFAULT;
    1091           0 :         return 0;
    1092             : }
    1093             : 
    1094             : /*
    1095             :  * This is "vma_lookup()", but with a warning if we would have
    1096             :  * historically expanded the stack in the GUP code.
    1097             :  */
    1098           0 : static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
    1099             :          unsigned long addr)
    1100             : {
    1101             : #ifdef CONFIG_STACK_GROWSUP
    1102             :         return vma_lookup(mm, addr);
    1103             : #else
    1104             :         static volatile unsigned long next_warn;
    1105             :         struct vm_area_struct *vma;
    1106             :         unsigned long now, next;
    1107             : 
    1108           0 :         vma = find_vma(mm, addr);
    1109           0 :         if (!vma || (addr >= vma->vm_start))
    1110             :                 return vma;
    1111             : 
    1112             :         /* Only warn for half-way relevant accesses */
    1113           0 :         if (!(vma->vm_flags & VM_GROWSDOWN))
    1114             :                 return NULL;
    1115           0 :         if (vma->vm_start - addr > 65536)
    1116             :                 return NULL;
    1117             : 
    1118             :         /* Let's not warn more than once an hour.. */
    1119           0 :         now = jiffies; next = next_warn;
    1120           0 :         if (next && time_before(now, next))
    1121             :                 return NULL;
    1122           0 :         next_warn = now + 60*60*HZ;
    1123             : 
    1124             :         /* Let people know things may have changed. */
    1125           0 :         pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n",
    1126             :                 current->comm, task_pid_nr(current),
    1127             :                 vma->vm_start, vma->vm_end, addr);
    1128           0 :         dump_stack();
    1129           0 :         return NULL;
    1130             : #endif
    1131             : }
    1132             : 
    1133             : /**
    1134             :  * __get_user_pages() - pin user pages in memory
    1135             :  * @mm:         mm_struct of target mm
    1136             :  * @start:      starting user address
    1137             :  * @nr_pages:   number of pages from start to pin
    1138             :  * @gup_flags:  flags modifying pin behaviour
    1139             :  * @pages:      array that receives pointers to the pages pinned.
    1140             :  *              Should be at least nr_pages long. Or NULL, if caller
    1141             :  *              only intends to ensure the pages are faulted in.
     1142             :  * @locked:     whether the mmap_lock is still held
    1143             :  *
    1144             :  * Returns either number of pages pinned (which may be less than the
    1145             :  * number requested), or an error. Details about the return value:
    1146             :  *
    1147             :  * -- If nr_pages is 0, returns 0.
    1148             :  * -- If nr_pages is >0, but no pages were pinned, returns -errno.
    1149             :  * -- If nr_pages is >0, and some pages were pinned, returns the number of
    1150             :  *    pages pinned. Again, this may be less than nr_pages.
    1151             :  * -- 0 return value is possible when the fault would need to be retried.
    1152             :  *
    1153             :  * The caller is responsible for releasing returned @pages, via put_page().
    1154             :  *
    1155             :  * Must be called with mmap_lock held.  It may be released.  See below.
    1156             :  *
    1157             :  * __get_user_pages walks a process's page tables and takes a reference to
    1158             :  * each struct page that each user address corresponds to at a given
    1159             :  * instant. That is, it takes the page that would be accessed if a user
    1160             :  * thread accesses the given user virtual address at that instant.
    1161             :  *
    1162             :  * This does not guarantee that the page exists in the user mappings when
    1163             :  * __get_user_pages returns, and there may even be a completely different
    1164             :  * page there in some cases (eg. if mmapped pagecache has been invalidated
    1165             :  * and subsequently re-faulted). However it does guarantee that the page
    1166             :  * won't be freed completely. And mostly callers simply care that the page
    1167             :  * contains data that was valid *at some point in time*. Typically, an IO
    1168             :  * or similar operation cannot guarantee anything stronger anyway because
    1169             :  * locks can't be held over the syscall boundary.
    1170             :  *
    1171             :  * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
    1172             :  * the page is written to, set_page_dirty (or set_page_dirty_lock, as
    1173             :  * appropriate) must be called after the page is finished with, and
    1174             :  * before put_page is called.
    1175             :  *
    1176             :  * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may
    1177             :  * be released. If this happens *@locked will be set to 0 on return.
    1178             :  *
    1179             :  * A caller using such a combination of @gup_flags must therefore hold the
    1180             :  * mmap_lock for reading only, and recognize when it's been released. Otherwise,
    1181             :  * it must be held for either reading or writing and will not be released.
    1182             :  *
    1183             :  * In most cases, get_user_pages or get_user_pages_fast should be used
    1184             :  * instead of __get_user_pages. __get_user_pages should be used only if
    1185             :  * you need some special @gup_flags.
    1186             :  */
    1187           0 : static long __get_user_pages(struct mm_struct *mm,
    1188             :                 unsigned long start, unsigned long nr_pages,
    1189             :                 unsigned int gup_flags, struct page **pages,
    1190             :                 int *locked)
    1191             : {
    1192           0 :         long ret = 0, i = 0;
    1193           0 :         struct vm_area_struct *vma = NULL;
    1194           0 :         struct follow_page_context ctx = { NULL };
    1195             : 
    1196           0 :         if (!nr_pages)
    1197             :                 return 0;
    1198             : 
    1199             :         start = untagged_addr_remote(mm, start);
    1200             : 
    1201             :         VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
    1202             : 
    1203             :         do {
    1204             :                 struct page *page;
    1205           0 :                 unsigned int foll_flags = gup_flags;
    1206             :                 unsigned int page_increm;
    1207             : 
    1208             :                 /* first iteration or cross vma bound */
    1209           0 :                 if (!vma || start >= vma->vm_end) {
    1210           0 :                         vma = gup_vma_lookup(mm, start);
    1211             :                         if (!vma && in_gate_area(mm, start)) {
    1212             :                                 ret = get_gate_page(mm, start & PAGE_MASK,
    1213             :                                                 gup_flags, &vma,
    1214             :                                                 pages ? &pages[i] : NULL);
    1215             :                                 if (ret)
    1216             :                                         goto out;
    1217             :                                 ctx.page_mask = 0;
    1218             :                                 goto next_page;
    1219             :                         }
    1220             : 
    1221           0 :                         if (!vma) {
    1222             :                                 ret = -EFAULT;
    1223             :                                 goto out;
    1224             :                         }
    1225           0 :                         ret = check_vma_flags(vma, gup_flags);
    1226           0 :                         if (ret)
    1227             :                                 goto out;
    1228             : 
    1229             :                         if (is_vm_hugetlb_page(vma)) {
    1230             :                                 i = follow_hugetlb_page(mm, vma, pages,
    1231             :                                                         &start, &nr_pages, i,
    1232             :                                                         gup_flags, locked);
    1233             :                                 if (!*locked) {
    1234             :                                         /*
    1235             :                                          * We've got a VM_FAULT_RETRY
    1236             :                                          * and we've lost mmap_lock.
    1237             :                                          * We must stop here.
    1238             :                                          */
    1239             :                                         BUG_ON(gup_flags & FOLL_NOWAIT);
    1240             :                                         goto out;
    1241             :                                 }
    1242             :                                 continue;
    1243             :                         }
    1244             :                 }
    1245             : retry:
    1246             :                 /*
    1247             :                  * If we have a pending SIGKILL, don't keep faulting pages and
    1248             :                  * potentially allocating memory.
    1249             :                  */
    1250           0 :                 if (fatal_signal_pending(current)) {
    1251             :                         ret = -EINTR;
    1252             :                         goto out;
    1253             :                 }
    1254           0 :                 cond_resched();
    1255             : 
    1256           0 :                 page = follow_page_mask(vma, start, foll_flags, &ctx);
    1257           0 :                 if (!page || PTR_ERR(page) == -EMLINK) {
    1258           0 :                         ret = faultin_page(vma, start, &foll_flags,
    1259           0 :                                            PTR_ERR(page) == -EMLINK, locked);
    1260           0 :                         switch (ret) {
    1261             :                         case 0:
    1262             :                                 goto retry;
    1263             :                         case -EBUSY:
    1264             :                         case -EAGAIN:
    1265           0 :                                 ret = 0;
    1266             :                                 fallthrough;
    1267             :                         case -EFAULT:
    1268             :                         case -ENOMEM:
    1269             :                         case -EHWPOISON:
    1270             :                                 goto out;
    1271             :                         }
    1272           0 :                         BUG();
    1273           0 :                 } else if (PTR_ERR(page) == -EEXIST) {
    1274             :                         /*
    1275             :                          * Proper page table entry exists, but no corresponding
    1276             :                          * struct page. If the caller expects **pages to be
    1277             :                          * filled in, bail out now, because that can't be done
    1278             :                          * for this page.
    1279             :                          */
    1280           0 :                         if (pages) {
    1281             :                                 ret = PTR_ERR(page);
    1282             :                                 goto out;
    1283             :                         }
    1284             : 
    1285             :                         goto next_page;
    1286           0 :                 } else if (IS_ERR(page)) {
    1287             :                         ret = PTR_ERR(page);
    1288             :                         goto out;
    1289             :                 }
    1290           0 :                 if (pages) {
    1291           0 :                         pages[i] = page;
    1292           0 :                         flush_anon_page(vma, page, start);
    1293             :                         flush_dcache_page(page);
    1294           0 :                         ctx.page_mask = 0;
    1295             :                 }
    1296             : next_page:
    1297           0 :                 page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
    1298           0 :                 if (page_increm > nr_pages)
    1299           0 :                         page_increm = nr_pages;
    1300           0 :                 i += page_increm;
    1301           0 :                 start += page_increm * PAGE_SIZE;
    1302           0 :                 nr_pages -= page_increm;
    1303           0 :         } while (nr_pages);
    1304             : out:
    1305             :         if (ctx.pgmap)
    1306             :                 put_dev_pagemap(ctx.pgmap);
    1307           0 :         return i ? i : ret;
    1308             : }
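As the kernel-doc above says, most callers should use get_user_pages() or get_user_pages_fast() rather than __get_user_pages() itself. A minimal sketch of that common pattern follows (demo_fill_user_pages() and its parameters are made up for illustration): grab references to the pages, write to them through a kernel mapping, then mark them dirty before dropping the references, as the FOLL_WRITE note above requires.

        #include <linux/mm.h>
        #include <linux/highmem.h>
        #include <linux/string.h>

        /* Hypothetical helper: zero @nr_pages of user memory starting at @uaddr. */
        static int demo_fill_user_pages(unsigned long uaddr, int nr_pages,
                                        struct page **pages)
        {
                int i, got;

                got = get_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
                if (got <= 0)
                        return got ? got : -EFAULT;

                for (i = 0; i < got; i++) {
                        void *kaddr = kmap_local_page(pages[i]);

                        memset(kaddr, 0, PAGE_SIZE);
                        kunmap_local(kaddr);
                        /* We wrote to the page: dirty it before dropping the ref. */
                        set_page_dirty_lock(pages[i]);
                        put_page(pages[i]);
                }
                return got;     /* number of pages handled; may be < nr_pages */
        }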
    1309             : 
    1310             : static bool vma_permits_fault(struct vm_area_struct *vma,
    1311             :                               unsigned int fault_flags)
    1312             : {
    1313           0 :         bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
    1314           0 :         bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
    1315           0 :         vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
    1316             : 
    1317           0 :         if (!(vm_flags & vma->vm_flags))
    1318             :                 return false;
    1319             : 
    1320             :         /*
    1321             :          * The architecture might have a hardware protection
    1322             :          * mechanism other than read/write that can deny access.
    1323             :          *
    1324             :          * gup always represents data access, not instruction
    1325             :          * fetches, so execute=false here:
    1326             :          */
    1327           0 :         if (!arch_vma_access_permitted(vma, write, false, foreign))
    1328             :                 return false;
    1329             : 
    1330             :         return true;
    1331             : }
    1332             : 
    1333             : /**
    1334             :  * fixup_user_fault() - manually resolve a user page fault
    1335             :  * @mm:         mm_struct of target mm
    1336             :  * @address:    user address
    1337             :  * @fault_flags:flags to pass down to handle_mm_fault()
     1338             :  * @unlocked:   set to true if the mmap_lock was unlocked while retrying; may be
     1339             :  *              NULL if the caller does not allow retry. If NULL, the caller must
     1340             :  *              guarantee that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
    1341             :  *
     1342             :  * This is meant to be called in the specific scenario where, for locking reasons,
     1343             :  * we try to access user memory in atomic context (within a pagefault_disable()
     1344             :  * section), that access fails with -EFAULT, and we want to resolve the user fault
     1345             :  * before trying again.
    1346             :  *
    1347             :  * Typically this is meant to be used by the futex code.
    1348             :  *
    1349             :  * The main difference with get_user_pages() is that this function will
    1350             :  * unconditionally call handle_mm_fault() which will in turn perform all the
    1351             :  * necessary SW fixup of the dirty and young bits in the PTE, while
    1352             :  * get_user_pages() only guarantees to update these in the struct page.
    1353             :  *
    1354             :  * This is important for some architectures where those bits also gate the
    1355             :  * access permission to the page because they are maintained in software.  On
    1356             :  * such architectures, gup() will not be enough to make a subsequent access
    1357             :  * succeed.
    1358             :  *
     1359             :  * This function will not return with an unlocked mmap_lock. So it does not have
     1360             :  * the same semantics wrt the @mm->mmap_lock as filemap_fault() does.
    1361             :  */
    1362           0 : int fixup_user_fault(struct mm_struct *mm,
    1363             :                      unsigned long address, unsigned int fault_flags,
    1364             :                      bool *unlocked)
    1365             : {
    1366             :         struct vm_area_struct *vma;
    1367             :         vm_fault_t ret;
    1368             : 
    1369           0 :         address = untagged_addr_remote(mm, address);
    1370             : 
    1371           0 :         if (unlocked)
    1372           0 :                 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
    1373             : 
    1374             : retry:
    1375           0 :         vma = gup_vma_lookup(mm, address);
    1376           0 :         if (!vma)
    1377             :                 return -EFAULT;
    1378             : 
    1379           0 :         if (!vma_permits_fault(vma, fault_flags))
    1380             :                 return -EFAULT;
    1381             : 
    1382           0 :         if ((fault_flags & FAULT_FLAG_KILLABLE) &&
    1383           0 :             fatal_signal_pending(current))
    1384             :                 return -EINTR;
    1385             : 
    1386           0 :         ret = handle_mm_fault(vma, address, fault_flags, NULL);
    1387             : 
    1388           0 :         if (ret & VM_FAULT_COMPLETED) {
    1389             :                 /*
    1390             :                  * NOTE: it's a pity that we need to retake the lock here
    1391             :                  * to pair with the unlock() in the callers. Ideally we
    1392             :                  * could tell the callers so they do not need to unlock.
    1393             :                  */
    1394           0 :                 mmap_read_lock(mm);
    1395           0 :                 *unlocked = true;
    1396           0 :                 return 0;
    1397             :         }
    1398             : 
    1399           0 :         if (ret & VM_FAULT_ERROR) {
    1400           0 :                 int err = vm_fault_to_errno(ret, 0);
    1401             : 
    1402           0 :                 if (err)
    1403             :                         return err;
    1404           0 :                 BUG();
    1405             :         }
    1406             : 
    1407           0 :         if (ret & VM_FAULT_RETRY) {
    1408           0 :                 mmap_read_lock(mm);
    1409           0 :                 *unlocked = true;
    1410           0 :                 fault_flags |= FAULT_FLAG_TRIED;
    1411           0 :                 goto retry;
    1412             :         }
    1413             : 
    1414             :         return 0;
    1415             : }
    1416             : EXPORT_SYMBOL_GPL(fixup_user_fault);
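A hedged sketch of the usage pattern described above, loosely modelled on the futex code (the helper name demo_fault_in_user_writeable() is illustrative): an access made under pagefault_disable() has just failed with -EFAULT, so the caller takes the mmap_lock, asks fixup_user_fault() to resolve the fault, and then retries the original access.

        #include <linux/mm.h>
        #include <linux/sched.h>

        /* Illustrative: make one user word writable after a failed atomic access. */
        static int demo_fault_in_user_writeable(u32 __user *uaddr)
        {
                struct mm_struct *mm = current->mm;
                int ret;

                mmap_read_lock(mm);
                /* @unlocked is NULL, so no FAULT_FLAG_ALLOW_RETRY is passed. */
                ret = fixup_user_fault(mm, (unsigned long)uaddr,
                                       FAULT_FLAG_WRITE, NULL);
                mmap_read_unlock(mm);

                return ret < 0 ? ret : 0;
        }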
    1417             : 
    1418             : /*
    1419             :  * GUP always responds to fatal signals.  When FOLL_INTERRUPTIBLE is
    1420             :  * specified, it'll also respond to generic signals.  The caller of GUP
    1421             :  * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption.
    1422             :  */
    1423           0 : static bool gup_signal_pending(unsigned int flags)
    1424             : {
    1425           0 :         if (fatal_signal_pending(current))
    1426             :                 return true;
    1427             : 
    1428           0 :         if (!(flags & FOLL_INTERRUPTIBLE))
    1429             :                 return false;
    1430             : 
    1431           0 :         return signal_pending(current);
    1432             : }
    1433             : 
    1434             : /*
    1435             :  * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
    1436             :  * the caller. This function may drop the mmap_lock. If it does so, then it will
    1437             :  * set (*locked = 0).
    1438             :  *
    1439             :  * (*locked == 0) means that the caller expects this function to acquire and
    1440             :  * drop the mmap_lock. Therefore, the value of *locked will still be zero when
    1441             :  * the function returns, even though it may have changed temporarily during
    1442             :  * function execution.
    1443             :  *
    1444             :  * Please note that this function, unlike __get_user_pages(), will not return 0
    1445             :  * for nr_pages > 0, unless FOLL_NOWAIT is used.
    1446             :  */
    1447             : static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
    1448             :                                                 unsigned long start,
    1449             :                                                 unsigned long nr_pages,
    1450             :                                                 struct page **pages,
    1451             :                                                 int *locked,
    1452             :                                                 unsigned int flags)
    1453             : {
    1454             :         long ret, pages_done;
    1455           0 :         bool must_unlock = false;
    1456             : 
    1457             :         /*
    1458             :          * The internal caller expects GUP to manage the lock internally and the
    1459             :          * lock must be released when this returns.
    1460             :          */
    1461           0 :         if (!*locked) {
    1462           0 :                 if (mmap_read_lock_killable(mm))
    1463             :                         return -EAGAIN;
    1464           0 :                 must_unlock = true;
    1465           0 :                 *locked = 1;
    1466             :         }
    1467             :         else
    1468             :                 mmap_assert_locked(mm);
    1469             : 
    1470           0 :         if (flags & FOLL_PIN)
    1471           0 :                 mm_set_has_pinned_flag(&mm->flags);
    1472             : 
    1473             :         /*
    1474             :          * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
    1475             :          * is to set FOLL_GET if the caller wants pages[] filled in (but has
    1476             :          * carelessly failed to specify FOLL_GET), so keep doing that, but only
    1477             :          * for FOLL_GET, not for the newer FOLL_PIN.
    1478             :          *
    1479             :          * FOLL_PIN always expects pages to be non-null, but no need to assert
    1480             :          * that here, as any failures will be obvious enough.
    1481             :          */
    1482           0 :         if (pages && !(flags & FOLL_PIN))
    1483           0 :                 flags |= FOLL_GET;
    1484             : 
    1485           0 :         pages_done = 0;
    1486             :         for (;;) {
    1487           0 :                 ret = __get_user_pages(mm, start, nr_pages, flags, pages,
    1488             :                                        locked);
    1489           0 :                 if (!(flags & FOLL_UNLOCKABLE)) {
    1490             :                         /* VM_FAULT_RETRY couldn't trigger, bypass */
    1491             :                         pages_done = ret;
    1492             :                         break;
    1493             :                 }
    1494             : 
    1495             :                 /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
    1496           0 :                 if (!*locked) {
    1497           0 :                         BUG_ON(ret < 0);
    1498           0 :                         BUG_ON(ret >= nr_pages);
    1499             :                 }
    1500             : 
    1501           0 :                 if (ret > 0) {
    1502           0 :                         nr_pages -= ret;
    1503           0 :                         pages_done += ret;
    1504           0 :                         if (!nr_pages)
    1505             :                                 break;
    1506             :                 }
    1507           0 :                 if (*locked) {
    1508             :                         /*
    1509             :                          * VM_FAULT_RETRY didn't trigger or it was a
    1510             :                          * FOLL_NOWAIT.
    1511             :                          */
    1512           0 :                         if (!pages_done)
    1513           0 :                                 pages_done = ret;
    1514             :                         break;
    1515             :                 }
    1516             :                 /*
    1517             :                  * VM_FAULT_RETRY triggered, so seek to the faulting offset.
    1518             :                  * For the prefault case (!pages) we only update counts.
    1519             :                  */
    1520           0 :                 if (likely(pages))
    1521           0 :                         pages += ret;
    1522           0 :                 start += ret << PAGE_SHIFT;
    1523             : 
    1524             :                 /* The lock was temporarily dropped, so we must unlock later */
    1525           0 :                 must_unlock = true;
    1526             : 
    1527             : retry:
    1528             :                 /*
    1529             :                  * Repeat on the address that fired VM_FAULT_RETRY
    1530             :                  * with both FAULT_FLAG_ALLOW_RETRY and
    1531             :                  * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
     1532             :                  * by fatal signals or even common signals, depending on
    1533             :                  * the caller's request. So we need to check it before we
    1534             :                  * start trying again otherwise it can loop forever.
    1535             :                  */
    1536           0 :                 if (gup_signal_pending(flags)) {
    1537           0 :                         if (!pages_done)
    1538           0 :                                 pages_done = -EINTR;
    1539             :                         break;
    1540             :                 }
    1541             : 
    1542           0 :                 ret = mmap_read_lock_killable(mm);
    1543           0 :                 if (ret) {
    1544           0 :                         BUG_ON(ret > 0);
    1545           0 :                         if (!pages_done)
    1546           0 :                                 pages_done = ret;
    1547             :                         break;
    1548             :                 }
    1549             : 
    1550           0 :                 *locked = 1;
    1551           0 :                 ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
    1552             :                                        pages, locked);
    1553           0 :                 if (!*locked) {
     1554             :                         /* Continue to retry until we succeed */
    1555           0 :                         BUG_ON(ret != 0);
    1556             :                         goto retry;
    1557             :                 }
    1558           0 :                 if (ret != 1) {
    1559           0 :                         BUG_ON(ret > 1);
    1560           0 :                         if (!pages_done)
    1561           0 :                                 pages_done = ret;
    1562             :                         break;
    1563             :                 }
    1564           0 :                 nr_pages--;
    1565           0 :                 pages_done++;
    1566           0 :                 if (!nr_pages)
    1567             :                         break;
    1568           0 :                 if (likely(pages))
    1569           0 :                         pages++;
    1570           0 :                 start += PAGE_SIZE;
    1571             :         }
    1572           0 :         if (must_unlock && *locked) {
    1573             :                 /*
    1574             :                  * We either temporarily dropped the lock, or the caller
    1575             :                  * requested that we both acquire and drop the lock. Either way,
    1576             :                  * we must now unlock, and notify the caller of that state.
    1577             :                  */
    1578           0 :                 mmap_read_unlock(mm);
    1579           0 :                 *locked = 0;
    1580             :         }
    1581             :         return pages_done;
    1582             : }
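The *locked protocol described in the comment above is what the exported entry points expose to their callers. A minimal sketch, assuming the get_user_pages_remote() signature used by this kernel revision (mm, start, nr_pages, gup_flags, pages, locked); the helper name demo_remote_pin() is illustrative.

        #include <linux/mm.h>

        /* Illustrative: grab pages of another process's address space for writing. */
        static long demo_remote_pin(struct mm_struct *mm, unsigned long uaddr,
                                    unsigned long nr_pages, struct page **pages)
        {
                int locked = 1;
                long ret;

                mmap_read_lock(mm);
                ret = get_user_pages_remote(mm, uaddr, nr_pages, FOLL_WRITE,
                                            pages, &locked);
                /* GUP may have dropped the lock for us; only unlock if still held. */
                if (locked)
                        mmap_read_unlock(mm);
                /* On success the caller must put_page() each returned page. */
                return ret;
        }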
    1583             : 
    1584             : /**
    1585             :  * populate_vma_page_range() -  populate a range of pages in the vma.
    1586             :  * @vma:   target vma
    1587             :  * @start: start address
    1588             :  * @end:   end address
    1589             :  * @locked: whether the mmap_lock is still held
    1590             :  *
    1591             :  * This takes care of mlocking the pages too if VM_LOCKED is set.
    1592             :  *
    1593             :  * Return either number of pages pinned in the vma, or a negative error
    1594             :  * code on error.
    1595             :  *
    1596             :  * vma->vm_mm->mmap_lock must be held.
    1597             :  *
    1598             :  * If @locked is NULL, it may be held for read or write and will
    1599             :  * be unperturbed.
    1600             :  *
     1601             :  * If @locked is non-NULL, it must be held for read only and may be
    1602             :  * released.  If it's released, *@locked will be set to 0.
    1603             :  */
    1604           0 : long populate_vma_page_range(struct vm_area_struct *vma,
    1605             :                 unsigned long start, unsigned long end, int *locked)
    1606             : {
    1607           0 :         struct mm_struct *mm = vma->vm_mm;
    1608           0 :         unsigned long nr_pages = (end - start) / PAGE_SIZE;
    1609           0 :         int local_locked = 1;
    1610             :         int gup_flags;
    1611             :         long ret;
    1612             : 
    1613             :         VM_BUG_ON(!PAGE_ALIGNED(start));
    1614             :         VM_BUG_ON(!PAGE_ALIGNED(end));
    1615             :         VM_BUG_ON_VMA(start < vma->vm_start, vma);
    1616             :         VM_BUG_ON_VMA(end   > vma->vm_end, vma);
    1617           0 :         mmap_assert_locked(mm);
    1618             : 
    1619             :         /*
    1620             :          * Rightly or wrongly, the VM_LOCKONFAULT case has never used
    1621             :          * faultin_page() to break COW, so it has no work to do here.
    1622             :          */
    1623           0 :         if (vma->vm_flags & VM_LOCKONFAULT)
    1624           0 :                 return nr_pages;
    1625             : 
    1626           0 :         gup_flags = FOLL_TOUCH;
    1627             :         /*
    1628             :          * We want to touch writable mappings with a write fault in order
    1629             :          * to break COW, except for shared mappings because these don't COW
    1630             :          * and we would not want to dirty them for nothing.
    1631             :          */
    1632           0 :         if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
    1633           0 :                 gup_flags |= FOLL_WRITE;
    1634             : 
    1635             :         /*
    1636             :          * We want mlock to succeed for regions that have any permissions
    1637             :          * other than PROT_NONE.
    1638             :          */
    1639           0 :         if (vma_is_accessible(vma))
    1640           0 :                 gup_flags |= FOLL_FORCE;
    1641             : 
    1642           0 :         if (locked)
    1643           0 :                 gup_flags |= FOLL_UNLOCKABLE;
    1644             : 
    1645             :         /*
    1646             :          * We made sure addr is within a VMA, so the following will
    1647             :          * not result in a stack expansion that recurses back here.
    1648             :          */
    1649           0 :         ret = __get_user_pages(mm, start, nr_pages, gup_flags,
    1650             :                                NULL, locked ? locked : &local_locked);
    1651           0 :         lru_add_drain();
    1652           0 :         return ret;
    1653             : }
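A minimal sketch of how an mm-internal caller might drive populate_vma_page_range() for a single VMA (the helper is declared in mm/internal.h, so this assumes code living under mm/; the name demo_populate_range() is illustrative, and __mm_populate() further below shows the full multi-VMA version of the same idea). @start and @end must be page-aligned.

        /* Illustrative: fault in one page-aligned range that lies inside a single VMA. */
        static long demo_populate_range(struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
        {
                struct vm_area_struct *vma;
                int locked = 1;
                long ret;

                mmap_read_lock(mm);
                vma = find_vma_intersection(mm, start, end);
                if (!vma || start < vma->vm_start || end > vma->vm_end) {
                        mmap_read_unlock(mm);
                        return -EFAULT;
                }
                ret = populate_vma_page_range(vma, start, end, &locked);
                if (locked)
                        mmap_read_unlock(mm);
                return ret;     /* pages processed, or -errno */
        }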
    1654             : 
    1655             : /*
    1656             :  * faultin_vma_page_range() - populate (prefault) page tables inside the
    1657             :  *                            given VMA range readable/writable
    1658             :  *
    1659             :  * This takes care of mlocking the pages, too, if VM_LOCKED is set.
    1660             :  *
    1661             :  * @vma: target vma
    1662             :  * @start: start address
    1663             :  * @end: end address
    1664             :  * @write: whether to prefault readable or writable
    1665             :  * @locked: whether the mmap_lock is still held
    1666             :  *
    1667             :  * Returns either number of processed pages in the vma, or a negative error
    1668             :  * code on error (see __get_user_pages()).
    1669             :  *
     1670             :  * vma->vm_mm->mmap_lock must be held; if it is released, *@locked will be set
     1671             :  * to 0. The range must be page-aligned and covered by the VMA.
    1672             :  */
    1673           0 : long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
    1674             :                             unsigned long end, bool write, int *locked)
    1675             : {
    1676           0 :         struct mm_struct *mm = vma->vm_mm;
    1677           0 :         unsigned long nr_pages = (end - start) / PAGE_SIZE;
    1678             :         int gup_flags;
    1679             :         long ret;
    1680             : 
    1681             :         VM_BUG_ON(!PAGE_ALIGNED(start));
    1682             :         VM_BUG_ON(!PAGE_ALIGNED(end));
    1683             :         VM_BUG_ON_VMA(start < vma->vm_start, vma);
    1684             :         VM_BUG_ON_VMA(end > vma->vm_end, vma);
    1685           0 :         mmap_assert_locked(mm);
    1686             : 
    1687             :         /*
    1688             :          * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
    1689             :          *             the page dirty with FOLL_WRITE -- which doesn't make a
    1690             :          *             difference with !FOLL_FORCE, because the page is writable
    1691             :          *             in the page table.
    1692             :          * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
    1693             :          *                a poisoned page.
    1694             :          * !FOLL_FORCE: Require proper access permissions.
    1695             :          */
    1696           0 :         gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE;
    1697           0 :         if (write)
    1698           0 :                 gup_flags |= FOLL_WRITE;
    1699             : 
    1700             :         /*
    1701             :          * We want to report -EINVAL instead of -EFAULT for any permission
    1702             :          * problems or incompatible mappings.
    1703             :          */
    1704           0 :         if (check_vma_flags(vma, gup_flags))
    1705             :                 return -EINVAL;
    1706             : 
    1707           0 :         ret = __get_user_pages(mm, start, nr_pages, gup_flags,
    1708             :                                NULL, locked);
    1709           0 :         lru_add_drain();
    1710           0 :         return ret;
    1711             : }
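A hedged sketch of the kind of caller this helper is written for, roughly following the MADV_POPULATE_READ/WRITE style of use: prefault a page-aligned range inside an already looked-up VMA, and if GUP dropped the mmap_lock, re-take it and tell the caller to re-validate the VMA before retrying. This is an mm-internal helper (declared in mm/internal.h), and the names demo_prefault() and the -EAGAIN "please retry" convention are illustrative only.

        /* Illustrative: prefault [start, end) of @vma for writing. */
        static long demo_prefault(struct mm_struct *mm, struct vm_area_struct *vma,
                                  unsigned long start, unsigned long end, int *locked)
        {
                long pages;

                /* mmap_lock is held and covers @vma; *locked == 1 on entry. */
                pages = faultin_vma_page_range(vma, start, end, true, locked);
                if (!*locked) {
                        /*
                         * GUP dropped the mmap_lock.  Re-take it, but the old @vma
                         * may be gone: the caller must look the VMA up again.
                         */
                        mmap_read_lock(mm);
                        *locked = 1;
                        return -EAGAIN;
                }
                return pages;   /* pages processed, or -errno (e.g. -EINVAL, -EHWPOISON) */
        }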
    1712             : 
    1713             : /*
    1714             :  * __mm_populate - populate and/or mlock pages within a range of address space.
    1715             :  *
    1716             :  * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
    1717             :  * flags. VMAs must be already marked with the desired vm_flags, and
    1718             :  * mmap_lock must not be held.
    1719             :  */
    1720           0 : int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
    1721             : {
    1722           0 :         struct mm_struct *mm = current->mm;
    1723             :         unsigned long end, nstart, nend;
    1724           0 :         struct vm_area_struct *vma = NULL;
    1725           0 :         int locked = 0;
    1726           0 :         long ret = 0;
    1727             : 
    1728           0 :         end = start + len;
    1729             : 
    1730           0 :         for (nstart = start; nstart < end; nstart = nend) {
    1731             :                 /*
    1732             :                  * We want to fault in pages for [nstart; end) address range.
    1733             :                  * Find first corresponding VMA.
    1734             :                  */
    1735           0 :                 if (!locked) {
    1736           0 :                         locked = 1;
    1737           0 :                         mmap_read_lock(mm);
    1738           0 :                         vma = find_vma_intersection(mm, nstart, end);
    1739           0 :                 } else if (nstart >= vma->vm_end)
    1740           0 :                         vma = find_vma_intersection(mm, vma->vm_end, end);
    1741             : 
    1742           0 :                 if (!vma)
    1743             :                         break;
    1744             :                 /*
    1745             :                  * Set [nstart; nend) to intersection of desired address
    1746             :                  * range with the first VMA. Also, skip undesirable VMA types.
    1747             :                  */
    1748           0 :                 nend = min(end, vma->vm_end);
    1749           0 :                 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
    1750           0 :                         continue;
    1751           0 :                 if (nstart < vma->vm_start)
    1752           0 :                         nstart = vma->vm_start;
    1753             :                 /*
    1754             :                  * Now fault in a range of pages. populate_vma_page_range()
    1755             :                  * double checks the vma flags, so that it won't mlock pages
    1756             :                  * if the vma was already munlocked.
    1757             :                  */
    1758           0 :                 ret = populate_vma_page_range(vma, nstart, nend, &locked);
    1759           0 :                 if (ret < 0) {
    1760           0 :                         if (ignore_errors) {
    1761           0 :                                 ret = 0;
    1762           0 :                                 continue;       /* continue at next VMA */
    1763             :                         }
    1764             :                         break;
    1765             :                 }
    1766           0 :                 nend = nstart + ret * PAGE_SIZE;
    1767           0 :                 ret = 0;
    1768             :         }
    1769           0 :         if (locked)
    1770             :                 mmap_read_unlock(mm);
    1771           0 :         return ret;     /* 0 or negative error code */
    1772             : }
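For reference, the usual entry point into __mm_populate() is the thin wrapper provided by include/linux/mm.h (reproduced approximately here; check the header for the exact definition), which is what mmap() with MAP_POPULATE ends up calling, deliberately ignoring errors:

        static inline void mm_populate(unsigned long addr, unsigned long len)
        {
                /* Ignore errors */
                (void) __mm_populate(addr, len, 1);
        }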
    1773             : #else /* CONFIG_MMU */
    1774             : static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
    1775             :                 unsigned long nr_pages, struct page **pages,
    1776             :                 int *locked, unsigned int foll_flags)
    1777             : {
    1778             :         struct vm_area_struct *vma;
    1779             :         bool must_unlock = false;
    1780             :         unsigned long vm_flags;
    1781             :         long i;
    1782             : 
    1783             :         if (!nr_pages)
    1784             :                 return 0;
    1785             : 
    1786             :         /*
    1787             :          * The internal caller expects GUP to manage the lock internally and the
    1788             :          * lock must be released when this returns.
    1789             :          */
    1790             :         if (!*locked) {
    1791             :                 if (mmap_read_lock_killable(mm))
    1792             :                         return -EAGAIN;
    1793             :                 must_unlock = true;
    1794             :                 *locked = 1;
    1795             :         }
    1796             : 
    1797             :         /* calculate required read or write permissions.
    1798             :          * If FOLL_FORCE is set, we only require the "MAY" flags.
    1799             :          */
    1800             :         vm_flags  = (foll_flags & FOLL_WRITE) ?
    1801             :                         (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
    1802             :         vm_flags &= (foll_flags & FOLL_FORCE) ?
    1803             :                         (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
    1804             : 
    1805             :         for (i = 0; i < nr_pages; i++) {
    1806             :                 vma = find_vma(mm, start);
    1807             :                 if (!vma)
    1808             :                         break;
    1809             : 
    1810             :                 /* protect what we can, including chardevs */
    1811             :                 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
    1812             :                     !(vm_flags & vma->vm_flags))
    1813             :                         break;
    1814             : 
    1815             :                 if (pages) {
    1816             :                         pages[i] = virt_to_page((void *)start);
    1817             :                         if (pages[i])
    1818             :                                 get_page(pages[i]);
    1819             :                 }
    1820             : 
    1821             :                 start = (start + PAGE_SIZE) & PAGE_MASK;
    1822             :         }
    1823             : 
    1824             :         if (must_unlock && *locked) {
    1825             :                 mmap_read_unlock(mm);
    1826             :                 *locked = 0;
    1827             :         }
    1828             : 
    1829             :         return i ? : -EFAULT;
    1830             : }
    1831             : #endif /* !CONFIG_MMU */
    1832             : 
    1833             : /**
    1834             :  * fault_in_writeable - fault in userspace address range for writing
    1835             :  * @uaddr: start of address range
    1836             :  * @size: size of address range
    1837             :  *
    1838             :  * Returns the number of bytes not faulted in (like copy_to_user() and
    1839             :  * copy_from_user()).
    1840             :  */
    1841           0 : size_t fault_in_writeable(char __user *uaddr, size_t size)
    1842             : {
    1843           0 :         char __user *start = uaddr, *end;
    1844             : 
    1845           0 :         if (unlikely(size == 0))
    1846             :                 return 0;
    1847           0 :         if (!user_write_access_begin(uaddr, size))
    1848             :                 return size;
    1849           0 :         if (!PAGE_ALIGNED(uaddr)) {
    1850           0 :                 unsafe_put_user(0, uaddr, out);
    1851           0 :                 uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
    1852             :         }
    1853           0 :         end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
    1854           0 :         if (unlikely(end < start))
    1855           0 :                 end = NULL;
    1856           0 :         while (uaddr != end) {
    1857           0 :                 unsafe_put_user(0, uaddr, out);
    1858           0 :                 uaddr += PAGE_SIZE;
    1859             :         }
    1860             : 
    1861             : out:
    1862             :         user_write_access_end();
    1863           0 :         if (size > uaddr - start)
    1864           0 :                 return size - (uaddr - start);
    1865             :         return 0;
    1866             : }
    1867             : EXPORT_SYMBOL(fault_in_writeable);
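A minimal sketch of the copy/fault-in retry loop these helpers exist for (all names illustrative): the copy is attempted with page faults disabled, for example because a lock that the fault path might also need is held, and whatever could not be copied is faulted in, outside that lock, before retrying. On architectures with sub-page permission checks (arm64 MTE), fault_in_subpage_writeable() below would be the safer choice for the fault-in step.

        #include <linux/uaccess.h>
        #include <linux/pagemap.h>

        /* Illustrative: copy @count bytes to user space while page faults are unsafe. */
        static int demo_copy_out(char __user *ubuf, const char *kbuf, size_t count)
        {
                while (count) {
                        size_t left;

                        pagefault_disable();    /* copy_to_user() fails instead of faulting */
                        left = copy_to_user(ubuf, kbuf, count);
                        pagefault_enable();

                        ubuf += count - left;
                        kbuf += count - left;
                        count = left;
                        if (!count)
                                break;

                        /* Drop the problematic lock here, then fault the rest in. */
                        if (fault_in_writeable(ubuf, count) == count)
                                return -EFAULT; /* no forward progress possible */
                }
                return 0;
        }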
    1868             : 
    1869             : /**
    1870             :  * fault_in_subpage_writeable - fault in an address range for writing
    1871             :  * @uaddr: start of address range
    1872             :  * @size: size of address range
    1873             :  *
    1874             :  * Fault in a user address range for writing while checking for permissions at
    1875             :  * sub-page granularity (e.g. arm64 MTE). This function should be used when
    1876             :  * the caller cannot guarantee forward progress of a copy_to_user() loop.
    1877             :  *
    1878             :  * Returns the number of bytes not faulted in (like copy_to_user() and
    1879             :  * copy_from_user()).
    1880             :  */
    1881           0 : size_t fault_in_subpage_writeable(char __user *uaddr, size_t size)
    1882             : {
    1883             :         size_t faulted_in;
    1884             : 
    1885             :         /*
    1886             :          * Attempt faulting in at page granularity first for page table
    1887             :          * permission checking. The arch-specific probe_subpage_writeable()
    1888             :          * functions may not check for this.
    1889             :          */
    1890           0 :         faulted_in = size - fault_in_writeable(uaddr, size);
    1891             :         if (faulted_in)
    1892             :                 faulted_in -= probe_subpage_writeable(uaddr, faulted_in);
    1893             : 
    1894           0 :         return size - faulted_in;
    1895             : }
    1896             : EXPORT_SYMBOL(fault_in_subpage_writeable);
    1897             : 
    1898             : /*
    1899             :  * fault_in_safe_writeable - fault in an address range for writing
    1900             :  * @uaddr: start of address range
    1901             :  * @size: length of address range
    1902             :  *
    1903             :  * Faults in an address range for writing.  This is primarily useful when we
    1904             :  * already know that some or all of the pages in the address range aren't in
    1905             :  * memory.
    1906             :  *
    1907             :  * Unlike fault_in_writeable(), this function is non-destructive.
    1908             :  *
    1909             :  * Note that we don't pin or otherwise hold the pages that we fault in.
    1910             :  * There's no guarantee that they'll stay in memory for any length of
    1911             :  * time.
    1912             :  *
    1913             :  * Returns the number of bytes not faulted in, like copy_to_user() and
    1914             :  * copy_from_user().
    1915             :  */
    1916           0 : size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
    1917             : {
    1918           0 :         unsigned long start = (unsigned long)uaddr, end;
    1919           0 :         struct mm_struct *mm = current->mm;
    1920           0 :         bool unlocked = false;
    1921             : 
    1922           0 :         if (unlikely(size == 0))
    1923             :                 return 0;
    1924           0 :         end = PAGE_ALIGN(start + size);
    1925           0 :         if (end < start)
    1926           0 :                 end = 0;
    1927             : 
    1928             :         mmap_read_lock(mm);
    1929             :         do {
    1930           0 :                 if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
    1931             :                         break;
    1932           0 :                 start = (start + PAGE_SIZE) & PAGE_MASK;
    1933           0 :         } while (start != end);
    1934           0 :         mmap_read_unlock(mm);
    1935             : 
    1936           0 :         if (size > start - (unsigned long)uaddr)
    1937           0 :                 return size - (start - (unsigned long)uaddr);
    1938             :         return 0;
    1939             : }
    1940             : EXPORT_SYMBOL(fault_in_safe_writeable);
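                     : 
                     : /*
                     :  * Illustrative sketch (editor's example, not part of gup.c): because this
                     :  * variant does not write to the range, it is the one to use when the
                     :  * operation may end up writing less than the full range, where the zero
                     :  * byte written by fault_in_writeable() could corrupt bytes that are never
                     :  * overwritten afterwards.  Names are hypothetical.
                     :  *
                     :  *      if (fault_in_safe_writeable(ubuf, len) == len)
                     :  *              return -EFAULT; // nothing could be faulted in
                     :  *      // retake locks and retry the copy_to_user()-based path
                     :  */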
    1941             : 
    1942             : /**
    1943             :  * fault_in_readable - fault in userspace address range for reading
    1944             :  * @uaddr: start of user address range
    1945             :  * @size: size of user address range
    1946             :  *
    1947             :  * Returns the number of bytes not faulted in (like copy_to_user() and
    1948             :  * copy_from_user()).
    1949             :  */
    1950           0 : size_t fault_in_readable(const char __user *uaddr, size_t size)
    1951             : {
    1952           0 :         const char __user *start = uaddr, *end;
    1953             :         volatile char c;
    1954             : 
    1955           0 :         if (unlikely(size == 0))
    1956             :                 return 0;
    1957           0 :         if (!user_read_access_begin(uaddr, size))
    1958             :                 return size;
    1959           0 :         if (!PAGE_ALIGNED(uaddr)) {
    1960           0 :                 unsafe_get_user(c, uaddr, out);
    1961           0 :                 uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
    1962             :         }
    1963           0 :         end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
    1964           0 :         if (unlikely(end < start))
    1965           0 :                 end = NULL;
    1966           0 :         while (uaddr != end) {
    1967           0 :                 unsafe_get_user(c, uaddr, out);
    1968           0 :                 uaddr += PAGE_SIZE;
    1969             :         }
    1970             : 
    1971             : out:
    1972             :         user_read_access_end();
    1973           0 :         (void)c;
    1974           0 :         if (size > uaddr - start)
    1975           0 :                 return size - (uaddr - start);
    1976             :         return 0;
    1977             : }
    1978             : EXPORT_SYMBOL(fault_in_readable);
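                     : 
                     : /*
                     :  * Illustrative sketch (editor's example, not part of gup.c): the read-side
                     :  * counterpart, typically used to prefault a source buffer before entering
                     :  * a section that must not take page faults.  Names are hypothetical.
                     :  *
                     :  *      if (fault_in_readable(ubuf, len) == len)
                     :  *              return -EFAULT;
                     :  *      // e.g. retry copy_from_user(), or copy under pagefault_disable()
                     :  */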
    1979             : 
    1980             : /**
    1981             :  * get_dump_page() - pin user page in memory while writing it to core dump
    1982             :  * @addr: user address
    1983             :  *
    1984             :  * Returns struct page pointer of user page pinned for dump,
    1985             :  * to be freed afterwards by put_page().
    1986             :  *
    1987             :  * Returns NULL on any kind of failure - a hole must then be inserted into
    1988             :  * the corefile, to preserve alignment with its headers; and also returns
    1989             :  * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
    1990             :  * allowing a hole to be left in the corefile to save disk space.
    1991             :  *
    1992             :  * Called without mmap_lock (takes and releases the mmap_lock by itself).
    1993             :  */
    1994             : #ifdef CONFIG_ELF_CORE
    1995           0 : struct page *get_dump_page(unsigned long addr)
    1996             : {
    1997             :         struct page *page;
    1998           0 :         int locked = 0;
    1999             :         int ret;
    2000             : 
    2001           0 :         ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked,
    2002             :                                       FOLL_FORCE | FOLL_DUMP | FOLL_GET);
    2003           0 :         return (ret == 1) ? page : NULL;
    2004             : }
    2005             : #endif /* CONFIG_ELF_CORE */
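                     : 
                     : /*
                     :  * Illustrative sketch (editor's example, not part of gup.c) of the core
                     :  * dump loop described above; write_page_to_corefile() stands in for the
                     :  * page-emitting helper in fs/coredump.c and is hypothetical here.
                     :  *
                     :  *      for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
                     :  *              struct page *page = get_dump_page(addr);
                     :  *
                     :  *              if (page) {
                     :  *                      int stop = !write_page_to_corefile(cprm, page);
                     :  *                      put_page(page);
                     :  *                      if (stop)
                     :  *                              return 0;
                     :  *              } else if (!dump_skip(cprm, PAGE_SIZE)) {
                     :  *                      return 0;       // hole keeps the corefile aligned
                     :  *              }
                     :  *      }
                     :  */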
    2006             : 
    2007             : #ifdef CONFIG_MIGRATION
    2008             : /*
    2009             :  * Returns the number of collected pages. Return value is always >= 0.
    2010             :  */
    2011           0 : static unsigned long collect_longterm_unpinnable_pages(
    2012             :                                         struct list_head *movable_page_list,
    2013             :                                         unsigned long nr_pages,
    2014             :                                         struct page **pages)
    2015             : {
    2016           0 :         unsigned long i, collected = 0;
    2017           0 :         struct folio *prev_folio = NULL;
    2018           0 :         bool drain_allow = true;
    2019             : 
    2020           0 :         for (i = 0; i < nr_pages; i++) {
    2021           0 :                 struct folio *folio = page_folio(pages[i]);
    2022             : 
    2023           0 :                 if (folio == prev_folio)
    2024           0 :                         continue;
    2025           0 :                 prev_folio = folio;
    2026             : 
    2027           0 :                 if (folio_is_longterm_pinnable(folio))
    2028           0 :                         continue;
    2029             : 
    2030           0 :                 collected++;
    2031             : 
    2032           0 :                 if (folio_is_device_coherent(folio))
    2033             :                         continue;
    2034             : 
    2035           0 :                 if (folio_test_hugetlb(folio)) {
    2036             :                         isolate_hugetlb(folio, movable_page_list);
    2037             :                         continue;
    2038             :                 }
    2039             : 
    2040           0 :                 if (!folio_test_lru(folio) && drain_allow) {
    2041           0 :                         lru_add_drain_all();
    2042           0 :                         drain_allow = false;
    2043             :                 }
    2044             : 
    2045           0 :                 if (!folio_isolate_lru(folio))
    2046           0 :                         continue;
    2047             : 
    2048           0 :                 list_add_tail(&folio->lru, movable_page_list);
    2049           0 :                 node_stat_mod_folio(folio,
    2050           0 :                                     NR_ISOLATED_ANON + folio_is_file_lru(folio),
    2051             :                                     folio_nr_pages(folio));
    2052             :         }
    2053             : 
    2054           0 :         return collected;
    2055             : }
    2056             : 
    2057             : /*
    2058             :  * Unpins all pages, and migrates device coherent pages and the pages on
    2059             :  * movable_page_list.  Returns -EAGAIN if all pages were migrated successfully,
    2060             :  * or -errno on failure (or partial success).
    2061             :  */
    2062           0 : static int migrate_longterm_unpinnable_pages(
    2063             :                                         struct list_head *movable_page_list,
    2064             :                                         unsigned long nr_pages,
    2065             :                                         struct page **pages)
    2066             : {
    2067             :         int ret;
    2068             :         unsigned long i;
    2069             : 
    2070           0 :         for (i = 0; i < nr_pages; i++) {
    2071           0 :                 struct folio *folio = page_folio(pages[i]);
    2072             : 
    2073           0 :                 if (folio_is_device_coherent(folio)) {
    2074             :                         /*
    2075             :                          * Migration will fail if the page is pinned, so convert
    2076             :                          * the pin on the source page to a normal reference.
    2077             :                          */
    2078             :                         pages[i] = NULL;
    2079             :                         folio_get(folio);
    2080             :                         gup_put_folio(folio, 1, FOLL_PIN);
    2081             : 
    2082             :                         if (migrate_device_coherent_page(&folio->page)) {
    2083             :                                 ret = -EBUSY;
    2084             :                                 goto err;
    2085             :                         }
    2086             : 
    2087             :                         continue;
    2088             :                 }
    2089             : 
    2090             :                 /*
    2091             :                  * We can't migrate pages with unexpected references, so drop
    2092             :                  * the reference obtained by __get_user_pages_locked().
    2093             :                  * Migrating pages have been added to movable_page_list after
    2094             :                  * calling folio_isolate_lru() which takes a reference so the
    2095             :                  * page won't be freed if it's migrating.
    2096             :                  */
    2097           0 :                 unpin_user_page(pages[i]);
    2098           0 :                 pages[i] = NULL;
    2099             :         }
    2100             : 
    2101           0 :         if (!list_empty(movable_page_list)) {
    2102           0 :                 struct migration_target_control mtc = {
    2103             :                         .nid = NUMA_NO_NODE,
    2104             :                         .gfp_mask = GFP_USER | __GFP_NOWARN,
    2105             :                 };
    2106             : 
    2107           0 :                 if (migrate_pages(movable_page_list, alloc_migration_target,
    2108             :                                   NULL, (unsigned long)&mtc, MIGRATE_SYNC,
    2109             :                                   MR_LONGTERM_PIN, NULL)) {
    2110           0 :                         ret = -ENOMEM;
    2111           0 :                         goto err;
    2112             :                 }
    2113             :         }
    2114             : 
    2115           0 :         putback_movable_pages(movable_page_list);
    2116             : 
    2117           0 :         return -EAGAIN;
    2118             : 
    2119             : err:
    2120           0 :         for (i = 0; i < nr_pages; i++)
    2121           0 :                 if (pages[i])
    2122           0 :                         unpin_user_page(pages[i]);
    2123           0 :         putback_movable_pages(movable_page_list);
    2124             : 
    2125           0 :         return ret;
    2126             : }
    2127             : 
    2128             : /*
    2129             :  * Check whether all pages are *allowed* to be pinned. Rather confusingly, all
    2130             :  * pages in the range are required to be pinned via FOLL_PIN, before calling
    2131             :  * this routine.
    2132             :  *
    2133             :  * If any pages in the range are not allowed to be pinned, then this routine
    2134             :  * will migrate those pages away, unpin all the pages in the range and return
    2135             :  * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then
    2136             :  * call this routine again.
    2137             :  *
    2138             :  * If an error other than -EAGAIN occurs, this indicates a migration failure.
    2139             :  * The caller should give up, and propagate the error back up the call stack.
    2140             :  *
    2141             :  * If everything is OK and all pages in the range are allowed to be pinned, then
    2142             :  * this routine leaves all pages pinned and returns zero for success.
    2143             :  */
    2144           0 : static long check_and_migrate_movable_pages(unsigned long nr_pages,
    2145             :                                             struct page **pages)
    2146             : {
    2147             :         unsigned long collected;
    2148           0 :         LIST_HEAD(movable_page_list);
    2149             : 
    2150           0 :         collected = collect_longterm_unpinnable_pages(&movable_page_list,
    2151             :                                                 nr_pages, pages);
    2152           0 :         if (!collected)
    2153             :                 return 0;
    2154             : 
    2155           0 :         return migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages,
    2156             :                                                 pages);
    2157             : }
    2158             : #else
    2159             : static long check_and_migrate_movable_pages(unsigned long nr_pages,
    2160             :                                             struct page **pages)
    2161             : {
    2162             :         return 0;
    2163             : }
    2164             : #endif /* CONFIG_MIGRATION */
    2165             : 
    2166             : /*
    2167             :  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
    2168             :  * allows us to process the FOLL_LONGTERM flag.
    2169             :  */
    2170           0 : static long __gup_longterm_locked(struct mm_struct *mm,
    2171             :                                   unsigned long start,
    2172             :                                   unsigned long nr_pages,
    2173             :                                   struct page **pages,
    2174             :                                   int *locked,
    2175             :                                   unsigned int gup_flags)
    2176             : {
    2177             :         unsigned int flags;
    2178             :         long rc, nr_pinned_pages;
    2179             : 
    2180           0 :         if (!(gup_flags & FOLL_LONGTERM))
    2181             :                 return __get_user_pages_locked(mm, start, nr_pages, pages,
    2182             :                                                locked, gup_flags);
    2183             : 
    2184           0 :         flags = memalloc_pin_save();
    2185             :         do {
    2186           0 :                 nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
    2187             :                                                           pages, locked,
    2188             :                                                           gup_flags);
    2189           0 :                 if (nr_pinned_pages <= 0) {
    2190             :                         rc = nr_pinned_pages;
    2191             :                         break;
    2192             :                 }
    2193             : 
    2194             :                 /* FOLL_LONGTERM implies FOLL_PIN */
    2195           0 :                 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
    2196           0 :         } while (rc == -EAGAIN);
    2197           0 :         memalloc_pin_restore(flags);
    2198           0 :         return rc ? rc : nr_pinned_pages;
    2199             : }
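                     : 
                     : /*
                     :  * Illustrative sketch (editor's example, not part of gup.c): FOLL_LONGTERM
                     :  * is normally requested via pin_user_pages() rather than passed in here
                     :  * directly.  A long-term pinning user (e.g. for DMA) looks roughly like
                     :  * this; "uaddr", "npages" and "pages" are hypothetical.
                     :  *
                     :  *      long got = pin_user_pages(uaddr, npages,
                     :  *                                FOLL_WRITE | FOLL_LONGTERM, pages);
                     :  *      if (got <= 0)
                     :  *              return got ? got : -EFAULT;
                     :  *      // ... map pages[0..got-1] for DMA, use them, tear down ...
                     :  *      unpin_user_pages(pages, got);
                     :  */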
    2200             : 
    2201             : /*
    2202             :  * Check that the given flags are valid for the exported gup/pup interface, and
    2203             :  * update them with the required flags that the caller must have set.
    2204             :  */
    2205           0 : static bool is_valid_gup_args(struct page **pages, int *locked,
    2206             :                               unsigned int *gup_flags_p, unsigned int to_set)
    2207             : {
    2208           0 :         unsigned int gup_flags = *gup_flags_p;
    2209             : 
    2210             :         /*
    2211             :          * These flags are not allowed to be specified externally to the gup
    2212             :          * interfaces:
    2213             :          * - FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only
    2214             :          * - FOLL_REMOTE is internal only and used on follow_page()
    2215             :          * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL
    2216             :          */
    2217           0 :         if (WARN_ON_ONCE(gup_flags & (FOLL_PIN | FOLL_TRIED | FOLL_UNLOCKABLE |
    2218             :                                       FOLL_REMOTE | FOLL_FAST_ONLY)))
    2219             :                 return false;
    2220             : 
    2221           0 :         gup_flags |= to_set;
    2222           0 :         if (locked) {
    2223             :                 /* At the external interface locked must be set */
    2224           0 :                 if (WARN_ON_ONCE(*locked != 1))
    2225             :                         return false;
    2226             : 
    2227           0 :                 gup_flags |= FOLL_UNLOCKABLE;
    2228             :         }
    2229             : 
    2230             :         /* FOLL_GET and FOLL_PIN are mutually exclusive. */
    2231           0 :         if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
    2232             :                          (FOLL_PIN | FOLL_GET)))
    2233             :                 return false;
    2234             : 
    2235             :         /* LONGTERM can only be specified when pinning */
    2236           0 :         if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM)))
    2237             :                 return false;
    2238             : 
    2239             :         /* Pages input must be given if using GET/PIN */
    2240           0 :         if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages))
    2241             :                 return false;
    2242             : 
    2243             :         /* We want to allow the pgmap to be hot-unplugged at all times */
    2244           0 :         if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) &&
    2245             :                          (gup_flags & FOLL_PCI_P2PDMA)))
    2246             :                 return false;
    2247             : 
    2248           0 :         *gup_flags_p = gup_flags;
    2249           0 :         return true;
    2250             : }
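                     : 
                     : /*
                     :  * Editor's note with a hypothetical example (locking elided): this check
                     :  * is why passing FOLL_PIN directly to get_user_pages() fails, while
                     :  * pin_user_pages() (which sets FOLL_PIN internally) is the supported way
                     :  * to request a pin:
                     :  *
                     :  *      get_user_pages(uaddr, 1, FOLL_WRITE | FOLL_PIN, &page); // WARN, -EINVAL
                     :  *      pin_user_pages(uaddr, 1, FOLL_WRITE, &page);            // OK
                     :  */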
    2251             : 
    2252             : #ifdef CONFIG_MMU
    2253             : /**
    2254             :  * get_user_pages_remote() - pin user pages in memory
    2255             :  * @mm:         mm_struct of target mm
    2256             :  * @start:      starting user address
    2257             :  * @nr_pages:   number of pages from start to pin
    2258             :  * @gup_flags:  flags modifying lookup behaviour
    2259             :  * @pages:      array that receives pointers to the pages pinned.
    2260             :  *              Should be at least nr_pages long. Or NULL, if caller
    2261             :  *              only intends to ensure the pages are faulted in.
    2262             :  * @locked:     pointer to lock flag indicating whether lock is held and
    2263             :  *              subsequently whether VM_FAULT_RETRY functionality can be
    2264             :  *              utilised. Lock must initially be held.
    2265             :  *
    2266             :  * Returns either number of pages pinned (which may be less than the
    2267             :  * number requested), or an error. Details about the return value:
    2268             :  *
    2269             :  * -- If nr_pages is 0, returns 0.
    2270             :  * -- If nr_pages is >0, but no pages were pinned, returns -errno.
    2271             :  * -- If nr_pages is >0, and some pages were pinned, returns the number of
    2272             :  *    pages pinned. Again, this may be less than nr_pages.
    2273             :  *
    2274             :  * The caller is responsible for releasing returned @pages, via put_page().
    2275             :  *
    2276             :  * Must be called with mmap_lock held for read or write.
    2277             :  *
    2278             :  * get_user_pages_remote walks a process's page tables and takes a reference
    2279             :  * to each struct page that each user address corresponds to at a given
    2280             :  * instant. That is, it takes the page that would be accessed if a user
    2281             :  * thread accesses the given user virtual address at that instant.
    2282             :  *
    2283             :  * This does not guarantee that the page exists in the user mappings when
    2284             :  * get_user_pages_remote returns, and there may even be a completely different
    2285             :  * page there in some cases (eg. if mmapped pagecache has been invalidated
    2286             :  * and subsequently re-faulted). However it does guarantee that the page
    2287             :  * won't be freed completely. And mostly callers simply care that the page
    2288             :  * contains data that was valid *at some point in time*. Typically, an IO
    2289             :  * or similar operation cannot guarantee anything stronger anyway because
    2290             :  * locks can't be held over the syscall boundary.
    2291             :  *
    2292             :  * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
    2293             :  * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
    2294             :  * be called after the page is finished with, and before put_page is called.
    2295             :  *
    2296             :  * get_user_pages_remote is typically used for fewer-copy IO operations,
    2297             :  * to get a handle on the memory by some means other than accesses
    2298             :  * via the user virtual addresses. The pages may be submitted for
    2299             :  * DMA to devices or accessed via their kernel linear mapping (via the
    2300             :  * kmap APIs). Care should be taken to use the correct cache flushing APIs.
    2301             :  *
    2302             :  * See also get_user_pages_fast, for performance critical applications.
    2303             :  *
    2304             :  * get_user_pages_remote should be phased out in favor of
    2305             :  * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
    2306             :  * should use get_user_pages_remote because it cannot pass
    2307             :  * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
    2308             :  */
    2309           0 : long get_user_pages_remote(struct mm_struct *mm,
    2310             :                 unsigned long start, unsigned long nr_pages,
    2311             :                 unsigned int gup_flags, struct page **pages,
    2312             :                 int *locked)
    2313             : {
    2314           0 :         int local_locked = 1;
    2315             : 
    2316           0 :         if (!is_valid_gup_args(pages, locked, &gup_flags,
    2317             :                                FOLL_TOUCH | FOLL_REMOTE))
    2318             :                 return -EINVAL;
    2319             : 
    2320           0 :         return __get_user_pages_locked(mm, start, nr_pages, pages,
    2321             :                                        locked ? locked : &local_locked,
    2322             :                                        gup_flags);
    2323             : }
    2324             : EXPORT_SYMBOL(get_user_pages_remote);
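                     : 
                     : /*
                     :  * Illustrative sketch (editor's example, not part of gup.c) of the
                     :  * dirty/release protocol described above, for one page of a foreign mm:
                     :  *
                     :  *      struct page *page;
                     :  *      int locked = 1;
                     :  *      long got;
                     :  *
                     :  *      mmap_read_lock(mm);
                     :  *      got = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, &page, &locked);
                     :  *      if (locked)
                     :  *              mmap_read_unlock(mm);
                     :  *      if (got != 1)
                     :  *              return got < 0 ? got : -EFAULT;
                     :  *      // ... write to the page via kmap_local_page()/kunmap_local() ...
                     :  *      set_page_dirty_lock(page);
                     :  *      put_page(page);
                     :  */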
    2325             : 
    2326             : #else /* CONFIG_MMU */
    2327             : long get_user_pages_remote(struct mm_struct *mm,
    2328             :                            unsigned long start, unsigned long nr_pages,
    2329             :                            unsigned int gup_flags, struct page **pages,
    2330             :                            int *locked)
    2331             : {
    2332             :         return 0;
    2333             : }
    2334             : #endif /* !CONFIG_MMU */
    2335             : 
    2336             : /**
    2337             :  * get_user_pages() - pin user pages in memory
    2338             :  * @start:      starting user address
    2339             :  * @nr_pages:   number of pages from start to pin
    2340             :  * @gup_flags:  flags modifying lookup behaviour
    2341             :  * @pages:      array that receives pointers to the pages pinned.
    2342             :  *              Should be at least nr_pages long. Or NULL, if caller
    2343             :  *              only intends to ensure the pages are faulted in.
    2344             :  *
    2345             :  * This is the same as get_user_pages_remote(), just with a less-flexible
    2346             :  * calling convention where we assume that the mm being operated on belongs to
    2347             :  * the current task, and doesn't allow passing of a locked parameter.  We also
    2348             :  * obviously don't pass FOLL_REMOTE in here.
    2349             :  */
    2350           0 : long get_user_pages(unsigned long start, unsigned long nr_pages,
    2351             :                     unsigned int gup_flags, struct page **pages)
    2352             : {
    2353           0 :         int locked = 1;
    2354             : 
    2355           0 :         if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH))
    2356             :                 return -EINVAL;
    2357             : 
    2358           0 :         return __get_user_pages_locked(current->mm, start, nr_pages, pages,
    2359             :                                        &locked, gup_flags);
    2360             : }
    2361             : EXPORT_SYMBOL(get_user_pages);
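                     : 
                     : /*
                     :  * Illustrative sketch (editor's example, not part of gup.c): same
                     :  * dirty/release protocol as above, but against current->mm; the caller
                     :  * still holds the mmap_lock across the call.
                     :  *
                     :  *      mmap_read_lock(current->mm);
                     :  *      got = get_user_pages(uaddr, 1, FOLL_WRITE, &page);
                     :  *      mmap_read_unlock(current->mm);
                     :  *      if (got == 1) {
                     :  *              set_page_dirty_lock(page);      // if the page was written
                     :  *              put_page(page);
                     :  *      }
                     :  */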
    2362             : 
    2363             : /*
    2364             :  * get_user_pages_unlocked() is suitable to replace the form:
    2365             :  *
    2366             :  *      mmap_read_lock(mm);
    2367             :  *      get_user_pages(mm, ..., pages, NULL);
    2368             :  *      mmap_read_unlock(mm);
    2369             :  *
    2370             :  *  with:
    2371             :  *
    2372             :  *      get_user_pages_unlocked(mm, ..., pages);
    2373             :  *
    2374             :  * It is functionally equivalent to get_user_pages_fast so
    2375             :  * get_user_pages_fast should be used instead if specific gup_flags
    2376             :  * (e.g. FOLL_FORCE) are not required.
    2377             :  */
    2378           0 : long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
    2379             :                              struct page **pages, unsigned int gup_flags)
    2380             : {
    2381           0 :         int locked = 0;
    2382             : 
    2383           0 :         if (!is_valid_gup_args(pages, NULL, &gup_flags,
    2384             :                                FOLL_TOUCH | FOLL_UNLOCKABLE))
    2385             :                 return -EINVAL;
    2386             : 
    2387           0 :         return __get_user_pages_locked(current->mm, start, nr_pages, pages,
    2388             :                                        &locked, gup_flags);
    2389             : }
    2390             : EXPORT_SYMBOL(get_user_pages_unlocked);
    2391             : 
    2392             : /*
    2393             :  * Fast GUP
    2394             :  *
    2395             :  * get_user_pages_fast attempts to pin user pages by walking the page
    2396             :  * tables directly and avoids taking locks. Thus the walker needs to be
    2397             :  * protected from page table pages being freed from under it, and should
    2398             :  * block any THP splits.
    2399             :  *
    2400             :  * One way to achieve this is to have the walker disable interrupts, and
    2401             :  * rely on IPIs from the TLB flushing code blocking before the page table
    2402             :  * pages are freed. This is unsuitable for architectures that do not need
    2403             :  * to broadcast an IPI when invalidating TLBs.
    2404             :  *
    2405             :  * Another way to achieve this is to batch up the pages containing page tables
    2406             :  * belonging to more than one mm_user, then rcu_sched a callback to free those
    2407             :  * pages. Disabling interrupts will allow the fast_gup walker to block both
    2408             :  * the rcu_sched callback and an IPI that we broadcast for splitting THPs
    2409             :  * (which is a relatively rare event). The code below adopts this strategy.
    2410             :  *
    2411             :  * Before activating this code, please be aware that the following assumptions
    2412             :  * are currently made:
    2413             :  *
    2414             :  *  *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
    2415             :  *  free pages containing page tables, or TLB flushing requires IPI broadcast.
    2416             :  *
    2417             :  *  *) ptes can be read atomically by the architecture.
    2418             :  *
    2419             :  *  *) access_ok is sufficient to validate userspace address ranges.
    2420             :  *
    2421             :  * The last two assumptions can be relaxed by the addition of helper functions.
    2422             :  *
    2423             :  * This code is based heavily on the PowerPC implementation by Nick Piggin.
    2424             :  */
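                     : 
                     : /*
                     :  * Editor's note, an illustrative sketch of the strategy just described
                     :  * (the real entry point is lockless_pages_from_mm() further down in this
                     :  * file): the walk runs with local interrupts disabled so that both the
                     :  * IPI-based and the RCU-based page table freeing are held off while the
                     :  * walker is looking at the page tables.
                     :  *
                     :  *      local_irq_save(flags);
                     :  *      gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
                     :  *      local_irq_restore(flags);
                     :  */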
    2425             : #ifdef CONFIG_HAVE_FAST_GUP
    2426             : 
    2427             : /*
    2428             :  * Used in the GUP-fast path to determine whether a pin is permitted for a
    2429             :  * specific folio.
    2430             :  *
    2431             :  * This call assumes the caller has pinned the folio, that the lowest page table
    2432             :  * level still points to this folio, and that interrupts have been disabled.
    2433             :  *
    2434             :  * Writing to pinned file-backed dirty tracked folios is inherently problematic
    2435             :  * (see comment describing the writable_file_mapping_allowed() function). We
    2436             :  * therefore try to avoid the most egregious case of a long-term mapping doing
    2437             :  * so.
    2438             :  *
    2439             :  * This function cannot be as thorough as that one as the VMA is not available
    2440             :  * in the fast path, so instead we whitelist known good cases and, if in doubt,
    2441             :  * fall back to the slow path.
    2442             :  */
    2443             : static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags)
    2444             : {
    2445             :         struct address_space *mapping;
    2446             :         unsigned long mapping_flags;
    2447             : 
    2448             :         /*
    2449             :          * If we aren't pinning then no problematic write can occur. A long term
    2450             :          * pin is the most egregious case so this is the one we disallow.
    2451             :          */
    2452             :         if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) !=
    2453             :             (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE))
    2454             :                 return true;
    2455             : 
    2456             :         /* The folio is pinned, so we can safely access folio fields. */
    2457             : 
    2458             :         if (WARN_ON_ONCE(folio_test_slab(folio)))
    2459             :                 return false;
    2460             : 
    2461             :         /* hugetlb mappings do not require dirty-tracking. */
    2462             :         if (folio_test_hugetlb(folio))
    2463             :                 return true;
    2464             : 
    2465             :         /*
    2466             :  * GUP-fast disables IRQs. When IRQs are disabled, RCU grace periods
    2467             :          * cannot proceed, which means no actions performed under RCU can
    2468             :          * proceed either.
    2469             :          *
    2470             :          * inodes and thus their mappings are freed under RCU, which means the
    2471             :          * mapping cannot be freed beneath us and thus we can safely dereference
    2472             :          * it.
    2473             :          */
    2474             :         lockdep_assert_irqs_disabled();
    2475             : 
    2476             :         /*
    2477             :          * However, there may be operations which _alter_ the mapping, so ensure
    2478             :          * we read it once and only once.
    2479             :          */
    2480             :         mapping = READ_ONCE(folio->mapping);
    2481             : 
    2482             :         /*
    2483             :          * The mapping may have been truncated, in any case we cannot determine
    2484             :          * if this mapping is safe - fall back to slow path to determine how to
    2485             :          * proceed.
    2486             :          */
    2487             :         if (!mapping)
    2488             :                 return false;
    2489             : 
    2490             :         /* Anonymous folios pose no problem. */
    2491             :         mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS;
    2492             :         if (mapping_flags)
    2493             :                 return mapping_flags & PAGE_MAPPING_ANON;
    2494             : 
    2495             :         /*
    2496             :          * At this point, we know the mapping is non-null and points to an
    2497             :          * address_space object. The only remaining whitelisted file system is
    2498             :          * shmem.
    2499             :          */
    2500             :         return shmem_mapping(mapping);
    2501             : }
    2502             : 
    2503             : static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
    2504             :                                             unsigned int flags,
    2505             :                                             struct page **pages)
    2506             : {
    2507             :         while ((*nr) - nr_start) {
    2508             :                 struct page *page = pages[--(*nr)];
    2509             : 
    2510             :                 ClearPageReferenced(page);
    2511             :                 if (flags & FOLL_PIN)
    2512             :                         unpin_user_page(page);
    2513             :                 else
    2514             :                         put_page(page);
    2515             :         }
    2516             : }
    2517             : 
    2518             : #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
    2519             : /*
    2520             :  * Fast-gup relies on pte change detection to avoid concurrent pgtable
    2521             :  * operations.
    2522             :  *
    2523             :  * To pin the page, fast-gup needs to do the following, in order:
    2524             :  * (1) pin the page (by prefetching the pte), then (2) check that the pte has not changed.
    2525             :  *
    2526             :  * For the rest of the pgtable operations, where pgtable updates can race
    2527             :  * with fast-gup, the required order is: (1) clear the pte, then (2) check
    2528             :  * whether the page is pinned.
    2529             :  *
    2530             :  * Above will work for all pte-level operations, including THP split.
    2531             :  *
    2532             :  * For THP collapse, it's a bit more complicated because fast-gup may be
    2533             :  * walking a pgtable page that is being freed (pte is still valid but pmd
    2534             :  * can already be cleared).  To avoid racing in that case, we also need to
    2535             :  * check the pmd here to make sure it doesn't change (this corresponds to
    2536             :  * pmdp_collapse_flush() in the THP collapse code path).
    2537             :  */
    2538             : static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
    2539             :                          unsigned long end, unsigned int flags,
    2540             :                          struct page **pages, int *nr)
    2541             : {
    2542             :         struct dev_pagemap *pgmap = NULL;
    2543             :         int nr_start = *nr, ret = 0;
    2544             :         pte_t *ptep, *ptem;
    2545             : 
    2546             :         ptem = ptep = pte_offset_map(&pmd, addr);
    2547             :         if (!ptep)
    2548             :                 return 0;
    2549             :         do {
    2550             :                 pte_t pte = ptep_get_lockless(ptep);
    2551             :                 struct page *page;
    2552             :                 struct folio *folio;
    2553             : 
    2554             :                 if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
    2555             :                         goto pte_unmap;
    2556             : 
    2557             :                 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
    2558             :                         goto pte_unmap;
    2559             : 
    2560             :                 if (pte_devmap(pte)) {
    2561             :                         if (unlikely(flags & FOLL_LONGTERM))
    2562             :                                 goto pte_unmap;
    2563             : 
    2564             :                         pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
    2565             :                         if (unlikely(!pgmap)) {
    2566             :                                 undo_dev_pagemap(nr, nr_start, flags, pages);
    2567             :                                 goto pte_unmap;
    2568             :                         }
    2569             :                 } else if (pte_special(pte))
    2570             :                         goto pte_unmap;
    2571             : 
    2572             :                 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
    2573             :                 page = pte_page(pte);
    2574             : 
    2575             :                 folio = try_grab_folio(page, 1, flags);
    2576             :                 if (!folio)
    2577             :                         goto pte_unmap;
    2578             : 
    2579             :                 if (unlikely(page_is_secretmem(page))) {
    2580             :                         gup_put_folio(folio, 1, flags);
    2581             :                         goto pte_unmap;
    2582             :                 }
    2583             : 
    2584             :                 if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
    2585             :                     unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
    2586             :                         gup_put_folio(folio, 1, flags);
    2587             :                         goto pte_unmap;
    2588             :                 }
    2589             : 
    2590             :                 if (!folio_fast_pin_allowed(folio, flags)) {
    2591             :                         gup_put_folio(folio, 1, flags);
    2592             :                         goto pte_unmap;
    2593             :                 }
    2594             : 
    2595             :                 if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
    2596             :                         gup_put_folio(folio, 1, flags);
    2597             :                         goto pte_unmap;
    2598             :                 }
    2599             : 
    2600             :                 /*
    2601             :                  * We need to make the page accessible if and only if we are
    2602             :                  * going to access its content (the FOLL_PIN case).  Please
    2603             :                  * see Documentation/core-api/pin_user_pages.rst for
    2604             :                  * details.
    2605             :                  */
    2606             :                 if (flags & FOLL_PIN) {
    2607             :                         ret = arch_make_page_accessible(page);
    2608             :                         if (ret) {
    2609             :                                 gup_put_folio(folio, 1, flags);
    2610             :                                 goto pte_unmap;
    2611             :                         }
    2612             :                 }
    2613             :                 folio_set_referenced(folio);
    2614             :                 pages[*nr] = page;
    2615             :                 (*nr)++;
    2616             :         } while (ptep++, addr += PAGE_SIZE, addr != end);
    2617             : 
    2618             :         ret = 1;
    2619             : 
    2620             : pte_unmap:
    2621             :         if (pgmap)
    2622             :                 put_dev_pagemap(pgmap);
    2623             :         pte_unmap(ptem);
    2624             :         return ret;
    2625             : }
    2626             : #else
    2627             : 
    2628             : /*
    2629             :  * If we can't determine whether or not a pte is special, then fail immediately
    2630             :  * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
    2631             :  * to be special.
    2632             :  *
    2633             :  * For a futex to be placed on a THP tail page, get_futex_key requires a
    2634             :  * get_user_pages_fast_only implementation that can pin pages. Thus it's still
    2635             :  * useful to have gup_huge_pmd even if we can't operate on ptes.
    2636             :  */
    2637             : static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
    2638             :                          unsigned long end, unsigned int flags,
    2639             :                          struct page **pages, int *nr)
    2640             : {
    2641             :         return 0;
    2642             : }
    2643             : #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
    2644             : 
    2645             : #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
    2646             : static int __gup_device_huge(unsigned long pfn, unsigned long addr,
    2647             :                              unsigned long end, unsigned int flags,
    2648             :                              struct page **pages, int *nr)
    2649             : {
    2650             :         int nr_start = *nr;
    2651             :         struct dev_pagemap *pgmap = NULL;
    2652             : 
    2653             :         do {
    2654             :                 struct page *page = pfn_to_page(pfn);
    2655             : 
    2656             :                 pgmap = get_dev_pagemap(pfn, pgmap);
    2657             :                 if (unlikely(!pgmap)) {
    2658             :                         undo_dev_pagemap(nr, nr_start, flags, pages);
    2659             :                         break;
    2660             :                 }
    2661             : 
    2662             :                 if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
    2663             :                         undo_dev_pagemap(nr, nr_start, flags, pages);
    2664             :                         break;
    2665             :                 }
    2666             : 
    2667             :                 SetPageReferenced(page);
    2668             :                 pages[*nr] = page;
    2669             :                 if (unlikely(try_grab_page(page, flags))) {
    2670             :                         undo_dev_pagemap(nr, nr_start, flags, pages);
    2671             :                         break;
    2672             :                 }
    2673             :                 (*nr)++;
    2674             :                 pfn++;
    2675             :         } while (addr += PAGE_SIZE, addr != end);
    2676             : 
    2677             :         put_dev_pagemap(pgmap);
    2678             :         return addr == end;
    2679             : }
    2680             : 
    2681             : static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
    2682             :                                  unsigned long end, unsigned int flags,
    2683             :                                  struct page **pages, int *nr)
    2684             : {
    2685             :         unsigned long fault_pfn;
    2686             :         int nr_start = *nr;
    2687             : 
    2688             :         fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
    2689             :         if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
    2690             :                 return 0;
    2691             : 
    2692             :         if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
    2693             :                 undo_dev_pagemap(nr, nr_start, flags, pages);
    2694             :                 return 0;
    2695             :         }
    2696             :         return 1;
    2697             : }
    2698             : 
    2699             : static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
    2700             :                                  unsigned long end, unsigned int flags,
    2701             :                                  struct page **pages, int *nr)
    2702             : {
    2703             :         unsigned long fault_pfn;
    2704             :         int nr_start = *nr;
    2705             : 
    2706             :         fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
    2707             :         if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
    2708             :                 return 0;
    2709             : 
    2710             :         if (unlikely(pud_val(orig) != pud_val(*pudp))) {
    2711             :                 undo_dev_pagemap(nr, nr_start, flags, pages);
    2712             :                 return 0;
    2713             :         }
    2714             :         return 1;
    2715             : }
    2716             : #else
    2717             : static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
    2718             :                                  unsigned long end, unsigned int flags,
    2719             :                                  struct page **pages, int *nr)
    2720             : {
    2721             :         BUILD_BUG();
    2722             :         return 0;
    2723             : }
    2724             : 
    2725             : static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
    2726             :                                  unsigned long end, unsigned int flags,
    2727             :                                  struct page **pages, int *nr)
    2728             : {
    2729             :         BUILD_BUG();
    2730             :         return 0;
    2731             : }
    2732             : #endif
    2733             : 
    2734             : static int record_subpages(struct page *page, unsigned long addr,
    2735             :                            unsigned long end, struct page **pages)
    2736             : {
    2737             :         int nr;
    2738             : 
    2739             :         for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
    2740             :                 pages[nr] = nth_page(page, nr);
    2741             : 
    2742             :         return nr;
    2743             : }
    2744             : 
    2745             : #ifdef CONFIG_ARCH_HAS_HUGEPD
    2746             : static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
    2747             :                                       unsigned long sz)
    2748             : {
    2749             :         unsigned long __boundary = (addr + sz) & ~(sz-1);
    2750             :         return (__boundary - 1 < end - 1) ? __boundary : end;
    2751             : }
    2752             : 
    2753             : static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
    2754             :                        unsigned long end, unsigned int flags,
    2755             :                        struct page **pages, int *nr)
    2756             : {
    2757             :         unsigned long pte_end;
    2758             :         struct page *page;
    2759             :         struct folio *folio;
    2760             :         pte_t pte;
    2761             :         int refs;
    2762             : 
    2763             :         pte_end = (addr + sz) & ~(sz-1);
    2764             :         if (pte_end < end)
    2765             :                 end = pte_end;
    2766             : 
    2767             :         pte = huge_ptep_get(ptep);
    2768             : 
    2769             :         if (!pte_access_permitted(pte, flags & FOLL_WRITE))
    2770             :                 return 0;
    2771             : 
    2772             :         /* hugepages are never "special" */
    2773             :         VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
    2774             : 
    2775             :         page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT);
    2776             :         refs = record_subpages(page, addr, end, pages + *nr);
    2777             : 
    2778             :         folio = try_grab_folio(page, refs, flags);
    2779             :         if (!folio)
    2780             :                 return 0;
    2781             : 
    2782             :         if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
    2783             :                 gup_put_folio(folio, refs, flags);
    2784             :                 return 0;
    2785             :         }
    2786             : 
    2787             :         if (!folio_fast_pin_allowed(folio, flags)) {
    2788             :                 gup_put_folio(folio, refs, flags);
    2789             :                 return 0;
    2790             :         }
    2791             : 
    2792             :         if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) {
    2793             :                 gup_put_folio(folio, refs, flags);
    2794             :                 return 0;
    2795             :         }
    2796             : 
    2797             :         *nr += refs;
    2798             :         folio_set_referenced(folio);
    2799             :         return 1;
    2800             : }
    2801             : 
    2802             : static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
    2803             :                 unsigned int pdshift, unsigned long end, unsigned int flags,
    2804             :                 struct page **pages, int *nr)
    2805             : {
    2806             :         pte_t *ptep;
    2807             :         unsigned long sz = 1UL << hugepd_shift(hugepd);
    2808             :         unsigned long next;
    2809             : 
    2810             :         ptep = hugepte_offset(hugepd, addr, pdshift);
    2811             :         do {
    2812             :                 next = hugepte_addr_end(addr, end, sz);
    2813             :                 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
    2814             :                         return 0;
    2815             :         } while (ptep++, addr = next, addr != end);
    2816             : 
    2817             :         return 1;
    2818             : }
    2819             : #else
    2820             : static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
    2821             :                 unsigned int pdshift, unsigned long end, unsigned int flags,
    2822             :                 struct page **pages, int *nr)
    2823             : {
    2824             :         return 0;
    2825             : }
    2826             : #endif /* CONFIG_ARCH_HAS_HUGEPD */
    2827             : 
    2828             : static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
    2829             :                         unsigned long end, unsigned int flags,
    2830             :                         struct page **pages, int *nr)
    2831             : {
    2832             :         struct page *page;
    2833             :         struct folio *folio;
    2834             :         int refs;
    2835             : 
    2836             :         if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
    2837             :                 return 0;
    2838             : 
    2839             :         if (pmd_devmap(orig)) {
    2840             :                 if (unlikely(flags & FOLL_LONGTERM))
    2841             :                         return 0;
    2842             :                 return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
    2843             :                                              pages, nr);
    2844             :         }
    2845             : 
    2846             :         page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT);
    2847             :         refs = record_subpages(page, addr, end, pages + *nr);
    2848             : 
    2849             :         folio = try_grab_folio(page, refs, flags);
    2850             :         if (!folio)
    2851             :                 return 0;
    2852             : 
    2853             :         if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
    2854             :                 gup_put_folio(folio, refs, flags);
    2855             :                 return 0;
    2856             :         }
    2857             : 
    2858             :         if (!folio_fast_pin_allowed(folio, flags)) {
    2859             :                 gup_put_folio(folio, refs, flags);
    2860             :                 return 0;
    2861             :         }
    2862             :         if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
    2863             :                 gup_put_folio(folio, refs, flags);
    2864             :                 return 0;
    2865             :         }
    2866             : 
    2867             :         *nr += refs;
    2868             :         folio_set_referenced(folio);
    2869             :         return 1;
    2870             : }
    2871             : 
    2872             : static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
    2873             :                         unsigned long end, unsigned int flags,
    2874             :                         struct page **pages, int *nr)
    2875             : {
    2876             :         struct page *page;
    2877             :         struct folio *folio;
    2878             :         int refs;
    2879             : 
    2880             :         if (!pud_access_permitted(orig, flags & FOLL_WRITE))
    2881             :                 return 0;
    2882             : 
    2883             :         if (pud_devmap(orig)) {
    2884             :                 if (unlikely(flags & FOLL_LONGTERM))
    2885             :                         return 0;
    2886             :                 return __gup_device_huge_pud(orig, pudp, addr, end, flags,
    2887             :                                              pages, nr);
    2888             :         }
    2889             : 
    2890             :         page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT);
    2891             :         refs = record_subpages(page, addr, end, pages + *nr);
    2892             : 
    2893             :         folio = try_grab_folio(page, refs, flags);
    2894             :         if (!folio)
    2895             :                 return 0;
    2896             : 
    2897             :         if (unlikely(pud_val(orig) != pud_val(*pudp))) {
    2898             :                 gup_put_folio(folio, refs, flags);
    2899             :                 return 0;
    2900             :         }
    2901             : 
    2902             :         if (!folio_fast_pin_allowed(folio, flags)) {
    2903             :                 gup_put_folio(folio, refs, flags);
    2904             :                 return 0;
    2905             :         }
    2906             : 
    2907             :         if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
    2908             :                 gup_put_folio(folio, refs, flags);
    2909             :                 return 0;
    2910             :         }
    2911             : 
    2912             :         *nr += refs;
    2913             :         folio_set_referenced(folio);
    2914             :         return 1;
    2915             : }
    2916             : 
    2917             : static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
    2918             :                         unsigned long end, unsigned int flags,
    2919             :                         struct page **pages, int *nr)
    2920             : {
    2921             :         int refs;
    2922             :         struct page *page;
    2923             :         struct folio *folio;
    2924             : 
    2925             :         if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
    2926             :                 return 0;
    2927             : 
    2928             :         BUILD_BUG_ON(pgd_devmap(orig));
    2929             : 
    2930             :         page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT);
    2931             :         refs = record_subpages(page, addr, end, pages + *nr);
    2932             : 
    2933             :         folio = try_grab_folio(page, refs, flags);
    2934             :         if (!folio)
    2935             :                 return 0;
    2936             : 
    2937             :         if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
    2938             :                 gup_put_folio(folio, refs, flags);
    2939             :                 return 0;
    2940             :         }
    2941             : 
    2942             :         if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
    2943             :                 gup_put_folio(folio, refs, flags);
    2944             :                 return 0;
    2945             :         }
    2946             : 
    2947             :         if (!folio_fast_pin_allowed(folio, flags)) {
    2948             :                 gup_put_folio(folio, refs, flags);
    2949             :                 return 0;
    2950             :         }
    2951             : 
    2952             :         *nr += refs;
    2953             :         folio_set_referenced(folio);
    2954             :         return 1;
    2955             : }
    2956             : 
    2957             : static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
    2958             :                 unsigned int flags, struct page **pages, int *nr)
    2959             : {
    2960             :         unsigned long next;
    2961             :         pmd_t *pmdp;
    2962             : 
    2963             :         pmdp = pmd_offset_lockless(pudp, pud, addr);
    2964             :         do {
    2965             :                 pmd_t pmd = pmdp_get_lockless(pmdp);
    2966             : 
    2967             :                 next = pmd_addr_end(addr, end);
    2968             :                 if (!pmd_present(pmd))
    2969             :                         return 0;
    2970             : 
    2971             :                 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
    2972             :                              pmd_devmap(pmd))) {
    2973             :                         if (pmd_protnone(pmd) &&
    2974             :                             !gup_can_follow_protnone(flags))
    2975             :                                 return 0;
    2976             : 
    2977             :                         if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
    2978             :                                 pages, nr))
    2979             :                                 return 0;
    2980             : 
    2981             :                 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
    2982             :                         /*
    2983             :                          * Architectures can use different page table entry
    2984             :                          * formats for hugetlbfs PMDs and THP PMDs.
    2985             :                          */
    2986             :                         if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
    2987             :                                          PMD_SHIFT, next, flags, pages, nr))
    2988             :                                 return 0;
    2989             :                 } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
    2990             :                         return 0;
    2991             :         } while (pmdp++, addr = next, addr != end);
    2992             : 
    2993             :         return 1;
    2994             : }
    2995             : 
    2996             : static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
    2997             :                          unsigned int flags, struct page **pages, int *nr)
    2998             : {
    2999             :         unsigned long next;
    3000             :         pud_t *pudp;
    3001             : 
    3002             :         pudp = pud_offset_lockless(p4dp, p4d, addr);
    3003             :         do {
    3004             :                 pud_t pud = READ_ONCE(*pudp);
    3005             : 
    3006             :                 next = pud_addr_end(addr, end);
    3007             :                 if (unlikely(!pud_present(pud)))
    3008             :                         return 0;
    3009             :                 if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
    3010             :                         if (!gup_huge_pud(pud, pudp, addr, next, flags,
    3011             :                                           pages, nr))
    3012             :                                 return 0;
    3013             :                 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
    3014             :                         if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
    3015             :                                          PUD_SHIFT, next, flags, pages, nr))
    3016             :                                 return 0;
    3017             :                 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
    3018             :                         return 0;
    3019             :         } while (pudp++, addr = next, addr != end);
    3020             : 
    3021             :         return 1;
    3022             : }
    3023             : 
    3024             : static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
    3025             :                          unsigned int flags, struct page **pages, int *nr)
    3026             : {
    3027             :         unsigned long next;
    3028             :         p4d_t *p4dp;
    3029             : 
    3030             :         p4dp = p4d_offset_lockless(pgdp, pgd, addr);
    3031             :         do {
    3032             :                 p4d_t p4d = READ_ONCE(*p4dp);
    3033             : 
    3034             :                 next = p4d_addr_end(addr, end);
    3035             :                 if (p4d_none(p4d))
    3036             :                         return 0;
    3037             :                 BUILD_BUG_ON(p4d_huge(p4d));
    3038             :                 if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
    3039             :                         if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
    3040             :                                          P4D_SHIFT, next, flags, pages, nr))
    3041             :                                 return 0;
    3042             :                 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
    3043             :                         return 0;
    3044             :         } while (p4dp++, addr = next, addr != end);
    3045             : 
    3046             :         return 1;
    3047             : }
    3048             : 
    3049             : static void gup_pgd_range(unsigned long addr, unsigned long end,
    3050             :                 unsigned int flags, struct page **pages, int *nr)
    3051             : {
    3052             :         unsigned long next;
    3053             :         pgd_t *pgdp;
    3054             : 
    3055             :         pgdp = pgd_offset(current->mm, addr);
    3056             :         do {
    3057             :                 pgd_t pgd = READ_ONCE(*pgdp);
    3058             : 
    3059             :                 next = pgd_addr_end(addr, end);
    3060             :                 if (pgd_none(pgd))
    3061             :                         return;
    3062             :                 if (unlikely(pgd_huge(pgd))) {
    3063             :                         if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
    3064             :                                           pages, nr))
    3065             :                                 return;
    3066             :                 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
    3067             :                         if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
    3068             :                                          PGDIR_SHIFT, next, flags, pages, nr))
    3069             :                                 return;
    3070             :                 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
    3071             :                         return;
    3072             :         } while (pgdp++, addr = next, addr != end);
    3073             : }
    3074             : #else
    3075             : static inline void gup_pgd_range(unsigned long addr, unsigned long end,
    3076             :                 unsigned int flags, struct page **pages, int *nr)
    3077             : {
    3078             : }
    3079             : #endif /* CONFIG_HAVE_FAST_GUP */
    3080             : 
    3081             : #ifndef gup_fast_permitted
    3082             : /*
    3083             :  * Check if it's allowed to use get_user_pages_fast_only() for the range, or
    3084             :  * we need to fall back to the slow version:
    3085             :  */
    3086             : static bool gup_fast_permitted(unsigned long start, unsigned long end)
    3087             : {
    3088             :         return true;
    3089             : }
    3090             : #endif
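The #ifndef above is an override hook: an architecture can supply its own gup_fast_permitted() (and define the macro of the same name) to veto the lockless walk for ranges it cannot handle safely, forcing the slow path instead. A minimal, purely hypothetical override sketch; the header location, limit constant, and policy below are illustrative and not taken from any real architecture:

/* Hypothetical arch header, e.g. arch/foo/include/asm/pgtable.h. */
#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
        /*
         * Illustrative policy only: refuse the lockless walk above a
         * made-up architectural limit so GUP falls back to the slow path.
         */
        return end <= FOO_GUP_FAST_LIMIT;
}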
    3091             : 
    3092             : static unsigned long lockless_pages_from_mm(unsigned long start,
    3093             :                                             unsigned long end,
    3094             :                                             unsigned int gup_flags,
    3095             :                                             struct page **pages)
    3096             : {
    3097             :         unsigned long flags;
    3098           0 :         int nr_pinned = 0;
    3099             :         unsigned seq;
    3100             : 
    3101             :         if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
    3102             :             !gup_fast_permitted(start, end))
    3103             :                 return 0;
    3104             : 
    3105             :         if (gup_flags & FOLL_PIN) {
    3106             :                 seq = raw_read_seqcount(&current->mm->write_protect_seq);
    3107             :                 if (seq & 1)
    3108             :                         return 0;
    3109             :         }
    3110             : 
    3111             :         /*
    3112             :          * Disable interrupts. The nested form is used in order to allow full,
    3113             :          * general-purpose use of this routine.
    3114             :          *
    3115             :          * With interrupts disabled, we block page table pages from being freed
    3116             :          * from under us. See struct mmu_table_batch comments in
    3117             :          * include/asm-generic/tlb.h for more details.
    3118             :          *
    3119             :          * We do not adopt an rcu_read_lock() here as we also want to block IPIs
    3120             :          * that come from THPs splitting.
    3121             :          */
    3122             :         local_irq_save(flags);
    3123             :         gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
    3124             :         local_irq_restore(flags);
    3125             : 
    3126             :         /*
    3127             :          * When pinning pages for DMA there could be a concurrent write protect
    3128             :          * from fork() via copy_page_range(); in that case, always fail fast GUP.
    3129             :          */
    3130             :         if (gup_flags & FOLL_PIN) {
    3131             :                 if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
    3132             :                         unpin_user_pages_lockless(pages, nr_pinned);
    3133             :                         return 0;
    3134             :                 } else {
    3135             :                         sanity_check_pinned_pages(pages, nr_pinned);
    3136             :                 }
    3137             :         }
    3138             :         return nr_pinned;
    3139             : }
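The FOLL_PIN handling above is a seqcount read/retry: the snapshot taken before the walk is rechecked afterwards, and any overlap with a writer (fork() write-protecting page tables under mm->write_protect_seq) discards the pins. A self-contained sketch of the same pattern with a local seqcount follows; the names are illustrative and unrelated to the mm fields, and note that the GUP code uses raw_read_seqcount() so it can bail out on an odd count instead of spinning:

#include <linux/seqlock.h>

static seqcount_t demo_seq = SEQCNT_ZERO(demo_seq);
static int demo_value;

/* Writer side: any reader that overlaps this window must retry. */
static void demo_update(int v)
{
        preempt_disable();      /* plain seqcount_t writers must not be preempted */
        write_seqcount_begin(&demo_seq);
        demo_value = v;
        write_seqcount_end(&demo_seq);
        preempt_enable();
}

/* Reader side: mirrors the snapshot/retry done around gup_pgd_range() above. */
static int demo_read(void)
{
        unsigned int seq;
        int v;

        do {
                seq = read_seqcount_begin(&demo_seq);
                v = demo_value;
        } while (read_seqcount_retry(&demo_seq, seq));

        return v;
}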
    3140             : 
    3141           0 : static int internal_get_user_pages_fast(unsigned long start,
    3142             :                                         unsigned long nr_pages,
    3143             :                                         unsigned int gup_flags,
    3144             :                                         struct page **pages)
    3145             : {
    3146             :         unsigned long len, end;
    3147             :         unsigned long nr_pinned;
    3148           0 :         int locked = 0;
    3149             :         int ret;
    3150             : 
    3151           0 :         if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
    3152             :                                        FOLL_FORCE | FOLL_PIN | FOLL_GET |
    3153             :                                        FOLL_FAST_ONLY | FOLL_NOFAULT |
    3154             :                                        FOLL_PCI_P2PDMA)))
    3155             :                 return -EINVAL;
    3156             : 
    3157           0 :         if (gup_flags & FOLL_PIN)
    3158           0 :                 mm_set_has_pinned_flag(&current->mm->flags);
    3159             : 
    3160           0 :         if (!(gup_flags & FOLL_FAST_ONLY))
    3161             :                 might_lock_read(&current->mm->mmap_lock);
    3162             : 
    3163           0 :         start = untagged_addr(start) & PAGE_MASK;
    3164           0 :         len = nr_pages << PAGE_SHIFT;
    3165           0 :         if (check_add_overflow(start, len, &end))
    3166             :                 return -EOVERFLOW;
    3167           0 :         if (end > TASK_SIZE_MAX)
    3168             :                 return -EFAULT;
    3169           0 :         if (unlikely(!access_ok((void __user *)start, len)))
    3170             :                 return -EFAULT;
    3171             : 
    3172           0 :         nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
    3173           0 :         if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
    3174             :                 return nr_pinned;
    3175             : 
    3176             :         /* Slow path: try to get the remaining pages with get_user_pages */
    3177           0 :         start += nr_pinned << PAGE_SHIFT;
    3178           0 :         pages += nr_pinned;
    3179           0 :         ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned,
    3180             :                                     pages, &locked,
    3181             :                                     gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE);
    3182             :         if (ret < 0) {
    3183             :                 /*
    3184             :                  * The caller has to unpin the pages we already pinned, so
    3185             :                  * returning -errno is not an option.
    3186             :                  */
    3187             :                 if (nr_pinned)
    3188             :                         return nr_pinned;
    3189             :                 return ret;
    3190             :         }
    3191             :         return ret + nr_pinned;
    3192             : }
    3193             : 
    3194             : /**
    3195             :  * get_user_pages_fast_only() - pin user pages in memory
    3196             :  * @start:      starting user address
    3197             :  * @nr_pages:   number of pages from start to pin
    3198             :  * @gup_flags:  flags modifying pin behaviour
    3199             :  * @pages:      array that receives pointers to the pages pinned.
    3200             :  *              Should be at least nr_pages long.
    3201             :  *
    3202             :  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
    3203             :  * the regular GUP.
    3204             :  *
    3205             :  * If the architecture does not support this function, simply return with no
    3206             :  * pages pinned.
    3207             :  *
    3208             :  * Careful, careful! COW breaking can go either way, so a non-write
    3209             :  * access can get ambiguous page results. If you call this function without
    3210             :  * 'write' set, you'd better be sure that you're ok with that ambiguity.
    3211             :  */
    3212           0 : int get_user_pages_fast_only(unsigned long start, int nr_pages,
    3213             :                              unsigned int gup_flags, struct page **pages)
    3214             : {
    3215             :         /*
    3216             :          * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
    3217             :          * because gup fast is always a "pin with a +1 page refcount" request.
    3218             :          *
    3219             :          * FOLL_FAST_ONLY is required in order to match the API description of
    3220             :          * this routine: no fall back to regular ("slow") GUP.
    3221             :          */
    3222           0 :         if (!is_valid_gup_args(pages, NULL, &gup_flags,
    3223             :                                FOLL_GET | FOLL_FAST_ONLY))
    3224             :                 return -EINVAL;
    3225             : 
    3226           0 :         return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
    3227             : }
    3228             : EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
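Since this variant never falls back to slow GUP, it can be used from contexts that must not sleep; the caller simply has to tolerate getting fewer (or zero) pages back. A minimal usage sketch (the helper name and single-page policy are illustrative):

#include <linux/mm.h>

/*
 * Illustrative helper: try to grab one user page without sleeping.
 * Returns the page with an elevated refcount, or NULL if the fast walk
 * could not resolve it (a real caller would then defer to a slow path).
 */
static struct page *demo_grab_page_atomic(unsigned long uaddr, bool write)
{
        struct page *page;

        if (get_user_pages_fast_only(uaddr, 1, write ? FOLL_WRITE : 0, &page) != 1)
                return NULL;

        return page;    /* drop later with put_page(page) */
}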
    3229             : 
    3230             : /**
    3231             :  * get_user_pages_fast() - pin user pages in memory
    3232             :  * @start:      starting user address
    3233             :  * @nr_pages:   number of pages from start to pin
    3234             :  * @gup_flags:  flags modifying pin behaviour
    3235             :  * @pages:      array that receives pointers to the pages pinned.
    3236             :  *              Should be at least nr_pages long.
    3237             :  *
    3238             :  * Attempt to pin user pages in memory without taking mm->mmap_lock.
    3239             :  * If not successful, it will fall back to taking the lock and
    3240             :  * calling get_user_pages().
    3241             :  *
    3242             :  * Returns number of pages pinned. This may be fewer than the number requested.
    3243             :  * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
    3244             :  * -errno.
    3245             :  */
    3246           0 : int get_user_pages_fast(unsigned long start, int nr_pages,
    3247             :                         unsigned int gup_flags, struct page **pages)
    3248             : {
    3249             :         /*
    3250             :          * The caller may or may not have explicitly set FOLL_GET; either way is
    3251             :          * OK. However, internally (within mm/gup.c), gup fast variants must set
    3252             :          * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
    3253             :          * request.
    3254             :          */
    3255           0 :         if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET))
    3256             :                 return -EINVAL;
    3257           0 :         return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
    3258             : }
    3259             : EXPORT_SYMBOL_GPL(get_user_pages_fast);
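Unlike the _only variant, get_user_pages_fast() may fall back to the slow path and fault pages in, so it must be called from sleepable context; the caller holds no mmap_lock. A sketch of the take/use/release cycle with FOLL_GET-style references (buffer handling and names are illustrative):

#include <linux/mm.h>
#include <linux/slab.h>

static int demo_get_user_buffer(unsigned long uaddr, int nr_pages, bool write)
{
        struct page **pages;
        int got, i;

        pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        got = get_user_pages_fast(uaddr, nr_pages, write ? FOLL_WRITE : 0, pages);
        if (got < 0) {
                kfree(pages);
                return got;
        }

        /* ... access the pages, e.g. via kmap_local_page() ... */

        /* References were taken FOLL_GET-style, so drop them with put_page(). */
        for (i = 0; i < got; i++)
                put_page(pages[i]);

        kfree(pages);
        return got == nr_pages ? 0 : -EFAULT;
}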
    3260             : 
    3261             : /**
    3262             :  * pin_user_pages_fast() - pin user pages in memory without taking locks
    3263             :  *
    3264             :  * @start:      starting user address
    3265             :  * @nr_pages:   number of pages from start to pin
    3266             :  * @gup_flags:  flags modifying pin behaviour
    3267             :  * @pages:      array that receives pointers to the pages pinned.
    3268             :  *              Should be at least nr_pages long.
    3269             :  *
    3270             :  * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
    3271             :  * get_user_pages_fast() for documentation on the function arguments, because
    3272             :  * the arguments here are identical.
    3273             :  *
    3274             :  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
    3275             :  * see Documentation/core-api/pin_user_pages.rst for further details.
    3276             :  *
    3277             :  * Note that if a zero_page is amongst the returned pages, it will not have
    3278             :  * pins in it and unpin_user_page() will not remove pins from it.
    3279             :  */
    3280           0 : int pin_user_pages_fast(unsigned long start, int nr_pages,
    3281             :                         unsigned int gup_flags, struct page **pages)
    3282             : {
    3283           0 :         if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
    3284             :                 return -EINVAL;
    3285           0 :         return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
    3286             : }
    3287             : EXPORT_SYMBOL_GPL(pin_user_pages_fast);
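Because FOLL_PIN is used, everything returned here must be released through unpin_user_page()/unpin_user_pages(), never put_page(). A typical DMA-style pin/unpin pair, sketched with illustrative names (FOLL_LONGTERM is one of the flags accepted by the fast path, as the flag mask in internal_get_user_pages_fast() above shows):

#include <linux/mm.h>

/* Illustrative: pin a user buffer so a device can DMA into it long-term. */
static int demo_pin_for_dma(unsigned long uaddr, struct page **pages, int nr_pages)
{
        int pinned;

        pinned = pin_user_pages_fast(uaddr, nr_pages,
                                     FOLL_WRITE | FOLL_LONGTERM, pages);
        if (pinned < 0)
                return pinned;
        if (pinned != nr_pages) {
                unpin_user_pages(pages, pinned);
                return -EFAULT;
        }
        return 0;
}

/* Counterpart once the device is done with the buffer. */
static void demo_unpin_after_dma(struct page **pages, int nr_pages)
{
        unpin_user_pages(pages, nr_pages);
}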
    3288             : 
    3289             : /**
    3290             :  * pin_user_pages_remote() - pin pages of a remote process
    3291             :  *
    3292             :  * @mm:         mm_struct of target mm
    3293             :  * @start:      starting user address
    3294             :  * @nr_pages:   number of pages from start to pin
    3295             :  * @gup_flags:  flags modifying lookup behaviour
    3296             :  * @pages:      array that receives pointers to the pages pinned.
    3297             :  *              Should be at least nr_pages long.
    3298             :  * @locked:     pointer to lock flag indicating whether lock is held and
    3299             :  *              subsequently whether VM_FAULT_RETRY functionality can be
    3300             :  *              utilised. Lock must initially be held.
    3301             :  *
    3302             :  * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
    3303             :  * get_user_pages_remote() for documentation on the function arguments, because
    3304             :  * the arguments here are identical.
    3305             :  *
    3306             :  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
    3307             :  * see Documentation/core-api/pin_user_pages.rst for details.
    3308             :  *
    3309             :  * Note that if a zero_page is amongst the returned pages, it will not have
    3310             :  * pins in it and unpin_user_page*() will not remove pins from it.
    3311             :  */
    3312           0 : long pin_user_pages_remote(struct mm_struct *mm,
    3313             :                            unsigned long start, unsigned long nr_pages,
    3314             :                            unsigned int gup_flags, struct page **pages,
    3315             :                            int *locked)
    3316             : {
    3317           0 :         int local_locked = 1;
    3318             : 
    3319           0 :         if (!is_valid_gup_args(pages, locked, &gup_flags,
    3320             :                                FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE))
    3321             :                 return 0;
    3322           0 :         return __gup_longterm_locked(mm, start, nr_pages, pages,
    3323             :                                      locked ? locked : &local_locked,
    3324             :                                      gup_flags);
    3325             : }
    3326             : EXPORT_SYMBOL(pin_user_pages_remote);
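Per the kernel-doc above, the remote variant expects mmap_lock of the target mm to be held on entry, and the locked flag reports whether it is still held on return. A hedged sketch of pinning one page of another process's address space (names and error handling are illustrative):

#include <linux/mm.h>

static struct page *demo_pin_remote_page(struct mm_struct *mm, unsigned long uaddr)
{
        struct page *page;
        int locked = 1;
        long pinned;

        mmap_read_lock(mm);
        pinned = pin_user_pages_remote(mm, uaddr, 1, FOLL_WRITE, &page, &locked);
        if (locked)
                mmap_read_unlock(mm);   /* the lock may already have been dropped for us */

        if (pinned != 1)
                return NULL;

        return page;    /* release with unpin_user_page(page) */
}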
    3327             : 
    3328             : /**
    3329             :  * pin_user_pages() - pin user pages in memory for use by other devices
    3330             :  *
    3331             :  * @start:      starting user address
    3332             :  * @nr_pages:   number of pages from start to pin
    3333             :  * @gup_flags:  flags modifying lookup behaviour
    3334             :  * @pages:      array that receives pointers to the pages pinned.
    3335             :  *              Should be at least nr_pages long.
    3336             :  *
    3337             :  * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
    3338             :  * FOLL_PIN is set.
    3339             :  *
    3340             :  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
    3341             :  * see Documentation/core-api/pin_user_pages.rst for details.
    3342             :  *
    3343             :  * Note that if a zero_page is amongst the returned pages, it will not have
    3344             :  * pins in it and unpin_user_page*() will not remove pins from it.
    3345             :  */
    3346           0 : long pin_user_pages(unsigned long start, unsigned long nr_pages,
    3347             :                     unsigned int gup_flags, struct page **pages)
    3348             : {
    3349           0 :         int locked = 1;
    3350             : 
    3351           0 :         if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
    3352             :                 return 0;
    3353           0 :         return __gup_longterm_locked(current->mm, start, nr_pages,
    3354             :                                      pages, &locked, gup_flags);
    3355             : }
    3356             : EXPORT_SYMBOL(pin_user_pages);
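pin_user_pages() follows the get_user_pages() calling convention, so the caller is expected to already hold mmap_lock of current->mm for read (compare pin_user_pages_unlocked() below, which takes the lock itself). A short sketch along the lines of how buffer-registration code typically uses it; the immediate unpin and names are illustrative:

#include <linux/mm.h>

/* Illustrative: pin nr_pages of the calling task's own buffer, then release. */
static long demo_pin_own_pages(unsigned long uaddr, unsigned long nr_pages,
                               struct page **pages)
{
        long pinned;

        mmap_read_lock(current->mm);
        pinned = pin_user_pages(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM, pages);
        mmap_read_unlock(current->mm);

        if (pinned > 0)
                unpin_user_pages(pages, pinned);        /* demo only: unpin immediately */

        return pinned;
}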
    3357             : 
    3358             : /*
    3359             :  * pin_user_pages_unlocked() is the FOLL_PIN variant of
    3360             :  * get_user_pages_unlocked(). Behavior is the same, except that this one sets
    3361             :  * FOLL_PIN and rejects FOLL_GET.
    3362             :  *
    3363             :  * Note that if a zero_page is amongst the returned pages, it will not have
    3364             :  * pins in it and unpin_user_page*() will not remove pins from it.
    3365             :  */
    3366           0 : long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
    3367             :                              struct page **pages, unsigned int gup_flags)
    3368             : {
    3369           0 :         int locked = 0;
    3370             : 
    3371           0 :         if (!is_valid_gup_args(pages, NULL, &gup_flags,
    3372             :                                FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE))
    3373             :                 return 0;
    3374             : 
    3375           0 :         return __gup_longterm_locked(current->mm, start, nr_pages, pages,
    3376             :                                      &locked, gup_flags);
    3377             : }
    3378             : EXPORT_SYMBOL(pin_user_pages_unlocked);
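Here the caller holds no mmap_lock at all; the function acquires and releases it internally. Note also that the argument order differs from the other variants, with pages before gup_flags. An illustrative single-page sketch:

#include <linux/mm.h>

/* Illustrative: pin one page of the current task without holding mmap_lock. */
static struct page *demo_pin_page_unlocked(unsigned long uaddr)
{
        struct page *page;

        if (pin_user_pages_unlocked(uaddr, 1, &page, FOLL_WRITE) != 1)
                return NULL;

        return page;    /* release with unpin_user_page(page) */
}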

Generated by: LCOV version 1.14