/*
 * mm/pagewalk.c - LCOV coverage: 0/184 lines, 0/12 functions (0.0 %)
 * Test: coverage.info, generated 2023-07-19 18:55:55 by LCOV 1.14
 */
// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

/*
 * We want to know the real level where an entry is located, ignoring any
 * folding of levels which may be happening. For example, if p4d is folded then
 * a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
 */
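/*
 * For example, on a configuration where the p4d level is folded
 * (PTRS_PER_P4D == 1), real_depth(1) returns 0, so a hole found while
 * iterating p4d entries is reported to ->pte_hole() at the pgd level,
 * which is the level that actually exists in the page tables.
 */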
static int real_depth(int depth)
{
        if (depth == 3 && PTRS_PER_PMD == 1)
                depth = 2;
        if (depth == 2 && PTRS_PER_PUD == 1)
                depth = 1;
        if (depth == 1 && PTRS_PER_P4D == 1)
                depth = 0;
        return depth;
}

static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
{
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;

        for (;;) {
                err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                        break;
                if (addr >= end - PAGE_SIZE)
                        break;
                addr += PAGE_SIZE;
                pte++;
        }
        return err;
}

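/*
 * Walk the ptes mapped by one pmd entry. With a vma, the pte page is
 * mapped with its page-table lock held; for no_vma walks (see
 * walk_page_range_novma()) the ptes are deliberately walked unlocked.
 */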
static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pte_t *pte;
        int err = 0;
        spinlock_t *ptl;

        if (walk->no_vma) {
                pte = pte_offset_map(pmd, addr);
                err = walk_pte_range_inner(pte, addr, end, walk);
                pte_unmap(pte);
        } else {
                pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
                err = walk_pte_range_inner(pte, addr, end, walk);
                pte_unmap_unlock(pte, ptl);
        }

        return err;
}

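/*
 * hugepd entries are huge-page directories: on the few architectures
 * that select CONFIG_ARCH_HAS_HUGEPD (notably powerpc), an entry at any
 * page-table level may point at a directory of huge ptes rather than at
 * a normal next-level table, so each level below checks is_hugepd().
 */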
#ifdef CONFIG_ARCH_HAS_HUGEPD
static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
                             unsigned long end, struct mm_walk *walk, int pdshift)
{
        int err = 0;
        const struct mm_walk_ops *ops = walk->ops;
        int shift = hugepd_shift(*phpd);
        int page_size = 1 << shift;

        if (!ops->pte_entry)
                return 0;

        if (addr & (page_size - 1))
                return 0;

        for (;;) {
                pte_t *pte;

                spin_lock(&walk->mm->page_table_lock);
                pte = hugepte_offset(*phpd, addr, pdshift);
                err = ops->pte_entry(pte, addr, addr + page_size, walk);
                spin_unlock(&walk->mm->page_table_lock);

                if (err)
                        break;
                if (addr >= end - page_size)
                        break;
                addr += page_size;
        }
        return err;
}
#else
static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
                             unsigned long end, struct mm_walk *walk, int pdshift)
{
        return 0;
}
#endif

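/*
 * The pmd and pud walkers below follow a common pattern: report holes
 * via ->pte_hole(), invoke the per-level entry callback, then honour
 * walk->action, which the callback may set to ACTION_SUBTREE (descend;
 * the default), ACTION_CONTINUE (skip the levels below this entry), or
 * ACTION_AGAIN (re-read and re-handle the same entry).
 */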
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pmd_t *pmd;
        unsigned long next;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;
        int depth = real_depth(3);

        pmd = pmd_offset(pud, addr);
        do {
again:
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd)) {
                        if (ops->pte_hole)
                                err = ops->pte_hole(addr, next, depth, walk);
                        if (err)
                                break;
                        continue;
                }

                walk->action = ACTION_SUBTREE;

                /*
                 * This implies that each ->pmd_entry() handler
                 * needs to know about pmd_trans_huge() pmds
                 */
                if (ops->pmd_entry)
                        err = ops->pmd_entry(pmd, addr, next, walk);
                if (err)
                        break;

                if (walk->action == ACTION_AGAIN)
                        goto again;

                /*
                 * Check this here so we only break down trans_huge
                 * pages when we _need_ to
                 */
                if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
                    walk->action == ACTION_CONTINUE ||
                    !(ops->pte_entry))
                        continue;

                if (walk->vma) {
                        split_huge_pmd(walk->vma, pmd, addr);
                        if (pmd_trans_unstable(pmd))
                                goto again;
                }

                if (is_hugepd(__hugepd(pmd_val(*pmd))))
                        err = walk_hugepd_range((hugepd_t *)pmd, addr, next, walk, PMD_SHIFT);
                else
                        err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}

static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pud_t *pud;
        unsigned long next;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;
        int depth = real_depth(2);

        pud = pud_offset(p4d, addr);
        do {
again:
                next = pud_addr_end(addr, end);
                if (pud_none(*pud)) {
                        if (ops->pte_hole)
                                err = ops->pte_hole(addr, next, depth, walk);
                        if (err)
                                break;
                        continue;
                }

                walk->action = ACTION_SUBTREE;

                if (ops->pud_entry)
                        err = ops->pud_entry(pud, addr, next, walk);
                if (err)
                        break;

                if (walk->action == ACTION_AGAIN)
                        goto again;

                if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
                    walk->action == ACTION_CONTINUE ||
                    !(ops->pmd_entry || ops->pte_entry))
                        continue;

                if (walk->vma)
                        split_huge_pud(walk->vma, pud, addr);
                if (pud_none(*pud))
                        goto again;

                if (is_hugepd(__hugepd(pud_val(*pud))))
                        err = walk_hugepd_range((hugepd_t *)pud, addr, next, walk, PUD_SHIFT);
                else
                        err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}

static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        p4d_t *p4d;
        unsigned long next;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;
        int depth = real_depth(1);

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(p4d)) {
                        if (ops->pte_hole)
                                err = ops->pte_hole(addr, next, depth, walk);
                        if (err)
                                break;
                        continue;
                }
                if (ops->p4d_entry) {
                        err = ops->p4d_entry(p4d, addr, next, walk);
                        if (err)
                                break;
                }
                if (is_hugepd(__hugepd(p4d_val(*p4d))))
                        err = walk_hugepd_range((hugepd_t *)p4d, addr, next, walk, P4D_SHIFT);
                else if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
                        err = walk_pud_range(p4d, addr, next, walk);
                if (err)
                        break;
        } while (p4d++, addr = next, addr != end);

        return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pgd_t *pgd;
        unsigned long next;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;

        if (walk->pgd)
                pgd = walk->pgd + pgd_index(addr);
        else
                pgd = pgd_offset(walk->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd)) {
                        if (ops->pte_hole)
                                err = ops->pte_hole(addr, next, 0, walk);
                        if (err)
                                break;
                        continue;
                }
                if (ops->pgd_entry) {
                        err = ops->pgd_entry(pgd, addr, next, walk);
                        if (err)
                                break;
                }
                if (is_hugepd(__hugepd(pgd_val(*pgd))))
                        err = walk_hugepd_range((hugepd_t *)pgd, addr, next, walk, PGDIR_SHIFT);
                else if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry || ops->pte_entry)
                        err = walk_p4d_range(pgd, addr, next, walk);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);

        return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
                                       unsigned long end)
{
        unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
        return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct hstate *h = hstate_vma(vma);
        unsigned long next;
        unsigned long hmask = huge_page_mask(h);
        unsigned long sz = huge_page_size(h);
        pte_t *pte;
        const struct mm_walk_ops *ops = walk->ops;
        int err = 0;

        hugetlb_vma_lock_read(vma);
        do {
                next = hugetlb_entry_end(h, addr, end);
                pte = hugetlb_walk(vma, addr & hmask, sz);
                if (pte)
                        err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
                else if (ops->pte_hole)
                        err = ops->pte_hole(addr, next, -1, walk);
                if (err)
                        break;
        } while (addr = next, addr != end);
        hugetlb_vma_unlock_read(vma);

        return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative return value
 * means an error, in which case we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
                        struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        const struct mm_walk_ops *ops = walk->ops;

        if (ops->test_walk)
                return ops->test_walk(start, end, walk);

        /*
         * A vma with VM_PFNMAP has no valid struct pages behind its
         * range, so we don't walk over it as we do for normal vmas.
         * However, some callers are interested in handling hole ranges
         * and don't want any address range to be silently skipped. Such
         * callers define a ->pte_hole() callback, so let's delegate the
         * handling of vma(VM_PFNMAP) to them.
         */
        if (vma->vm_flags & VM_PFNMAP) {
                int err = 1;
                if (ops->pte_hole)
                        err = ops->pte_hole(start, end, -1, walk);
                return err ? err : 1;
        }
        return 0;
}
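
/*
 * A minimal sketch of a ->test_walk() callback (illustrative only, not
 * part of this file): restrict a walk to anonymous vmas. Returning 1
 * skips the vma, 0 walks it, and a negative value aborts the walk.
 */
static int example_test_walk(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        /* Skip file-backed and special mappings, walk anonymous ones. */
        return vma_is_anonymous(walk->vma) ? 0 : 1;
}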

static int __walk_page_range(unsigned long start, unsigned long end,
                        struct mm_walk *walk)
{
        int err = 0;
        struct vm_area_struct *vma = walk->vma;
        const struct mm_walk_ops *ops = walk->ops;

        if (ops->pre_vma) {
                err = ops->pre_vma(start, end, walk);
                if (err)
                        return err;
        }

        if (is_vm_hugetlb_page(vma)) {
                if (ops->hugetlb_entry)
                        err = walk_hugetlb_range(start, end, walk);
        } else
                err = walk_pgd_range(start, end, walk);

        if (ops->post_vma)
                ops->post_vma(walk);

        return err;
}

/**
 * walk_page_range - walk page table with caller-specific callbacks
 * @mm:         mm_struct representing the target process of page table walk
 * @start:      start address of the virtual address range
 * @end:        end address of the virtual address range
 * @ops:        operation to call during the walk
 * @private:    private data for callbacks' usage
 *
 * Recursively walk the page table tree of the process represented by @mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined as follows:
 *
 *  - 0  : handled the current entry successfully; if the end address has
 *         not been reached yet, continue the walk.
 *  - >0 : handled the current entry successfully, and return to the caller
 *         with this caller-specific value.
 *  - <0 : failed to handle the current entry; return to the caller
 *         with the error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
 * purpose.
 *
 * If operations need to be staged before and committed after a vma is walked,
 * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
 * since it is intended to handle commit-type operations, can't return any
 * errors.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for the access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock,
 *   because these functions traverse the vma list and/or access vma data.
 */
int walk_page_range(struct mm_struct *mm, unsigned long start,
                unsigned long end, const struct mm_walk_ops *ops,
                void *private)
{
        int err = 0;
        unsigned long next;
        struct vm_area_struct *vma;
        struct mm_walk walk = {
                .ops            = ops,
                .mm             = mm,
                .private        = private,
        };

        if (start >= end)
                return -EINVAL;

        if (!walk.mm)
                return -EINVAL;

        mmap_assert_locked(walk.mm);

        vma = find_vma(walk.mm, start);
        do {
                if (!vma) { /* after the last vma */
                        walk.vma = NULL;
                        next = end;
                        if (ops->pte_hole)
                                err = ops->pte_hole(start, next, -1, &walk);
                } else if (start < vma->vm_start) { /* outside vma */
                        walk.vma = NULL;
                        next = min(end, vma->vm_start);
                        if (ops->pte_hole)
                                err = ops->pte_hole(start, next, -1, &walk);
                } else { /* inside vma */
                        walk.vma = vma;
                        next = min(end, vma->vm_end);
                        vma = find_vma(mm, vma->vm_end);

                        err = walk_page_test(start, next, &walk);
                        if (err > 0) {
                                /*
                                 * positive return values are purely for
                                 * controlling the pagewalk, so should never
                                 * be passed to the callers.
                                 */
                                err = 0;
                                continue;
                        }
                        if (err < 0)
                                break;
                        err = __walk_page_range(start, next, &walk);
                }
                if (err)
                        break;
        } while (start = next, start < end);
        return err;
}
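
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * count the present ptes in a range. The helper and callback names are
 * made up; the locking follows the rule documented above, i.e. the
 * caller takes mmap_lock before walking.
 */
static int count_pte_entry(pte_t *pte, unsigned long addr,
                           unsigned long next, struct mm_walk *walk)
{
        unsigned long *nr_present = walk->private;

        if (pte_present(*pte))
                (*nr_present)++;
        return 0;
}

static const struct mm_walk_ops count_walk_ops = {
        .pte_entry      = count_pte_entry,
};

static unsigned long count_present_ptes(struct mm_struct *mm,
                                        unsigned long start, unsigned long end)
{
        unsigned long nr_present = 0;

        mmap_read_lock(mm);
        walk_page_range(mm, start, end, &count_walk_ops, &nr_present);
        mmap_read_unlock(mm);

        return nr_present;
}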

/**
 * walk_page_range_novma - walk a range of pagetables not backed by a vma
 * @mm:         mm_struct representing the target process of page table walk
 * @start:      start address of the virtual address range
 * @end:        end address of the virtual address range
 * @ops:        operation to call during the walk
 * @pgd:        pgd to walk if different from mm->pgd
 * @private:    private data for callbacks' usage
 *
 * Similar to walk_page_range() but can walk any page tables even if they are
 * not backed by VMAs. Because 'unusual' entries may be walked, this function
 * will also not lock the PTEs for the pte_entry() callback. This is useful for
 * walking the kernel page tables or page tables for firmware.
 */
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
                          unsigned long end, const struct mm_walk_ops *ops,
                          pgd_t *pgd,
                          void *private)
{
        struct mm_walk walk = {
                .ops            = ops,
                .mm             = mm,
                .pgd            = pgd,
                .private        = private,
                .no_vma         = true
        };

        if (start >= end || !walk.mm)
                return -EINVAL;

        mmap_assert_write_locked(walk.mm);

        return walk_pgd_range(start, end, &walk);
}
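
/*
 * An illustrative sketch (not part of this file) of the novma interface,
 * in the style of the ptdump code: walk a range of kernel page tables
 * under the write lock that mmap_assert_write_locked() above demands.
 */
static int walk_kernel_page_tables(unsigned long start, unsigned long end,
                                   const struct mm_walk_ops *ops, void *private)
{
        int err;

        mmap_write_lock(&init_mm);
        err = walk_page_range_novma(&init_mm, start, end, ops, NULL, private);
        mmap_write_unlock(&init_mm);

        return err;
}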

int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, const struct mm_walk_ops *ops,
                        void *private)
{
        struct mm_walk walk = {
                .ops            = ops,
                .mm             = vma->vm_mm,
                .vma            = vma,
                .private        = private,
        };

        if (start >= end || !walk.mm)
                return -EINVAL;
        if (start < vma->vm_start || end > vma->vm_end)
                return -EINVAL;

        mmap_assert_locked(walk.mm);
        return __walk_page_range(start, end, &walk);
}

int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
                void *private)
{
        struct mm_walk walk = {
                .ops            = ops,
                .mm             = vma->vm_mm,
                .vma            = vma,
                .private        = private,
        };

        if (!walk.mm)
                return -EINVAL;

        mmap_assert_locked(walk.mm);
        return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}
     556             : 
     557             : /**
     558             :  * walk_page_mapping - walk all memory areas mapped into a struct address_space.
     559             :  * @mapping: Pointer to the struct address_space
     560             :  * @first_index: First page offset in the address_space
     561             :  * @nr: Number of incremental page offsets to cover
     562             :  * @ops:        operation to call during the walk
     563             :  * @private:    private data for callbacks' usage
     564             :  *
     565             :  * This function walks all memory areas mapped into a struct address_space.
     566             :  * The walk is limited to only the given page-size index range, but if
     567             :  * the index boundaries cross a huge page-table entry, that entry will be
     568             :  * included.
     569             :  *
     570             :  * Also see walk_page_range() for additional information.
     571             :  *
     572             :  * Locking:
     573             :  *   This function can't require that the struct mm_struct::mmap_lock is held,
     574             :  *   since @mapping may be mapped by multiple processes. Instead
     575             :  *   @mapping->i_mmap_rwsem must be held. This might have implications in the
     576             :  *   callbacks, and it's up tho the caller to ensure that the
     577             :  *   struct mm_struct::mmap_lock is not needed.
     578             :  *
     579             :  *   Also this means that a caller can't rely on the struct
     580             :  *   vm_area_struct::vm_flags to be constant across a call,
     581             :  *   except for immutable flags. Callers requiring this shouldn't use
     582             :  *   this function.
     583             :  *
     584             :  * Return: 0 on success, negative error code on failure, positive number on
     585             :  * caller defined premature termination.
     586             :  */
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
                      pgoff_t nr, const struct mm_walk_ops *ops,
                      void *private)
{
        struct mm_walk walk = {
                .ops            = ops,
                .private        = private,
        };
        struct vm_area_struct *vma;
        pgoff_t vba, vea, cba, cea;
        unsigned long start_addr, end_addr;
        int err = 0;

        lockdep_assert_held(&mapping->i_mmap_rwsem);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
                                  first_index + nr - 1) {
                /* Clip to the vma */
                vba = vma->vm_pgoff;
                vea = vba + vma_pages(vma);
                cba = first_index;
                cba = max(cba, vba);
                cea = first_index + nr;
                cea = min(cea, vea);

                start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
                end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
                if (start_addr >= end_addr)
                        continue;

                walk.vma = vma;
                walk.mm = vma->vm_mm;

                err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
                if (err > 0) {
                        err = 0;
                        break;
                } else if (err < 0)
                        break;

                err = __walk_page_range(start_addr, end_addr, &walk);
                if (err)
                        break;
        }

        return err;
}
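
/*
 * An illustrative sketch (not part of this file): calling
 * walk_page_mapping() under the i_mmap_rwsem it asserts above. The
 * wrapper name is made up; ops and private come from the caller.
 */
static int walk_file_mappings(struct address_space *mapping, pgoff_t first_index,
                              pgoff_t nr, const struct mm_walk_ops *ops,
                              void *private)
{
        int err;

        i_mmap_lock_read(mapping);
        err = walk_page_mapping(mapping, first_index, nr, ops, private);
        i_mmap_unlock_read(mapping);

        return err;
}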