LCOV - code coverage report

Current view: top level - mm - page_vma_mapped.c (source / functions)
Test:         coverage.info
Date:         2023-08-24 13:40:31
Coverage:     Lines:     0 / 78  (0.0 %)
              Functions: 0 / 4   (0.0 %)

Source code:
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
        page_vma_mapped_walk_done(pvmw);
        return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
{
        pte_t ptent;

        if (pvmw->flags & PVMW_SYNC) {
                /* Use the stricter lookup */
                pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
                                                pvmw->address, &pvmw->ptl);
                *ptlp = pvmw->ptl;
                return !!pvmw->pte;
        }

        /*
         * It is important to return the ptl corresponding to pte,
         * in case *pvmw->pmd changes underneath us; so we need to
         * return it even when choosing not to lock, in case the caller
         * proceeds to loop over the next ptes, and finds a match later.
         * Though, in most cases, the page lock already protects this.
         */
        pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
                                          pvmw->address, ptlp);
        if (!pvmw->pte)
                return false;

        ptent = ptep_get(pvmw->pte);

        if (pvmw->flags & PVMW_MIGRATION) {
                if (!is_swap_pte(ptent))
                        return false;
        } else if (is_swap_pte(ptent)) {
                swp_entry_t entry;
                /*
                 * Handle un-addressable ZONE_DEVICE memory.
                 *
                 * We get here when we are trying to unmap a private
                 * device page from the process address space. Such a
                 * page is not CPU accessible and thus is mapped as
                 * a special swap entry; nonetheless it still does
                 * count as a valid regular mapping for the page
                 * (and is accounted as such in page maps count).
                 *
                 * So handle this special case as if it were a normal
                 * page mapping, i.e. lock the CPU page table and return true.
                 *
                 * For more details on device private memory see HMM
                 * (include/linux/hmm.h or mm/hmm.c).
                 */
                entry = pte_to_swp_entry(ptent);
                if (!is_device_private_entry(entry) &&
                    !is_device_exclusive_entry(entry))
                        return false;
        } else if (!pte_present(ptent)) {
                return false;
        }
        pvmw->ptl = *ptlp;
        spin_lock(pvmw->ptl);
        return true;
}
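
The comment above states the locking contract of the unsynchronized path:
map_pte() hands back the ptl that matches the pte it mapped even when it
chooses not to take it, so the caller can scan locklessly and lock lazily
once a candidate is found. A minimal sketch of that pattern (illustrative
only, not part of this file; mm, pmd and addr are assumed to be set up by
the caller):

        spinlock_t *ptl;
        pte_t *pte = pte_offset_map_nolock(mm, pmd, addr, &ptl);

        if (pte) {
                /*
                 * Scan without the lock; ptl names the lock that guards
                 * this page table even though it has not been taken yet.
                 */
                if (pte_present(ptep_get(pte))) {
                        spin_lock(ptl);   /* lock only on a candidate */
                        /* re-validate the pte under the lock before acting */
                        spin_unlock(ptl);
                }
                pte_unmap(pte);
        }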

/**
 * check_pte - check if the pfn range [@pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages)
 * is mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, which pairs the pte to check with the
 *        pfn range to check it against
 *
 * page_vma_mapped_walk() found a place where the pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to an empty PTE, a swap PTE, or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points into the pfn range (to any subpage, in case
 * of a THP).
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * into the pfn range (to any subpage, in case of a THP).
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
        unsigned long pfn;
        pte_t ptent = ptep_get(pvmw->pte);

        if (pvmw->flags & PVMW_MIGRATION) {
                swp_entry_t entry;
                if (!is_swap_pte(ptent))
                        return false;
                entry = pte_to_swp_entry(ptent);

                if (!is_migration_entry(entry) &&
                    !is_device_exclusive_entry(entry))
                        return false;

                pfn = swp_offset_pfn(entry);
        } else if (is_swap_pte(ptent)) {
                swp_entry_t entry;

                /* Handle un-addressable ZONE_DEVICE memory */
                entry = pte_to_swp_entry(ptent);
                if (!is_device_private_entry(entry) &&
                    !is_device_exclusive_entry(entry))
                        return false;

                pfn = swp_offset_pfn(entry);
        } else {
                if (!pte_present(ptent))
                        return false;

                pfn = pte_pfn(ptent);
        }

        return (pfn - pvmw->pfn) < pvmw->nr_pages;
}
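
The final comparison folds both range bounds into one unsigned test: since
pfn and pvmw->pfn are unsigned long, a pfn below pvmw->pfn underflows to a
huge value and fails the "< nr_pages" check, so no separate lower-bound test
is needed. A worked example with hypothetical values, for a PTE-mapped THP
with pvmw->pfn = 0x1000 and pvmw->nr_pages = 512:

        pfn = 0x1000:  0x1000 - 0x1000 = 0x000 < 512  -> match (head page)
        pfn = 0x11ff:  0x11ff - 0x1000 = 0x1ff < 512  -> match (last subpage)
        pfn = 0x1200:  0x1200 - 0x1000 = 0x200 = 512  -> no match (past the end)
        pfn = 0x0fff:  0x0fff - 0x1000 underflows to ULONG_MAX -> no match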

/* Returns true if the two ranges overlap.  Careful to not overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
        if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
                return false;
        if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
                return false;
        return true;
}
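
The "- 1" in both comparisons is what the "careful to not overflow" note is
about: each test compares the *last* pfn a range actually covers, a value
that exists and therefore cannot wrap, rather than a one-past-the-end sum
that can. A sketch of the failure this avoids (hypothetical values, with
HPAGE_PMD_NR = 512):

        pfn = ULONG_MAX - 511;    /* PMD range ending at the top of pfn space */
        pfn + HPAGE_PMD_NR        /* wraps to 0, so a naive test such as      */
                                  /* "pfn + HPAGE_PMD_NR <= pvmw->pfn" would  */
                                  /* spuriously report no overlap             */
        pfn + HPAGE_PMD_NR - 1    /* = ULONG_MAX, compares correctly          */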

static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
        pvmw->address = (pvmw->address + size) & ~(size - 1);
        if (!pvmw->address)
                pvmw->address = ULONG_MAX;
}
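
step_forward() rounds the walk address up to the next size-aligned boundary;
if the addition wraps past the top of the address space, the result is
clamped to ULONG_MAX so that the caller's "while (pvmw->address < end)" loop
terminates. Worked examples with hypothetical addresses, assuming 2 MiB PMDs
(size = PMD_SIZE = 0x200000):

        address = 0x7f1234567000:
                (0x7f1234567000 + 0x200000) & ~0x1fffff = 0x7f1234600000
                (the next PMD boundary)
        address = 0xffffffffffe00000:
                the addition wraps to 0 -> clamped to ULONG_MAX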

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. pfn, nr_pages, vma, address
 *        and flags must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
        struct vm_area_struct *vma = pvmw->vma;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long end;
        spinlock_t *ptl;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t pmde;

        /* The only possible pmd mapping has been handled on last iteration */
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);

        if (unlikely(is_vm_hugetlb_page(vma))) {
                struct hstate *hstate = hstate_vma(vma);
                unsigned long size = huge_page_size(hstate);
                /* The only possible mapping was handled on last iteration */
                if (pvmw->pte)
                        return not_found(pvmw);
                /*
                 * All callers that get here will already hold the
                 * i_mmap_rwsem.  Therefore, no additional locks need to be
                 * taken before calling hugetlb_walk().
                 */
                pvmw->pte = hugetlb_walk(vma, pvmw->address, size);
                if (!pvmw->pte)
                        return false;

                pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
                return true;
        }

        end = vma_address_end(pvmw);
        if (pvmw->pte)
                goto next_pte;
restart:
        do {
                pgd = pgd_offset(mm, pvmw->address);
                if (!pgd_present(*pgd)) {
                        step_forward(pvmw, PGDIR_SIZE);
                        continue;
                }
                p4d = p4d_offset(pgd, pvmw->address);
                if (!p4d_present(*p4d)) {
                        step_forward(pvmw, P4D_SIZE);
                        continue;
                }
                pud = pud_offset(p4d, pvmw->address);
                if (!pud_present(*pud)) {
                        step_forward(pvmw, PUD_SIZE);
                        continue;
                }

                pvmw->pmd = pmd_offset(pud, pvmw->address);
                /*
                 * Make sure the pmd value isn't cached in a register by the
                 * compiler and used as a stale value after we've observed a
                 * subsequent update.
                 */
                pmde = pmdp_get_lockless(pvmw->pmd);

                if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
                    (pmd_present(pmde) && pmd_devmap(pmde))) {
                        pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                        pmde = *pvmw->pmd;
                        if (!pmd_present(pmde)) {
                                swp_entry_t entry;

                                if (!thp_migration_supported() ||
                                    !(pvmw->flags & PVMW_MIGRATION))
                                        return not_found(pvmw);
                                entry = pmd_to_swp_entry(pmde);
                                if (!is_migration_entry(entry) ||
                                    !check_pmd(swp_offset_pfn(entry), pvmw))
                                        return not_found(pvmw);
                                return true;
                        }
                        if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
                                if (pvmw->flags & PVMW_MIGRATION)
                                        return not_found(pvmw);
                                if (!check_pmd(pmd_pfn(pmde), pvmw))
                                        return not_found(pvmw);
                                return true;
                        }
                        /* THP pmd was split under us: handle on pte level */
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
                } else if (!pmd_present(pmde)) {
                        /*
                         * If PVMW_SYNC, take and drop THP pmd lock so that we
                         * cannot return prematurely, while zap_huge_pmd() has
                         * cleared *pmd but not decremented compound_mapcount().
                         */
                        if ((pvmw->flags & PVMW_SYNC) &&
                            transhuge_vma_suitable(vma, pvmw->address) &&
                            (pvmw->nr_pages >= HPAGE_PMD_NR)) {
                                spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

                                spin_unlock(ptl);
                        }
                        step_forward(pvmw, PMD_SIZE);
                        continue;
                }
                if (!map_pte(pvmw, &ptl)) {
                        if (!pvmw->pte)
                                goto restart;
                        goto next_pte;
                }
this_pte:
                if (check_pte(pvmw))
                        return true;
next_pte:
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= end)
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
                        if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
                                pte_unmap(pvmw->pte);
                                pvmw->pte = NULL;
                                goto restart;
                        }
                        pvmw->pte++;
                } while (pte_none(ptep_get(pvmw->pte)));

                if (!pvmw->ptl) {
                        pvmw->ptl = ptl;
                        spin_lock(pvmw->ptl);
                }
                goto this_pte;
        } while (pvmw->address < end);

        return false;
}
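
The kernel-doc above describes the intended calling pattern: initialize the
struct, call page_vma_mapped_walk() in a loop until it returns false, and
break out early only via page_vma_mapped_walk_done(). A minimal caller
sketch (illustrative only; folio, vma and address are assumed to be provided
by the surrounding code, and must_stop is a hypothetical condition):

        struct page_vma_mapped_walk pvmw = {
                .pfn = folio_pfn(folio),
                .nr_pages = folio_nr_pages(folio),
                .vma = vma,
                .address = address,
        };

        while (page_vma_mapped_walk(&pvmw)) {
                if (!pvmw.pte) {
                        /* PMD-mapped THP: pvmw.pmd is set, pvmw.ptl is held */
                } else {
                        /* one PTE mapping the folio; more may follow */
                }
                if (must_stop) {        /* hypothetical early-stop condition */
                        page_vma_mapped_walk_done(&pvmw);
                        break;
                }
        }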

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .pfn = page_to_pfn(page),
                .nr_pages = 1,
                .vma = vma,
                .flags = PVMW_SYNC,
        };

        pvmw.address = vma_address(page, vma);
        if (pvmw.address == -EFAULT)
                return 0;
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
        return 1;
}
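
page_mapped_in_vma() is itself the simplest client of the walk: PVMW_SYNC
selects the strict locked lookup, nr_pages = 1 restricts the match to this
one page, and because a successful walk returns with the ptl held, the
function must call page_vma_mapped_walk_done() before returning 1. A hedged
usage sketch (hypothetical caller; the answer is only stable while the
caller keeps the page from being unmapped, e.g. by holding the page lock):

        if (page_mapped_in_vma(page, vma)) {
                /* the page tables of this vma currently map the page */
        }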

Generated by: LCOV version 1.14