LCOV - code coverage report
Current view: top level - mm - migrate.c (source / functions)
Test: coverage.info
Date: 2023-04-06 08:38:28
Coverage:   Lines: 0 of 573 (0.0 %)   Functions: 0 of 29 (0.0 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Memory Migration functionality - linux/mm/migrate.c
       4             :  *
       5             :  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
       6             :  *
       7             :  * Page migration was first developed in the context of the memory hotplug
       8             :  * project. The main authors of the migration code are:
       9             :  *
      10             :  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
      11             :  * Hirokazu Takahashi <taka@valinux.co.jp>
      12             :  * Dave Hansen <haveblue@us.ibm.com>
      13             :  * Christoph Lameter
      14             :  */
      15             : 
      16             : #include <linux/migrate.h>
      17             : #include <linux/export.h>
      18             : #include <linux/swap.h>
      19             : #include <linux/swapops.h>
      20             : #include <linux/pagemap.h>
      21             : #include <linux/buffer_head.h>
      22             : #include <linux/mm_inline.h>
      23             : #include <linux/nsproxy.h>
      24             : #include <linux/pagevec.h>
      25             : #include <linux/ksm.h>
      26             : #include <linux/rmap.h>
      27             : #include <linux/topology.h>
      28             : #include <linux/cpu.h>
      29             : #include <linux/cpuset.h>
      30             : #include <linux/writeback.h>
      31             : #include <linux/mempolicy.h>
      32             : #include <linux/vmalloc.h>
      33             : #include <linux/security.h>
      34             : #include <linux/backing-dev.h>
      35             : #include <linux/compaction.h>
      36             : #include <linux/syscalls.h>
      37             : #include <linux/compat.h>
      38             : #include <linux/hugetlb.h>
      39             : #include <linux/hugetlb_cgroup.h>
      40             : #include <linux/gfp.h>
      41             : #include <linux/pfn_t.h>
      42             : #include <linux/memremap.h>
      43             : #include <linux/userfaultfd_k.h>
      44             : #include <linux/balloon_compaction.h>
      45             : #include <linux/page_idle.h>
      46             : #include <linux/page_owner.h>
      47             : #include <linux/sched/mm.h>
      48             : #include <linux/ptrace.h>
      49             : #include <linux/oom.h>
      50             : #include <linux/memory.h>
      51             : #include <linux/random.h>
      52             : #include <linux/sched/sysctl.h>
      53             : #include <linux/memory-tiers.h>
      54             : 
      55             : #include <asm/tlbflush.h>
      56             : 
      57             : #include <trace/events/migrate.h>
      58             : 
      59             : #include "internal.h"
      60             : 
      61           0 : bool isolate_movable_page(struct page *page, isolate_mode_t mode)
      62             : {
      63           0 :         struct folio *folio = folio_get_nontail_page(page);
      64             :         const struct movable_operations *mops;
      65             : 
      66             :         /*
       67             :          * Avoid burning cycles with pages that are still under
       68             :          * __free_pages(), or that just got freed under us.
       69             :          *
       70             :          * In case we 'win' a race with a movable page being freed under us
       71             :          * and raise its refcount, preventing __free_pages() from doing its
       72             :          * job, the folio_put() at the end of this block will take care of
       73             :          * releasing this page, thus avoiding a nasty leak.
      74             :          */
      75           0 :         if (!folio)
      76             :                 goto out;
      77             : 
      78           0 :         if (unlikely(folio_test_slab(folio)))
      79             :                 goto out_putfolio;
      80             :         /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
      81           0 :         smp_rmb();
      82             :         /*
      83             :          * Check movable flag before taking the page lock because
      84             :          * we use non-atomic bitops on newly allocated page flags so
      85             :          * unconditionally grabbing the lock ruins page's owner side.
      86             :          */
      87           0 :         if (unlikely(!__folio_test_movable(folio)))
      88             :                 goto out_putfolio;
      89             :         /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
      90           0 :         smp_rmb();
      91           0 :         if (unlikely(folio_test_slab(folio)))
      92             :                 goto out_putfolio;
      93             : 
      94             :         /*
      95             :          * As movable pages are not isolated from LRU lists, concurrent
      96             :          * compaction threads can race against page migration functions
       97             :          * as well as race against the release of a page.
      98             :          *
      99             :          * In order to avoid having an already isolated movable page
     100             :          * being (wrongly) re-isolated while it is under migration,
     101             :          * or to avoid attempting to isolate pages being released,
      102             :          * let's be sure we have the page lock
     103             :          * before proceeding with the movable page isolation steps.
     104             :          */
     105           0 :         if (unlikely(!folio_trylock(folio)))
     106             :                 goto out_putfolio;
     107             : 
     108           0 :         if (!folio_test_movable(folio) || folio_test_isolated(folio))
     109             :                 goto out_no_isolated;
     110             : 
     111           0 :         mops = folio_movable_ops(folio);
     112             :         VM_BUG_ON_FOLIO(!mops, folio);
     113             : 
     114           0 :         if (!mops->isolate_page(&folio->page, mode))
     115             :                 goto out_no_isolated;
     116             : 
     117             :         /* Driver shouldn't use PG_isolated bit of page->flags */
     118           0 :         WARN_ON_ONCE(folio_test_isolated(folio));
     119           0 :         folio_set_isolated(folio);
     120           0 :         folio_unlock(folio);
     121             : 
     122           0 :         return true;
     123             : 
     124             : out_no_isolated:
     125           0 :         folio_unlock(folio);
     126             : out_putfolio:
     127             :         folio_put(folio);
     128             : out:
     129             :         return false;
     130             : }
     131             : 
     132             : static void putback_movable_folio(struct folio *folio)
     133             : {
     134           0 :         const struct movable_operations *mops = folio_movable_ops(folio);
     135             : 
     136           0 :         mops->putback_page(&folio->page);
     137           0 :         folio_clear_isolated(folio);
     138             : }
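                      : 
                      : /*
                      :  * Illustrative sketch (not part of migrate.c): a driver backing
                      :  * non-LRU movable pages hooks into isolate_movable_page() and
                      :  * putback_movable_folio() by registering movable_operations and
                      :  * marking each page movable while holding the page lock.  The
                      :  * "demo_*" names below are hypothetical.
                      :  */
                      : static bool demo_isolate(struct page *page, isolate_mode_t mode)
                      : {
                      : 	/* detach the page from driver-internal lists */
                      : 	return true;
                      : }
                      : 
                      : static int demo_migrate(struct page *dst, struct page *src,
                      : 			enum migrate_mode mode)
                      : {
                      : 	/* copy contents and switch driver metadata over to dst */
                      : 	return MIGRATEPAGE_SUCCESS;
                      : }
                      : 
                      : static void demo_putback(struct page *page)
                      : {
                      : 	/* reinsert the page after a failed or aborted migration */
                      : }
                      : 
                      : static const struct movable_operations demo_mops = {
                      : 	.isolate_page	= demo_isolate,
                      : 	.migrate_page	= demo_migrate,
                      : 	.putback_page	= demo_putback,
                      : };
                      : 
                      : /* at allocation time, with the page locked: */
                      : static void demo_mark_movable(struct page *page)
                      : {
                      : 	__SetPageMovable(page, &demo_mops);
                      : }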
     139             : 
     140             : /*
     141             :  * Put previously isolated pages back onto the appropriate lists
     142             :  * from where they were once taken off for compaction/migration.
     143             :  *
     144             :  * This function shall be used whenever the isolated pageset has been
      145             :  * built from LRU, balloon, or hugetlbfs pages. See isolate_migratepages_range()
     146             :  * and isolate_hugetlb().
     147             :  */
     148           0 : void putback_movable_pages(struct list_head *l)
     149             : {
     150             :         struct folio *folio;
     151             :         struct folio *folio2;
     152             : 
     153           0 :         list_for_each_entry_safe(folio, folio2, l, lru) {
     154           0 :                 if (unlikely(folio_test_hugetlb(folio))) {
     155             :                         folio_putback_active_hugetlb(folio);
     156             :                         continue;
     157             :                 }
     158           0 :                 list_del(&folio->lru);
     159             :                 /*
      160             :                  * We isolated a non-LRU movable folio, so here we can use
      161             :                  * __PageMovable because an LRU folio's mapping cannot have
      162             :                  * PAGE_MAPPING_MOVABLE set.
     163             :                  */
     164           0 :                 if (unlikely(__folio_test_movable(folio))) {
     165             :                         VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
     166           0 :                         folio_lock(folio);
     167           0 :                         if (folio_test_movable(folio))
     168             :                                 putback_movable_folio(folio);
     169             :                         else
     170             :                                 folio_clear_isolated(folio);
     171           0 :                         folio_unlock(folio);
     172             :                         folio_put(folio);
     173             :                 } else {
     174           0 :                         node_stat_mod_folio(folio, NR_ISOLATED_ANON +
     175           0 :                                         folio_is_file_lru(folio), -folio_nr_pages(folio));
     176           0 :                         folio_putback_lru(folio);
     177             :                 }
     178             :         }
     179           0 : }
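                      : 
                      : /*
                      :  * Illustrative usage (a sketch, not from this file): callers isolate
                      :  * folios onto a private list, try migrate_pages(), and put any
                      :  * survivors back on failure; compare do_migrate_range() in
                      :  * mm/memory_hotplug.c.  The "demo_alloc" callback is hypothetical.
                      :  *
                      :  *	LIST_HEAD(pagelist);
                      :  *	// ... isolate folios onto &pagelist ...
                      :  *	if (migrate_pages(&pagelist, demo_alloc, NULL, 0,
                      :  *			  MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL))
                      :  *		putback_movable_pages(&pagelist);
                      :  */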
     180             : 
     181             : /*
     182             :  * Restore a potential migration pte to a working pte entry
     183             :  */
     184           0 : static bool remove_migration_pte(struct folio *folio,
     185             :                 struct vm_area_struct *vma, unsigned long addr, void *old)
     186             : {
     187           0 :         DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
     188             : 
     189           0 :         while (page_vma_mapped_walk(&pvmw)) {
     190           0 :                 rmap_t rmap_flags = RMAP_NONE;
     191             :                 pte_t pte;
     192             :                 swp_entry_t entry;
     193             :                 struct page *new;
     194           0 :                 unsigned long idx = 0;
     195             : 
     196             :                 /* pgoff is invalid for ksm pages, but they are never large */
     197           0 :                 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
     198           0 :                         idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
     199           0 :                 new = folio_page(folio, idx);
     200             : 
     201             : #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
     202             :                 /* PMD-mapped THP migration entry */
     203             :                 if (!pvmw.pte) {
     204             :                         VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
     205             :                                         !folio_test_pmd_mappable(folio), folio);
     206             :                         remove_migration_pmd(&pvmw, new);
     207             :                         continue;
     208             :                 }
     209             : #endif
     210             : 
     211           0 :                 folio_get(folio);
     212           0 :                 pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
     213           0 :                 if (pte_swp_soft_dirty(*pvmw.pte))
     214             :                         pte = pte_mksoft_dirty(pte);
     215             : 
     216             :                 /*
      217             :                  * Recheck VMA as permissions may have changed since migration started
     218             :                  */
     219           0 :                 entry = pte_to_swp_entry(*pvmw.pte);
     220           0 :                 if (!is_migration_entry_young(entry))
     221             :                         pte = pte_mkold(pte);
     222           0 :                 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
     223             :                         pte = pte_mkdirty(pte);
     224           0 :                 if (is_writable_migration_entry(entry))
     225           0 :                         pte = maybe_mkwrite(pte, vma);
     226             :                 else if (pte_swp_uffd_wp(*pvmw.pte))
     227             :                         pte = pte_mkuffd_wp(pte);
     228             :                 else
     229             :                         pte = pte_wrprotect(pte);
     230             : 
     231           0 :                 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
     232           0 :                         rmap_flags |= RMAP_EXCLUSIVE;
     233             : 
     234           0 :                 if (unlikely(is_device_private_page(new))) {
     235             :                         if (pte_write(pte))
     236             :                                 entry = make_writable_device_private_entry(
     237             :                                                         page_to_pfn(new));
     238             :                         else
     239             :                                 entry = make_readable_device_private_entry(
     240             :                                                         page_to_pfn(new));
     241             :                         pte = swp_entry_to_pte(entry);
     242             :                         if (pte_swp_soft_dirty(*pvmw.pte))
     243             :                                 pte = pte_swp_mksoft_dirty(pte);
     244             :                         if (pte_swp_uffd_wp(*pvmw.pte))
     245             :                                 pte = pte_swp_mkuffd_wp(pte);
     246             :                 }
     247             : 
     248             : #ifdef CONFIG_HUGETLB_PAGE
     249             :                 if (folio_test_hugetlb(folio)) {
     250             :                         unsigned int shift = huge_page_shift(hstate_vma(vma));
     251             : 
     252             :                         pte = pte_mkhuge(pte);
     253             :                         pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
     254             :                         if (folio_test_anon(folio))
     255             :                                 hugepage_add_anon_rmap(new, vma, pvmw.address,
     256             :                                                        rmap_flags);
     257             :                         else
     258             :                                 page_dup_file_rmap(new, true);
     259             :                         set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
     260             :                 } else
     261             : #endif
     262             :                 {
     263           0 :                         if (folio_test_anon(folio))
     264           0 :                                 page_add_anon_rmap(new, vma, pvmw.address,
     265             :                                                    rmap_flags);
     266             :                         else
     267           0 :                                 page_add_file_rmap(new, vma, false);
     268           0 :                         set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
     269             :                 }
     270           0 :                 if (vma->vm_flags & VM_LOCKED)
     271           0 :                         mlock_drain_local();
     272             : 
     273             :                 trace_remove_migration_pte(pvmw.address, pte_val(pte),
     274             :                                            compound_order(new));
     275             : 
     276             :                 /* No need to invalidate - it was non-present before */
     277             :                 update_mmu_cache(vma, pvmw.address, pvmw.pte);
     278             :         }
     279             : 
     280           0 :         return true;
     281             : }
     282             : 
     283             : /*
     284             :  * Get rid of all migration entries and replace them by
     285             :  * references to the indicated page.
     286             :  */
     287           0 : void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
     288             : {
     289           0 :         struct rmap_walk_control rwc = {
     290             :                 .rmap_one = remove_migration_pte,
     291             :                 .arg = src,
     292             :         };
     293             : 
     294           0 :         if (locked)
     295           0 :                 rmap_walk_locked(dst, &rwc);
     296             :         else
     297           0 :                 rmap_walk(dst, &rwc);
     298           0 : }
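                      : 
                      : /*
                      :  * Note: callers use this for both outcomes of a migration attempt.  On
                      :  * success, remove_migration_ptes(src, dst, ...) rewrites the entries
                      :  * to map the new folio; on failure (or in writeout() below), src and
                      :  * dst are the same folio and the original ptes are simply restored.
                      :  */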
     299             : 
     300             : /*
     301             :  * Something used the pte of a page under migration. We need to
     302             :  * get to the page and wait until migration is finished.
     303             :  * When we return from this function the fault will be retried.
     304             :  */
     305           0 : void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
     306             :                                 spinlock_t *ptl)
     307             : {
     308             :         pte_t pte;
     309             :         swp_entry_t entry;
     310             : 
     311           0 :         spin_lock(ptl);
     312           0 :         pte = *ptep;
     313           0 :         if (!is_swap_pte(pte))
     314             :                 goto out;
     315             : 
     316           0 :         entry = pte_to_swp_entry(pte);
     317           0 :         if (!is_migration_entry(entry))
     318             :                 goto out;
     319             : 
     320           0 :         migration_entry_wait_on_locked(entry, ptep, ptl);
     321           0 :         return;
     322             : out:
     323           0 :         pte_unmap_unlock(ptep, ptl);
     324             : }
     325             : 
     326           0 : void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
     327             :                                 unsigned long address)
     328             : {
     329           0 :         spinlock_t *ptl = pte_lockptr(mm, pmd);
     330           0 :         pte_t *ptep = pte_offset_map(pmd, address);
     331           0 :         __migration_entry_wait(mm, ptep, ptl);
     332           0 : }
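                      : 
                      : /*
                      :  * Illustrative sketch (not from this file): the fault path waits on a
                      :  * migration entry roughly like this (cf. do_swap_page()), after which
                      :  * the fault is retried:
                      :  *
                      :  *	entry = pte_to_swp_entry(vmf->orig_pte);
                      :  *	if (is_migration_entry(entry)) {
                      :  *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
                      :  *		return 0;
                      :  *	}
                      :  */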
     333             : 
     334             : #ifdef CONFIG_HUGETLB_PAGE
     335             : /*
     336             :  * The vma read lock must be held upon entry. Holding that lock prevents either
     337             :  * the pte or the ptl from being freed.
     338             :  *
     339             :  * This function will release the vma lock before returning.
     340             :  */
     341             : void __migration_entry_wait_huge(struct vm_area_struct *vma,
     342             :                                  pte_t *ptep, spinlock_t *ptl)
     343             : {
     344             :         pte_t pte;
     345             : 
     346             :         hugetlb_vma_assert_locked(vma);
     347             :         spin_lock(ptl);
     348             :         pte = huge_ptep_get(ptep);
     349             : 
     350             :         if (unlikely(!is_hugetlb_entry_migration(pte))) {
     351             :                 spin_unlock(ptl);
     352             :                 hugetlb_vma_unlock_read(vma);
     353             :         } else {
     354             :                 /*
      355             :                  * If a migration entry exists, it is safe to release the vma
      356             :                  * lock here because the pgtable page won't be freed until the
      357             :                  * pgtable lock is released.  See the comment right above the
     358             :                  * lock release in migration_entry_wait_on_locked().
     359             :                  */
     360             :                 hugetlb_vma_unlock_read(vma);
     361             :                 migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl);
     362             :         }
     363             : }
     364             : 
     365             : void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
     366             : {
     367             :         spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);
     368             : 
     369             :         __migration_entry_wait_huge(vma, pte, ptl);
     370             : }
     371             : #endif
     372             : 
     373             : #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
     374             : void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
     375             : {
     376             :         spinlock_t *ptl;
     377             : 
     378             :         ptl = pmd_lock(mm, pmd);
     379             :         if (!is_pmd_migration_entry(*pmd))
     380             :                 goto unlock;
     381             :         migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
     382             :         return;
     383             : unlock:
     384             :         spin_unlock(ptl);
     385             : }
     386             : #endif
     387             : 
     388             : static int folio_expected_refs(struct address_space *mapping,
     389             :                 struct folio *folio)
     390             : {
     391           0 :         int refs = 1;
     392           0 :         if (!mapping)
     393             :                 return refs;
     394             : 
     395           0 :         refs += folio_nr_pages(folio);
     396           0 :         if (folio_test_private(folio))
     397           0 :                 refs++;
     398             : 
     399             :         return refs;
     400             : }
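                      : 
                      : /*
                      :  * Worked example: for an order-0 pagecache folio with buffer heads
                      :  * attached (PG_private set), this returns 1 (the caller's isolation
                      :  * reference) + 1 (folio_nr_pages()) + 1 (private) = 3, matching the
                      :  * "3 for pages with a mapping and PagePrivate" case documented below.
                      :  */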
     401             : 
     402             : /*
     403             :  * Replace the page in the mapping.
     404             :  *
     405             :  * The number of remaining references must be:
     406             :  * 1 for anonymous pages without a mapping
     407             :  * 2 for pages with a mapping
     408             :  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
     409             :  */
     410           0 : int folio_migrate_mapping(struct address_space *mapping,
     411             :                 struct folio *newfolio, struct folio *folio, int extra_count)
     412             : {
     413           0 :         XA_STATE(xas, &mapping->i_pages, folio_index(folio));
     414             :         struct zone *oldzone, *newzone;
     415             :         int dirty;
     416           0 :         int expected_count = folio_expected_refs(mapping, folio) + extra_count;
     417           0 :         long nr = folio_nr_pages(folio);
     418             : 
     419           0 :         if (!mapping) {
     420             :                 /* Anonymous page without mapping */
     421           0 :                 if (folio_ref_count(folio) != expected_count)
     422             :                         return -EAGAIN;
     423             : 
     424             :                 /* No turning back from here */
     425           0 :                 newfolio->index = folio->index;
     426           0 :                 newfolio->mapping = folio->mapping;
     427           0 :                 if (folio_test_swapbacked(folio))
     428             :                         __folio_set_swapbacked(newfolio);
     429             : 
     430             :                 return MIGRATEPAGE_SUCCESS;
     431             :         }
     432             : 
     433           0 :         oldzone = folio_zone(folio);
     434           0 :         newzone = folio_zone(newfolio);
     435             : 
     436           0 :         xas_lock_irq(&xas);
     437           0 :         if (!folio_ref_freeze(folio, expected_count)) {
     438           0 :                 xas_unlock_irq(&xas);
     439           0 :                 return -EAGAIN;
     440             :         }
     441             : 
     442             :         /*
     443             :          * Now we know that no one else is looking at the folio:
     444             :          * no turning back from here.
     445             :          */
     446           0 :         newfolio->index = folio->index;
     447           0 :         newfolio->mapping = folio->mapping;
     448           0 :         folio_ref_add(newfolio, nr); /* add cache reference */
     449           0 :         if (folio_test_swapbacked(folio)) {
     450           0 :                 __folio_set_swapbacked(newfolio);
     451           0 :                 if (folio_test_swapcache(folio)) {
     452           0 :                         folio_set_swapcache(newfolio);
     453           0 :                         newfolio->private = folio_get_private(folio);
     454             :                 }
     455             :         } else {
     456             :                 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
     457             :         }
     458             : 
     459             :         /* Move dirty while page refs frozen and newpage not yet exposed */
     460           0 :         dirty = folio_test_dirty(folio);
     461           0 :         if (dirty) {
     462           0 :                 folio_clear_dirty(folio);
     463             :                 folio_set_dirty(newfolio);
     464             :         }
     465             : 
     466           0 :         xas_store(&xas, newfolio);
     467             : 
     468             :         /*
     469             :          * Drop cache reference from old page by unfreezing
     470             :          * to one less reference.
     471             :          * We know this isn't the last reference.
     472             :          */
     473           0 :         folio_ref_unfreeze(folio, expected_count - nr);
     474             : 
     475           0 :         xas_unlock(&xas);
     476             :         /* Leave irq disabled to prevent preemption while updating stats */
     477             : 
     478             :         /*
     479             :          * If moved to a different zone then also account
     480             :          * the page for that zone. Other VM counters will be
     481             :          * taken care of when we establish references to the
     482             :          * new page and drop references to the old page.
     483             :          *
     484             :          * Note that anonymous pages are accounted for
     485             :          * via NR_FILE_PAGES and NR_ANON_MAPPED if they
     486             :          * are mapped to swap space.
     487             :          */
     488           0 :         if (newzone != oldzone) {
     489             :                 struct lruvec *old_lruvec, *new_lruvec;
     490             :                 struct mem_cgroup *memcg;
     491             : 
     492           0 :                 memcg = folio_memcg(folio);
     493           0 :                 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
     494           0 :                 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
     495             : 
     496           0 :                 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
     497           0 :                 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
     498           0 :                 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
     499           0 :                         __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
     500           0 :                         __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
     501             :                 }
     502             : #ifdef CONFIG_SWAP
     503           0 :                 if (folio_test_swapcache(folio)) {
     504           0 :                         __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
     505           0 :                         __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
     506             :                 }
     507             : #endif
     508           0 :                 if (dirty && mapping_can_writeback(mapping)) {
     509           0 :                         __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
     510           0 :                         __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
     511           0 :                         __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
     512             :                         __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
     513             :                 }
     514             :         }
     515             :         local_irq_enable();
     516             : 
     517           0 :         return MIGRATEPAGE_SUCCESS;
     518             : }
     519             : EXPORT_SYMBOL(folio_migrate_mapping);
     520             : 
     521             : /*
     522             :  * The expected number of remaining references is the same as that
     523             :  * of folio_migrate_mapping().
     524             :  */
     525           0 : int migrate_huge_page_move_mapping(struct address_space *mapping,
     526             :                                    struct folio *dst, struct folio *src)
     527             : {
     528           0 :         XA_STATE(xas, &mapping->i_pages, folio_index(src));
     529             :         int expected_count;
     530             : 
     531           0 :         xas_lock_irq(&xas);
     532           0 :         expected_count = 2 + folio_has_private(src);
     533           0 :         if (!folio_ref_freeze(src, expected_count)) {
     534           0 :                 xas_unlock_irq(&xas);
     535           0 :                 return -EAGAIN;
     536             :         }
     537             : 
     538           0 :         dst->index = src->index;
     539           0 :         dst->mapping = src->mapping;
     540             : 
     541           0 :         folio_get(dst);
     542             : 
     543           0 :         xas_store(&xas, dst);
     544             : 
     545           0 :         folio_ref_unfreeze(src, expected_count - 1);
     546             : 
     547           0 :         xas_unlock_irq(&xas);
     548             : 
     549           0 :         return MIGRATEPAGE_SUCCESS;
     550             : }
     551             : 
     552             : /*
     553             :  * Copy the flags and some other ancillary information
     554             :  */
     555           0 : void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
     556             : {
     557             :         int cpupid;
     558             : 
     559           0 :         if (folio_test_error(folio))
     560             :                 folio_set_error(newfolio);
     561           0 :         if (folio_test_referenced(folio))
     562             :                 folio_set_referenced(newfolio);
     563           0 :         if (folio_test_uptodate(folio))
     564             :                 folio_mark_uptodate(newfolio);
     565           0 :         if (folio_test_clear_active(folio)) {
     566             :                 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
     567             :                 folio_set_active(newfolio);
     568           0 :         } else if (folio_test_clear_unevictable(folio))
     569             :                 folio_set_unevictable(newfolio);
     570           0 :         if (folio_test_workingset(folio))
     571             :                 folio_set_workingset(newfolio);
     572           0 :         if (folio_test_checked(folio))
     573             :                 folio_set_checked(newfolio);
     574             :         /*
     575             :          * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
      576             :          * migration entries. We can still have PG_anon_exclusive set on the
      577             :          * effectively unmapped and unreferenced first sub-page of an
      578             :          * anonymous THP: we can simply copy it here via PG_mappedtodisk.
     579             :          */
     580           0 :         if (folio_test_mappedtodisk(folio))
     581             :                 folio_set_mappedtodisk(newfolio);
     582             : 
     583             :         /* Move dirty on pages not done by folio_migrate_mapping() */
     584           0 :         if (folio_test_dirty(folio))
     585             :                 folio_set_dirty(newfolio);
     586             : 
     587           0 :         if (folio_test_young(folio))
     588             :                 folio_set_young(newfolio);
     589           0 :         if (folio_test_idle(folio))
     590             :                 folio_set_idle(newfolio);
     591             : 
     592             :         /*
     593             :          * Copy NUMA information to the new page, to prevent over-eager
     594             :          * future migrations of this same page.
     595             :          */
     596           0 :         cpupid = page_cpupid_xchg_last(&folio->page, -1);
     597             :         /*
      598             :          * In memory tiering mode, when migrating between slow and fast
      599             :          * memory nodes, reset cpupid, because it is used to record
      600             :          * page access time on slow memory nodes.
     601             :          */
     602             :         if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
     603             :                 bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
     604             :                 bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
     605             : 
     606             :                 if (f_toptier != t_toptier)
     607             :                         cpupid = -1;
     608             :         }
     609           0 :         page_cpupid_xchg_last(&newfolio->page, cpupid);
     610             : 
     611           0 :         folio_migrate_ksm(newfolio, folio);
     612             :         /*
     613             :          * Please do not reorder this without considering how mm/ksm.c's
     614             :          * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
     615             :          */
     616           0 :         if (folio_test_swapcache(folio))
     617             :                 folio_clear_swapcache(folio);
     618           0 :         folio_clear_private(folio);
     619             : 
     620             :         /* page->private contains hugetlb specific flags */
     621           0 :         if (!folio_test_hugetlb(folio))
     622           0 :                 folio->private = NULL;
     623             : 
     624             :         /*
     625             :          * If any waiters have accumulated on the new page then
     626             :          * wake them up.
     627             :          */
     628           0 :         if (folio_test_writeback(newfolio))
     629           0 :                 folio_end_writeback(newfolio);
     630             : 
     631             :         /*
     632             :          * PG_readahead shares the same bit with PG_reclaim.  The above
      633             :          * folio_end_writeback() may clear PG_readahead mistakenly, so set the
     634             :          * bit after that.
     635             :          */
     636           0 :         if (folio_test_readahead(folio))
     637             :                 folio_set_readahead(newfolio);
     638             : 
     639           0 :         folio_copy_owner(newfolio, folio);
     640             : 
     641           0 :         if (!folio_test_hugetlb(folio))
     642             :                 mem_cgroup_migrate(folio, newfolio);
     643           0 : }
     644             : EXPORT_SYMBOL(folio_migrate_flags);
     645             : 
     646           0 : void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
     647             : {
     648           0 :         folio_copy(newfolio, folio);
     649           0 :         folio_migrate_flags(newfolio, folio);
     650           0 : }
     651             : EXPORT_SYMBOL(folio_migrate_copy);
     652             : 
     653             : /************************************************************
     654             :  *                    Migration functions
     655             :  ***********************************************************/
     656             : 
     657           0 : int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
     658             :                 struct folio *src, enum migrate_mode mode, int extra_count)
     659             : {
     660             :         int rc;
     661             : 
     662           0 :         BUG_ON(folio_test_writeback(src));      /* Writeback must be complete */
     663             : 
     664           0 :         rc = folio_migrate_mapping(mapping, dst, src, extra_count);
     665             : 
     666           0 :         if (rc != MIGRATEPAGE_SUCCESS)
     667             :                 return rc;
     668             : 
     669           0 :         if (mode != MIGRATE_SYNC_NO_COPY)
     670             :                 folio_migrate_copy(dst, src);
     671             :         else
     672           0 :                 folio_migrate_flags(dst, src);
     673             :         return MIGRATEPAGE_SUCCESS;
     674             : }
     675             : 
     676             : /**
     677             :  * migrate_folio() - Simple folio migration.
     678             :  * @mapping: The address_space containing the folio.
     679             :  * @dst: The folio to migrate the data to.
     680             :  * @src: The folio containing the current data.
     681             :  * @mode: How to migrate the page.
     682             :  *
     683             :  * Common logic to directly migrate a single LRU folio suitable for
     684             :  * folios that do not use PagePrivate/PagePrivate2.
     685             :  *
     686             :  * Folios are locked upon entry and exit.
     687             :  */
     688           0 : int migrate_folio(struct address_space *mapping, struct folio *dst,
     689             :                 struct folio *src, enum migrate_mode mode)
     690             : {
     691           0 :         return migrate_folio_extra(mapping, dst, src, mode, 0);
     692             : }
     693             : EXPORT_SYMBOL(migrate_folio);
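                      : 
                      : /*
                      :  * Illustrative sketch (not from this file): filesystems whose
                      :  * pagecache folios carry no private data can use migrate_folio
                      :  * directly as their migration callback, as shmem does.  "demo_aops"
                      :  * is a hypothetical name.
                      :  */
                      : static const struct address_space_operations demo_aops = {
                      : 	/* read/write callbacks elided */
                      : 	.migrate_folio	= migrate_folio,
                      : };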
     694             : 
     695             : #ifdef CONFIG_BLOCK
     696             : /* Returns true if all buffers are successfully locked */
     697           0 : static bool buffer_migrate_lock_buffers(struct buffer_head *head,
     698             :                                                         enum migrate_mode mode)
     699             : {
     700           0 :         struct buffer_head *bh = head;
     701             : 
     702             :         /* Simple case, sync compaction */
     703           0 :         if (mode != MIGRATE_ASYNC) {
     704             :                 do {
     705           0 :                         lock_buffer(bh);
     706           0 :                         bh = bh->b_this_page;
     707             : 
     708           0 :                 } while (bh != head);
     709             : 
     710             :                 return true;
     711             :         }
     712             : 
     713             :         /* async case, we cannot block on lock_buffer so use trylock_buffer */
     714             :         do {
     715           0 :                 if (!trylock_buffer(bh)) {
     716             :                         /*
      717             :                          * async migration. Release the locks taken so far.
     718             :                          * async migration. Release the taken locks
     719             :                          */
     720             :                         struct buffer_head *failed_bh = bh;
     721             :                         bh = head;
     722           0 :                         while (bh != failed_bh) {
     723           0 :                                 unlock_buffer(bh);
     724           0 :                                 bh = bh->b_this_page;
     725             :                         }
     726             :                         return false;
     727             :                 }
     728             : 
     729           0 :                 bh = bh->b_this_page;
     730           0 :         } while (bh != head);
     731             :         return true;
     732             : }
     733             : 
     734           0 : static int __buffer_migrate_folio(struct address_space *mapping,
     735             :                 struct folio *dst, struct folio *src, enum migrate_mode mode,
     736             :                 bool check_refs)
     737             : {
     738             :         struct buffer_head *bh, *head;
     739             :         int rc;
     740             :         int expected_count;
     741             : 
     742           0 :         head = folio_buffers(src);
     743           0 :         if (!head)
     744           0 :                 return migrate_folio(mapping, dst, src, mode);
     745             : 
      746             :         /* Check that the folio has no extra refs before we do more work */
     747           0 :         expected_count = folio_expected_refs(mapping, src);
     748           0 :         if (folio_ref_count(src) != expected_count)
     749             :                 return -EAGAIN;
     750             : 
     751           0 :         if (!buffer_migrate_lock_buffers(head, mode))
     752             :                 return -EAGAIN;
     753             : 
     754           0 :         if (check_refs) {
     755             :                 bool busy;
     756             :                 bool invalidated = false;
     757             : 
     758             : recheck_buffers:
     759           0 :                 busy = false;
     760           0 :                 spin_lock(&mapping->private_lock);
     761           0 :                 bh = head;
     762             :                 do {
     763           0 :                         if (atomic_read(&bh->b_count)) {
     764             :                                 busy = true;
     765             :                                 break;
     766             :                         }
     767           0 :                         bh = bh->b_this_page;
     768           0 :                 } while (bh != head);
     769           0 :                 if (busy) {
     770           0 :                         if (invalidated) {
     771             :                                 rc = -EAGAIN;
     772             :                                 goto unlock_buffers;
     773             :                         }
     774           0 :                         spin_unlock(&mapping->private_lock);
     775           0 :                         invalidate_bh_lrus();
     776           0 :                         invalidated = true;
     777           0 :                         goto recheck_buffers;
     778             :                 }
     779             :         }
     780             : 
     781           0 :         rc = folio_migrate_mapping(mapping, dst, src, 0);
     782           0 :         if (rc != MIGRATEPAGE_SUCCESS)
     783             :                 goto unlock_buffers;
     784             : 
     785           0 :         folio_attach_private(dst, folio_detach_private(src));
     786             : 
     787           0 :         bh = head;
     788             :         do {
     789           0 :                 set_bh_page(bh, &dst->page, bh_offset(bh));
     790           0 :                 bh = bh->b_this_page;
     791           0 :         } while (bh != head);
     792             : 
     793           0 :         if (mode != MIGRATE_SYNC_NO_COPY)
     794             :                 folio_migrate_copy(dst, src);
     795             :         else
     796           0 :                 folio_migrate_flags(dst, src);
     797             : 
     798             :         rc = MIGRATEPAGE_SUCCESS;
     799             : unlock_buffers:
     800           0 :         if (check_refs)
     801           0 :                 spin_unlock(&mapping->private_lock);
     802             :         bh = head;
     803             :         do {
     804           0 :                 unlock_buffer(bh);
     805           0 :                 bh = bh->b_this_page;
     806           0 :         } while (bh != head);
     807             : 
     808             :         return rc;
     809             : }
     810             : 
     811             : /**
     812             :  * buffer_migrate_folio() - Migration function for folios with buffers.
     813             :  * @mapping: The address space containing @src.
     814             :  * @dst: The folio to migrate to.
     815             :  * @src: The folio to migrate from.
     816             :  * @mode: How to migrate the folio.
     817             :  *
     818             :  * This function can only be used if the underlying filesystem guarantees
     819             :  * that no other references to @src exist. For example attached buffer
     820             :  * heads are accessed only under the folio lock.  If your filesystem cannot
     821             :  * provide this guarantee, buffer_migrate_folio_norefs() may be more
     822             :  * appropriate.
     823             :  *
     824             :  * Return: 0 on success or a negative errno on failure.
     825             :  */
     826           0 : int buffer_migrate_folio(struct address_space *mapping,
     827             :                 struct folio *dst, struct folio *src, enum migrate_mode mode)
     828             : {
     829           0 :         return __buffer_migrate_folio(mapping, dst, src, mode, false);
     830             : }
     831             : EXPORT_SYMBOL(buffer_migrate_folio);
     832             : 
     833             : /**
     834             :  * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
     835             :  * @mapping: The address space containing @src.
     836             :  * @dst: The folio to migrate to.
     837             :  * @src: The folio to migrate from.
     838             :  * @mode: How to migrate the folio.
     839             :  *
     840             :  * Like buffer_migrate_folio() except that this variant is more careful
     841             :  * and checks that there are also no buffer head references. This function
     842             :  * is the right one for mappings where buffer heads are directly looked
     843             :  * up and referenced (such as block device mappings).
     844             :  *
     845             :  * Return: 0 on success or a negative errno on failure.
     846             :  */
     847           0 : int buffer_migrate_folio_norefs(struct address_space *mapping,
     848             :                 struct folio *dst, struct folio *src, enum migrate_mode mode)
     849             : {
     850           0 :         return __buffer_migrate_folio(mapping, dst, src, mode, true);
     851             : }
     852             : EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
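                      : 
                      : /*
                      :  * Illustrative sketch (not from this file): filesystems that only
                      :  * touch their buffer heads under the folio lock can use
                      :  * buffer_migrate_folio(); block-device mappings, where buffer heads
                      :  * are looked up and referenced directly, use the _norefs variant
                      :  * (cf. def_blk_aops).  "demo_blkdev_aops" is a hypothetical name.
                      :  */
                      : static const struct address_space_operations demo_blkdev_aops = {
                      : 	/* read/write callbacks elided */
                      : 	.migrate_folio	= buffer_migrate_folio_norefs,
                      : };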
     853             : #endif
     854             : 
     855           0 : int filemap_migrate_folio(struct address_space *mapping,
     856             :                 struct folio *dst, struct folio *src, enum migrate_mode mode)
     857             : {
     858             :         int ret;
     859             : 
     860           0 :         ret = folio_migrate_mapping(mapping, dst, src, 0);
     861           0 :         if (ret != MIGRATEPAGE_SUCCESS)
     862             :                 return ret;
     863             : 
     864           0 :         if (folio_get_private(src))
     865           0 :                 folio_attach_private(dst, folio_detach_private(src));
     866             : 
     867           0 :         if (mode != MIGRATE_SYNC_NO_COPY)
     868             :                 folio_migrate_copy(dst, src);
     869             :         else
     870           0 :                 folio_migrate_flags(dst, src);
     871             :         return MIGRATEPAGE_SUCCESS;
     872             : }
     873             : EXPORT_SYMBOL_GPL(filemap_migrate_folio);
     874             : 
     875             : /*
      876             :  * Write back a folio to clear its dirty state.
     877             :  */
     878           0 : static int writeout(struct address_space *mapping, struct folio *folio)
     879             : {
     880           0 :         struct writeback_control wbc = {
     881             :                 .sync_mode = WB_SYNC_NONE,
     882             :                 .nr_to_write = 1,
     883             :                 .range_start = 0,
     884             :                 .range_end = LLONG_MAX,
     885             :                 .for_reclaim = 1
     886             :         };
     887             :         int rc;
     888             : 
     889           0 :         if (!mapping->a_ops->writepage)
     890             :                 /* No write method for the address space */
     891             :                 return -EINVAL;
     892             : 
     893           0 :         if (!folio_clear_dirty_for_io(folio))
     894             :                 /* Someone else already triggered a write */
     895             :                 return -EAGAIN;
     896             : 
     897             :         /*
     898             :          * A dirty folio may imply that the underlying filesystem has
     899             :          * the folio on some queue. So the folio must be clean for
     900             :          * migration. Writeout may mean we lose the lock and the
     901             :          * folio state is no longer what we checked for earlier.
     902             :          * At this point we know that the migration attempt cannot
     903             :          * be successful.
     904             :          */
     905           0 :         remove_migration_ptes(folio, folio, false);
     906             : 
     907           0 :         rc = mapping->a_ops->writepage(&folio->page, &wbc);
     908             : 
     909           0 :         if (rc != AOP_WRITEPAGE_ACTIVATE)
     910             :                 /* unlocked. Relock */
     911             :                 folio_lock(folio);
     912             : 
     913           0 :         return (rc < 0) ? -EIO : -EAGAIN;
     914             : }
     915             : 
     916             : /*
     917             :  * Default handling if a filesystem does not provide a migration function.
     918             :  */
     919           0 : static int fallback_migrate_folio(struct address_space *mapping,
     920             :                 struct folio *dst, struct folio *src, enum migrate_mode mode)
     921             : {
     922           0 :         if (folio_test_dirty(src)) {
      923             :                 /* Only write back folios during fully synchronous migration */
     924           0 :                 switch (mode) {
     925             :                 case MIGRATE_SYNC:
     926             :                 case MIGRATE_SYNC_NO_COPY:
     927             :                         break;
     928             :                 default:
     929             :                         return -EBUSY;
     930             :                 }
     931           0 :                 return writeout(mapping, src);
     932             :         }
     933             : 
     934             :         /*
     935             :          * Buffers may be managed in a filesystem specific way.
     936             :          * We must have no buffers or drop them.
     937             :          */
     938           0 :         if (folio_test_private(src) &&
     939           0 :             !filemap_release_folio(src, GFP_KERNEL))
     940           0 :                 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
     941             : 
     942           0 :         return migrate_folio(mapping, dst, src, mode);
     943             : }
     944             : 
     945             : /*
      946             :  * Move a page to a newly allocated page.
     947             :  * The page is locked and all ptes have been successfully removed.
     948             :  *
     949             :  * The new page will have replaced the old page if this function
     950             :  * is successful.
     951             :  *
     952             :  * Return value:
     953             :  *   < 0 - error code
     954             :  *  MIGRATEPAGE_SUCCESS - success
     955             :  */
     956           0 : static int move_to_new_folio(struct folio *dst, struct folio *src,
     957             :                                 enum migrate_mode mode)
     958             : {
     959           0 :         int rc = -EAGAIN;
     960           0 :         bool is_lru = !__PageMovable(&src->page);
     961             : 
     962             :         VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
     963             :         VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
     964             : 
     965           0 :         if (likely(is_lru)) {
     966           0 :                 struct address_space *mapping = folio_mapping(src);
     967             : 
     968           0 :                 if (!mapping)
     969           0 :                         rc = migrate_folio(mapping, dst, src, mode);
     970           0 :                 else if (mapping->a_ops->migrate_folio)
     971             :                         /*
     972             :                          * Most folios have a mapping and most filesystems
     973             :                          * provide a migrate_folio callback. Anonymous folios
     974             :                          * are part of swap space which also has its own
     975             :                          * migrate_folio callback. This is the most common path
     976             :                          * for page migration.
     977             :                          */
     978           0 :                         rc = mapping->a_ops->migrate_folio(mapping, dst, src,
     979             :                                                                 mode);
     980             :                 else
     981           0 :                         rc = fallback_migrate_folio(mapping, dst, src, mode);
     982             :         } else {
     983             :                 const struct movable_operations *mops;
     984             : 
     985             :                 /*
     986             :                  * In the case of a non-LRU page, it could be released after
     987             :                  * the isolation step. In that case, we shouldn't try migration.
     988             :                  */
     989             :                 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
     990           0 :                 if (!folio_test_movable(src)) {
     991           0 :                         rc = MIGRATEPAGE_SUCCESS;
     992             :                         folio_clear_isolated(src);
     993             :                         goto out;
     994             :                 }
     995             : 
     996           0 :                 mops = folio_movable_ops(src);
     997           0 :                 rc = mops->migrate_page(&dst->page, &src->page, mode);
     998           0 :                 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
     999             :                                 !folio_test_isolated(src));
    1000             :         }
    1001             : 
    1002             :         /*
    1003             :          * When successful, old pagecache src->mapping must be cleared before
    1004             :          * src is freed; but stats require that PageAnon be left as PageAnon.
    1005             :          */
    1006           0 :         if (rc == MIGRATEPAGE_SUCCESS) {
    1007           0 :                 if (__PageMovable(&src->page)) {
    1008             :                         VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
    1009             : 
    1010             :                         /*
    1011             :                          * We clear PG_movable under the page lock so that no
    1012             :                          * compactor can try to migrate this page.
    1013             :                          */
    1014             :                         folio_clear_isolated(src);
    1015             :                 }
    1016             : 
    1017             :                 /*
    1018             :                  * For anonymous and movable pages, src->mapping will be
    1019             :                  * cleared by free_pages_prepare, so don't reset it here;
    1020             :                  * keeping it preserves the type so that e.g. PageAnon still works.
    1021             :                  */
    1022           0 :                 if (!folio_mapping_flags(src))
    1023           0 :                         src->mapping = NULL;
    1024             : 
    1025             :                 if (likely(!folio_is_zone_device(dst)))
    1026             :                         flush_dcache_folio(dst);
    1027             :         }
    1028             : out:
    1029           0 :         return rc;
    1030             : }
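                     : 
                     : /*
                     :  * Illustrative sketch, not part of this file: the non-LRU branch above
                     :  * lands in a driver-supplied migrate_page callback such as the one
                     :  * below.  "exampledrv" is hypothetical; real providers include the
                     :  * balloon and zsmalloc drivers.
                     :  */
                     : static int exampledrv_migrate_page(struct page *dst, struct page *src,
                     :                                    enum migrate_mode mode)
                     : {
                     :         /* Hypothetical: move the driver's payload, then report success. */
                     :         copy_highpage(dst, src);
                     :         return MIGRATEPAGE_SUCCESS;
                     : }
                     : 
                     : static const struct movable_operations exampledrv_mops = {
                     :         /* A real driver must also supply .isolate_page and .putback_page. */
                     :         .migrate_page = exampledrv_migrate_page,
                     : };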
    1031             : 
    1032             : /*
    1033             :  * To record some information during migration, we use the otherwise
    1034             :  * unused fields (mapping and private) of the struct folio of the newly
    1035             :  * allocated destination folio.  This is safe because nobody else is
    1036             :  * using them.
    1037             :  */
    1038             : union migration_ptr {
    1039             :         struct anon_vma *anon_vma;
    1040             :         struct address_space *mapping;
    1041             : };
    1042             : static void __migrate_folio_record(struct folio *dst,
    1043             :                                    unsigned long page_was_mapped,
    1044             :                                    struct anon_vma *anon_vma)
    1045             : {
    1046           0 :         union migration_ptr ptr = { .anon_vma = anon_vma };
    1047           0 :         dst->mapping = ptr.mapping;
    1048           0 :         dst->private = (void *)page_was_mapped;
    1049             : }
    1050             : 
    1051             : static void __migrate_folio_extract(struct folio *dst,
    1052             :                                    int *page_was_mappedp,
    1053             :                                    struct anon_vma **anon_vmap)
    1054             : {
    1055           0 :         union migration_ptr ptr = { .mapping = dst->mapping };
    1056           0 :         *anon_vmap = ptr.anon_vma;
    1057           0 :         *page_was_mappedp = (unsigned long)dst->private;
    1058           0 :         dst->mapping = NULL;
    1059           0 :         dst->private = NULL;
    1060             : }
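                     : 
                     : /*
                     :  * Illustrative round trip of the pair above (hypothetical helper, for
                     :  * exposition only): whatever is recorded into dst comes back unchanged,
                     :  * because the union aliases the same pointer-sized storage.
                     :  */
                     : static void __maybe_unused __migrate_folio_record_demo(struct folio *dst,
                     :                                                        struct anon_vma *av)
                     : {
                     :         int page_was_mapped;
                     :         struct anon_vma *anon_vma;
                     : 
                     :         __migrate_folio_record(dst, 1, av);
                     :         __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
                     :         /*
                     :          * Here page_was_mapped == 1, anon_vma == av, and both dst fields
                     :          * (mapping, private) have been reset to NULL.
                     :          */
                     : }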
    1061             : 
    1062             : /* Restore the source folio to the original state upon failure */
    1063           0 : static void migrate_folio_undo_src(struct folio *src,
    1064             :                                    int page_was_mapped,
    1065             :                                    struct anon_vma *anon_vma,
    1066             :                                    bool locked,
    1067             :                                    struct list_head *ret)
    1068             : {
    1069           0 :         if (page_was_mapped)
    1070           0 :                 remove_migration_ptes(src, src, false);
    1071             :         /* Drop an anon_vma reference if we took one */
    1072           0 :         if (anon_vma)
    1073             :                 put_anon_vma(anon_vma);
    1074           0 :         if (locked)
    1075           0 :                 folio_unlock(src);
    1076           0 :         if (ret)
    1077           0 :                 list_move_tail(&src->lru, ret);
    1078           0 : }
    1079             : 
    1080             : /* Restore the destination folio to the original state upon failure */
    1081           0 : static void migrate_folio_undo_dst(struct folio *dst,
    1082             :                                    bool locked,
    1083             :                                    free_page_t put_new_page,
    1084             :                                    unsigned long private)
    1085             : {
    1086           0 :         if (locked)
    1087           0 :                 folio_unlock(dst);
    1088           0 :         if (put_new_page)
    1089           0 :                 put_new_page(&dst->page, private);
    1090             :         else
    1091             :                 folio_put(dst);
    1092           0 : }
    1093             : 
    1094             : /* Cleanup src folio upon migration success */
    1095           0 : static void migrate_folio_done(struct folio *src,
    1096             :                                enum migrate_reason reason)
    1097             : {
    1098             :         /*
    1099             :          * Compaction can also migrate non-LRU pages, which are
    1100             :          * not accounted in NR_ISOLATED_*. They can be recognized
    1101             :          * by __PageMovable.
    1102             :          */
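                     :         /*
                     :          * NR_ISOLATED_ANON and NR_ISOLATED_FILE are adjacent counters, so
                     :          * adding folio_is_file_lru() (0 or 1) selects the right one.
                     :          */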
    1103           0 :         if (likely(!__folio_test_movable(src)))
    1104           0 :                 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
    1105           0 :                                     folio_is_file_lru(src), -folio_nr_pages(src));
    1106             : 
    1107           0 :         if (reason != MR_MEMORY_FAILURE)
    1108             :                 /* We release the page in page_handle_poison. */
    1109             :                 folio_put(src);
    1110           0 : }
    1111             : 
    1112             : /* Obtain the lock on page, remove all ptes. */
    1113           0 : static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
    1114             :                                unsigned long private, struct folio *src,
    1115             :                                struct folio **dstp, int force, bool avoid_force_lock,
    1116             :                                enum migrate_mode mode, enum migrate_reason reason,
    1117             :                                struct list_head *ret)
    1118             : {
    1119             :         struct folio *dst;
    1120           0 :         int rc = -EAGAIN;
    1121           0 :         struct page *newpage = NULL;
    1122           0 :         int page_was_mapped = 0;
    1123           0 :         struct anon_vma *anon_vma = NULL;
    1124           0 :         bool is_lru = !__PageMovable(&src->page);
    1125           0 :         bool locked = false;
    1126           0 :         bool dst_locked = false;
    1127             : 
    1128           0 :         if (folio_ref_count(src) == 1) {
    1129             :                 /* Folio was freed from under us. So we are done. */
    1130           0 :                 folio_clear_active(src);
    1131           0 :                 folio_clear_unevictable(src);
    1132             :                 /* free_pages_prepare() will clear PG_isolated. */
    1133           0 :                 list_del(&src->lru);
    1134           0 :                 migrate_folio_done(src, reason);
    1135           0 :                 return MIGRATEPAGE_SUCCESS;
    1136             :         }
    1137             : 
    1138           0 :         newpage = get_new_page(&src->page, private);
    1139           0 :         if (!newpage)
    1140             :                 return -ENOMEM;
    1141           0 :         dst = page_folio(newpage);
    1142           0 :         *dstp = dst;
    1143             : 
    1144           0 :         dst->private = NULL;
    1145             : 
    1146           0 :         if (!folio_trylock(src)) {
    1147           0 :                 if (!force || mode == MIGRATE_ASYNC)
    1148             :                         goto out;
    1149             : 
    1150             :                 /*
    1151             :                  * It's not safe for direct compaction to call lock_page.
    1152             :                  * For example, during page readahead pages are added locked
    1153             :                  * to the LRU. Later, when the IO completes the pages are
    1154             :                  * marked uptodate and unlocked. However, the queueing
    1155             :                  * could be merging multiple pages for one bio (e.g.
    1156             :                  * mpage_readahead). If an allocation happens for the
    1157             :                  * second or third page, the process can end up locking
    1158             :                  * the same page twice and deadlocking. Rather than
    1159             :                  * trying to be clever about what pages can be locked,
    1160             :                  * avoid the use of lock_page for direct compaction
    1161             :                  * altogether.
    1162             :                  */
    1163           0 :                 if (current->flags & PF_MEMALLOC)
    1164             :                         goto out;
    1165             : 
    1166             :                 /*
    1167             :                  * We have locked some folios and are going to wait to lock
    1168             :                  * this folio.  To avoid a potential deadlock, let's bail
    1169             :                  * out and not do that. The locked folios will be moved and
    1170             :                  * unlocked, then we can wait to lock this folio.
    1171             :                  */
    1172           0 :                 if (avoid_force_lock) {
    1173             :                         rc = -EDEADLOCK;
    1174             :                         goto out;
    1175             :                 }
    1176             : 
    1177             :                 folio_lock(src);
    1178             :         }
    1179           0 :         locked = true;
    1180             : 
    1181           0 :         if (folio_test_writeback(src)) {
    1182             :                 /*
    1183             :                  * Only in the case of a full synchronous migration is it
    1184             :                  * necessary to wait for PageWriteback. In the async case,
    1185             :                  * the retry loop is too short and in the sync-light case,
    1186             :                  * the overhead of stalling is too much.
    1187             :                  */
    1188           0 :                 switch (mode) {
    1189             :                 case MIGRATE_SYNC:
    1190             :                 case MIGRATE_SYNC_NO_COPY:
    1191             :                         break;
    1192             :                 default:
    1193             :                         rc = -EBUSY;
    1194             :                         goto out;
    1195             :                 }
    1196           0 :                 if (!force)
    1197             :                         goto out;
    1198           0 :                 folio_wait_writeback(src);
    1199             :         }
    1200             : 
    1201             :         /*
    1202             :          * By try_to_migrate(), src->mapcount drops to 0 here, so we could
    1203             :          * not otherwise notice if the anon_vma were freed while we migrate
    1204             :          * the page. This get_anon_vma() delays freeing the anon_vma pointer
    1205             :          * until the end of migration. File cache pages are no problem,
    1206             :          * because they are protected by the page lock during migration;
    1207             :          * only anon pages need this care here.
    1208             :          *
    1209             :          * Only folio_get_anon_vma() understands the subtleties of
    1210             :          * getting a hold on an anon_vma from outside one of its mms.
    1211             :          * But if we cannot get anon_vma, then we won't need it anyway,
    1212             :          * because that implies that the anon page is no longer mapped
    1213             :          * (and cannot be remapped so long as we hold the page lock).
    1214             :          */
    1215           0 :         if (folio_test_anon(src) && !folio_test_ksm(src))
    1216           0 :                 anon_vma = folio_get_anon_vma(src);
    1217             : 
    1218             :         /*
    1219             :          * Block others from accessing the new page when we get around to
    1220             :          * establishing additional references. We are usually the only one
    1221             :          * holding a reference to dst at this point. We used to have a BUG
    1222             :          * here if folio_trylock(dst) fails, but would like to allow for
    1223             :          * cases where there might be a race with the previous use of dst.
    1224             :          * This is much like races on refcount of oldpage: just don't BUG().
    1225             :          */
    1226           0 :         if (unlikely(!folio_trylock(dst)))
    1227             :                 goto out;
    1228           0 :         dst_locked = true;
    1229             : 
    1230           0 :         if (unlikely(!is_lru)) {
    1231           0 :                 __migrate_folio_record(dst, page_was_mapped, anon_vma);
    1232           0 :                 return MIGRATEPAGE_UNMAP;
    1233             :         }
    1234             : 
    1235             :         /*
    1236             :          * Corner case handling:
    1237             :          * 1. When a new swap-cache page is read in, it is added to the LRU
    1238             :          * and treated as swapcache, but it has no rmap yet.
    1239             :          * Calling try_to_unmap() against a src->mapping==NULL page will
    1240             :          * trigger a BUG.  So handle it here.
    1241             :          * 2. An orphaned page (see truncate_cleanup_page) might have
    1242             :          * fs-private metadata. The page can be picked up due to memory
    1243             :          * offlining.  Everywhere else except page reclaim, the page is
    1244             :          * invisible to the vm, so the page cannot be migrated.  So try to
    1245             :          * free the metadata, so the page can be freed.
    1246             :          */
    1247           0 :         if (!src->mapping) {
    1248           0 :                 if (folio_test_private(src)) {
    1249           0 :                         try_to_free_buffers(src);
    1250           0 :                         goto out;
    1251             :                 }
    1252           0 :         } else if (folio_mapped(src)) {
    1253             :                 /* Establish migration ptes */
    1254             :                 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
    1255             :                                !folio_test_ksm(src) && !anon_vma, src);
    1256           0 :                 try_to_migrate(src, TTU_BATCH_FLUSH);
    1257           0 :                 page_was_mapped = 1;
    1258             :         }
    1259             : 
    1260           0 :         if (!folio_mapped(src)) {
    1261           0 :                 __migrate_folio_record(dst, page_was_mapped, anon_vma);
    1262           0 :                 return MIGRATEPAGE_UNMAP;
    1263             :         }
    1264             : 
    1265             : out:
    1266             :         /*
    1267             :          * A folio that has not been unmapped will be restored to the
    1268             :          * right list unless we want to retry.
    1269             :          */
    1270           0 :         if (rc == -EAGAIN || rc == -EDEADLOCK)
    1271           0 :                 ret = NULL;
    1272             : 
    1273           0 :         migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
    1274           0 :         migrate_folio_undo_dst(dst, dst_locked, put_new_page, private);
    1275             : 
    1276           0 :         return rc;
    1277             : }
    1278             : 
    1279             : /* Migrate the folio to the newly allocated folio in dst. */
    1280           0 : static int migrate_folio_move(free_page_t put_new_page, unsigned long private,
    1281             :                               struct folio *src, struct folio *dst,
    1282             :                               enum migrate_mode mode, enum migrate_reason reason,
    1283             :                               struct list_head *ret)
    1284             : {
    1285             :         int rc;
    1286           0 :         int page_was_mapped = 0;
    1287           0 :         struct anon_vma *anon_vma = NULL;
    1288           0 :         bool is_lru = !__PageMovable(&src->page);
    1289             :         struct list_head *prev;
    1290             : 
    1291           0 :         __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
    1292           0 :         prev = dst->lru.prev;
    1293           0 :         list_del(&dst->lru);
    1294             : 
    1295           0 :         rc = move_to_new_folio(dst, src, mode);
    1296           0 :         if (rc)
    1297             :                 goto out;
    1298             : 
    1299           0 :         if (unlikely(!is_lru))
    1300             :                 goto out_unlock_both;
    1301             : 
    1302             :         /*
    1303             :          * When successful, push dst to LRU immediately: so that if it
    1304             :          * turns out to be an mlocked page, remove_migration_ptes() will
    1305             :          * automatically build up the correct dst->mlock_count for it.
    1306             :          *
    1307             :          * We would like to do something similar for the old page, when
    1308             :          * unsuccessful, and other cases when a page has been temporarily
    1309             :          * isolated from the unevictable LRU: but this case is the easiest.
    1310             :          */
    1311           0 :         folio_add_lru(dst);
    1312           0 :         if (page_was_mapped)
    1313           0 :                 lru_add_drain();
    1314             : 
    1315           0 :         if (page_was_mapped)
    1316           0 :                 remove_migration_ptes(src, dst, false);
    1317             : 
    1318             : out_unlock_both:
    1319           0 :         folio_unlock(dst);
    1320           0 :         set_page_owner_migrate_reason(&dst->page, reason);
    1321             :         /*
    1322             :          * If migration is successful, decrease refcount of dst,
    1323             :          * which will not free the page because new page owner increased
    1324             :          * refcounter.
    1325             :          */
    1326           0 :         folio_put(dst);
    1327             : 
    1328             :         /*
    1329             :          * A folio that has been migrated has all references removed
    1330             :          * and will be freed.
    1331             :          */
    1332           0 :         list_del(&src->lru);
    1333             :         /* Drop an anon_vma reference if we took one */
    1334           0 :         if (anon_vma)
    1335           0 :                 put_anon_vma(anon_vma);
    1336           0 :         folio_unlock(src);
    1337           0 :         migrate_folio_done(src, reason);
    1338             : 
    1339           0 :         return rc;
    1340             : out:
    1341             :         /*
    1342             :          * A folio that has not been migrated will be restored to the
    1343             :          * right list unless we want to retry.
    1344             :          */
    1345           0 :         if (rc == -EAGAIN) {
    1346           0 :                 list_add(&dst->lru, prev);
    1347           0 :                 __migrate_folio_record(dst, page_was_mapped, anon_vma);
    1348           0 :                 return rc;
    1349             :         }
    1350             : 
    1351           0 :         migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
    1352           0 :         migrate_folio_undo_dst(dst, true, put_new_page, private);
    1353             : 
    1354           0 :         return rc;
    1355             : }
    1356             : 
    1357             : /*
    1358             :  * Counterpart of unmap_and_move_page() for hugepage migration.
    1359             :  *
    1360             :  * This function doesn't wait for the completion of hugepage I/O,
    1361             :  * because there is no race between I/O and migration for hugepages.
    1362             :  * Note that currently hugepage I/O occurs only in direct I/O
    1363             :  * where no lock is held and PG_writeback is irrelevant,
    1364             :  * and the writeback status of all subpages is counted in the reference
    1365             :  * count of the head page (i.e. if all subpages of a 2MB hugepage are
    1366             :  * under direct I/O, the reference count of the head page is 512 and a bit more.)
    1367             :  * This means that when we try to migrate a hugepage whose subpages are
    1368             :  * doing direct I/O, some references remain after try_to_unmap() and
    1369             :  * hugepage migration fails without data corruption.
    1370             :  *
    1371             :  * There is also no race when direct I/O is issued on a page under migration,
    1372             :  * because then the pte is replaced with a migration swap entry and the direct
    1373             :  * I/O code will wait in the page fault for migration to complete.
    1374             :  */
    1375             : static int unmap_and_move_huge_page(new_page_t get_new_page,
    1376             :                                 free_page_t put_new_page, unsigned long private,
    1377             :                                 struct page *hpage, int force,
    1378             :                                 enum migrate_mode mode, int reason,
    1379             :                                 struct list_head *ret)
    1380             : {
    1381             :         struct folio *dst, *src = page_folio(hpage);
    1382             :         int rc = -EAGAIN;
    1383             :         int page_was_mapped = 0;
    1384             :         struct page *new_hpage;
    1385             :         struct anon_vma *anon_vma = NULL;
    1386             :         struct address_space *mapping = NULL;
    1387             : 
    1388             :         if (folio_ref_count(src) == 1) {
    1389             :                 /* page was freed from under us. So we are done. */
    1390             :                 folio_putback_active_hugetlb(src);
    1391             :                 return MIGRATEPAGE_SUCCESS;
    1392             :         }
    1393             : 
    1394             :         new_hpage = get_new_page(hpage, private);
    1395             :         if (!new_hpage)
    1396             :                 return -ENOMEM;
    1397             :         dst = page_folio(new_hpage);
    1398             : 
    1399             :         if (!folio_trylock(src)) {
    1400             :                 if (!force)
    1401             :                         goto out;
    1402             :                 switch (mode) {
    1403             :                 case MIGRATE_SYNC:
    1404             :                 case MIGRATE_SYNC_NO_COPY:
    1405             :                         break;
    1406             :                 default:
    1407             :                         goto out;
    1408             :                 }
    1409             :                 folio_lock(src);
    1410             :         }
    1411             : 
    1412             :         /*
    1413             :          * Check for pages which are in the process of being freed.  Without
    1414             :          * folio_mapping() set, the hugetlbfs-specific move page routine will not
    1415             :          * be called and we could leak usage counts for subpools.
    1416             :          */
    1417             :         if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
    1418             :                 rc = -EBUSY;
    1419             :                 goto out_unlock;
    1420             :         }
    1421             : 
    1422             :         if (folio_test_anon(src))
    1423             :                 anon_vma = folio_get_anon_vma(src);
    1424             : 
    1425             :         if (unlikely(!folio_trylock(dst)))
    1426             :                 goto put_anon;
    1427             : 
    1428             :         if (folio_mapped(src)) {
    1429             :                 enum ttu_flags ttu = 0;
    1430             : 
    1431             :                 if (!folio_test_anon(src)) {
    1432             :                         /*
    1433             :                          * In shared mappings, try_to_unmap could potentially
    1434             :                          * call huge_pmd_unshare.  Because of this, take the
    1435             :                          * semaphore in write mode here and set TTU_RMAP_LOCKED
    1436             :                          * to let lower levels know we have taken the lock.
    1437             :                          */
    1438             :                         mapping = hugetlb_page_mapping_lock_write(hpage);
    1439             :                         if (unlikely(!mapping))
    1440             :                                 goto unlock_put_anon;
    1441             : 
    1442             :                         ttu = TTU_RMAP_LOCKED;
    1443             :                 }
    1444             : 
    1445             :                 try_to_migrate(src, ttu);
    1446             :                 page_was_mapped = 1;
    1447             : 
    1448             :                 if (ttu & TTU_RMAP_LOCKED)
    1449             :                         i_mmap_unlock_write(mapping);
    1450             :         }
    1451             : 
    1452             :         if (!folio_mapped(src))
    1453             :                 rc = move_to_new_folio(dst, src, mode);
    1454             : 
    1455             :         if (page_was_mapped)
    1456             :                 remove_migration_ptes(src,
    1457             :                         rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
    1458             : 
    1459             : unlock_put_anon:
    1460             :         folio_unlock(dst);
    1461             : 
    1462             : put_anon:
    1463             :         if (anon_vma)
    1464             :                 put_anon_vma(anon_vma);
    1465             : 
    1466             :         if (rc == MIGRATEPAGE_SUCCESS) {
    1467             :                 move_hugetlb_state(src, dst, reason);
    1468             :                 put_new_page = NULL;
    1469             :         }
    1470             : 
    1471             : out_unlock:
    1472             :         folio_unlock(src);
    1473             : out:
    1474             :         if (rc == MIGRATEPAGE_SUCCESS)
    1475             :                 folio_putback_active_hugetlb(src);
    1476             :         else if (rc != -EAGAIN)
    1477             :                 list_move_tail(&src->lru, ret);
    1478             : 
    1479             :         /*
    1480             :          * If migration was not successful and there's a freeing callback, use
    1481             :          * it.  Otherwise, put_page() will drop the reference grabbed during
    1482             :          * isolation.
    1483             :          */
    1484             :         if (put_new_page)
    1485             :                 put_new_page(new_hpage, private);
    1486             :         else
    1487             :                 folio_putback_active_hugetlb(dst);
    1488             : 
    1489             :         return rc;
    1490             : }
    1491             : 
    1492           0 : static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
    1493             : {
    1494             :         int rc;
    1495             : 
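                     :         /* split_folio_to_list() requires the folio to be locked. */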
    1496           0 :         folio_lock(folio);
    1497           0 :         rc = split_folio_to_list(folio, split_folios);
    1498           0 :         folio_unlock(folio);
    1499             :         if (!rc)
    1500           0 :                 list_move_tail(&folio->lru, split_folios);
    1501             : 
    1502           0 :         return rc;
    1503             : }
    1504             : 
    1505             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
    1506             : #define NR_MAX_BATCHED_MIGRATION        HPAGE_PMD_NR
    1507             : #else
    1508             : #define NR_MAX_BATCHED_MIGRATION        512
    1509             : #endif
    1510             : #define NR_MAX_MIGRATE_PAGES_RETRY      10
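                     : 
                     : /*
                     :  * The batch size caps how many folios may be held in the unmapped
                     :  * intermediate state at once (and over how many folios one batched
                     :  * TLB flush is amortized); the retry cap limits how often a busy
                     :  * folio is reattempted before it is counted as failed.
                     :  */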
    1511             : 
    1512             : struct migrate_pages_stats {
    1513             :         int nr_succeeded;       /* Normal and large folios migrated successfully, in
    1514             :                                    units of base pages */
    1515             :         int nr_failed_pages;    /* Normal and large folios failed to be migrated, in
    1516             :                                    units of base pages.  Untried folios aren't counted */
    1517             :         int nr_thp_succeeded;   /* THP migrated successfully */
    1518             :         int nr_thp_failed;      /* THP failed to be migrated */
    1519             :         int nr_thp_split;       /* THP split before migrating */
    1520             : };
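                     : 
                     : /*
                     :  * Hypothetical helper, for exposition only: the succeeded and failed
                     :  * counts are both kept in units of base pages, so they can be combined
                     :  * directly.
                     :  */
                     : static inline int migrate_pages_stats_total(const struct migrate_pages_stats *stats)
                     : {
                     :         return stats->nr_succeeded + stats->nr_failed_pages;
                     : }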
    1521             : 
    1522             : /*
    1523             :  * Returns the number of hugetlb folios that were not migrated, or an error code
    1524             :  * after NR_MAX_MIGRATE_PAGES_RETRY attempts, or once no hugetlb folios are
    1525             :  * movable any more because the list has become empty or no retryable hugetlb
    1526             :  * folios remain. It is the caller's responsibility to call putback_movable_pages()
    1527             :  * only if ret != 0.
    1528             :  */
    1529             : static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
    1530             :                             free_page_t put_new_page, unsigned long private,
    1531             :                             enum migrate_mode mode, int reason,
    1532             :                             struct migrate_pages_stats *stats,
    1533             :                             struct list_head *ret_folios)
    1534             : {
    1535             :         int retry = 1;
    1536             :         int nr_failed = 0;
    1537             :         int nr_retry_pages = 0;
    1538             :         int pass = 0;
    1539             :         struct folio *folio, *folio2;
    1540             :         int rc, nr_pages;
    1541             : 
    1542           0 :         for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
    1543           0 :                 retry = 0;
    1544           0 :                 nr_retry_pages = 0;
    1545             : 
    1546           0 :                 list_for_each_entry_safe(folio, folio2, from, lru) {
    1547           0 :                         if (!folio_test_hugetlb(folio))
    1548           0 :                                 continue;
    1549             : 
    1550             :                         nr_pages = folio_nr_pages(folio);
    1551             : 
    1552             :                         cond_resched();
    1553             : 
    1554             :                         /*
    1555             :                          * Migratability of hugepages depends on the architecture and
    1556             :                          * their size.  This check is necessary because some callers
    1557             :                          * of hugepage migration like soft offline and memory
    1558             :                          * hotremove don't walk through page tables or check whether
    1559             :                          * the hugepage is pmd-based or not before kicking migration.
    1560             :                          */
    1561             :                         if (!hugepage_migration_supported(folio_hstate(folio))) {
    1562             :                                 nr_failed++;
    1563             :                                 stats->nr_failed_pages += nr_pages;
    1564             :                                 list_move_tail(&folio->lru, ret_folios);
    1565             :                                 continue;
    1566             :                         }
    1567             : 
    1568             :                         rc = unmap_and_move_huge_page(get_new_page,
    1569             :                                                       put_new_page, private,
    1570             :                                                       &folio->page, pass > 2, mode,
    1571             :                                                       reason, ret_folios);
    1572             :                         /*
    1573             :                          * The rules are:
    1574             :                          *      Success: hugetlb folio will be put back
    1575             :                          *      -EAGAIN: stay on the from list
    1576             :                          *      -ENOMEM: stay on the from list
    1577             :                          *      Other errno: put on ret_folios list
    1578             :                          */
    1579             :                         switch(rc) {
    1580             :                         case -ENOMEM:
    1581             :                                 /*
    1582             :                                  * When memory is low, don't bother to try to migrate
    1583             :                                  * other folios, just exit.
    1584             :                                  */
    1585             :                                 stats->nr_failed_pages += nr_pages + nr_retry_pages;
    1586             :                                 return -ENOMEM;
    1587             :                         case -EAGAIN:
    1588             :                                 retry++;
    1589             :                                 nr_retry_pages += nr_pages;
    1590             :                                 break;
    1591             :                         case MIGRATEPAGE_SUCCESS:
    1592             :                                 stats->nr_succeeded += nr_pages;
    1593             :                                 break;
    1594             :                         default:
    1595             :                                 /*
    1596             :                                  * Permanent failure (-EBUSY, etc.):
    1597             :                                  * unlike -EAGAIN case, the failed folio is
    1598             :                                  * removed from migration folio list and not
    1599             :                                  * retried in the next outer loop.
    1600             :                                  */
    1601             :                                 nr_failed++;
    1602             :                                 stats->nr_failed_pages += nr_pages;
    1603             :                                 break;
    1604             :                         }
    1605             :                 }
    1606             :         }
    1607             :         /*
    1608             :          * nr_failed is the number of hugetlb folios that failed to migrate.  After
    1609             :          * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
    1610             :          * folios as failed.
    1611             :          */
    1612             :         nr_failed += retry;
    1613             :         stats->nr_failed_pages += nr_retry_pages;
    1614             : 
    1615             :         return nr_failed;
    1616             : }
    1617             : 
    1618             : /*
    1619             :  * migrate_pages_batch() first unmaps as many folios in the from list as
    1620             :  * possible, then moves the unmapped folios.
    1621             :  */
    1622           0 : static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
    1623             :                 free_page_t put_new_page, unsigned long private,
    1624             :                 enum migrate_mode mode, int reason, struct list_head *ret_folios,
    1625             :                 struct migrate_pages_stats *stats)
    1626             : {
    1627             :         int retry;
    1628           0 :         int large_retry = 1;
    1629           0 :         int thp_retry = 1;
    1630           0 :         int nr_failed = 0;
    1631           0 :         int nr_retry_pages = 0;
    1632           0 :         int nr_large_failed = 0;
    1633           0 :         int pass = 0;
    1634           0 :         bool is_large = false;
    1635           0 :         bool is_thp = false;
    1636           0 :         struct folio *folio, *folio2, *dst = NULL, *dst2;
    1637             :         int rc, rc_saved, nr_pages;
    1638           0 :         LIST_HEAD(split_folios);
    1639           0 :         LIST_HEAD(unmap_folios);
    1640           0 :         LIST_HEAD(dst_folios);
    1641           0 :         bool nosplit = (reason == MR_NUMA_MISPLACED);
    1642           0 :         bool no_split_folio_counting = false;
    1643             :         bool avoid_force_lock;
    1644             : 
    1645             : retry:
    1646           0 :         rc_saved = 0;
    1647           0 :         avoid_force_lock = false;
    1648           0 :         retry = 1;
    1649           0 :         for (pass = 0;
    1650           0 :              pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry);
    1651           0 :              pass++) {
    1652           0 :                 retry = 0;
    1653           0 :                 large_retry = 0;
    1654           0 :                 thp_retry = 0;
    1655           0 :                 nr_retry_pages = 0;
    1656             : 
    1657           0 :                 list_for_each_entry_safe(folio, folio2, from, lru) {
    1658             :                         /*
    1659             :                          * Large folio statistics are based on the source large
    1660             :                          * folio. Capture required information that might get
    1661             :                          * lost during migration.
    1662             :                          */
    1663           0 :                         is_large = folio_test_large(folio);
    1664           0 :                         is_thp = is_large && folio_test_pmd_mappable(folio);
    1665           0 :                         nr_pages = folio_nr_pages(folio);
    1666             : 
    1667           0 :                         cond_resched();
    1668             : 
    1669             :                         /*
    1670             :                          * Large folio migration might be unsupported or
    1671             :                          * the allocation might fail, so we should retry
    1672             :                          * on the same folio with the large folio split
    1673             :                          * into normal folios.
    1674             :                          *
    1675             :                          * Split folios are put in split_folios, and
    1676             :                          * we will migrate them after the rest of the
    1677             :                          * list is processed.
    1678             :                          */
    1679             :                         if (!thp_migration_supported() && is_thp) {
    1680             :                                 nr_large_failed++;
    1681             :                                 stats->nr_thp_failed++;
    1682             :                                 if (!try_split_folio(folio, &split_folios)) {
    1683             :                                         stats->nr_thp_split++;
    1684             :                                         continue;
    1685             :                                 }
    1686             :                                 stats->nr_failed_pages += nr_pages;
    1687             :                                 list_move_tail(&folio->lru, ret_folios);
    1688             :                                 continue;
    1689             :                         }
    1690             : 
    1691           0 :                         rc = migrate_folio_unmap(get_new_page, put_new_page, private,
    1692             :                                                  folio, &dst, pass > 2, avoid_force_lock,
    1693             :                                                  mode, reason, ret_folios);
    1694             :                         /*
    1695             :                          * The rules are:
    1696             :                          *      Success: folio will be freed
    1697             :                          *      Unmap: folio will be put on unmap_folios list,
    1698             :                          *             dst folio put on dst_folios list
    1699             :                          *      -EAGAIN: stay on the from list
    1700             :                          *      -EDEADLOCK: stay on the from list
    1701             :                          *      -ENOMEM: stay on the from list
    1702             :                          *      Other errno: put on ret_folios list
    1703             :                          */
    1704           0 :                         switch(rc) {
    1705             :                         case -ENOMEM:
    1706             :                                 /*
    1707             :                                  * When memory is low, don't bother to try to migrate
    1708             :                                  * other folios, move unmapped folios, then exit.
    1709             :                                  */
    1710           0 :                                 if (is_large) {
    1711           0 :                                         nr_large_failed++;
    1712             :                                         stats->nr_thp_failed += is_thp;
    1713             :                                         /* Large folio NUMA faulting doesn't split to retry. */
    1714           0 :                                         if (!nosplit) {
    1715           0 :                                                 int ret = try_split_folio(folio, &split_folios);
    1716             : 
    1717           0 :                                                 if (!ret) {
    1718             :                                                         stats->nr_thp_split += is_thp;
    1719             :                                                         break;
    1720           0 :                                                 } else if (reason == MR_LONGTERM_PIN &&
    1721           0 :                                                            ret == -EAGAIN) {
    1722             :                                                         /*
    1723             :                                                          * Try again to split the large folio to
    1724             :                                                          * mitigate the failure of longterm pinning.
    1725             :                                                          */
    1726           0 :                                                         large_retry++;
    1727           0 :                                                         thp_retry += is_thp;
    1728           0 :                                                         nr_retry_pages += nr_pages;
    1729           0 :                                                         break;
    1730             :                                                 }
    1731             :                                         }
    1732           0 :                                 } else if (!no_split_folio_counting) {
    1733           0 :                                         nr_failed++;
    1734             :                                 }
    1735             : 
    1736           0 :                                 stats->nr_failed_pages += nr_pages + nr_retry_pages;
    1737             :                                 /*
    1738             :                                  * There might be some split folios of failed-to-migrate
    1739             :                                  * large folios left on the split_folios list. Move them to
    1740             :                                  * the ret_folios list so that the caller can put them back
    1741             :                                  * on the right list; otherwise the folio refcount is leaked.
    1742             :                                  */
    1743           0 :                                 list_splice_init(&split_folios, ret_folios);
    1744             :                                 /* nr_failed isn't updated, as it's unused from here on */
    1745           0 :                                 nr_large_failed += large_retry;
    1746             :                                 stats->nr_thp_failed += thp_retry;
    1747           0 :                                 rc_saved = rc;
    1748           0 :                                 if (list_empty(&unmap_folios))
    1749             :                                         goto out;
    1750             :                                 else
    1751             :                                         goto move;
    1752             :                         case -EDEADLOCK:
    1753             :                                 /*
    1754             :                                  * The folio cannot be locked for potential deadlock.
    1755             :                                  * Go move (and unlock) all locked folios.  Then we can
    1756             :                                  * try again.
    1757             :                                  */
    1758             :                                 rc_saved = rc;
    1759             :                                 goto move;
    1760             :                         case -EAGAIN:
    1761           0 :                                 if (is_large) {
    1762           0 :                                         large_retry++;
    1763           0 :                                         thp_retry += is_thp;
    1764           0 :                                 } else if (!no_split_folio_counting) {
    1765           0 :                                         retry++;
    1766             :                                 }
    1767           0 :                                 nr_retry_pages += nr_pages;
    1768           0 :                                 break;
    1769             :                         case MIGRATEPAGE_SUCCESS:
    1770           0 :                                 stats->nr_succeeded += nr_pages;
    1771             :                                 stats->nr_thp_succeeded += is_thp;
    1772           0 :                                 break;
    1773             :                         case MIGRATEPAGE_UNMAP:
    1774             :                                 /*
    1775             :                                  * We have locked some folios; don't force-lock
    1776             :                                  * any more, to avoid deadlock.
    1777             :                                  */
    1778           0 :                                 avoid_force_lock = true;
    1779           0 :                                 list_move_tail(&folio->lru, &unmap_folios);
    1780           0 :                                 list_add_tail(&dst->lru, &dst_folios);
    1781             :                                 break;
    1782             :                         default:
    1783             :                                 /*
    1784             :                                  * Permanent failure (-EBUSY, etc.):
    1785             :                                  * unlike -EAGAIN case, the failed folio is
    1786             :                                  * removed from migration folio list and not
    1787             :                                  * retried in the next outer loop.
    1788             :                                  */
    1789           0 :                                 if (is_large) {
    1790           0 :                                         nr_large_failed++;
    1791             :                                         stats->nr_thp_failed += is_thp;
    1792           0 :                                 } else if (!no_split_folio_counting) {
    1793           0 :                                         nr_failed++;
    1794             :                                 }
    1795             : 
    1796           0 :                                 stats->nr_failed_pages += nr_pages;
    1797           0 :                                 break;
    1798             :                         }
    1799             :                 }
    1800             :         }
    1801           0 :         nr_failed += retry;
    1802           0 :         nr_large_failed += large_retry;
    1803           0 :         stats->nr_thp_failed += thp_retry;
    1804           0 :         stats->nr_failed_pages += nr_retry_pages;
    1805             : move:
    1806             :         /* Flush TLBs for all unmapped folios */
    1807             :         try_to_unmap_flush();
    1808             : 
    1809           0 :         retry = 1;
    1810           0 :         for (pass = 0;
    1811           0 :              pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry);
    1812           0 :              pass++) {
    1813           0 :                 retry = 0;
    1814           0 :                 large_retry = 0;
    1815           0 :                 thp_retry = 0;
    1816           0 :                 nr_retry_pages = 0;
    1817             : 
    1818           0 :                 dst = list_first_entry(&dst_folios, struct folio, lru);
    1819           0 :                 dst2 = list_next_entry(dst, lru);
    1820           0 :                 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
    1821           0 :                         is_large = folio_test_large(folio);
    1822           0 :                         is_thp = is_large && folio_test_pmd_mappable(folio);
    1823           0 :                         nr_pages = folio_nr_pages(folio);
    1824             : 
    1825           0 :                         cond_resched();
    1826             : 
    1827           0 :                         rc = migrate_folio_move(put_new_page, private,
    1828             :                                                 folio, dst, mode,
    1829             :                                                 reason, ret_folios);
    1830             :                         /*
    1831             :                          * The rules are:
    1832             :                          *      Success: folio will be freed
    1833             :                          *      -EAGAIN: stay on the unmap_folios list
    1834             :                          *      Other errno: put on ret_folios list
    1835             :                          */
    1836           0 :                         switch(rc) {
    1837             :                         case -EAGAIN:
    1838           0 :                                 if (is_large) {
    1839           0 :                                         large_retry++;
    1840           0 :                                         thp_retry += is_thp;
    1841           0 :                                 } else if (!no_split_folio_counting) {
    1842           0 :                                         retry++;
    1843             :                                 }
    1844           0 :                                 nr_retry_pages += nr_pages;
    1845           0 :                                 break;
    1846             :                         case MIGRATEPAGE_SUCCESS:
    1847           0 :                                 stats->nr_succeeded += nr_pages;
    1848             :                                 stats->nr_thp_succeeded += is_thp;
    1849           0 :                                 break;
    1850             :                         default:
    1851           0 :                                 if (is_large) {
    1852           0 :                                         nr_large_failed++;
    1853             :                                         stats->nr_thp_failed += is_thp;
    1854           0 :                                 } else if (!no_split_folio_counting) {
    1855           0 :                                         nr_failed++;
    1856             :                                 }
    1857             : 
    1858           0 :                                 stats->nr_failed_pages += nr_pages;
    1859           0 :                                 break;
    1860             :                         }
    1861           0 :                         dst = dst2;
    1862           0 :                         dst2 = list_next_entry(dst, lru);
    1863             :                 }
    1864             :         }
    1865           0 :         nr_failed += retry;
    1866           0 :         nr_large_failed += large_retry;
    1867           0 :         stats->nr_thp_failed += thp_retry;
    1868           0 :         stats->nr_failed_pages += nr_retry_pages;
    1869             : 
    1870           0 :         if (rc_saved)
    1871             :                 rc = rc_saved;
    1872             :         else
    1873           0 :                 rc = nr_failed + nr_large_failed;
    1874             : out:
    1875             :         /* Cleanup remaining folios */
    1876           0 :         dst = list_first_entry(&dst_folios, struct folio, lru);
    1877           0 :         dst2 = list_next_entry(dst, lru);
    1878           0 :         list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
    1879           0 :                 int page_was_mapped = 0;
    1880           0 :                 struct anon_vma *anon_vma = NULL;
    1881             : 
    1882           0 :                 __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
    1883           0 :                 migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
    1884             :                                        true, ret_folios);
    1885           0 :                 list_del(&dst->lru);
    1886           0 :                 migrate_folio_undo_dst(dst, true, put_new_page, private);
    1887           0 :                 dst = dst2;
    1888           0 :                 dst2 = list_next_entry(dst, lru);
    1889             :         }
    1890             : 
    1891             :         /*
     1892             :          * Try to migrate the split folios of fail-to-migrate large folios;
     1893             :          * no nr_failed counting in this round, since all split folios of
     1894             :          * a large folio are counted as 1 failure in the first round.
    1895             :          */
    1896           0 :         if (rc >= 0 && !list_empty(&split_folios)) {
    1897             :                 /*
    1898             :                  * Move non-migrated folios (after NR_MAX_MIGRATE_PAGES_RETRY
    1899             :                  * retries) to ret_folios to avoid migrating them again.
    1900             :                  */
    1901           0 :                 list_splice_init(from, ret_folios);
    1902             :                 list_splice_init(&split_folios, from);
    1903             :                 no_split_folio_counting = true;
    1904             :                 goto retry;
    1905             :         }
    1906             : 
    1907             :         /*
     1908             :          * We have unlocked all locked folios, so we can force-lock now;
     1909             :          * let's try again.
    1910             :          */
    1911           0 :         if (rc == -EDEADLOCK)
    1912             :                 goto retry;
    1913             : 
    1914           0 :         return rc;
    1915             : }
    1916             : 
    1917             : /*
     1918             :  * migrate_pages - migrate the folios specified in a list to the free folios
     1919             :  *                 supplied as the target of the page migration
    1920             :  *
    1921             :  * @from:               The list of folios to be migrated.
    1922             :  * @get_new_page:       The function used to allocate free folios to be used
    1923             :  *                      as the target of the folio migration.
    1924             :  * @put_new_page:       The function used to free target folios if migration
    1925             :  *                      fails, or NULL if no special handling is necessary.
    1926             :  * @private:            Private data to be passed on to get_new_page()
    1927             :  * @mode:               The migration mode that specifies the constraints for
    1928             :  *                      folio migration, if any.
    1929             :  * @reason:             The reason for folio migration.
    1930             :  * @ret_succeeded:      Set to the number of folios migrated successfully if
    1931             :  *                      the caller passes a non-NULL pointer.
    1932             :  *
     1933             :  * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or when no
     1934             :  * folios are movable any more because the list has become empty or no
     1935             :  * retryable folios remain. It is the caller's responsibility to call
     1936             :  * putback_movable_pages() only if ret != 0.
    1937             :  *
     1938             :  * Returns the number of {normal, large, hugetlb} folios that were not
     1939             :  * migrated, or an error code. The number of large folio splits is counted
     1940             :  * as the number of non-migrated large folios, no matter how many split
     1941             :  * folios of the large folio are migrated successfully.
    1942             :  */
    1943           0 : int migrate_pages(struct list_head *from, new_page_t get_new_page,
    1944             :                 free_page_t put_new_page, unsigned long private,
    1945             :                 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
    1946             : {
    1947             :         int rc, rc_gather;
    1948             :         int nr_pages;
    1949             :         struct folio *folio, *folio2;
    1950           0 :         LIST_HEAD(folios);
    1951           0 :         LIST_HEAD(ret_folios);
    1952             :         struct migrate_pages_stats stats;
    1953             : 
    1954           0 :         trace_mm_migrate_pages_start(mode, reason);
    1955             : 
    1956           0 :         memset(&stats, 0, sizeof(stats));
    1957             : 
    1958           0 :         rc_gather = migrate_hugetlbs(from, get_new_page, put_new_page, private,
    1959             :                                      mode, reason, &stats, &ret_folios);
    1960             :         if (rc_gather < 0)
    1961             :                 goto out;
    1962             : again:
    1963           0 :         nr_pages = 0;
    1964           0 :         list_for_each_entry_safe(folio, folio2, from, lru) {
     1965             :                 /* Retried hugetlb folios will be kept in the list */
    1966           0 :                 if (folio_test_hugetlb(folio)) {
    1967             :                         list_move_tail(&folio->lru, &ret_folios);
    1968             :                         continue;
    1969             :                 }
    1970             : 
    1971           0 :                 nr_pages += folio_nr_pages(folio);
    1972           0 :                 if (nr_pages > NR_MAX_BATCHED_MIGRATION)
    1973             :                         break;
    1974             :         }
    1975           0 :         if (nr_pages > NR_MAX_BATCHED_MIGRATION)
    1976           0 :                 list_cut_before(&folios, from, &folio->lru);
    1977             :         else
    1978             :                 list_splice_init(from, &folios);
    1979           0 :         rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
    1980             :                                  mode, reason, &ret_folios, &stats);
    1981           0 :         list_splice_tail_init(&folios, &ret_folios);
    1982           0 :         if (rc < 0) {
    1983             :                 rc_gather = rc;
    1984             :                 goto out;
    1985             :         }
    1986           0 :         rc_gather += rc;
    1987           0 :         if (!list_empty(from))
    1988             :                 goto again;
    1989             : out:
    1990             :         /*
     1991             :          * Put the permanently failed folios back on the migration list;
     1992             :          * they will be put back on the right list by the caller.
    1993             :          */
    1994           0 :         list_splice(&ret_folios, from);
    1995             : 
    1996             :         /*
    1997             :          * Return 0 in case all split folios of fail-to-migrate large folios
    1998             :          * are migrated successfully.
    1999             :          */
    2000           0 :         if (list_empty(from))
    2001           0 :                 rc_gather = 0;
    2002             : 
    2003           0 :         count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
    2004           0 :         count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
    2005           0 :         count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
    2006           0 :         count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
    2007           0 :         count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
    2008           0 :         trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
    2009           0 :                                stats.nr_thp_succeeded, stats.nr_thp_failed,
    2010           0 :                                stats.nr_thp_split, mode, reason);
    2011             : 
    2012           0 :         if (ret_succeeded)
    2013           0 :                 *ret_succeeded = stats.nr_succeeded;
    2014             : 
    2015           0 :         return rc_gather;
    2016             : }
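
/*
 * A minimal sketch of a migrate_pages() caller, modeled on
 * do_move_pages_to_node() below.  example_migrate_to_node() is a
 * hypothetical helper, and the folios on @isolated are assumed to have
 * been taken off the LRU already.
 */
static int example_migrate_to_node(struct list_head *isolated, int node)
{
        struct migration_target_control mtc = {
                .nid = node,
                .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
        };
        unsigned int nr_succeeded = 0;
        int nr_failed;

        nr_failed = migrate_pages(isolated, alloc_migration_target, NULL,
                                  (unsigned long)&mtc, MIGRATE_SYNC,
                                  MR_SYSCALL, &nr_succeeded);
        if (nr_failed)          /* ret != 0: put the leftovers back */
                putback_movable_pages(isolated);
        return nr_failed;
}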
    2017             : 
    2018           0 : struct page *alloc_migration_target(struct page *page, unsigned long private)
    2019             : {
    2020           0 :         struct folio *folio = page_folio(page);
    2021             :         struct migration_target_control *mtc;
    2022             :         gfp_t gfp_mask;
    2023           0 :         unsigned int order = 0;
    2024           0 :         struct folio *hugetlb_folio = NULL;
    2025           0 :         struct folio *new_folio = NULL;
    2026             :         int nid;
    2027             :         int zidx;
    2028             : 
    2029           0 :         mtc = (struct migration_target_control *)private;
    2030           0 :         gfp_mask = mtc->gfp_mask;
    2031           0 :         nid = mtc->nid;
    2032           0 :         if (nid == NUMA_NO_NODE)
    2033           0 :                 nid = folio_nid(folio);
    2034             : 
    2035           0 :         if (folio_test_hugetlb(folio)) {
    2036             :                 struct hstate *h = folio_hstate(folio);
    2037             : 
    2038             :                 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
    2039             :                 hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
    2040             :                                                 mtc->nmask, gfp_mask);
    2041             :                 return &hugetlb_folio->page;
    2042             :         }
    2043             : 
    2044           0 :         if (folio_test_large(folio)) {
    2045             :                 /*
     2046             :                  * Clear __GFP_RECLAIM to make the migration callback
    2047             :                  * consistent with regular THP allocations.
    2048             :                  */
    2049           0 :                 gfp_mask &= ~__GFP_RECLAIM;
    2050           0 :                 gfp_mask |= GFP_TRANSHUGE;
    2051             :                 order = folio_order(folio);
    2052             :         }
    2053           0 :         zidx = zone_idx(folio_zone(folio));
    2054           0 :         if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
    2055           0 :                 gfp_mask |= __GFP_HIGHMEM;
    2056             : 
    2057           0 :         new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
    2058             : 
    2059           0 :         return &new_folio->page;
    2060             : }
    2061             : 
    2062             : #ifdef CONFIG_NUMA
    2063             : 
    2064             : static int store_status(int __user *status, int start, int value, int nr)
    2065             : {
    2066             :         while (nr-- > 0) {
    2067             :                 if (put_user(value, status + start))
    2068             :                         return -EFAULT;
    2069             :                 start++;
    2070             :         }
    2071             : 
    2072             :         return 0;
    2073             : }
    2074             : 
    2075             : static int do_move_pages_to_node(struct mm_struct *mm,
    2076             :                 struct list_head *pagelist, int node)
    2077             : {
    2078             :         int err;
    2079             :         struct migration_target_control mtc = {
    2080             :                 .nid = node,
    2081             :                 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
    2082             :         };
    2083             : 
    2084             :         err = migrate_pages(pagelist, alloc_migration_target, NULL,
    2085             :                 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
    2086             :         if (err)
    2087             :                 putback_movable_pages(pagelist);
    2088             :         return err;
    2089             : }
    2090             : 
    2091             : /*
    2092             :  * Resolves the given address to a struct page, isolates it from the LRU and
     2093             :  * puts it on the given pagelist.
    2094             :  * Returns:
    2095             :  *     errno - if the page cannot be found/isolated
    2096             :  *     0 - when it doesn't have to be migrated because it is already on the
    2097             :  *         target node
    2098             :  *     1 - when it has been queued
    2099             :  */
    2100             : static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
    2101             :                 int node, struct list_head *pagelist, bool migrate_all)
    2102             : {
    2103             :         struct vm_area_struct *vma;
    2104             :         struct page *page;
    2105             :         int err;
    2106             :         bool isolated;
    2107             : 
    2108             :         mmap_read_lock(mm);
    2109             :         err = -EFAULT;
    2110             :         vma = vma_lookup(mm, addr);
    2111             :         if (!vma || !vma_migratable(vma))
    2112             :                 goto out;
    2113             : 
    2114             :         /* FOLL_DUMP to ignore special (like zero) pages */
    2115             :         page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
    2116             : 
    2117             :         err = PTR_ERR(page);
    2118             :         if (IS_ERR(page))
    2119             :                 goto out;
    2120             : 
    2121             :         err = -ENOENT;
    2122             :         if (!page)
    2123             :                 goto out;
    2124             : 
    2125             :         if (is_zone_device_page(page))
    2126             :                 goto out_putpage;
    2127             : 
    2128             :         err = 0;
    2129             :         if (page_to_nid(page) == node)
    2130             :                 goto out_putpage;
    2131             : 
    2132             :         err = -EACCES;
    2133             :         if (page_mapcount(page) > 1 && !migrate_all)
    2134             :                 goto out_putpage;
    2135             : 
    2136             :         if (PageHuge(page)) {
    2137             :                 if (PageHead(page)) {
    2138             :                         isolated = isolate_hugetlb(page_folio(page), pagelist);
    2139             :                         err = isolated ? 1 : -EBUSY;
    2140             :                 }
    2141             :         } else {
    2142             :                 struct page *head;
    2143             : 
    2144             :                 head = compound_head(page);
    2145             :                 isolated = isolate_lru_page(head);
    2146             :                 if (!isolated) {
    2147             :                         err = -EBUSY;
    2148             :                         goto out_putpage;
    2149             :                 }
    2150             : 
    2151             :                 err = 1;
    2152             :                 list_add_tail(&head->lru, pagelist);
    2153             :                 mod_node_page_state(page_pgdat(head),
    2154             :                         NR_ISOLATED_ANON + page_is_file_lru(head),
    2155             :                         thp_nr_pages(head));
    2156             :         }
    2157             : out_putpage:
    2158             :         /*
     2159             :          * Either drop the extra reference taken by
     2160             :          * isolate_lru_page(), or drop the page ref if it
     2161             :          * was not isolated.
    2162             :          */
    2163             :         put_page(page);
    2164             : out:
    2165             :         mmap_read_unlock(mm);
    2166             :         return err;
    2167             : }
    2168             : 
    2169             : static int move_pages_and_store_status(struct mm_struct *mm, int node,
    2170             :                 struct list_head *pagelist, int __user *status,
    2171             :                 int start, int i, unsigned long nr_pages)
    2172             : {
    2173             :         int err;
    2174             : 
    2175             :         if (list_empty(pagelist))
    2176             :                 return 0;
    2177             : 
    2178             :         err = do_move_pages_to_node(mm, pagelist, node);
    2179             :         if (err) {
    2180             :                 /*
     2181             :                  * A positive err means the number of pages
     2182             :                  * that failed to migrate.  Since we are going
     2183             :                  * to abort and return the number of
     2184             :                  * non-migrated pages, we need to include the
     2185             :                  * rest of the nr_pages that have not been
     2186             :                  * attempted as well.
    2187             :                  */
    2188             :                 if (err > 0)
    2189             :                         err += nr_pages - i;
    2190             :                 return err;
    2191             :         }
    2192             :         return store_status(status, start, node, i - start);
    2193             : }
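
/*
 * Worked example of the accounting above, with illustrative numbers:
 * if nr_pages == 10, the caller aborts at i == 4 and 2 pages of the
 * current batch fail, the returned value is 2 + (10 - 4) == 8, i.e.
 * the failures plus every page that was never attempted.
 */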
    2194             : 
    2195             : /*
     2196             :  * Migrate an array of page addresses to an array of nodes and fill
     2197             :  * in the corresponding status array.
    2198             :  */
    2199             : static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
    2200             :                          unsigned long nr_pages,
    2201             :                          const void __user * __user *pages,
    2202             :                          const int __user *nodes,
    2203             :                          int __user *status, int flags)
    2204             : {
    2205             :         int current_node = NUMA_NO_NODE;
    2206             :         LIST_HEAD(pagelist);
    2207             :         int start, i;
    2208             :         int err = 0, err1;
    2209             : 
    2210             :         lru_cache_disable();
    2211             : 
    2212             :         for (i = start = 0; i < nr_pages; i++) {
    2213             :                 const void __user *p;
    2214             :                 unsigned long addr;
    2215             :                 int node;
    2216             : 
    2217             :                 err = -EFAULT;
    2218             :                 if (get_user(p, pages + i))
    2219             :                         goto out_flush;
    2220             :                 if (get_user(node, nodes + i))
    2221             :                         goto out_flush;
    2222             :                 addr = (unsigned long)untagged_addr(p);
    2223             : 
    2224             :                 err = -ENODEV;
    2225             :                 if (node < 0 || node >= MAX_NUMNODES)
    2226             :                         goto out_flush;
    2227             :                 if (!node_state(node, N_MEMORY))
    2228             :                         goto out_flush;
    2229             : 
    2230             :                 err = -EACCES;
    2231             :                 if (!node_isset(node, task_nodes))
    2232             :                         goto out_flush;
    2233             : 
    2234             :                 if (current_node == NUMA_NO_NODE) {
    2235             :                         current_node = node;
    2236             :                         start = i;
    2237             :                 } else if (node != current_node) {
    2238             :                         err = move_pages_and_store_status(mm, current_node,
    2239             :                                         &pagelist, status, start, i, nr_pages);
    2240             :                         if (err)
    2241             :                                 goto out;
    2242             :                         start = i;
    2243             :                         current_node = node;
    2244             :                 }
    2245             : 
    2246             :                 /*
    2247             :                  * Errors in the page lookup or isolation are not fatal and we simply
     2248             :                  * report them via status.
    2249             :                  */
    2250             :                 err = add_page_for_migration(mm, addr, current_node,
    2251             :                                 &pagelist, flags & MPOL_MF_MOVE_ALL);
    2252             : 
    2253             :                 if (err > 0) {
    2254             :                         /* The page is successfully queued for migration */
    2255             :                         continue;
    2256             :                 }
    2257             : 
    2258             :                 /*
     2259             :                  * The move_pages() man page does not document an -EEXIST
     2260             :                  * return value, so use -EFAULT instead.
    2261             :                  */
    2262             :                 if (err == -EEXIST)
    2263             :                         err = -EFAULT;
    2264             : 
    2265             :                 /*
    2266             :                  * If the page is already on the target node (!err), store the
     2267             :                  * node; otherwise, store the error.
    2268             :                  */
    2269             :                 err = store_status(status, i, err ? : current_node, 1);
    2270             :                 if (err)
    2271             :                         goto out_flush;
    2272             : 
    2273             :                 err = move_pages_and_store_status(mm, current_node, &pagelist,
    2274             :                                 status, start, i, nr_pages);
    2275             :                 if (err) {
    2276             :                         /* We have accounted for page i */
    2277             :                         if (err > 0)
    2278             :                                 err--;
    2279             :                         goto out;
    2280             :                 }
    2281             :                 current_node = NUMA_NO_NODE;
    2282             :         }
    2283             : out_flush:
    2284             :         /* Make sure we do not overwrite the existing error */
    2285             :         err1 = move_pages_and_store_status(mm, current_node, &pagelist,
    2286             :                                 status, start, i, nr_pages);
    2287             :         if (err >= 0)
    2288             :                 err = err1;
    2289             : out:
    2290             :         lru_cache_enable();
    2291             :         return err;
    2292             : }
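
/*
 * Example of the per-node batching above: for a nodes[] array of
 * {1, 1, 2}, pages 0 and 1 are queued on the pagelist; when page 2's
 * differing target is seen, the pending batch is migrated to node 1
 * via move_pages_and_store_status(), and page 2 starts a new batch
 * for node 2 that is flushed at the out_flush label.
 */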
    2293             : 
    2294             : /*
     2295             :  * Determine the nodes of an array of pages and store them in a status array.
    2296             :  */
    2297             : static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
    2298             :                                 const void __user **pages, int *status)
    2299             : {
    2300             :         unsigned long i;
    2301             : 
    2302             :         mmap_read_lock(mm);
    2303             : 
    2304             :         for (i = 0; i < nr_pages; i++) {
    2305             :                 unsigned long addr = (unsigned long)(*pages);
    2306             :                 struct vm_area_struct *vma;
    2307             :                 struct page *page;
    2308             :                 int err = -EFAULT;
    2309             : 
    2310             :                 vma = vma_lookup(mm, addr);
    2311             :                 if (!vma)
    2312             :                         goto set_status;
    2313             : 
    2314             :                 /* FOLL_DUMP to ignore special (like zero) pages */
    2315             :                 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
    2316             : 
    2317             :                 err = PTR_ERR(page);
    2318             :                 if (IS_ERR(page))
    2319             :                         goto set_status;
    2320             : 
    2321             :                 err = -ENOENT;
    2322             :                 if (!page)
    2323             :                         goto set_status;
    2324             : 
    2325             :                 if (!is_zone_device_page(page))
    2326             :                         err = page_to_nid(page);
    2327             : 
    2328             :                 put_page(page);
    2329             : set_status:
    2330             :                 *status = err;
    2331             : 
    2332             :                 pages++;
    2333             :                 status++;
    2334             :         }
    2335             : 
    2336             :         mmap_read_unlock(mm);
    2337             : }
    2338             : 
    2339             : static int get_compat_pages_array(const void __user *chunk_pages[],
    2340             :                                   const void __user * __user *pages,
    2341             :                                   unsigned long chunk_nr)
    2342             : {
    2343             :         compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
    2344             :         compat_uptr_t p;
    2345             :         int i;
    2346             : 
    2347             :         for (i = 0; i < chunk_nr; i++) {
    2348             :                 if (get_user(p, pages32 + i))
    2349             :                         return -EFAULT;
    2350             :                 chunk_pages[i] = compat_ptr(p);
    2351             :         }
    2352             : 
    2353             :         return 0;
    2354             : }
    2355             : 
    2356             : /*
     2357             :  * Determine the nodes of a user array of pages and store them in
     2358             :  * a user status array.
    2359             :  */
    2360             : static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
    2361             :                          const void __user * __user *pages,
    2362             :                          int __user *status)
    2363             : {
    2364             : #define DO_PAGES_STAT_CHUNK_NR 16UL
    2365             :         const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
    2366             :         int chunk_status[DO_PAGES_STAT_CHUNK_NR];
    2367             : 
    2368             :         while (nr_pages) {
    2369             :                 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
    2370             : 
    2371             :                 if (in_compat_syscall()) {
    2372             :                         if (get_compat_pages_array(chunk_pages, pages,
    2373             :                                                    chunk_nr))
    2374             :                                 break;
    2375             :                 } else {
    2376             :                         if (copy_from_user(chunk_pages, pages,
    2377             :                                       chunk_nr * sizeof(*chunk_pages)))
    2378             :                                 break;
    2379             :                 }
    2380             : 
    2381             :                 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
    2382             : 
    2383             :                 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
    2384             :                         break;
    2385             : 
    2386             :                 pages += chunk_nr;
    2387             :                 status += chunk_nr;
    2388             :                 nr_pages -= chunk_nr;
    2389             :         }
    2390             :         return nr_pages ? -EFAULT : 0;
    2391             : }
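
/*
 * Example of the chunking above: a 40-page status query is processed
 * as chunks of 16, 16 and 8 pages (DO_PAGES_STAT_CHUNK_NR == 16), each
 * chunk copied in from userspace, resolved by do_pages_stat_array()
 * and copied back before the next chunk starts.
 */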
    2392             : 
    2393             : static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
    2394             : {
    2395             :         struct task_struct *task;
    2396             :         struct mm_struct *mm;
    2397             : 
    2398             :         /*
     2399             :          * There is no need to check whether the current process has the
     2400             :          * right to modify the specified process when they are the same.
    2401             :          */
    2402             :         if (!pid) {
    2403             :                 mmget(current->mm);
    2404             :                 *mem_nodes = cpuset_mems_allowed(current);
    2405             :                 return current->mm;
    2406             :         }
    2407             : 
    2408             :         /* Find the mm_struct */
    2409             :         rcu_read_lock();
    2410             :         task = find_task_by_vpid(pid);
    2411             :         if (!task) {
    2412             :                 rcu_read_unlock();
    2413             :                 return ERR_PTR(-ESRCH);
    2414             :         }
    2415             :         get_task_struct(task);
    2416             : 
    2417             :         /*
    2418             :          * Check if this process has the right to modify the specified
    2419             :          * process. Use the regular "ptrace_may_access()" checks.
    2420             :          */
    2421             :         if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
    2422             :                 rcu_read_unlock();
    2423             :                 mm = ERR_PTR(-EPERM);
    2424             :                 goto out;
    2425             :         }
    2426             :         rcu_read_unlock();
    2427             : 
    2428             :         mm = ERR_PTR(security_task_movememory(task));
    2429             :         if (IS_ERR(mm))
    2430             :                 goto out;
    2431             :         *mem_nodes = cpuset_mems_allowed(task);
    2432             :         mm = get_task_mm(task);
    2433             : out:
    2434             :         put_task_struct(task);
    2435             :         if (!mm)
    2436             :                 mm = ERR_PTR(-EINVAL);
    2437             :         return mm;
    2438             : }
    2439             : 
    2440             : /*
    2441             :  * Move a list of pages in the address space of the currently executing
    2442             :  * process.
    2443             :  */
    2444             : static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
    2445             :                              const void __user * __user *pages,
    2446             :                              const int __user *nodes,
    2447             :                              int __user *status, int flags)
    2448             : {
    2449             :         struct mm_struct *mm;
    2450             :         int err;
    2451             :         nodemask_t task_nodes;
    2452             : 
    2453             :         /* Check flags */
    2454             :         if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
    2455             :                 return -EINVAL;
    2456             : 
    2457             :         if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
    2458             :                 return -EPERM;
    2459             : 
    2460             :         mm = find_mm_struct(pid, &task_nodes);
    2461             :         if (IS_ERR(mm))
    2462             :                 return PTR_ERR(mm);
    2463             : 
    2464             :         if (nodes)
    2465             :                 err = do_pages_move(mm, task_nodes, nr_pages, pages,
    2466             :                                     nodes, status, flags);
    2467             :         else
    2468             :                 err = do_pages_stat(mm, nr_pages, pages, status);
    2469             : 
    2470             :         mmput(mm);
    2471             :         return err;
    2472             : }
    2473             : 
    2474             : SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
    2475             :                 const void __user * __user *, pages,
    2476             :                 const int __user *, nodes,
    2477             :                 int __user *, status, int, flags)
    2478             : {
    2479             :         return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
    2480             : }
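
/*
 * A hedged userspace sketch of the move_pages(2) syscall wired up
 * above, using the wrapper declared in <numaif.h> (link with -lnuma).
 * query_page_node() is a hypothetical helper; passing nodes == NULL
 * takes the do_pages_stat() path and only reports the node each page
 * currently resides on.
 *
 *      #include <numaif.h>
 *
 *      static int query_page_node(void *addr)
 *      {
 *              void *pages[1] = { addr };
 *              int status[1];
 *
 *              // pid 0 means the calling process
 *              if (move_pages(0, 1, pages, NULL, status, 0) != 0)
 *                      return -1;      // syscall itself failed
 *              return status[0];       // node id, or a negative errno
 *      }
 */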
    2481             : 
    2482             : #ifdef CONFIG_NUMA_BALANCING
    2483             : /*
    2484             :  * Returns true if this is a safe migration target node for misplaced NUMA
     2485             :  * pages. Currently it only checks the watermarks, which is crude.
    2486             :  */
    2487             : static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
    2488             :                                    unsigned long nr_migrate_pages)
    2489             : {
    2490             :         int z;
    2491             : 
    2492             :         for (z = pgdat->nr_zones - 1; z >= 0; z--) {
    2493             :                 struct zone *zone = pgdat->node_zones + z;
    2494             : 
    2495             :                 if (!managed_zone(zone))
    2496             :                         continue;
    2497             : 
     2498             :                 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
    2499             :                 if (!zone_watermark_ok(zone, 0,
    2500             :                                        high_wmark_pages(zone) +
    2501             :                                        nr_migrate_pages,
    2502             :                                        ZONE_MOVABLE, 0))
    2503             :                         continue;
    2504             :                 return true;
    2505             :         }
    2506             :         return false;
    2507             : }
    2508             : 
    2509             : static struct page *alloc_misplaced_dst_page(struct page *page,
    2510             :                                            unsigned long data)
    2511             : {
    2512             :         int nid = (int) data;
    2513             :         int order = compound_order(page);
    2514             :         gfp_t gfp = __GFP_THISNODE;
    2515             :         struct folio *new;
    2516             : 
    2517             :         if (order > 0)
    2518             :                 gfp |= GFP_TRANSHUGE_LIGHT;
    2519             :         else {
    2520             :                 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
    2521             :                         __GFP_NOWARN;
    2522             :                 gfp &= ~__GFP_RECLAIM;
    2523             :         }
    2524             :         new = __folio_alloc_node(gfp, order, nid);
    2525             : 
    2526             :         return &new->page;
    2527             : }
    2528             : 
    2529             : static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
    2530             : {
    2531             :         int nr_pages = thp_nr_pages(page);
    2532             :         int order = compound_order(page);
    2533             : 
    2534             :         VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
    2535             : 
    2536             :         /* Do not migrate THP mapped by multiple processes */
    2537             :         if (PageTransHuge(page) && total_mapcount(page) > 1)
    2538             :                 return 0;
    2539             : 
    2540             :         /* Avoid migrating to a node that is nearly full */
    2541             :         if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
    2542             :                 int z;
    2543             : 
    2544             :                 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
    2545             :                         return 0;
    2546             :                 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
    2547             :                         if (managed_zone(pgdat->node_zones + z))
    2548             :                                 break;
    2549             :                 }
    2550             :                 wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
    2551             :                 return 0;
    2552             :         }
    2553             : 
    2554             :         if (!isolate_lru_page(page))
    2555             :                 return 0;
    2556             : 
    2557             :         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
    2558             :                             nr_pages);
    2559             : 
    2560             :         /*
    2561             :          * Isolating the page has taken another reference, so the
    2562             :          * caller's reference can be safely dropped without the page
    2563             :          * disappearing underneath us during migration.
    2564             :          */
    2565             :         put_page(page);
    2566             :         return 1;
    2567             : }
    2568             : 
    2569             : /*
    2570             :  * Attempt to migrate a misplaced page to the specified destination
     2571             :  * node. The caller is expected to hold an elevated reference count on
     2572             :  * the page, which this function drops before returning.
    2573             :  */
    2574             : int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
    2575             :                            int node)
    2576             : {
    2577             :         pg_data_t *pgdat = NODE_DATA(node);
    2578             :         int isolated;
    2579             :         int nr_remaining;
    2580             :         unsigned int nr_succeeded;
    2581             :         LIST_HEAD(migratepages);
    2582             :         int nr_pages = thp_nr_pages(page);
    2583             : 
    2584             :         /*
    2585             :          * Don't migrate file pages that are mapped in multiple processes
     2586             :          * with execute permissions, as they are probably shared libraries.
    2587             :          */
    2588             :         if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
    2589             :             (vma->vm_flags & VM_EXEC))
    2590             :                 goto out;
    2591             : 
    2592             :         /*
     2593             :          * Also do not migrate dirty pages, as not all filesystems can move
     2594             :          * dirty pages in MIGRATE_ASYNC mode, which would be a waste of cycles.
    2595             :          */
    2596             :         if (page_is_file_lru(page) && PageDirty(page))
    2597             :                 goto out;
    2598             : 
    2599             :         isolated = numamigrate_isolate_page(pgdat, page);
    2600             :         if (!isolated)
    2601             :                 goto out;
    2602             : 
    2603             :         list_add(&page->lru, &migratepages);
    2604             :         nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
    2605             :                                      NULL, node, MIGRATE_ASYNC,
    2606             :                                      MR_NUMA_MISPLACED, &nr_succeeded);
    2607             :         if (nr_remaining) {
    2608             :                 if (!list_empty(&migratepages)) {
    2609             :                         list_del(&page->lru);
    2610             :                         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
    2611             :                                         page_is_file_lru(page), -nr_pages);
    2612             :                         putback_lru_page(page);
    2613             :                 }
    2614             :                 isolated = 0;
    2615             :         }
    2616             :         if (nr_succeeded) {
    2617             :                 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
    2618             :                 if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
    2619             :                         mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
    2620             :                                             nr_succeeded);
    2621             :         }
    2622             :         BUG_ON(!list_empty(&migratepages));
    2623             :         return isolated;
    2624             : 
    2625             : out:
    2626             :         put_page(page);
    2627             :         return 0;
    2628             : }
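
/*
 * For context: the usual callers of migrate_misplaced_page() are the
 * NUMA hinting fault handlers (e.g. do_numa_page()), which choose a
 * target node via mpol_misplaced() and then pass in the page with the
 * extra reference this function expects.
 */
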
    2629             : #endif /* CONFIG_NUMA_BALANCING */
    2630             : #endif /* CONFIG_NUMA */

Generated by: LCOV version 1.14