LCOV - code coverage report
Current view: top level - mm - migrate.c (source / functions)
Test: coverage.info
Date: 2023-08-24 13:40:31
Coverage:       Hit     Total   Coverage
  Lines:        0       551     0.0 %
  Functions:    0       29      0.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Memory Migration functionality - linux/mm/migrate.c
       4             :  *
       5             :  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
       6             :  *
       7             :  * Page migration was first developed in the context of the memory hotplug
       8             :  * project. The main authors of the migration code are:
       9             :  *
      10             :  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
      11             :  * Hirokazu Takahashi <taka@valinux.co.jp>
      12             :  * Dave Hansen <haveblue@us.ibm.com>
      13             :  * Christoph Lameter
      14             :  */
      15             : 
      16             : #include <linux/migrate.h>
      17             : #include <linux/export.h>
      18             : #include <linux/swap.h>
      19             : #include <linux/swapops.h>
      20             : #include <linux/pagemap.h>
      21             : #include <linux/buffer_head.h>
      22             : #include <linux/mm_inline.h>
      23             : #include <linux/nsproxy.h>
      24             : #include <linux/ksm.h>
      25             : #include <linux/rmap.h>
      26             : #include <linux/topology.h>
      27             : #include <linux/cpu.h>
      28             : #include <linux/cpuset.h>
      29             : #include <linux/writeback.h>
      30             : #include <linux/mempolicy.h>
      31             : #include <linux/vmalloc.h>
      32             : #include <linux/security.h>
      33             : #include <linux/backing-dev.h>
      34             : #include <linux/compaction.h>
      35             : #include <linux/syscalls.h>
      36             : #include <linux/compat.h>
      37             : #include <linux/hugetlb.h>
      38             : #include <linux/hugetlb_cgroup.h>
      39             : #include <linux/gfp.h>
      40             : #include <linux/pfn_t.h>
      41             : #include <linux/memremap.h>
      42             : #include <linux/userfaultfd_k.h>
      43             : #include <linux/balloon_compaction.h>
      44             : #include <linux/page_idle.h>
      45             : #include <linux/page_owner.h>
      46             : #include <linux/sched/mm.h>
      47             : #include <linux/ptrace.h>
      48             : #include <linux/oom.h>
      49             : #include <linux/memory.h>
      50             : #include <linux/random.h>
      51             : #include <linux/sched/sysctl.h>
      52             : #include <linux/memory-tiers.h>
      53             : 
      54             : #include <asm/tlbflush.h>
      55             : 
      56             : #include <trace/events/migrate.h>
      57             : 
      58             : #include "internal.h"
      59             : 
      60           0 : bool isolate_movable_page(struct page *page, isolate_mode_t mode)
      61             : {
      62           0 :         struct folio *folio = folio_get_nontail_page(page);
      63             :         const struct movable_operations *mops;
      64             : 
      65             :         /*
       66             :          * Avoid burning cycles on pages that are still under __free_pages(),
       67             :          * or that just got freed under us.
       68             :          *
       69             :          * In case we 'win' a race for a movable page being freed under us and
       70             :          * raise its refcount, preventing __free_pages() from doing its job,
       71             :          * the put_page() at the end of this block will take care of
       72             :          * releasing this page, thus avoiding a nasty leak.
      73             :          */
      74           0 :         if (!folio)
      75             :                 goto out;
      76             : 
      77           0 :         if (unlikely(folio_test_slab(folio)))
      78             :                 goto out_putfolio;
      79             :         /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
      80           0 :         smp_rmb();
      81             :         /*
       82             :          * Check the movable flag before taking the page lock because
       83             :          * we use non-atomic bitops on newly allocated page flags, so
       84             :          * unconditionally grabbing the lock would ruin the page owner's side.
      85             :          */
      86           0 :         if (unlikely(!__folio_test_movable(folio)))
      87             :                 goto out_putfolio;
      88             :         /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
      89           0 :         smp_rmb();
      90           0 :         if (unlikely(folio_test_slab(folio)))
      91             :                 goto out_putfolio;
      92             : 
      93             :         /*
      94             :          * As movable pages are not isolated from LRU lists, concurrent
      95             :          * compaction threads can race against page migration functions
       96             :          * as well as against the release of a page.
      97             :          *
      98             :          * In order to avoid having an already isolated movable page
      99             :          * being (wrongly) re-isolated while it is under migration,
     100             :          * or to avoid attempting to isolate pages being released,
      101             :          * let's be sure we have the page lock
     102             :          * before proceeding with the movable page isolation steps.
     103             :          */
     104           0 :         if (unlikely(!folio_trylock(folio)))
     105             :                 goto out_putfolio;
     106             : 
     107           0 :         if (!folio_test_movable(folio) || folio_test_isolated(folio))
     108             :                 goto out_no_isolated;
     109             : 
     110           0 :         mops = folio_movable_ops(folio);
     111             :         VM_BUG_ON_FOLIO(!mops, folio);
     112             : 
     113           0 :         if (!mops->isolate_page(&folio->page, mode))
     114             :                 goto out_no_isolated;
     115             : 
     116             :         /* Driver shouldn't use PG_isolated bit of page->flags */
     117           0 :         WARN_ON_ONCE(folio_test_isolated(folio));
     118           0 :         folio_set_isolated(folio);
     119           0 :         folio_unlock(folio);
     120             : 
     121           0 :         return true;
     122             : 
     123             : out_no_isolated:
     124           0 :         folio_unlock(folio);
     125             : out_putfolio:
     126             :         folio_put(folio);
     127             : out:
     128             :         return false;
     129             : }
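
/*
 * A minimal sketch of the driver side that isolate_movable_page()
 * expects.  The demo_* names are hypothetical, but struct
 * movable_operations and __SetPageMovable() are the real hooks from
 * <linux/migrate.h> (cf. mm/balloon_compaction.c for an in-tree user).
 */
#include <linux/migrate.h>

static bool demo_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* Pin driver-private state so the page cannot vanish mid-migration. */
	return true;
}

static int demo_migrate_page(struct page *dst, struct page *src,
			     enum migrate_mode mode)
{
	/* Copy contents and repoint the driver's references from src to dst. */
	return MIGRATEPAGE_SUCCESS;
}

static void demo_putback_page(struct page *page)
{
	/* Undo demo_isolate_page() when a migration attempt is aborted. */
}

static const struct movable_operations demo_mops = {
	.isolate_page	= demo_isolate_page,
	.migrate_page	= demo_migrate_page,
	.putback_page	= demo_putback_page,
};

/* Typically called right after the driver allocates the page: */
static void demo_mark_movable(struct page *page)
{
	__SetPageMovable(page, &demo_mops);
}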
     130             : 
     131             : static void putback_movable_folio(struct folio *folio)
     132             : {
     133           0 :         const struct movable_operations *mops = folio_movable_ops(folio);
     134             : 
     135           0 :         mops->putback_page(&folio->page);
     136           0 :         folio_clear_isolated(folio);
     137             : }
     138             : 
     139             : /*
     140             :  * Put previously isolated pages back onto the appropriate lists
     141             :  * from where they were once taken off for compaction/migration.
     142             :  *
     143             :  * This function shall be used whenever the isolated pageset has been
      144             :  * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
     145             :  * and isolate_hugetlb().
     146             :  */
     147           0 : void putback_movable_pages(struct list_head *l)
     148             : {
     149             :         struct folio *folio;
     150             :         struct folio *folio2;
     151             : 
     152           0 :         list_for_each_entry_safe(folio, folio2, l, lru) {
     153           0 :                 if (unlikely(folio_test_hugetlb(folio))) {
     154             :                         folio_putback_active_hugetlb(folio);
     155             :                         continue;
     156             :                 }
     157           0 :                 list_del(&folio->lru);
     158             :                 /*
      159             :                  * We isolated a non-LRU movable folio, so here we can use
      160             :                  * __folio_test_movable() because an LRU folio's mapping cannot have
     161             :                  * PAGE_MAPPING_MOVABLE.
     162             :                  */
     163           0 :                 if (unlikely(__folio_test_movable(folio))) {
     164             :                         VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
     165           0 :                         folio_lock(folio);
     166           0 :                         if (folio_test_movable(folio))
     167             :                                 putback_movable_folio(folio);
     168             :                         else
     169             :                                 folio_clear_isolated(folio);
     170           0 :                         folio_unlock(folio);
     171             :                         folio_put(folio);
     172             :                 } else {
     173           0 :                         node_stat_mod_folio(folio, NR_ISOLATED_ANON +
     174           0 :                                         folio_is_file_lru(folio), -folio_nr_pages(folio));
     175           0 :                         folio_putback_lru(folio);
     176             :                 }
     177             :         }
     178           0 : }
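
/*
 * A hedged sketch of the usual caller contract (cf.
 * __alloc_contig_migrate_range() in mm/page_alloc.c): whatever
 * migrate_pages() fails to move stays on the list and must be handed
 * back via putback_movable_pages().  demo_migrate_list() is a
 * hypothetical wrapper; the helpers it calls are real for this kernel
 * era, though the migrate_pages() signature has varied across releases.
 */
#include <linux/gfp.h>
#include <linux/migrate.h>
#include "internal.h"	/* struct migration_target_control */

static void demo_migrate_list(struct list_head *pagelist, int nid)
{
	struct migration_target_control mtc = {
		.nid		= nid,
		.gfp_mask	= GFP_USER,
	};

	if (migrate_pages(pagelist, alloc_migration_target, NULL,
			  (unsigned long)&mtc, MIGRATE_SYNC,
			  MR_CONTIG_RANGE, NULL))
		putback_movable_pages(pagelist);
}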
     179             : 
     180             : /*
     181             :  * Restore a potential migration pte to a working pte entry
     182             :  */
     183           0 : static bool remove_migration_pte(struct folio *folio,
     184             :                 struct vm_area_struct *vma, unsigned long addr, void *old)
     185             : {
     186           0 :         DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
     187             : 
     188           0 :         while (page_vma_mapped_walk(&pvmw)) {
     189           0 :                 rmap_t rmap_flags = RMAP_NONE;
     190             :                 pte_t old_pte;
     191             :                 pte_t pte;
     192             :                 swp_entry_t entry;
     193             :                 struct page *new;
     194           0 :                 unsigned long idx = 0;
     195             : 
     196             :                 /* pgoff is invalid for ksm pages, but they are never large */
     197           0 :                 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
     198           0 :                         idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
     199           0 :                 new = folio_page(folio, idx);
     200             : 
     201             : #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
     202             :                 /* PMD-mapped THP migration entry */
     203             :                 if (!pvmw.pte) {
     204             :                         VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
     205             :                                         !folio_test_pmd_mappable(folio), folio);
     206             :                         remove_migration_pmd(&pvmw, new);
     207             :                         continue;
     208             :                 }
     209             : #endif
     210             : 
     211           0 :                 folio_get(folio);
     212           0 :                 pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
     213           0 :                 old_pte = ptep_get(pvmw.pte);
     214           0 :                 if (pte_swp_soft_dirty(old_pte))
     215             :                         pte = pte_mksoft_dirty(pte);
     216             : 
     217           0 :                 entry = pte_to_swp_entry(old_pte);
     218           0 :                 if (!is_migration_entry_young(entry))
     219             :                         pte = pte_mkold(pte);
     220           0 :                 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
     221             :                         pte = pte_mkdirty(pte);
     222           0 :                 if (is_writable_migration_entry(entry))
     223             :                         pte = pte_mkwrite(pte);
     224             :                 else if (pte_swp_uffd_wp(old_pte))
     225             :                         pte = pte_mkuffd_wp(pte);
     226             : 
     227           0 :                 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
     228           0 :                         rmap_flags |= RMAP_EXCLUSIVE;
     229             : 
     230           0 :                 if (unlikely(is_device_private_page(new))) {
     231             :                         if (pte_write(pte))
     232             :                                 entry = make_writable_device_private_entry(
     233             :                                                         page_to_pfn(new));
     234             :                         else
     235             :                                 entry = make_readable_device_private_entry(
     236             :                                                         page_to_pfn(new));
     237             :                         pte = swp_entry_to_pte(entry);
     238             :                         if (pte_swp_soft_dirty(old_pte))
     239             :                                 pte = pte_swp_mksoft_dirty(pte);
     240             :                         if (pte_swp_uffd_wp(old_pte))
     241             :                                 pte = pte_swp_mkuffd_wp(pte);
     242             :                 }
     243             : 
     244             : #ifdef CONFIG_HUGETLB_PAGE
     245             :                 if (folio_test_hugetlb(folio)) {
     246             :                         unsigned int shift = huge_page_shift(hstate_vma(vma));
     247             : 
     248             :                         pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
     249             :                         if (folio_test_anon(folio))
     250             :                                 hugepage_add_anon_rmap(new, vma, pvmw.address,
     251             :                                                        rmap_flags);
     252             :                         else
     253             :                                 page_dup_file_rmap(new, true);
     254             :                         set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
     255             :                 } else
     256             : #endif
     257             :                 {
     258           0 :                         if (folio_test_anon(folio))
     259           0 :                                 page_add_anon_rmap(new, vma, pvmw.address,
     260             :                                                    rmap_flags);
     261             :                         else
     262           0 :                                 page_add_file_rmap(new, vma, false);
     263           0 :                         set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
     264             :                 }
     265           0 :                 if (vma->vm_flags & VM_LOCKED)
     266           0 :                         mlock_drain_local();
     267             : 
     268             :                 trace_remove_migration_pte(pvmw.address, pte_val(pte),
     269             :                                            compound_order(new));
     270             : 
     271             :                 /* No need to invalidate - it was non-present before */
     272             :                 update_mmu_cache(vma, pvmw.address, pvmw.pte);
     273             :         }
     274             : 
     275           0 :         return true;
     276             : }
     277             : 
     278             : /*
     279             :  * Get rid of all migration entries and replace them by
     280             :  * references to the indicated page.
     281             :  */
     282           0 : void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
     283             : {
     284           0 :         struct rmap_walk_control rwc = {
     285             :                 .rmap_one = remove_migration_pte,
     286             :                 .arg = src,
     287             :         };
     288             : 
     289           0 :         if (locked)
     290           0 :                 rmap_walk_locked(dst, &rwc);
     291             :         else
     292           0 :                 rmap_walk(dst, &rwc);
     293           0 : }
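
/*
 * A hedged sketch of the pairing this function completes: during unmap,
 * try_to_migrate() replaces every PTE mapping src with a migration
 * entry; once the data has moved, remove_migration_ptes() rewrites
 * those entries as real PTEs pointing at dst.  demo_move() is a
 * hypothetical condensation of the flow inside migrate_pages().
 */
#include <linux/rmap.h>
#include <linux/migrate.h>

static void demo_move(struct folio *src, struct folio *dst)
{
	try_to_migrate(src, 0);			/* install migration entries */
	/* ...copy data and flags, e.g. via move_to_new_folio()... */
	remove_migration_ptes(src, dst, false);	/* point the PTEs at dst */
}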
     294             : 
     295             : /*
     296             :  * Something used the pte of a page under migration. We need to
     297             :  * get to the page and wait until migration is finished.
     298             :  * When we return from this function the fault will be retried.
     299             :  */
     300           0 : void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
     301             :                           unsigned long address)
     302             : {
     303             :         spinlock_t *ptl;
     304             :         pte_t *ptep;
     305             :         pte_t pte;
     306             :         swp_entry_t entry;
     307             : 
     308           0 :         ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
     309           0 :         if (!ptep)
     310           0 :                 return;
     311             : 
     312           0 :         pte = ptep_get(ptep);
     313             :         pte_unmap(ptep);
     314             : 
     315           0 :         if (!is_swap_pte(pte))
     316             :                 goto out;
     317             : 
     318           0 :         entry = pte_to_swp_entry(pte);
     319           0 :         if (!is_migration_entry(entry))
     320             :                 goto out;
     321             : 
     322           0 :         migration_entry_wait_on_locked(entry, ptl);
     323           0 :         return;
     324             : out:
     325           0 :         spin_unlock(ptl);
     326             : }
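
/*
 * A hedged sketch of the caller side (cf. do_swap_page() in
 * mm/memory.c): a fault that finds a migration entry simply waits for
 * the migration to finish and lets the fault be retried.
 * demo_handle_migration_fault() is a hypothetical name.
 */
#include <linux/mm.h>
#include <linux/swapops.h>

static vm_fault_t demo_handle_migration_fault(struct vm_fault *vmf)
{
	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);

	if (is_migration_entry(entry)) {
		migration_entry_wait(vmf->vma->vm_mm, vmf->pmd, vmf->address);
		return 0;	/* retried once migration completes */
	}
	return VM_FAULT_SIGBUS;
}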
     327             : 
     328             : #ifdef CONFIG_HUGETLB_PAGE
     329             : /*
     330             :  * The vma read lock must be held upon entry. Holding that lock prevents either
     331             :  * the pte or the ptl from being freed.
     332             :  *
     333             :  * This function will release the vma lock before returning.
     334             :  */
     335             : void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
     336             : {
     337             :         spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
     338             :         pte_t pte;
     339             : 
     340             :         hugetlb_vma_assert_locked(vma);
     341             :         spin_lock(ptl);
     342             :         pte = huge_ptep_get(ptep);
     343             : 
     344             :         if (unlikely(!is_hugetlb_entry_migration(pte))) {
     345             :                 spin_unlock(ptl);
     346             :                 hugetlb_vma_unlock_read(vma);
     347             :         } else {
     348             :                 /*
     349             :                  * If migration entry existed, safe to release vma lock
     350             :                  * here because the pgtable page won't be freed without the
     351             :                  * pgtable lock released.  See comment right above pgtable
     352             :                  * lock release in migration_entry_wait_on_locked().
     353             :                  */
     354             :                 hugetlb_vma_unlock_read(vma);
     355             :                 migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
     356             :         }
     357             : }
     358             : #endif
     359             : 
     360             : #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
     361             : void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
     362             : {
     363             :         spinlock_t *ptl;
     364             : 
     365             :         ptl = pmd_lock(mm, pmd);
     366             :         if (!is_pmd_migration_entry(*pmd))
     367             :                 goto unlock;
     368             :         migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
     369             :         return;
     370             : unlock:
     371             :         spin_unlock(ptl);
     372             : }
     373             : #endif
     374             : 
     375             : static int folio_expected_refs(struct address_space *mapping,
     376             :                 struct folio *folio)
     377             : {
     378           0 :         int refs = 1;
     379           0 :         if (!mapping)
     380             :                 return refs;
     381             : 
     382           0 :         refs += folio_nr_pages(folio);
     383           0 :         if (folio_test_private(folio))
     384           0 :                 refs++;
     385             : 
     386             :         return refs;
     387             : }
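
/*
 * A hedged worked example of the bookkeeping above: for a 4-page file
 * folio that also carries buffer heads (PG_private set), the only
 * references that may exist while it is being migrated are
 * 1 (base/isolation) + 4 (page cache, one per page) + 1 (private data),
 * so the folio_ref_freeze() in folio_migrate_mapping() below must see a
 * refcount of exactly 6; any transient extra reference (a concurrent
 * GUP, say) makes the freeze fail and the migration retry with -EAGAIN.
 */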
     388             : 
     389             : /*
     390             :  * Replace the page in the mapping.
     391             :  *
     392             :  * The number of remaining references must be:
     393             :  * 1 for anonymous pages without a mapping
     394             :  * 2 for pages with a mapping
     395             :  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
     396             :  */
     397           0 : int folio_migrate_mapping(struct address_space *mapping,
     398             :                 struct folio *newfolio, struct folio *folio, int extra_count)
     399             : {
     400           0 :         XA_STATE(xas, &mapping->i_pages, folio_index(folio));
     401             :         struct zone *oldzone, *newzone;
     402             :         int dirty;
     403           0 :         int expected_count = folio_expected_refs(mapping, folio) + extra_count;
     404           0 :         long nr = folio_nr_pages(folio);
     405             : 
     406           0 :         if (!mapping) {
     407             :                 /* Anonymous page without mapping */
     408           0 :                 if (folio_ref_count(folio) != expected_count)
     409             :                         return -EAGAIN;
     410             : 
     411             :                 /* No turning back from here */
     412           0 :                 newfolio->index = folio->index;
     413           0 :                 newfolio->mapping = folio->mapping;
     414           0 :                 if (folio_test_swapbacked(folio))
     415             :                         __folio_set_swapbacked(newfolio);
     416             : 
     417             :                 return MIGRATEPAGE_SUCCESS;
     418             :         }
     419             : 
     420           0 :         oldzone = folio_zone(folio);
     421           0 :         newzone = folio_zone(newfolio);
     422             : 
     423           0 :         xas_lock_irq(&xas);
     424           0 :         if (!folio_ref_freeze(folio, expected_count)) {
     425           0 :                 xas_unlock_irq(&xas);
     426           0 :                 return -EAGAIN;
     427             :         }
     428             : 
     429             :         /*
     430             :          * Now we know that no one else is looking at the folio:
     431             :          * no turning back from here.
     432             :          */
     433           0 :         newfolio->index = folio->index;
     434           0 :         newfolio->mapping = folio->mapping;
     435           0 :         folio_ref_add(newfolio, nr); /* add cache reference */
     436           0 :         if (folio_test_swapbacked(folio)) {
     437           0 :                 __folio_set_swapbacked(newfolio);
     438           0 :                 if (folio_test_swapcache(folio)) {
     439           0 :                         folio_set_swapcache(newfolio);
     440           0 :                         newfolio->private = folio_get_private(folio);
     441             :                 }
     442             :         } else {
     443             :                 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
     444             :         }
     445             : 
     446             :         /* Move dirty while page refs frozen and newpage not yet exposed */
     447           0 :         dirty = folio_test_dirty(folio);
     448           0 :         if (dirty) {
     449           0 :                 folio_clear_dirty(folio);
     450             :                 folio_set_dirty(newfolio);
     451             :         }
     452             : 
     453           0 :         xas_store(&xas, newfolio);
     454             : 
     455             :         /*
     456             :          * Drop cache reference from old page by unfreezing
     457             :          * to one less reference.
     458             :          * We know this isn't the last reference.
     459             :          */
     460           0 :         folio_ref_unfreeze(folio, expected_count - nr);
     461             : 
     462           0 :         xas_unlock(&xas);
     463             :         /* Leave irq disabled to prevent preemption while updating stats */
     464             : 
     465             :         /*
     466             :          * If moved to a different zone then also account
     467             :          * the page for that zone. Other VM counters will be
     468             :          * taken care of when we establish references to the
     469             :          * new page and drop references to the old page.
     470             :          *
     471             :          * Note that anonymous pages are accounted for
     472             :          * via NR_FILE_PAGES and NR_ANON_MAPPED if they
     473             :          * are mapped to swap space.
     474             :          */
     475           0 :         if (newzone != oldzone) {
     476             :                 struct lruvec *old_lruvec, *new_lruvec;
     477             :                 struct mem_cgroup *memcg;
     478             : 
     479           0 :                 memcg = folio_memcg(folio);
     480           0 :                 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
     481           0 :                 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
     482             : 
     483           0 :                 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
     484           0 :                 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
     485           0 :                 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
     486           0 :                         __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
     487           0 :                         __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
     488             : 
     489           0 :                         if (folio_test_pmd_mappable(folio)) {
     490             :                                 __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
     491             :                                 __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
     492             :                         }
     493             :                 }
     494             : #ifdef CONFIG_SWAP
     495           0 :                 if (folio_test_swapcache(folio)) {
     496           0 :                         __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
     497           0 :                         __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
     498             :                 }
     499             : #endif
     500           0 :                 if (dirty && mapping_can_writeback(mapping)) {
     501           0 :                         __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
     502           0 :                         __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
     503           0 :                         __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
     504             :                         __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
     505             :                 }
     506             :         }
     507             :         local_irq_enable();
     508             : 
     509           0 :         return MIGRATEPAGE_SUCCESS;
     510             : }
     511             : EXPORT_SYMBOL(folio_migrate_mapping);
     512             : 
     513             : /*
     514             :  * The expected number of remaining references is the same as that
     515             :  * of folio_migrate_mapping().
     516             :  */
     517           0 : int migrate_huge_page_move_mapping(struct address_space *mapping,
     518             :                                    struct folio *dst, struct folio *src)
     519             : {
     520           0 :         XA_STATE(xas, &mapping->i_pages, folio_index(src));
     521             :         int expected_count;
     522             : 
     523           0 :         xas_lock_irq(&xas);
     524           0 :         expected_count = 2 + folio_has_private(src);
     525           0 :         if (!folio_ref_freeze(src, expected_count)) {
     526           0 :                 xas_unlock_irq(&xas);
     527           0 :                 return -EAGAIN;
     528             :         }
     529             : 
     530           0 :         dst->index = src->index;
     531           0 :         dst->mapping = src->mapping;
     532             : 
     533           0 :         folio_get(dst);
     534             : 
     535           0 :         xas_store(&xas, dst);
     536             : 
     537           0 :         folio_ref_unfreeze(src, expected_count - 1);
     538             : 
     539           0 :         xas_unlock_irq(&xas);
     540             : 
     541           0 :         return MIGRATEPAGE_SUCCESS;
     542             : }
     543             : 
     544             : /*
     545             :  * Copy the flags and some other ancillary information
     546             :  */
     547           0 : void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
     548             : {
     549             :         int cpupid;
     550             : 
     551           0 :         if (folio_test_error(folio))
     552             :                 folio_set_error(newfolio);
     553           0 :         if (folio_test_referenced(folio))
     554             :                 folio_set_referenced(newfolio);
     555           0 :         if (folio_test_uptodate(folio))
     556             :                 folio_mark_uptodate(newfolio);
     557           0 :         if (folio_test_clear_active(folio)) {
     558             :                 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
     559             :                 folio_set_active(newfolio);
     560           0 :         } else if (folio_test_clear_unevictable(folio))
     561             :                 folio_set_unevictable(newfolio);
     562           0 :         if (folio_test_workingset(folio))
     563             :                 folio_set_workingset(newfolio);
     564           0 :         if (folio_test_checked(folio))
     565             :                 folio_set_checked(newfolio);
     566             :         /*
     567             :          * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
      568             :          * migration entries. We can still have PG_anon_exclusive set on the
      569             :          * effectively unmapped and unreferenced first sub-page of an
     570             :          * anonymous THP: we can simply copy it here via PG_mappedtodisk.
     571             :          */
     572           0 :         if (folio_test_mappedtodisk(folio))
     573             :                 folio_set_mappedtodisk(newfolio);
     574             : 
      575             :         /* Move the dirty flag for pages not handled by folio_migrate_mapping() */
     576           0 :         if (folio_test_dirty(folio))
     577             :                 folio_set_dirty(newfolio);
     578             : 
     579           0 :         if (folio_test_young(folio))
     580             :                 folio_set_young(newfolio);
     581           0 :         if (folio_test_idle(folio))
     582             :                 folio_set_idle(newfolio);
     583             : 
     584             :         /*
     585             :          * Copy NUMA information to the new page, to prevent over-eager
     586             :          * future migrations of this same page.
     587             :          */
     588           0 :         cpupid = page_cpupid_xchg_last(&folio->page, -1);
     589             :         /*
      590             :          * In memory tiering mode, when migrating between a slow and a fast
      591             :          * memory node, reset the cpupid, because it is used to record the
      592             :          * page access time on the slow memory node.
     593             :          */
     594             :         if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
     595             :                 bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
     596             :                 bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
     597             : 
     598             :                 if (f_toptier != t_toptier)
     599             :                         cpupid = -1;
     600             :         }
     601           0 :         page_cpupid_xchg_last(&newfolio->page, cpupid);
     602             : 
     603           0 :         folio_migrate_ksm(newfolio, folio);
     604             :         /*
     605             :          * Please do not reorder this without considering how mm/ksm.c's
     606             :          * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
     607             :          */
     608           0 :         if (folio_test_swapcache(folio))
     609             :                 folio_clear_swapcache(folio);
     610           0 :         folio_clear_private(folio);
     611             : 
     612             :         /* page->private contains hugetlb specific flags */
     613           0 :         if (!folio_test_hugetlb(folio))
     614           0 :                 folio->private = NULL;
     615             : 
     616             :         /*
     617             :          * If any waiters have accumulated on the new page then
     618             :          * wake them up.
     619             :          */
     620           0 :         if (folio_test_writeback(newfolio))
     621           0 :                 folio_end_writeback(newfolio);
     622             : 
     623             :         /*
      624             :          * PG_readahead shares the same bit with PG_reclaim.  The
      625             :          * folio_end_writeback() above may clear PG_readahead mistakenly, so set the
     626             :          * bit after that.
     627             :          */
     628           0 :         if (folio_test_readahead(folio))
     629             :                 folio_set_readahead(newfolio);
     630             : 
     631           0 :         folio_copy_owner(newfolio, folio);
     632             : 
     633           0 :         if (!folio_test_hugetlb(folio))
     634             :                 mem_cgroup_migrate(folio, newfolio);
     635           0 : }
     636             : EXPORT_SYMBOL(folio_migrate_flags);
     637             : 
     638           0 : void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
     639             : {
     640           0 :         folio_copy(newfolio, folio);
     641           0 :         folio_migrate_flags(newfolio, folio);
     642           0 : }
     643             : EXPORT_SYMBOL(folio_migrate_copy);
     644             : 
     645             : /************************************************************
     646             :  *                    Migration functions
     647             :  ***********************************************************/
     648             : 
     649           0 : int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
     650             :                 struct folio *src, enum migrate_mode mode, int extra_count)
     651             : {
     652             :         int rc;
     653             : 
     654           0 :         BUG_ON(folio_test_writeback(src));      /* Writeback must be complete */
     655             : 
     656           0 :         rc = folio_migrate_mapping(mapping, dst, src, extra_count);
     657             : 
     658           0 :         if (rc != MIGRATEPAGE_SUCCESS)
     659             :                 return rc;
     660             : 
     661           0 :         if (mode != MIGRATE_SYNC_NO_COPY)
     662             :                 folio_migrate_copy(dst, src);
     663             :         else
     664           0 :                 folio_migrate_flags(dst, src);
     665             :         return MIGRATEPAGE_SUCCESS;
     666             : }
     667             : 
     668             : /**
     669             :  * migrate_folio() - Simple folio migration.
     670             :  * @mapping: The address_space containing the folio.
     671             :  * @dst: The folio to migrate the data to.
     672             :  * @src: The folio containing the current data.
     673             :  * @mode: How to migrate the page.
     674             :  *
     675             :  * Common logic to directly migrate a single LRU folio suitable for
     676             :  * folios that do not use PagePrivate/PagePrivate2.
     677             :  *
     678             :  * Folios are locked upon entry and exit.
     679             :  */
     680           0 : int migrate_folio(struct address_space *mapping, struct folio *dst,
     681             :                 struct folio *src, enum migrate_mode mode)
     682             : {
     683           0 :         return migrate_folio_extra(mapping, dst, src, mode, 0);
     684             : }
     685             : EXPORT_SYMBOL(migrate_folio);
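
/*
 * A minimal sketch of how a filesystem opts into this helper: mappings
 * without folio private data can point their address_space_operations
 * straight at migrate_folio(), as e.g. shmem does.  demo_simple_aops is
 * a hypothetical name; the other methods are elided.
 */
#include <linux/fs.h>

static const struct address_space_operations demo_simple_aops = {
	.migrate_folio	= migrate_folio,
	/* ...read_folio, writepages, etc.... */
};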
     686             : 
     687             : #ifdef CONFIG_BLOCK
     688             : /* Returns true if all buffers are successfully locked */
     689           0 : static bool buffer_migrate_lock_buffers(struct buffer_head *head,
     690             :                                                         enum migrate_mode mode)
     691             : {
     692           0 :         struct buffer_head *bh = head;
     693             :         struct buffer_head *failed_bh;
     694             : 
     695             :         do {
     696           0 :                 if (!trylock_buffer(bh)) {
     697           0 :                         if (mode == MIGRATE_ASYNC)
     698             :                                 goto unlock;
     699           0 :                         if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
     700             :                                 goto unlock;
     701             :                         lock_buffer(bh);
     702             :                 }
     703             : 
     704           0 :                 bh = bh->b_this_page;
     705           0 :         } while (bh != head);
     706             : 
     707             :         return true;
     708             : 
     709             : unlock:
     710             :         /* We failed to lock the buffer and cannot stall. */
     711             :         failed_bh = bh;
     712             :         bh = head;
     713           0 :         while (bh != failed_bh) {
     714           0 :                 unlock_buffer(bh);
     715           0 :                 bh = bh->b_this_page;
     716             :         }
     717             : 
     718             :         return false;
     719             : }
     720             : 
     721           0 : static int __buffer_migrate_folio(struct address_space *mapping,
     722             :                 struct folio *dst, struct folio *src, enum migrate_mode mode,
     723             :                 bool check_refs)
     724             : {
     725             :         struct buffer_head *bh, *head;
     726             :         int rc;
     727             :         int expected_count;
     728             : 
     729           0 :         head = folio_buffers(src);
     730           0 :         if (!head)
     731           0 :                 return migrate_folio(mapping, dst, src, mode);
     732             : 
      733             :         /* Check that the page has no extra refs before we do more work */
     734           0 :         expected_count = folio_expected_refs(mapping, src);
     735           0 :         if (folio_ref_count(src) != expected_count)
     736             :                 return -EAGAIN;
     737             : 
     738           0 :         if (!buffer_migrate_lock_buffers(head, mode))
     739             :                 return -EAGAIN;
     740             : 
     741           0 :         if (check_refs) {
     742             :                 bool busy;
     743             :                 bool invalidated = false;
     744             : 
     745             : recheck_buffers:
     746           0 :                 busy = false;
     747           0 :                 spin_lock(&mapping->private_lock);
     748           0 :                 bh = head;
     749             :                 do {
     750           0 :                         if (atomic_read(&bh->b_count)) {
     751             :                                 busy = true;
     752             :                                 break;
     753             :                         }
     754           0 :                         bh = bh->b_this_page;
     755           0 :                 } while (bh != head);
     756           0 :                 if (busy) {
     757           0 :                         if (invalidated) {
     758             :                                 rc = -EAGAIN;
     759             :                                 goto unlock_buffers;
     760             :                         }
     761           0 :                         spin_unlock(&mapping->private_lock);
     762           0 :                         invalidate_bh_lrus();
     763           0 :                         invalidated = true;
     764           0 :                         goto recheck_buffers;
     765             :                 }
     766             :         }
     767             : 
     768           0 :         rc = folio_migrate_mapping(mapping, dst, src, 0);
     769           0 :         if (rc != MIGRATEPAGE_SUCCESS)
     770             :                 goto unlock_buffers;
     771             : 
     772           0 :         folio_attach_private(dst, folio_detach_private(src));
     773             : 
     774           0 :         bh = head;
     775             :         do {
     776           0 :                 set_bh_page(bh, &dst->page, bh_offset(bh));
     777           0 :                 bh = bh->b_this_page;
     778           0 :         } while (bh != head);
     779             : 
     780           0 :         if (mode != MIGRATE_SYNC_NO_COPY)
     781             :                 folio_migrate_copy(dst, src);
     782             :         else
     783           0 :                 folio_migrate_flags(dst, src);
     784             : 
     785             :         rc = MIGRATEPAGE_SUCCESS;
     786             : unlock_buffers:
     787           0 :         if (check_refs)
     788           0 :                 spin_unlock(&mapping->private_lock);
     789             :         bh = head;
     790             :         do {
     791           0 :                 unlock_buffer(bh);
     792           0 :                 bh = bh->b_this_page;
     793           0 :         } while (bh != head);
     794             : 
     795             :         return rc;
     796             : }
     797             : 
     798             : /**
     799             :  * buffer_migrate_folio() - Migration function for folios with buffers.
     800             :  * @mapping: The address space containing @src.
     801             :  * @dst: The folio to migrate to.
     802             :  * @src: The folio to migrate from.
     803             :  * @mode: How to migrate the folio.
     804             :  *
     805             :  * This function can only be used if the underlying filesystem guarantees
     806             :  * that no other references to @src exist. For example attached buffer
     807             :  * heads are accessed only under the folio lock.  If your filesystem cannot
     808             :  * provide this guarantee, buffer_migrate_folio_norefs() may be more
     809             :  * appropriate.
     810             :  *
     811             :  * Return: 0 on success or a negative errno on failure.
     812             :  */
     813           0 : int buffer_migrate_folio(struct address_space *mapping,
     814             :                 struct folio *dst, struct folio *src, enum migrate_mode mode)
     815             : {
     816           0 :         return __buffer_migrate_folio(mapping, dst, src, mode, false);
     817             : }
     818             : EXPORT_SYMBOL(buffer_migrate_folio);
     819             : 
     820             : /**
     821             :  * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
     822             :  * @mapping: The address space containing @src.
     823             :  * @dst: The folio to migrate to.
     824             :  * @src: The folio to migrate from.
     825             :  * @mode: How to migrate the folio.
     826             :  *
     827             :  * Like buffer_migrate_folio() except that this variant is more careful
     828             :  * and checks that there are also no buffer head references. This function
     829             :  * is the right one for mappings where buffer heads are directly looked
     830             :  * up and referenced (such as block device mappings).
     831             :  *
     832             :  * Return: 0 on success or a negative errno on failure.
     833             :  */
     834           0 : int buffer_migrate_folio_norefs(struct address_space *mapping,
     835             :                 struct folio *dst, struct folio *src, enum migrate_mode mode)
     836             : {
     837           0 :         return __buffer_migrate_folio(mapping, dst, src, mode, true);
     838             : }
     839             : EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
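
/*
 * Usage note (hedged): filesystems whose buffer heads are only touched
 * under the folio lock can use the plain variant (e.g. ext4's regular
 * aops use buffer_migrate_folio), while the block device mapping, where
 * buffer heads are looked up and referenced directly, uses
 * buffer_migrate_folio_norefs (see def_blk_aops in block/fops.c).
 */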
     840             : #endif
     841             : 
     842           0 : int filemap_migrate_folio(struct address_space *mapping,
     843             :                 struct folio *dst, struct folio *src, enum migrate_mode mode)
     844             : {
     845             :         int ret;
     846             : 
     847           0 :         ret = folio_migrate_mapping(mapping, dst, src, 0);
     848           0 :         if (ret != MIGRATEPAGE_SUCCESS)
     849             :                 return ret;
     850             : 
     851           0 :         if (folio_get_private(src))
     852           0 :                 folio_attach_private(dst, folio_detach_private(src));
     853             : 
     854           0 :         if (mode != MIGRATE_SYNC_NO_COPY)
     855             :                 folio_migrate_copy(dst, src);
     856             :         else
     857           0 :                 folio_migrate_flags(dst, src);
     858             :         return MIGRATEPAGE_SUCCESS;
     859             : }
     860             : EXPORT_SYMBOL_GPL(filemap_migrate_folio);
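
/*
 * A minimal sketch of where filemap_migrate_folio() fits: a mapping
 * whose folio private data only needs to be re-attached to the
 * destination folio, with no further fixups.  demo_private_aops is a
 * hypothetical name.
 */
#include <linux/fs.h>

static const struct address_space_operations demo_private_aops = {
	.migrate_folio	= filemap_migrate_folio,
	/* ...other methods elided... */
};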
     861             : 
     862             : /*
      863             :  * Write back a folio to clean its dirty state
     864             :  */
     865           0 : static int writeout(struct address_space *mapping, struct folio *folio)
     866             : {
     867           0 :         struct writeback_control wbc = {
     868             :                 .sync_mode = WB_SYNC_NONE,
     869             :                 .nr_to_write = 1,
     870             :                 .range_start = 0,
     871             :                 .range_end = LLONG_MAX,
     872             :                 .for_reclaim = 1
     873             :         };
     874             :         int rc;
     875             : 
     876           0 :         if (!mapping->a_ops->writepage)
     877             :                 /* No write method for the address space */
     878             :                 return -EINVAL;
     879             : 
     880           0 :         if (!folio_clear_dirty_for_io(folio))
     881             :                 /* Someone else already triggered a write */
     882             :                 return -EAGAIN;
     883             : 
     884             :         /*
     885             :          * A dirty folio may imply that the underlying filesystem has
     886             :          * the folio on some queue. So the folio must be clean for
     887             :          * migration. Writeout may mean we lose the lock and the
     888             :          * folio state is no longer what we checked for earlier.
     889             :          * At this point we know that the migration attempt cannot
     890             :          * be successful.
     891             :          */
     892           0 :         remove_migration_ptes(folio, folio, false);
     893             : 
     894           0 :         rc = mapping->a_ops->writepage(&folio->page, &wbc);
     895             : 
     896           0 :         if (rc != AOP_WRITEPAGE_ACTIVATE)
     897             :                 /* unlocked. Relock */
     898             :                 folio_lock(folio);
     899             : 
     900           0 :         return (rc < 0) ? -EIO : -EAGAIN;
     901             : }
     902             : 
     903             : /*
     904             :  * Default handling if a filesystem does not provide a migration function.
     905             :  */
     906           0 : static int fallback_migrate_folio(struct address_space *mapping,
     907             :                 struct folio *dst, struct folio *src, enum migrate_mode mode)
     908             : {
     909           0 :         if (folio_test_dirty(src)) {
     910             :                 /* Only writeback folios in full synchronous migration */
     911           0 :                 switch (mode) {
     912             :                 case MIGRATE_SYNC:
     913             :                 case MIGRATE_SYNC_NO_COPY:
     914             :                         break;
     915             :                 default:
     916             :                         return -EBUSY;
     917             :                 }
     918           0 :                 return writeout(mapping, src);
     919             :         }
     920             : 
     921             :         /*
     922             :          * Buffers may be managed in a filesystem specific way.
     923             :          * We must have no buffers or drop them.
     924             :          */
     925           0 :         if (folio_test_private(src) &&
     926           0 :             !filemap_release_folio(src, GFP_KERNEL))
     927           0 :                 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
     928             : 
     929           0 :         return migrate_folio(mapping, dst, src, mode);
     930             : }
     931             : 
     932             : /*
     933             :  * Move a page to a newly allocated page
     934             :  * The page is locked and all ptes have been successfully removed.
     935             :  *
     936             :  * The new page will have replaced the old page if this function
     937             :  * is successful.
     938             :  *
     939             :  * Return value:
     940             :  *   < 0 - error code
     941             :  *  MIGRATEPAGE_SUCCESS - success
     942             :  */
     943           0 : static int move_to_new_folio(struct folio *dst, struct folio *src,
     944             :                                 enum migrate_mode mode)
     945             : {
     946           0 :         int rc = -EAGAIN;
     947           0 :         bool is_lru = !__PageMovable(&src->page);
     948             : 
     949             :         VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
     950             :         VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
     951             : 
     952           0 :         if (likely(is_lru)) {
     953           0 :                 struct address_space *mapping = folio_mapping(src);
     954             : 
     955           0 :                 if (!mapping)
     956           0 :                         rc = migrate_folio(mapping, dst, src, mode);
     957           0 :                 else if (mapping->a_ops->migrate_folio)
     958             :                         /*
     959             :                          * Most folios have a mapping and most filesystems
     960             :                          * provide a migrate_folio callback. Anonymous folios
     961             :                          * are part of swap space which also has its own
     962             :                          * migrate_folio callback. This is the most common path
     963             :                          * for page migration.
     964             :                          */
     965           0 :                         rc = mapping->a_ops->migrate_folio(mapping, dst, src,
     966             :                                                                 mode);
     967             :                 else
     968           0 :                         rc = fallback_migrate_folio(mapping, dst, src, mode);
     969             :         } else {
     970             :                 const struct movable_operations *mops;
     971             : 
     972             :                 /*
      973             :                  * A non-LRU page could have been released after the
      974             :                  * isolation step. In that case, we shouldn't try migration.
     975             :                  */
     976             :                 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
     977           0 :                 if (!folio_test_movable(src)) {
     978           0 :                         rc = MIGRATEPAGE_SUCCESS;
     979             :                         folio_clear_isolated(src);
     980             :                         goto out;
     981             :                 }
     982             : 
     983           0 :                 mops = folio_movable_ops(src);
     984           0 :                 rc = mops->migrate_page(&dst->page, &src->page, mode);
     985           0 :                 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
     986             :                                 !folio_test_isolated(src));
     987             :         }
     988             : 
     989             :         /*
     990             :          * When successful, old pagecache src->mapping must be cleared before
     991             :          * src is freed; but stats require that PageAnon be left as PageAnon.
     992             :          */
     993           0 :         if (rc == MIGRATEPAGE_SUCCESS) {
     994           0 :                 if (__PageMovable(&src->page)) {
     995             :                         VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
     996             : 
     997             :                         /*
      998             :                          * We clear PG_movable under the page lock so that no
      999             :                          * compactor can try to migrate this page.
    1000             :                          */
    1001             :                         folio_clear_isolated(src);
    1002             :                 }
    1003             : 
    1004             :                 /*
    1005             :                  * Anonymous and movable src->mapping will be cleared by
     1006             :                  * free_pages_prepare(), so don't reset it here; this keeps
     1007             :                  * type checks such as PageAnon working until the folio is freed.
    1008             :                  */
    1009           0 :                 if (!folio_mapping_flags(src))
    1010           0 :                         src->mapping = NULL;
    1011             : 
    1012             :                 if (likely(!folio_is_zone_device(dst)))
    1013             :                         flush_dcache_folio(dst);
    1014             :         }
    1015             : out:
    1016           0 :         return rc;
    1017             : }
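
The dispatch above is how a mapping opts in to migration: it provides a migrate_folio address_space operation, or falls back to the buffer/writeout path. As a hedged sketch (the "examplefs" name is hypothetical; migrate_folio() and buffer_migrate_folio() are the real generic helpers), a filesystem whose folios carry no fs-private state can simply reuse the generic helper:

        /* Hypothetical examplefs: no fs-private folio state, so the
         * generic helper, which moves the pagecache entry and copies
         * flags/data, is enough.  A buffer_head-based filesystem would
         * typically set .migrate_folio = buffer_migrate_folio instead. */
        static const struct address_space_operations examplefs_aops = {
                .migrate_folio  = migrate_folio,
        };
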
    1018             : 
    1019             : /*
    1020             :  * To record some information during migration, we use some unused
     1021             :  * fields (mapping and private) of the newly allocated
    1022             :  * destination folio.  This is safe because nobody is using them
    1023             :  * except us.
    1024             :  */
    1025             : union migration_ptr {
    1026             :         struct anon_vma *anon_vma;
    1027             :         struct address_space *mapping;
    1028             : };
    1029             : static void __migrate_folio_record(struct folio *dst,
    1030             :                                    unsigned long page_was_mapped,
    1031             :                                    struct anon_vma *anon_vma)
    1032             : {
    1033           0 :         union migration_ptr ptr = { .anon_vma = anon_vma };
    1034           0 :         dst->mapping = ptr.mapping;
    1035           0 :         dst->private = (void *)page_was_mapped;
    1036             : }
    1037             : 
    1038             : static void __migrate_folio_extract(struct folio *dst,
    1039             :                                    int *page_was_mappedp,
    1040             :                                    struct anon_vma **anon_vmap)
    1041             : {
    1042           0 :         union migration_ptr ptr = { .mapping = dst->mapping };
    1043           0 :         *anon_vmap = ptr.anon_vma;
    1044           0 :         *page_was_mappedp = (unsigned long)dst->private;
    1045           0 :         dst->mapping = NULL;
    1046           0 :         dst->private = NULL;
    1047             : }
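
These two helpers form a stash/recover pair bracketing the unmap and move phases. A minimal round-trip sketch (variable names illustrative; the trick is safe only while dst is still private to the migration code, per the comment above):

        /* Unmap phase: stash the state in the destination folio... */
        __migrate_folio_record(dst, page_was_mapped, anon_vma);

        /* ...move phase: recover it.  The union puns the anon_vma
         * pointer through dst->mapping, so no casts are needed. */
        int was_mapped;
        struct anon_vma *av;

        __migrate_folio_extract(dst, &was_mapped, &av);
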
    1048             : 
    1049             : /* Restore the source folio to the original state upon failure */
    1050           0 : static void migrate_folio_undo_src(struct folio *src,
    1051             :                                    int page_was_mapped,
    1052             :                                    struct anon_vma *anon_vma,
    1053             :                                    bool locked,
    1054             :                                    struct list_head *ret)
    1055             : {
    1056           0 :         if (page_was_mapped)
    1057           0 :                 remove_migration_ptes(src, src, false);
    1058             :         /* Drop an anon_vma reference if we took one */
    1059           0 :         if (anon_vma)
    1060             :                 put_anon_vma(anon_vma);
    1061           0 :         if (locked)
    1062           0 :                 folio_unlock(src);
    1063           0 :         if (ret)
    1064           0 :                 list_move_tail(&src->lru, ret);
    1065           0 : }
    1066             : 
    1067             : /* Restore the destination folio to the original state upon failure */
    1068           0 : static void migrate_folio_undo_dst(struct folio *dst, bool locked,
    1069             :                 free_folio_t put_new_folio, unsigned long private)
    1070             : {
    1071           0 :         if (locked)
    1072           0 :                 folio_unlock(dst);
    1073           0 :         if (put_new_folio)
    1074           0 :                 put_new_folio(dst, private);
    1075             :         else
    1076             :                 folio_put(dst);
    1077           0 : }
    1078             : 
     1079             : /* Clean up the src folio upon migration success */
    1080           0 : static void migrate_folio_done(struct folio *src,
    1081             :                                enum migrate_reason reason)
    1082             : {
    1083             :         /*
     1084             :          * Compaction can also migrate non-LRU pages, which are
     1085             :          * not accounted in NR_ISOLATED_*. They can be recognized
     1086             :          * as __PageMovable.
    1087             :          */
    1088           0 :         if (likely(!__folio_test_movable(src)))
    1089           0 :                 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
    1090           0 :                                     folio_is_file_lru(src), -folio_nr_pages(src));
    1091             : 
    1092           0 :         if (reason != MR_MEMORY_FAILURE)
    1093             :                 /* We release the page in page_handle_poison. */
    1094             :                 folio_put(src);
    1095           0 : }
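
The mod_node_page_state() call above undoes the NR_ISOLATED_* accounting performed when the folio was isolated. A hedged sketch of the matching increment on the isolation side (compaction and memory-hotplug paths do the equivalent):

        /* On isolation: count the folio as isolated, bucketed by
         * anon vs. file, in units of base pages. */
        mod_node_page_state(folio_pgdat(folio),
                            NR_ISOLATED_ANON + folio_is_file_lru(folio),
                            folio_nr_pages(folio));
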
    1096             : 
     1097             : /* Obtain the lock on the page and remove all ptes. */
    1098           0 : static int migrate_folio_unmap(new_folio_t get_new_folio,
    1099             :                 free_folio_t put_new_folio, unsigned long private,
    1100             :                 struct folio *src, struct folio **dstp, enum migrate_mode mode,
    1101             :                 enum migrate_reason reason, struct list_head *ret)
    1102             : {
    1103             :         struct folio *dst;
    1104           0 :         int rc = -EAGAIN;
    1105           0 :         int page_was_mapped = 0;
    1106           0 :         struct anon_vma *anon_vma = NULL;
    1107           0 :         bool is_lru = !__PageMovable(&src->page);
    1108           0 :         bool locked = false;
    1109           0 :         bool dst_locked = false;
    1110             : 
    1111           0 :         if (folio_ref_count(src) == 1) {
    1112             :                 /* Folio was freed from under us. So we are done. */
    1113           0 :                 folio_clear_active(src);
    1114           0 :                 folio_clear_unevictable(src);
    1115             :                 /* free_pages_prepare() will clear PG_isolated. */
    1116           0 :                 list_del(&src->lru);
    1117           0 :                 migrate_folio_done(src, reason);
    1118           0 :                 return MIGRATEPAGE_SUCCESS;
    1119             :         }
    1120             : 
    1121           0 :         dst = get_new_folio(src, private);
    1122           0 :         if (!dst)
    1123             :                 return -ENOMEM;
    1124           0 :         *dstp = dst;
    1125             : 
    1126           0 :         dst->private = NULL;
    1127             : 
    1128           0 :         if (!folio_trylock(src)) {
    1129           0 :                 if (mode == MIGRATE_ASYNC)
    1130             :                         goto out;
    1131             : 
    1132             :                 /*
    1133             :                  * It's not safe for direct compaction to call lock_page.
    1134             :                  * For example, during page readahead pages are added locked
    1135             :                  * to the LRU. Later, when the IO completes the pages are
    1136             :                  * marked uptodate and unlocked. However, the queueing
    1137             :                  * could be merging multiple pages for one bio (e.g.
    1138             :                  * mpage_readahead). If an allocation happens for the
    1139             :                  * second or third page, the process can end up locking
    1140             :                  * the same page twice and deadlocking. Rather than
    1141             :                  * trying to be clever about what pages can be locked,
    1142             :                  * avoid the use of lock_page for direct compaction
    1143             :                  * altogether.
    1144             :                  */
    1145           0 :                 if (current->flags & PF_MEMALLOC)
    1146             :                         goto out;
    1147             : 
    1148             :                 /*
     1149             :                  * In "light" mode, we can wait for transient locks (e.g.
    1150             :                  * inserting a page into the page table), but it's not
    1151             :                  * worth waiting for I/O.
    1152             :                  */
    1153           0 :                 if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
    1154             :                         goto out;
    1155             : 
    1156             :                 folio_lock(src);
    1157             :         }
    1158           0 :         locked = true;
    1159             : 
    1160           0 :         if (folio_test_writeback(src)) {
    1161             :                 /*
    1162             :                  * Only in the case of a full synchronous migration is it
    1163             :                  * necessary to wait for PageWriteback. In the async case,
    1164             :                  * the retry loop is too short and in the sync-light case,
     1165             :                  * the overhead of stalling is too much.
    1166             :                  */
    1167           0 :                 switch (mode) {
    1168             :                 case MIGRATE_SYNC:
    1169             :                 case MIGRATE_SYNC_NO_COPY:
    1170             :                         break;
    1171             :                 default:
    1172             :                         rc = -EBUSY;
    1173             :                         goto out;
    1174             :                 }
    1175           0 :                 folio_wait_writeback(src);
    1176             :         }
    1177             : 
    1178             :         /*
     1179             :          * By the time try_to_migrate() runs, src->mapcount will have gone
     1180             :          * down to 0, so we could not otherwise notice if the anon_vma were
     1181             :          * freed while we migrate the page. Taking a reference here delays
     1182             :          * freeing the anon_vma until the end of migration. File cache pages
     1183             :          * are no problem because they are protected by the page lock during
     1184             :          * migration, so only anonymous pages need this care.
    1185             :          *
    1186             :          * Only folio_get_anon_vma() understands the subtleties of
    1187             :          * getting a hold on an anon_vma from outside one of its mms.
    1188             :          * But if we cannot get anon_vma, then we won't need it anyway,
    1189             :          * because that implies that the anon page is no longer mapped
    1190             :          * (and cannot be remapped so long as we hold the page lock).
    1191             :          */
    1192           0 :         if (folio_test_anon(src) && !folio_test_ksm(src))
    1193           0 :                 anon_vma = folio_get_anon_vma(src);
    1194             : 
    1195             :         /*
    1196             :          * Block others from accessing the new page when we get around to
    1197             :          * establishing additional references. We are usually the only one
    1198             :          * holding a reference to dst at this point. We used to have a BUG
    1199             :          * here if folio_trylock(dst) fails, but would like to allow for
    1200             :          * cases where there might be a race with the previous use of dst.
    1201             :          * This is much like races on refcount of oldpage: just don't BUG().
    1202             :          */
    1203           0 :         if (unlikely(!folio_trylock(dst)))
    1204             :                 goto out;
    1205           0 :         dst_locked = true;
    1206             : 
    1207           0 :         if (unlikely(!is_lru)) {
    1208           0 :                 __migrate_folio_record(dst, page_was_mapped, anon_vma);
    1209           0 :                 return MIGRATEPAGE_UNMAP;
    1210             :         }
    1211             : 
    1212             :         /*
    1213             :          * Corner case handling:
     1214             :          * 1. When a new swap-cache page is read in, it is added to the LRU
     1215             :          * and treated as swapcache but it has no rmap yet.
     1216             :          * Calling try_to_unmap() against a src->mapping==NULL page will
     1217             :          * trigger a BUG.  So handle it here.
     1218             :          * 2. An orphaned page (see truncate_cleanup_page) might have
     1219             :          * fs-private metadata. The page can be picked up due to memory
     1220             :          * offlining.  Everywhere else except page reclaim, the page is
     1221             :          * invisible to the VM, so it cannot be migrated.  So try to
     1222             :          * free the metadata so that the page can be freed.
    1223             :          */
    1224           0 :         if (!src->mapping) {
    1225           0 :                 if (folio_test_private(src)) {
    1226           0 :                         try_to_free_buffers(src);
    1227           0 :                         goto out;
    1228             :                 }
    1229           0 :         } else if (folio_mapped(src)) {
    1230             :                 /* Establish migration ptes */
    1231             :                 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
    1232             :                                !folio_test_ksm(src) && !anon_vma, src);
    1233           0 :                 try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
    1234           0 :                 page_was_mapped = 1;
    1235             :         }
    1236             : 
    1237           0 :         if (!folio_mapped(src)) {
    1238           0 :                 __migrate_folio_record(dst, page_was_mapped, anon_vma);
    1239           0 :                 return MIGRATEPAGE_UNMAP;
    1240             :         }
    1241             : 
    1242             : out:
    1243             :         /*
    1244             :          * A folio that has not been unmapped will be restored to
     1245             :          * the right list unless we want to retry.
    1246             :          */
    1247           0 :         if (rc == -EAGAIN)
    1248           0 :                 ret = NULL;
    1249             : 
    1250           0 :         migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
    1251           0 :         migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
    1252             : 
    1253           0 :         return rc;
    1254             : }
    1255             : 
    1256             : /* Migrate the folio to the newly allocated folio in dst. */
    1257           0 : static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
    1258             :                               struct folio *src, struct folio *dst,
    1259             :                               enum migrate_mode mode, enum migrate_reason reason,
    1260             :                               struct list_head *ret)
    1261             : {
    1262             :         int rc;
    1263           0 :         int page_was_mapped = 0;
    1264           0 :         struct anon_vma *anon_vma = NULL;
    1265           0 :         bool is_lru = !__PageMovable(&src->page);
    1266             :         struct list_head *prev;
    1267             : 
    1268           0 :         __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
    1269           0 :         prev = dst->lru.prev;
    1270           0 :         list_del(&dst->lru);
    1271             : 
    1272           0 :         rc = move_to_new_folio(dst, src, mode);
    1273           0 :         if (rc)
    1274             :                 goto out;
    1275             : 
    1276           0 :         if (unlikely(!is_lru))
    1277             :                 goto out_unlock_both;
    1278             : 
    1279             :         /*
    1280             :          * When successful, push dst to LRU immediately: so that if it
    1281             :          * turns out to be an mlocked page, remove_migration_ptes() will
    1282             :          * automatically build up the correct dst->mlock_count for it.
    1283             :          *
    1284             :          * We would like to do something similar for the old page, when
    1285             :          * unsuccessful, and other cases when a page has been temporarily
    1286             :          * isolated from the unevictable LRU: but this case is the easiest.
    1287             :          */
    1288           0 :         folio_add_lru(dst);
    1289           0 :         if (page_was_mapped)
    1290           0 :                 lru_add_drain();
    1291             : 
    1292           0 :         if (page_was_mapped)
    1293           0 :                 remove_migration_ptes(src, dst, false);
    1294             : 
    1295             : out_unlock_both:
    1296           0 :         folio_unlock(dst);
    1297           0 :         set_page_owner_migrate_reason(&dst->page, reason);
    1298             :         /*
     1299             :          * If migration is successful, decrease the refcount of dst,
     1300             :          * which will not free the page because the new page owner
     1301             :          * has taken a reference.
    1302             :          */
    1303           0 :         folio_put(dst);
    1304             : 
    1305             :         /*
    1306             :          * A folio that has been migrated has all references removed
    1307             :          * and will be freed.
    1308             :          */
    1309           0 :         list_del(&src->lru);
    1310             :         /* Drop an anon_vma reference if we took one */
    1311           0 :         if (anon_vma)
    1312           0 :                 put_anon_vma(anon_vma);
    1313           0 :         folio_unlock(src);
    1314           0 :         migrate_folio_done(src, reason);
    1315             : 
    1316           0 :         return rc;
    1317             : out:
    1318             :         /*
    1319             :          * A folio that has not been migrated will be restored to
     1320             :          * the right list unless we want to retry.
    1321             :          */
    1322           0 :         if (rc == -EAGAIN) {
    1323           0 :                 list_add(&dst->lru, prev);
    1324           0 :                 __migrate_folio_record(dst, page_was_mapped, anon_vma);
    1325           0 :                 return rc;
    1326             :         }
    1327             : 
    1328           0 :         migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
    1329           0 :         migrate_folio_undo_dst(dst, true, put_new_folio, private);
    1330             : 
    1331           0 :         return rc;
    1332             : }
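
migrate_folio_unmap() and migrate_folio_move() deliberately split migration into two phases so that TLB flushes can be batched between them. A simplified single-folio sketch of the contract (error handling elided; MIGRATE_ASYNC assumed, so try_to_migrate() defers the flush with TTU_BATCH_FLUSH):

        LIST_HEAD(ret_list);            /* folios to put back on failure */
        struct folio *dst;
        int rc;

        rc = migrate_folio_unmap(get_new_folio, put_new_folio, private,
                                 src, &dst, MIGRATE_ASYNC, reason, &ret_list);
        if (rc == MIGRATEPAGE_UNMAP) {
                try_to_unmap_flush();   /* one batched TLB flush */
                rc = migrate_folio_move(put_new_folio, private, src, dst,
                                        MIGRATE_ASYNC, reason, &ret_list);
        }

migrate_pages_batch() below runs the same two phases over whole lists, which is where the batching pays off.
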
    1333             : 
    1334             : /*
     1335             :  * Counterpart of migrate_folio_unmap()/migrate_folio_move() for hugepage migration.
    1336             :  *
     1337             :  * This function doesn't wait for the completion of hugepage I/O
     1338             :  * because there is no race between I/O and migration for hugepages.
     1339             :  * Note that currently hugepage I/O occurs only in direct I/O
     1340             :  * where no lock is held and PG_writeback is irrelevant,
     1341             :  * and the writeback status of all subpages is counted in the reference
     1342             :  * count of the head page (i.e. if all subpages of a 2MB hugepage are
     1343             :  * under direct I/O, the reference count of the head page is 512 and a bit more.)
     1344             :  * This means that when we try to migrate a hugepage whose subpages are
     1345             :  * doing direct I/O, some references remain after try_to_unmap() and
     1346             :  * hugepage migration fails without data corruption.
    1347             :  *
    1348             :  * There is also no race when direct I/O is issued on the page under migration,
    1349             :  * because then pte is replaced with migration swap entry and direct I/O code
    1350             :  * will wait in the page fault for migration to complete.
    1351             :  */
    1352             : static int unmap_and_move_huge_page(new_folio_t get_new_folio,
    1353             :                 free_folio_t put_new_folio, unsigned long private,
    1354             :                 struct folio *src, int force, enum migrate_mode mode,
    1355             :                 int reason, struct list_head *ret)
    1356             : {
    1357             :         struct folio *dst;
    1358             :         int rc = -EAGAIN;
    1359             :         int page_was_mapped = 0;
    1360             :         struct anon_vma *anon_vma = NULL;
    1361             :         struct address_space *mapping = NULL;
    1362             : 
    1363             :         if (folio_ref_count(src) == 1) {
    1364             :                 /* page was freed from under us. So we are done. */
    1365             :                 folio_putback_active_hugetlb(src);
    1366             :                 return MIGRATEPAGE_SUCCESS;
    1367             :         }
    1368             : 
    1369             :         dst = get_new_folio(src, private);
    1370             :         if (!dst)
    1371             :                 return -ENOMEM;
    1372             : 
    1373             :         if (!folio_trylock(src)) {
    1374             :                 if (!force)
    1375             :                         goto out;
    1376             :                 switch (mode) {
    1377             :                 case MIGRATE_SYNC:
    1378             :                 case MIGRATE_SYNC_NO_COPY:
    1379             :                         break;
    1380             :                 default:
    1381             :                         goto out;
    1382             :                 }
    1383             :                 folio_lock(src);
    1384             :         }
    1385             : 
    1386             :         /*
    1387             :          * Check for pages which are in the process of being freed.  Without
     1388             :          * folio_mapping() set, the hugetlbfs-specific move-page routine will not
    1389             :          * be called and we could leak usage counts for subpools.
    1390             :          */
    1391             :         if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
    1392             :                 rc = -EBUSY;
    1393             :                 goto out_unlock;
    1394             :         }
    1395             : 
    1396             :         if (folio_test_anon(src))
    1397             :                 anon_vma = folio_get_anon_vma(src);
    1398             : 
    1399             :         if (unlikely(!folio_trylock(dst)))
    1400             :                 goto put_anon;
    1401             : 
    1402             :         if (folio_mapped(src)) {
    1403             :                 enum ttu_flags ttu = 0;
    1404             : 
    1405             :                 if (!folio_test_anon(src)) {
    1406             :                         /*
    1407             :                          * In shared mappings, try_to_unmap could potentially
    1408             :                          * call huge_pmd_unshare.  Because of this, take
    1409             :                          * semaphore in write mode here and set TTU_RMAP_LOCKED
    1410             :                          * to let lower levels know we have taken the lock.
    1411             :                          */
    1412             :                         mapping = hugetlb_page_mapping_lock_write(&src->page);
    1413             :                         if (unlikely(!mapping))
    1414             :                                 goto unlock_put_anon;
    1415             : 
    1416             :                         ttu = TTU_RMAP_LOCKED;
    1417             :                 }
    1418             : 
    1419             :                 try_to_migrate(src, ttu);
    1420             :                 page_was_mapped = 1;
    1421             : 
    1422             :                 if (ttu & TTU_RMAP_LOCKED)
    1423             :                         i_mmap_unlock_write(mapping);
    1424             :         }
    1425             : 
    1426             :         if (!folio_mapped(src))
    1427             :                 rc = move_to_new_folio(dst, src, mode);
    1428             : 
    1429             :         if (page_was_mapped)
    1430             :                 remove_migration_ptes(src,
    1431             :                         rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
    1432             : 
    1433             : unlock_put_anon:
    1434             :         folio_unlock(dst);
    1435             : 
    1436             : put_anon:
    1437             :         if (anon_vma)
    1438             :                 put_anon_vma(anon_vma);
    1439             : 
    1440             :         if (rc == MIGRATEPAGE_SUCCESS) {
    1441             :                 move_hugetlb_state(src, dst, reason);
    1442             :                 put_new_folio = NULL;
    1443             :         }
    1444             : 
    1445             : out_unlock:
    1446             :         folio_unlock(src);
    1447             : out:
    1448             :         if (rc == MIGRATEPAGE_SUCCESS)
    1449             :                 folio_putback_active_hugetlb(src);
    1450             :         else if (rc != -EAGAIN)
    1451             :                 list_move_tail(&src->lru, ret);
    1452             : 
    1453             :         /*
    1454             :          * If migration was not successful and there's a freeing callback, use
    1455             :          * it.  Otherwise, put_page() will drop the reference grabbed during
    1456             :          * isolation.
    1457             :          */
    1458             :         if (put_new_folio)
    1459             :                 put_new_folio(dst, private);
    1460             :         else
    1461             :                 folio_putback_active_hugetlb(dst);
    1462             : 
    1463             :         return rc;
    1464             : }
    1465             : 
    1466           0 : static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
    1467             : {
    1468             :         int rc;
    1469             : 
    1470           0 :         folio_lock(folio);
    1471           0 :         rc = split_folio_to_list(folio, split_folios);
    1472           0 :         folio_unlock(folio);
    1473             :         if (!rc)
    1474           0 :                 list_move_tail(&folio->lru, split_folios);
    1475             : 
    1476           0 :         return rc;
    1477             : }
    1478             : 
    1479             : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
    1480             : #define NR_MAX_BATCHED_MIGRATION        HPAGE_PMD_NR
    1481             : #else
    1482             : #define NR_MAX_BATCHED_MIGRATION        512
    1483             : #endif
    1484             : #define NR_MAX_MIGRATE_PAGES_RETRY      10
    1485             : #define NR_MAX_MIGRATE_ASYNC_RETRY      3
    1486             : #define NR_MAX_MIGRATE_SYNC_RETRY                                       \
    1487             :         (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
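
Worked out, these defaults give a synchronous caller NR_MAX_MIGRATE_PAGES_RETRY = 10 passes in total: the first NR_MAX_MIGRATE_ASYNC_RETRY = 3 in the initial MIGRATE_ASYNC batch, and the remaining NR_MAX_MIGRATE_SYNC_RETRY = 10 - 3 = 7 in the per-folio synchronous fallback (see migrate_pages_sync() below).
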
    1488             : 
    1489             : struct migrate_pages_stats {
    1490             :         int nr_succeeded;       /* Normal and large folios migrated successfully, in
    1491             :                                    units of base pages */
    1492             :         int nr_failed_pages;    /* Normal and large folios failed to be migrated, in
    1493             :                                    units of base pages.  Untried folios aren't counted */
    1494             :         int nr_thp_succeeded;   /* THP migrated successfully */
    1495             :         int nr_thp_failed;      /* THP failed to be migrated */
    1496             :         int nr_thp_split;       /* THP split before migrating */
    1497             : };
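
For example (assuming CONFIG_TRANSPARENT_HUGEPAGE with HPAGE_PMD_NR = 512): migrating one PMD-sized THP successfully adds 512 to nr_succeeded and 1 to nr_thp_succeeded; if the THP has to be split first, nr_thp_split is incremented and the resulting base pages are then accounted individually in nr_succeeded or nr_failed_pages.
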
    1498             : 
    1499             : /*
     1500             :  * Returns the number of hugetlb folios that were not migrated, or an error
     1501             :  * code. The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or once
     1502             :  * no movable hugetlb folios remain, because the list has become empty or no
     1503             :  * retryable hugetlb folios are left. It is the caller's responsibility to call
     1504             :  * putback_movable_pages() only if ret != 0.
    1505             :  */
    1506             : static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
    1507             :                             free_folio_t put_new_folio, unsigned long private,
    1508             :                             enum migrate_mode mode, int reason,
    1509             :                             struct migrate_pages_stats *stats,
    1510             :                             struct list_head *ret_folios)
    1511             : {
    1512             :         int retry = 1;
    1513             :         int nr_failed = 0;
    1514             :         int nr_retry_pages = 0;
    1515             :         int pass = 0;
    1516             :         struct folio *folio, *folio2;
    1517             :         int rc, nr_pages;
    1518             : 
    1519           0 :         for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
    1520           0 :                 retry = 0;
    1521           0 :                 nr_retry_pages = 0;
    1522             : 
    1523           0 :                 list_for_each_entry_safe(folio, folio2, from, lru) {
    1524           0 :                         if (!folio_test_hugetlb(folio))
    1525           0 :                                 continue;
    1526             : 
    1527             :                         nr_pages = folio_nr_pages(folio);
    1528             : 
    1529             :                         cond_resched();
    1530             : 
    1531             :                         /*
     1532             :                          * Migratability of hugepages depends on the architecture
     1533             :                          * and the hugepage size.  This check is necessary because some callers
    1534             :                          * of hugepage migration like soft offline and memory
    1535             :                          * hotremove don't walk through page tables or check whether
    1536             :                          * the hugepage is pmd-based or not before kicking migration.
    1537             :                          */
    1538             :                         if (!hugepage_migration_supported(folio_hstate(folio))) {
    1539             :                                 nr_failed++;
    1540             :                                 stats->nr_failed_pages += nr_pages;
    1541             :                                 list_move_tail(&folio->lru, ret_folios);
    1542             :                                 continue;
    1543             :                         }
    1544             : 
    1545             :                         rc = unmap_and_move_huge_page(get_new_folio,
    1546             :                                                       put_new_folio, private,
    1547             :                                                       folio, pass > 2, mode,
    1548             :                                                       reason, ret_folios);
    1549             :                         /*
    1550             :                          * The rules are:
    1551             :                          *      Success: hugetlb folio will be put back
    1552             :                          *      -EAGAIN: stay on the from list
    1553             :                          *      -ENOMEM: stay on the from list
    1554             :                          *      Other errno: put on ret_folios list
    1555             :                          */
    1556             :                         switch(rc) {
    1557             :                         case -ENOMEM:
    1558             :                                 /*
    1559             :                                  * When memory is low, don't bother to try to migrate
    1560             :                                  * other folios, just exit.
    1561             :                                  */
    1562             :                                 stats->nr_failed_pages += nr_pages + nr_retry_pages;
    1563             :                                 return -ENOMEM;
    1564             :                         case -EAGAIN:
    1565             :                                 retry++;
    1566             :                                 nr_retry_pages += nr_pages;
    1567             :                                 break;
    1568             :                         case MIGRATEPAGE_SUCCESS:
    1569             :                                 stats->nr_succeeded += nr_pages;
    1570             :                                 break;
    1571             :                         default:
    1572             :                                 /*
    1573             :                                  * Permanent failure (-EBUSY, etc.):
    1574             :                                  * unlike -EAGAIN case, the failed folio is
    1575             :                                  * removed from migration folio list and not
    1576             :                                  * retried in the next outer loop.
    1577             :                                  */
    1578             :                                 nr_failed++;
    1579             :                                 stats->nr_failed_pages += nr_pages;
    1580             :                                 break;
    1581             :                         }
    1582             :                 }
    1583             :         }
    1584             :         /*
     1585             :          * nr_failed is the number of hugetlb folios that failed to migrate.  After
    1586             :          * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
    1587             :          * folios as failed.
    1588             :          */
    1589             :         nr_failed += retry;
    1590             :         stats->nr_failed_pages += nr_retry_pages;
    1591             : 
    1592             :         return nr_failed;
    1593             : }
    1594             : 
    1595             : /*
     1596             :  * migrate_pages_batch() first unmaps as many folios in the from list as
     1597             :  * possible, then moves the unmapped folios.
     1598             :  *
     1599             :  * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
     1600             :  * lock or bit while we have locked more than one folio, which may cause
     1601             :  * deadlock (e.g., for the loop device).  So, if mode != MIGRATE_ASYNC, the
     1602             :  * length of the from list must be <= 1.
    1603             :  */
    1604           0 : static int migrate_pages_batch(struct list_head *from,
    1605             :                 new_folio_t get_new_folio, free_folio_t put_new_folio,
    1606             :                 unsigned long private, enum migrate_mode mode, int reason,
    1607             :                 struct list_head *ret_folios, struct list_head *split_folios,
    1608             :                 struct migrate_pages_stats *stats, int nr_pass)
    1609             : {
    1610           0 :         int retry = 1;
    1611           0 :         int thp_retry = 1;
    1612           0 :         int nr_failed = 0;
    1613           0 :         int nr_retry_pages = 0;
    1614           0 :         int pass = 0;
    1615           0 :         bool is_thp = false;
    1616           0 :         struct folio *folio, *folio2, *dst = NULL, *dst2;
    1617           0 :         int rc, rc_saved = 0, nr_pages;
    1618           0 :         LIST_HEAD(unmap_folios);
    1619           0 :         LIST_HEAD(dst_folios);
    1620           0 :         bool nosplit = (reason == MR_NUMA_MISPLACED);
    1621             : 
    1622             :         VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
    1623             :                         !list_empty(from) && !list_is_singular(from));
    1624             : 
    1625           0 :         for (pass = 0; pass < nr_pass && retry; pass++) {
    1626           0 :                 retry = 0;
    1627           0 :                 thp_retry = 0;
    1628           0 :                 nr_retry_pages = 0;
    1629             : 
    1630           0 :                 list_for_each_entry_safe(folio, folio2, from, lru) {
    1631           0 :                         is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
    1632           0 :                         nr_pages = folio_nr_pages(folio);
    1633             : 
    1634           0 :                         cond_resched();
    1635             : 
    1636             :                         /*
     1637             :                          * Large folio migration might be unsupported or
     1638             :                          * the allocation might fail, so we should retry
     1639             :                          * on the same folio with the large folio split
     1640             :                          * into normal folios.
    1641             :                          *
    1642             :                          * Split folios are put in split_folios, and
    1643             :                          * we will migrate them after the rest of the
    1644             :                          * list is processed.
    1645             :                          */
    1646             :                         if (!thp_migration_supported() && is_thp) {
    1647             :                                 nr_failed++;
    1648             :                                 stats->nr_thp_failed++;
    1649             :                                 if (!try_split_folio(folio, split_folios)) {
    1650             :                                         stats->nr_thp_split++;
    1651             :                                         continue;
    1652             :                                 }
    1653             :                                 stats->nr_failed_pages += nr_pages;
    1654             :                                 list_move_tail(&folio->lru, ret_folios);
    1655             :                                 continue;
    1656             :                         }
    1657             : 
    1658           0 :                         rc = migrate_folio_unmap(get_new_folio, put_new_folio,
    1659             :                                         private, folio, &dst, mode, reason,
    1660             :                                         ret_folios);
    1661             :                         /*
    1662             :                          * The rules are:
    1663             :                          *      Success: folio will be freed
    1664             :                          *      Unmap: folio will be put on unmap_folios list,
    1665             :                          *             dst folio put on dst_folios list
    1666             :                          *      -EAGAIN: stay on the from list
    1667             :                          *      -ENOMEM: stay on the from list
    1668             :                          *      Other errno: put on ret_folios list
    1669             :                          */
    1670           0 :                         switch(rc) {
    1671             :                         case -ENOMEM:
    1672             :                                 /*
    1673             :                                  * When memory is low, don't bother to try to migrate
    1674             :                                  * other folios, move unmapped folios, then exit.
    1675             :                                  */
    1676           0 :                                 nr_failed++;
    1677           0 :                                 stats->nr_thp_failed += is_thp;
     1678             :                                 /* Large folios from NUMA faulting are not split before retrying. */
    1679           0 :                                 if (folio_test_large(folio) && !nosplit) {
    1680           0 :                                         int ret = try_split_folio(folio, split_folios);
    1681             : 
    1682           0 :                                         if (!ret) {
    1683             :                                                 stats->nr_thp_split += is_thp;
    1684             :                                                 break;
    1685           0 :                                         } else if (reason == MR_LONGTERM_PIN &&
    1686           0 :                                                    ret == -EAGAIN) {
    1687             :                                                 /*
    1688             :                                                  * Try again to split large folio to
    1689             :                                                  * mitigate the failure of longterm pinning.
    1690             :                                                  */
    1691           0 :                                                 retry++;
    1692           0 :                                                 thp_retry += is_thp;
    1693           0 :                                                 nr_retry_pages += nr_pages;
    1694             :                                                 /* Undo duplicated failure counting. */
    1695           0 :                                                 nr_failed--;
    1696             :                                                 stats->nr_thp_failed -= is_thp;
    1697           0 :                                                 break;
    1698             :                                         }
    1699             :                                 }
    1700             : 
    1701           0 :                                 stats->nr_failed_pages += nr_pages + nr_retry_pages;
     1702             :                                 /* nr_failed isn't updated: rc_saved is returned instead */
    1703             :                                 stats->nr_thp_failed += thp_retry;
    1704           0 :                                 rc_saved = rc;
    1705           0 :                                 if (list_empty(&unmap_folios))
    1706             :                                         goto out;
    1707             :                                 else
    1708             :                                         goto move;
    1709             :                         case -EAGAIN:
    1710           0 :                                 retry++;
    1711           0 :                                 thp_retry += is_thp;
    1712           0 :                                 nr_retry_pages += nr_pages;
    1713           0 :                                 break;
    1714             :                         case MIGRATEPAGE_SUCCESS:
    1715           0 :                                 stats->nr_succeeded += nr_pages;
    1716             :                                 stats->nr_thp_succeeded += is_thp;
    1717           0 :                                 break;
    1718             :                         case MIGRATEPAGE_UNMAP:
    1719           0 :                                 list_move_tail(&folio->lru, &unmap_folios);
    1720           0 :                                 list_add_tail(&dst->lru, &dst_folios);
    1721             :                                 break;
    1722             :                         default:
    1723             :                                 /*
    1724             :                                  * Permanent failure (-EBUSY, etc.):
    1725             :                                  * unlike -EAGAIN case, the failed folio is
    1726             :                                  * removed from migration folio list and not
    1727             :                                  * retried in the next outer loop.
    1728             :                                  */
    1729           0 :                                 nr_failed++;
    1730             :                                 stats->nr_thp_failed += is_thp;
    1731           0 :                                 stats->nr_failed_pages += nr_pages;
    1732           0 :                                 break;
    1733             :                         }
    1734             :                 }
    1735             :         }
    1736           0 :         nr_failed += retry;
    1737           0 :         stats->nr_thp_failed += thp_retry;
    1738           0 :         stats->nr_failed_pages += nr_retry_pages;
    1739             : move:
    1740             :         /* Flush TLBs for all unmapped folios */
    1741             :         try_to_unmap_flush();
    1742             : 
    1743           0 :         retry = 1;
    1744           0 :         for (pass = 0; pass < nr_pass && retry; pass++) {
    1745           0 :                 retry = 0;
    1746           0 :                 thp_retry = 0;
    1747           0 :                 nr_retry_pages = 0;
    1748             : 
    1749           0 :                 dst = list_first_entry(&dst_folios, struct folio, lru);
    1750           0 :                 dst2 = list_next_entry(dst, lru);
    1751           0 :                 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
    1752           0 :                         is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
    1753           0 :                         nr_pages = folio_nr_pages(folio);
    1754             : 
    1755           0 :                         cond_resched();
    1756             : 
    1757           0 :                         rc = migrate_folio_move(put_new_folio, private,
    1758             :                                                 folio, dst, mode,
    1759             :                                                 reason, ret_folios);
    1760             :                         /*
    1761             :                          * The rules are:
    1762             :                          *      Success: folio will be freed
    1763             :                          *      -EAGAIN: stay on the unmap_folios list
    1764             :                          *      Other errno: put on ret_folios list
    1765             :                          */
    1766           0 :                         switch(rc) {
    1767             :                         case -EAGAIN:
    1768           0 :                                 retry++;
    1769           0 :                                 thp_retry += is_thp;
    1770           0 :                                 nr_retry_pages += nr_pages;
    1771           0 :                                 break;
    1772             :                         case MIGRATEPAGE_SUCCESS:
    1773           0 :                                 stats->nr_succeeded += nr_pages;
    1774             :                                 stats->nr_thp_succeeded += is_thp;
    1775           0 :                                 break;
    1776             :                         default:
    1777           0 :                                 nr_failed++;
    1778             :                                 stats->nr_thp_failed += is_thp;
    1779           0 :                                 stats->nr_failed_pages += nr_pages;
    1780           0 :                                 break;
    1781             :                         }
    1782           0 :                         dst = dst2;
    1783           0 :                         dst2 = list_next_entry(dst, lru);
    1784             :                 }
    1785             :         }
    1786           0 :         nr_failed += retry;
    1787           0 :         stats->nr_thp_failed += thp_retry;
    1788           0 :         stats->nr_failed_pages += nr_retry_pages;
    1789             : 
    1790           0 :         rc = rc_saved ? : nr_failed;
    1791             : out:
     1792             :         /* Clean up remaining folios */
    1793           0 :         dst = list_first_entry(&dst_folios, struct folio, lru);
    1794           0 :         dst2 = list_next_entry(dst, lru);
    1795           0 :         list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
    1796           0 :                 int page_was_mapped = 0;
    1797           0 :                 struct anon_vma *anon_vma = NULL;
    1798             : 
    1799           0 :                 __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
    1800           0 :                 migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
    1801             :                                        true, ret_folios);
    1802           0 :                 list_del(&dst->lru);
    1803           0 :                 migrate_folio_undo_dst(dst, true, put_new_folio, private);
    1804           0 :                 dst = dst2;
    1805           0 :                 dst2 = list_next_entry(dst, lru);
    1806             :         }
    1807             : 
    1808           0 :         return rc;
    1809             : }
    1810             : 
    1811           0 : static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
    1812             :                 free_folio_t put_new_folio, unsigned long private,
    1813             :                 enum migrate_mode mode, int reason,
    1814             :                 struct list_head *ret_folios, struct list_head *split_folios,
    1815             :                 struct migrate_pages_stats *stats)
    1816             : {
    1817           0 :         int rc, nr_failed = 0;
    1818           0 :         LIST_HEAD(folios);
    1819             :         struct migrate_pages_stats astats;
    1820             : 
    1821           0 :         memset(&astats, 0, sizeof(astats));
     1822             :         /* First, try to migrate in batch with MIGRATE_ASYNC mode */
    1823           0 :         rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
    1824             :                                  reason, &folios, split_folios, &astats,
    1825             :                                  NR_MAX_MIGRATE_ASYNC_RETRY);
    1826           0 :         stats->nr_succeeded += astats.nr_succeeded;
    1827           0 :         stats->nr_thp_succeeded += astats.nr_thp_succeeded;
    1828           0 :         stats->nr_thp_split += astats.nr_thp_split;
    1829           0 :         if (rc < 0) {
    1830           0 :                 stats->nr_failed_pages += astats.nr_failed_pages;
    1831           0 :                 stats->nr_thp_failed += astats.nr_thp_failed;
    1832             :                 list_splice_tail(&folios, ret_folios);
    1833             :                 return rc;
    1834             :         }
    1835           0 :         stats->nr_thp_failed += astats.nr_thp_split;
    1836           0 :         nr_failed += astats.nr_thp_split;
    1837             :         /*
     1838             :          * Fall back to migrating the failed folios one by one synchronously.
     1839             :          * All failed folios except split THPs will be retried, so their
     1840             :          * earlier failure isn't counted.
    1841             :          */
    1842             :         list_splice_tail_init(&folios, from);
    1843           0 :         while (!list_empty(from)) {
    1844           0 :                 list_move(from->next, &folios);
    1845           0 :                 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
    1846             :                                          private, mode, reason, ret_folios,
    1847             :                                          split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
    1848           0 :                 list_splice_tail_init(&folios, ret_folios);
    1849           0 :                 if (rc < 0)
    1850             :                         return rc;
    1851           0 :                 nr_failed += rc;
    1852             :         }
    1853             : 
    1854             :         return nr_failed;
    1855             : }
    1856             : 
    1857             : /*
    1858             :  * migrate_pages - migrate the folios specified in a list, to the free folios
    1859             :  *                 supplied as the target for the page migration
    1860             :  *
    1861             :  * @from:               The list of folios to be migrated.
    1862             :  * @get_new_folio:      The function used to allocate free folios to be used
    1863             :  *                      as the target of the folio migration.
    1864             :  * @put_new_folio:      The function used to free target folios if migration
    1865             :  *                      fails, or NULL if no special handling is necessary.
    1866             :  * @private:            Private data to be passed on to get_new_folio()
    1867             :  * @mode:               The migration mode that specifies the constraints for
    1868             :  *                      folio migration, if any.
    1869             :  * @reason:             The reason for folio migration.
    1870             :  * @ret_succeeded:      Set to the number of folios migrated successfully if
    1871             :  *                      the caller passes a non-NULL pointer.
    1872             :  *
    1873             :  * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts, or once no
    1874             :  * folios are movable any more because the list has become empty or contains
    1875             :  * no more retryable folios. It is the caller's responsibility to call
    1876             :  * putback_movable_pages() only if ret != 0.
    1877             :  *
    1878             :  * Returns the number of {normal, large, hugetlb} folios that were not
    1879             :  * migrated, or an error code. Each large folio that had to be split is
    1880             :  * counted as one non-migrated large folio, no matter how many of its split
    1881             :  * folios were migrated successfully.
    1882             :  */
    1883           0 : int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
    1884             :                 free_folio_t put_new_folio, unsigned long private,
    1885             :                 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
    1886             : {
    1887             :         int rc, rc_gather;
    1888             :         int nr_pages;
    1889             :         struct folio *folio, *folio2;
    1890           0 :         LIST_HEAD(folios);
    1891           0 :         LIST_HEAD(ret_folios);
    1892           0 :         LIST_HEAD(split_folios);
    1893             :         struct migrate_pages_stats stats;
    1894             : 
    1895           0 :         trace_mm_migrate_pages_start(mode, reason);
    1896             : 
    1897           0 :         memset(&stats, 0, sizeof(stats));
    1898             : 
    1899           0 :         rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
    1900             :                                      mode, reason, &stats, &ret_folios);
    1901             :         if (rc_gather < 0)
    1902             :                 goto out;
    1903             : 
    1904             : again:
    1905           0 :         nr_pages = 0;
    1906           0 :         list_for_each_entry_safe(folio, folio2, from, lru) {
    1907             :                 /* Retried hugetlb folios will be kept in the list */
    1908           0 :                 if (folio_test_hugetlb(folio)) {
    1909             :                         list_move_tail(&folio->lru, &ret_folios);
    1910             :                         continue;
    1911             :                 }
    1912             : 
    1913           0 :                 nr_pages += folio_nr_pages(folio);
    1914           0 :                 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
    1915             :                         break;
    1916             :         }
    1917           0 :         if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
    1918           0 :                 list_cut_before(&folios, from, &folio2->lru);
    1919             :         else
    1920             :                 list_splice_init(from, &folios);
    1921           0 :         if (mode == MIGRATE_ASYNC)
    1922           0 :                 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
    1923             :                                 private, mode, reason, &ret_folios,
    1924             :                                 &split_folios, &stats,
    1925             :                                 NR_MAX_MIGRATE_PAGES_RETRY);
    1926             :         else
    1927           0 :                 rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
    1928             :                                 private, mode, reason, &ret_folios,
    1929             :                                 &split_folios, &stats);
    1930           0 :         list_splice_tail_init(&folios, &ret_folios);
    1931           0 :         if (rc < 0) {
    1932           0 :                 rc_gather = rc;
    1933             :                 list_splice_tail(&split_folios, &ret_folios);
    1934             :                 goto out;
    1935             :         }
    1936           0 :         if (!list_empty(&split_folios)) {
    1937             :                 /*
    1938             :                  * Failure isn't counted since all split folios of a large folio
    1939             :                  * are counted as 1 failure already.  And we only try to migrate
    1940             :                  * with minimal effort: force MIGRATE_ASYNC mode and retry once.
    1941             :                  */
    1942           0 :                 migrate_pages_batch(&split_folios, get_new_folio,
    1943             :                                 put_new_folio, private, MIGRATE_ASYNC, reason,
    1944             :                                 &ret_folios, NULL, &stats, 1);
    1945             :                 list_splice_tail_init(&split_folios, &ret_folios);
    1946             :         }
    1947           0 :         rc_gather += rc;
    1948           0 :         if (!list_empty(from))
    1949             :                 goto again;
    1950             : out:
    1951             :         /*
    1952             :          * Put the permanently failed folios back on the migration list; they
    1953             :          * will be moved to the right list by the caller.
    1954             :          */
    1955           0 :         list_splice(&ret_folios, from);
    1956             : 
    1957             :         /*
    1958             :          * Return 0 in case all split folios of fail-to-migrate large folios
    1959             :          * are migrated successfully.
    1960             :          */
    1961           0 :         if (list_empty(from))
    1962           0 :                 rc_gather = 0;
    1963             : 
    1964           0 :         count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
    1965           0 :         count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
    1966           0 :         count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
    1967           0 :         count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
    1968           0 :         count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
    1969           0 :         trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
    1970           0 :                                stats.nr_thp_succeeded, stats.nr_thp_failed,
    1971           0 :                                stats.nr_thp_split, mode, reason);
    1972             : 
    1973           0 :         if (ret_succeeded)
    1974           0 :                 *ret_succeeded = stats.nr_succeeded;
    1975             : 
    1976           0 :         return rc_gather;
    1977             : }
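
A minimal caller sketch of the contract documented above, loosely modeled on
other in-kernel users of migrate_pages(); `pagelist` is assumed to be already
populated by an isolation step, and the gfp mask, mode and reason are
illustrative picks from this file, not a prescribed combination:

        static int demo_migrate_list(struct list_head *pagelist, int target_nid)
        {
                struct migration_target_control mtc = {
                        .nid = target_nid,
                        .gfp_mask = GFP_HIGHUSER_MOVABLE,
                };
                unsigned int nr_succeeded = 0;
                int nr_failed;

                nr_failed = migrate_pages(pagelist, alloc_migration_target, NULL,
                                          (unsigned long)&mtc, MIGRATE_SYNC,
                                          MR_SYSCALL, &nr_succeeded);
                /*
                 * Per the kernel-doc above, a non-zero return leaves folios on
                 * the list and the caller must put them back itself.
                 */
                if (nr_failed)
                        putback_movable_pages(pagelist);
                return nr_failed < 0 ? nr_failed : (nr_failed ? -EBUSY : 0);
        }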
    1978             : 
    1979           0 : struct folio *alloc_migration_target(struct folio *src, unsigned long private)
    1980             : {
    1981             :         struct migration_target_control *mtc;
    1982             :         gfp_t gfp_mask;
    1983           0 :         unsigned int order = 0;
    1984             :         int nid;
    1985             :         int zidx;
    1986             : 
    1987           0 :         mtc = (struct migration_target_control *)private;
    1988           0 :         gfp_mask = mtc->gfp_mask;
    1989           0 :         nid = mtc->nid;
    1990           0 :         if (nid == NUMA_NO_NODE)
    1991           0 :                 nid = folio_nid(src);
    1992             : 
    1993           0 :         if (folio_test_hugetlb(src)) {
    1994             :                 struct hstate *h = folio_hstate(src);
    1995             : 
    1996             :                 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
    1997             :                 return alloc_hugetlb_folio_nodemask(h, nid,
    1998             :                                                 mtc->nmask, gfp_mask);
    1999             :         }
    2000             : 
    2001           0 :         if (folio_test_large(src)) {
    2002             :                 /*
    2003             :                  * clear __GFP_RECLAIM to make the migration callback
    2004             :                  * consistent with regular THP allocations.
    2005             :                  */
    2006           0 :                 gfp_mask &= ~__GFP_RECLAIM;
    2007           0 :                 gfp_mask |= GFP_TRANSHUGE;
    2008             :                 order = folio_order(src);
    2009             :         }
    2010           0 :         zidx = zone_idx(folio_zone(src));
    2011           0 :         if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
    2012           0 :                 gfp_mask |= __GFP_HIGHMEM;
    2013             : 
    2014           0 :         return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
    2015             : }
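
As a hedged illustration of steering this callback through the `private`
argument, the sketch below builds a migration_target_control in the style of
the memory-offlining path; the gfp mask and the `allowed` nodemask are
assumptions made for the example:

        static struct folio *demo_alloc_target(struct folio *src,
                                               nodemask_t *allowed)
        {
                struct migration_target_control mtc = {
                        /* NUMA_NO_NODE lets the code above fall back to
                         * folio_nid(src), i.e. stay near the source. */
                        .nid = NUMA_NO_NODE,
                        .nmask = allowed,
                        .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
                };

                return alloc_migration_target(src, (unsigned long)&mtc);
        }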
    2016             : 
    2017             : #ifdef CONFIG_NUMA
    2018             : 
    2019             : static int store_status(int __user *status, int start, int value, int nr)
    2020             : {
    2021             :         while (nr-- > 0) {
    2022             :                 if (put_user(value, status + start))
    2023             :                         return -EFAULT;
    2024             :                 start++;
    2025             :         }
    2026             : 
    2027             :         return 0;
    2028             : }
    2029             : 
    2030             : static int do_move_pages_to_node(struct mm_struct *mm,
    2031             :                 struct list_head *pagelist, int node)
    2032             : {
    2033             :         int err;
    2034             :         struct migration_target_control mtc = {
    2035             :                 .nid = node,
    2036             :                 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
    2037             :         };
    2038             : 
    2039             :         err = migrate_pages(pagelist, alloc_migration_target, NULL,
    2040             :                 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
    2041             :         if (err)
    2042             :                 putback_movable_pages(pagelist);
    2043             :         return err;
    2044             : }
    2045             : 
    2046             : /*
    2047             :  * Resolves the given address to a struct page, isolates it from the LRU and
    2048             :  * puts it on the given pagelist.
    2049             :  * Returns:
    2050             :  *     errno - if the page cannot be found/isolated
    2051             :  *     0 - when it doesn't have to be migrated because it is already on the
    2052             :  *         target node
    2053             :  *     1 - when it has been queued
    2054             :  */
    2055             : static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
    2056             :                 int node, struct list_head *pagelist, bool migrate_all)
    2057             : {
    2058             :         struct vm_area_struct *vma;
    2059             :         unsigned long addr;
    2060             :         struct page *page;
    2061             :         int err;
    2062             :         bool isolated;
    2063             : 
    2064             :         mmap_read_lock(mm);
    2065             :         addr = (unsigned long)untagged_addr_remote(mm, p);
    2066             : 
    2067             :         err = -EFAULT;
    2068             :         vma = vma_lookup(mm, addr);
    2069             :         if (!vma || !vma_migratable(vma))
    2070             :                 goto out;
    2071             : 
    2072             :         /* FOLL_DUMP to ignore special (like zero) pages */
    2073             :         page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
    2074             : 
    2075             :         err = PTR_ERR(page);
    2076             :         if (IS_ERR(page))
    2077             :                 goto out;
    2078             : 
    2079             :         err = -ENOENT;
    2080             :         if (!page)
    2081             :                 goto out;
    2082             : 
    2083             :         if (is_zone_device_page(page))
    2084             :                 goto out_putpage;
    2085             : 
    2086             :         err = 0;
    2087             :         if (page_to_nid(page) == node)
    2088             :                 goto out_putpage;
    2089             : 
    2090             :         err = -EACCES;
    2091             :         if (page_mapcount(page) > 1 && !migrate_all)
    2092             :                 goto out_putpage;
    2093             : 
    2094             :         if (PageHuge(page)) {
    2095             :                 if (PageHead(page)) {
    2096             :                         isolated = isolate_hugetlb(page_folio(page), pagelist);
    2097             :                         err = isolated ? 1 : -EBUSY;
    2098             :                 }
    2099             :         } else {
    2100             :                 struct page *head;
    2101             : 
    2102             :                 head = compound_head(page);
    2103             :                 isolated = isolate_lru_page(head);
    2104             :                 if (!isolated) {
    2105             :                         err = -EBUSY;
    2106             :                         goto out_putpage;
    2107             :                 }
    2108             : 
    2109             :                 err = 1;
    2110             :                 list_add_tail(&head->lru, pagelist);
    2111             :                 mod_node_page_state(page_pgdat(head),
    2112             :                         NR_ISOLATED_ANON + page_is_file_lru(head),
    2113             :                         thp_nr_pages(head));
    2114             :         }
    2115             : out_putpage:
    2116             :         /*
    2117             :          * Either drop the extra reference taken by
    2118             :          * isolate_lru_page() or drop the page ref if it was
    2119             :          * not isolated.
    2120             :          */
    2121             :         put_page(page);
    2122             : out:
    2123             :         mmap_read_unlock(mm);
    2124             :         return err;
    2125             : }
    2126             : 
    2127             : static int move_pages_and_store_status(struct mm_struct *mm, int node,
    2128             :                 struct list_head *pagelist, int __user *status,
    2129             :                 int start, int i, unsigned long nr_pages)
    2130             : {
    2131             :         int err;
    2132             : 
    2133             :         if (list_empty(pagelist))
    2134             :                 return 0;
    2135             : 
    2136             :         err = do_move_pages_to_node(mm, pagelist, node);
    2137             :         if (err) {
    2138             :                 /*
    2139             :                  * A positive err means the number of pages that
    2140             :                  * failed to migrate.  Since we are going to
    2141             :                  * abort and return the number of non-migrated
    2142             :                  * pages, we need to include the rest of the
    2143             :                  * nr_pages that have not been attempted as
    2144             :                  * well.
    2145             :                  */
    2146             :                 if (err > 0)
    2147             :                         err += nr_pages - i;
    2148             :                 return err;
    2149             :         }
    2150             :         return store_status(status, start, node, i - start);
    2151             : }
    2152             : 
    2153             : /*
    2154             :  * Migrate an array of page addresses onto an array of nodes and fill
    2155             :  * in the corresponding status array.
    2156             :  */
    2157             : static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
    2158             :                          unsigned long nr_pages,
    2159             :                          const void __user * __user *pages,
    2160             :                          const int __user *nodes,
    2161             :                          int __user *status, int flags)
    2162             : {
    2163             :         int current_node = NUMA_NO_NODE;
    2164             :         LIST_HEAD(pagelist);
    2165             :         int start, i;
    2166             :         int err = 0, err1;
    2167             : 
    2168             :         lru_cache_disable();
    2169             : 
    2170             :         for (i = start = 0; i < nr_pages; i++) {
    2171             :                 const void __user *p;
    2172             :                 int node;
    2173             : 
    2174             :                 err = -EFAULT;
    2175             :                 if (get_user(p, pages + i))
    2176             :                         goto out_flush;
    2177             :                 if (get_user(node, nodes + i))
    2178             :                         goto out_flush;
    2179             : 
    2180             :                 err = -ENODEV;
    2181             :                 if (node < 0 || node >= MAX_NUMNODES)
    2182             :                         goto out_flush;
    2183             :                 if (!node_state(node, N_MEMORY))
    2184             :                         goto out_flush;
    2185             : 
    2186             :                 err = -EACCES;
    2187             :                 if (!node_isset(node, task_nodes))
    2188             :                         goto out_flush;
    2189             : 
    2190             :                 if (current_node == NUMA_NO_NODE) {
    2191             :                         current_node = node;
    2192             :                         start = i;
    2193             :                 } else if (node != current_node) {
    2194             :                         err = move_pages_and_store_status(mm, current_node,
    2195             :                                         &pagelist, status, start, i, nr_pages);
    2196             :                         if (err)
    2197             :                                 goto out;
    2198             :                         start = i;
    2199             :                         current_node = node;
    2200             :                 }
    2201             : 
    2202             :                 /*
    2203             :                  * Errors in the page lookup or isolation are not fatal and we simply
    2204             :                  * report them via status.
    2205             :                  */
    2206             :                 err = add_page_for_migration(mm, p, current_node, &pagelist,
    2207             :                                              flags & MPOL_MF_MOVE_ALL);
    2208             : 
    2209             :                 if (err > 0) {
    2210             :                         /* The page is successfully queued for migration */
    2211             :                         continue;
    2212             :                 }
    2213             : 
    2214             :                 /*
    2215             :                  * The move_pages() man page does not have an -EEXIST choice, so
    2216             :                  * use -EFAULT instead.
    2217             :                  */
    2218             :                 if (err == -EEXIST)
    2219             :                         err = -EFAULT;
    2220             : 
    2221             :                 /*
    2222             :                  * If the page is already on the target node (!err), store the
    2223             :                  * node, otherwise, store the err.
    2224             :                  */
    2225             :                 err = store_status(status, i, err ? : current_node, 1);
    2226             :                 if (err)
    2227             :                         goto out_flush;
    2228             : 
    2229             :                 err = move_pages_and_store_status(mm, current_node, &pagelist,
    2230             :                                 status, start, i, nr_pages);
    2231             :                 if (err) {
    2232             :                         /* We have accounted for page i */
    2233             :                         if (err > 0)
    2234             :                                 err--;
    2235             :                         goto out;
    2236             :                 }
    2237             :                 current_node = NUMA_NO_NODE;
    2238             :         }
    2239             : out_flush:
    2240             :         /* Make sure we do not overwrite the existing error */
    2241             :         err1 = move_pages_and_store_status(mm, current_node, &pagelist,
    2242             :                                 status, start, i, nr_pages);
    2243             :         if (err >= 0)
    2244             :                 err = err1;
    2245             : out:
    2246             :         lru_cache_enable();
    2247             :         return err;
    2248             : }
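
For reference, a self-contained userspace sketch that exercises this path via
the libnuma wrapper declared in <numaif.h> (link with -lnuma). A pid of 0
means "the calling process"; the single page and target node 0 are
assumptions made for the example:

        #include <numaif.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
        #include <unistd.h>

        int main(void)
        {
                long psz = sysconf(_SC_PAGESIZE);
                void *buf;

                if (posix_memalign(&buf, psz, psz))
                        return 1;
                memset(buf, 0, psz);            /* fault the page in first */

                void *pages[1] = { buf };
                int nodes[1] = { 0 };           /* target node (assumption) */
                int status[1];

                /*
                 * MPOL_MF_MOVE moves only pages mapped solely by this
                 * process; see the page_mapcount()/-EACCES check above.
                 */
                if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE))
                        perror("move_pages");
                else
                        printf("status[0] = %d (node id, or negative errno)\n",
                               status[0]);

                free(buf);
                return 0;
        }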
    2249             : 
    2250             : /*
    2251             :  * Determine the nodes of an array of pages and store them in a status array.
    2252             :  */
    2253             : static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
    2254             :                                 const void __user **pages, int *status)
    2255             : {
    2256             :         unsigned long i;
    2257             : 
    2258             :         mmap_read_lock(mm);
    2259             : 
    2260             :         for (i = 0; i < nr_pages; i++) {
    2261             :                 unsigned long addr = (unsigned long)(*pages);
    2262             :                 struct vm_area_struct *vma;
    2263             :                 struct page *page;
    2264             :                 int err = -EFAULT;
    2265             : 
    2266             :                 vma = vma_lookup(mm, addr);
    2267             :                 if (!vma)
    2268             :                         goto set_status;
    2269             : 
    2270             :                 /* FOLL_DUMP to ignore special (like zero) pages */
    2271             :                 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
    2272             : 
    2273             :                 err = PTR_ERR(page);
    2274             :                 if (IS_ERR(page))
    2275             :                         goto set_status;
    2276             : 
    2277             :                 err = -ENOENT;
    2278             :                 if (!page)
    2279             :                         goto set_status;
    2280             : 
    2281             :                 if (!is_zone_device_page(page))
    2282             :                         err = page_to_nid(page);
    2283             : 
    2284             :                 put_page(page);
    2285             : set_status:
    2286             :                 *status = err;
    2287             : 
    2288             :                 pages++;
    2289             :                 status++;
    2290             :         }
    2291             : 
    2292             :         mmap_read_unlock(mm);
    2293             : }
    2294             : 
    2295             : static int get_compat_pages_array(const void __user *chunk_pages[],
    2296             :                                   const void __user * __user *pages,
    2297             :                                   unsigned long chunk_nr)
    2298             : {
    2299             :         compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
    2300             :         compat_uptr_t p;
    2301             :         int i;
    2302             : 
    2303             :         for (i = 0; i < chunk_nr; i++) {
    2304             :                 if (get_user(p, pages32 + i))
    2305             :                         return -EFAULT;
    2306             :                 chunk_pages[i] = compat_ptr(p);
    2307             :         }
    2308             : 
    2309             :         return 0;
    2310             : }
    2311             : 
    2312             : /*
    2313             :  * Determine the nodes of a user array of pages and store them in
    2314             :  * a user status array.
    2315             :  */
    2316             : static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
    2317             :                          const void __user * __user *pages,
    2318             :                          int __user *status)
    2319             : {
    2320             : #define DO_PAGES_STAT_CHUNK_NR 16UL
    2321             :         const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
    2322             :         int chunk_status[DO_PAGES_STAT_CHUNK_NR];
    2323             : 
    2324             :         while (nr_pages) {
    2325             :                 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
    2326             : 
    2327             :                 if (in_compat_syscall()) {
    2328             :                         if (get_compat_pages_array(chunk_pages, pages,
    2329             :                                                    chunk_nr))
    2330             :                                 break;
    2331             :                 } else {
    2332             :                         if (copy_from_user(chunk_pages, pages,
    2333             :                                       chunk_nr * sizeof(*chunk_pages)))
    2334             :                                 break;
    2335             :                 }
    2336             : 
    2337             :                 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
    2338             : 
    2339             :                 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
    2340             :                         break;
    2341             : 
    2342             :                 pages += chunk_nr;
    2343             :                 status += chunk_nr;
    2344             :                 nr_pages -= chunk_nr;
    2345             :         }
    2346             :         return nr_pages ? -EFAULT : 0;
    2347             : }
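
Passing nodes == NULL from userspace selects this query-only path. A small
hedged helper (assuming the same <numaif.h> setup as the previous sketch and
an address whose page has already been faulted in):

        /* Returns the NUMA node backing addr, or a negative errno copied
         * from the kernel's per-page status reporting above. */
        static int node_of_address(void *addr)
        {
                void *pages[1] = { addr };
                int status[1] = { -1 };

                if (move_pages(0, 1, pages, NULL, status, 0))
                        return -1;      /* syscall-level failure */
                return status[0];
        }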
    2348             : 
    2349             : static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
    2350             : {
    2351             :         struct task_struct *task;
    2352             :         struct mm_struct *mm;
    2353             : 
    2354             :         /*
    2355             :          * There is no need to check whether the current process has the right
    2356             :          * to modify the specified process when they are the same.
    2357             :          */
    2358             :         if (!pid) {
    2359             :                 mmget(current->mm);
    2360             :                 *mem_nodes = cpuset_mems_allowed(current);
    2361             :                 return current->mm;
    2362             :         }
    2363             : 
    2364             :         /* Find the mm_struct */
    2365             :         rcu_read_lock();
    2366             :         task = find_task_by_vpid(pid);
    2367             :         if (!task) {
    2368             :                 rcu_read_unlock();
    2369             :                 return ERR_PTR(-ESRCH);
    2370             :         }
    2371             :         get_task_struct(task);
    2372             : 
    2373             :         /*
    2374             :          * Check if this process has the right to modify the specified
    2375             :          * process. Use the regular "ptrace_may_access()" checks.
    2376             :          */
    2377             :         if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
    2378             :                 rcu_read_unlock();
    2379             :                 mm = ERR_PTR(-EPERM);
    2380             :                 goto out;
    2381             :         }
    2382             :         rcu_read_unlock();
    2383             : 
    2384             :         mm = ERR_PTR(security_task_movememory(task));
    2385             :         if (IS_ERR(mm))
    2386             :                 goto out;
    2387             :         *mem_nodes = cpuset_mems_allowed(task);
    2388             :         mm = get_task_mm(task);
    2389             : out:
    2390             :         put_task_struct(task);
    2391             :         if (!mm)
    2392             :                 mm = ERR_PTR(-EINVAL);
    2393             :         return mm;
    2394             : }
    2395             : 
    2396             : /*
    2397             :  * Move a list of pages in the address space of the currently executing
    2398             :  * process.
    2399             :  */
    2400             : static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
    2401             :                              const void __user * __user *pages,
    2402             :                              const int __user *nodes,
    2403             :                              int __user *status, int flags)
    2404             : {
    2405             :         struct mm_struct *mm;
    2406             :         int err;
    2407             :         nodemask_t task_nodes;
    2408             : 
    2409             :         /* Check flags */
    2410             :         if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
    2411             :                 return -EINVAL;
    2412             : 
    2413             :         if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
    2414             :                 return -EPERM;
    2415             : 
    2416             :         mm = find_mm_struct(pid, &task_nodes);
    2417             :         if (IS_ERR(mm))
    2418             :                 return PTR_ERR(mm);
    2419             : 
    2420             :         if (nodes)
    2421             :                 err = do_pages_move(mm, task_nodes, nr_pages, pages,
    2422             :                                     nodes, status, flags);
    2423             :         else
    2424             :                 err = do_pages_stat(mm, nr_pages, pages, status);
    2425             : 
    2426             :         mmput(mm);
    2427             :         return err;
    2428             : }
    2429             : 
    2430             : SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
    2431             :                 const void __user * __user *, pages,
    2432             :                 const int __user *, nodes,
    2433             :                 int __user *, status, int, flags)
    2434             : {
    2435             :         return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
    2436             : }
    2437             : 
    2438             : #ifdef CONFIG_NUMA_BALANCING
    2439             : /*
    2440             :  * Returns true if this is a safe migration target node for misplaced NUMA
    2441             :  * pages. Currently it only checks the watermarks, which is crude.
    2442             :  */
    2443             : static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
    2444             :                                    unsigned long nr_migrate_pages)
    2445             : {
    2446             :         int z;
    2447             : 
    2448             :         for (z = pgdat->nr_zones - 1; z >= 0; z--) {
    2449             :                 struct zone *zone = pgdat->node_zones + z;
    2450             : 
    2451             :                 if (!managed_zone(zone))
    2452             :                         continue;
    2453             : 
    2454             :                 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
    2455             :                 if (!zone_watermark_ok(zone, 0,
    2456             :                                        high_wmark_pages(zone) +
    2457             :                                        nr_migrate_pages,
    2458             :                                        ZONE_MOVABLE, 0))
    2459             :                         continue;
    2460             :                 return true;
    2461             :         }
    2462             :         return false;
    2463             : }
    2464             : 
    2465             : static struct folio *alloc_misplaced_dst_folio(struct folio *src,
    2466             :                                            unsigned long data)
    2467             : {
    2468             :         int nid = (int) data;
    2469             :         int order = folio_order(src);
    2470             :         gfp_t gfp = __GFP_THISNODE;
    2471             : 
    2472             :         if (order > 0)
    2473             :                 gfp |= GFP_TRANSHUGE_LIGHT;
    2474             :         else {
    2475             :                 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
    2476             :                         __GFP_NOWARN;
    2477             :                 gfp &= ~__GFP_RECLAIM;
    2478             :         }
    2479             :         return __folio_alloc_node(gfp, order, nid);
    2480             : }
    2481             : 
    2482             : static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
    2483             : {
    2484             :         int nr_pages = thp_nr_pages(page);
    2485             :         int order = compound_order(page);
    2486             : 
    2487             :         VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
    2488             : 
    2489             :         /* Do not migrate THP mapped by multiple processes */
    2490             :         if (PageTransHuge(page) && total_mapcount(page) > 1)
    2491             :                 return 0;
    2492             : 
    2493             :         /* Avoid migrating to a node that is nearly full */
    2494             :         if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
    2495             :                 int z;
    2496             : 
    2497             :                 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
    2498             :                         return 0;
    2499             :                 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
    2500             :                         if (managed_zone(pgdat->node_zones + z))
    2501             :                                 break;
    2502             :                 }
    2503             :                 wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
    2504             :                 return 0;
    2505             :         }
    2506             : 
    2507             :         if (!isolate_lru_page(page))
    2508             :                 return 0;
    2509             : 
    2510             :         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
    2511             :                             nr_pages);
    2512             : 
    2513             :         /*
    2514             :          * Isolating the page has taken another reference, so the
    2515             :          * caller's reference can be safely dropped without the page
    2516             :          * disappearing underneath us during migration.
    2517             :          */
    2518             :         put_page(page);
    2519             :         return 1;
    2520             : }
    2521             : 
    2522             : /*
    2523             :  * Attempt to migrate a misplaced page to the specified destination
    2524             :  * node. The caller is expected to have an elevated reference count on
    2525             :  * the page that will be dropped by this function before returning.
    2526             :  */
    2527             : int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
    2528             :                            int node)
    2529             : {
    2530             :         pg_data_t *pgdat = NODE_DATA(node);
    2531             :         int isolated;
    2532             :         int nr_remaining;
    2533             :         unsigned int nr_succeeded;
    2534             :         LIST_HEAD(migratepages);
    2535             :         int nr_pages = thp_nr_pages(page);
    2536             : 
    2537             :         /*
    2538             :          * Don't migrate file pages that are mapped in multiple processes
    2539             :          * with execute permissions as they are probably shared libraries.
    2540             :          */
    2541             :         if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
    2542             :             (vma->vm_flags & VM_EXEC))
    2543             :                 goto out;
    2544             : 
    2545             :         /*
    2546             :          * Also do not migrate dirty pages as not all filesystems can move
    2547             :          * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
    2548             :          */
    2549             :         if (page_is_file_lru(page) && PageDirty(page))
    2550             :                 goto out;
    2551             : 
    2552             :         isolated = numamigrate_isolate_page(pgdat, page);
    2553             :         if (!isolated)
    2554             :                 goto out;
    2555             : 
    2556             :         list_add(&page->lru, &migratepages);
    2557             :         nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
    2558             :                                      NULL, node, MIGRATE_ASYNC,
    2559             :                                      MR_NUMA_MISPLACED, &nr_succeeded);
    2560             :         if (nr_remaining) {
    2561             :                 if (!list_empty(&migratepages)) {
    2562             :                         list_del(&page->lru);
    2563             :                         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
    2564             :                                         page_is_file_lru(page), -nr_pages);
    2565             :                         putback_lru_page(page);
    2566             :                 }
    2567             :                 isolated = 0;
    2568             :         }
    2569             :         if (nr_succeeded) {
    2570             :                 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
    2571             :                 if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
    2572             :                         mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
    2573             :                                             nr_succeeded);
    2574             :         }
    2575             :         BUG_ON(!list_empty(&migratepages));
    2576             :         return isolated;
    2577             : 
    2578             : out:
    2579             :         put_page(page);
    2580             :         return 0;
    2581             : }
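
A hedged sketch of the reference-count contract described above, shaped like a
NUMA hint-fault caller; in the real fault path the target node would come from
the memory-policy code, so both the wrapper and its arguments are illustrative:

        static void demo_hint_fault_migrate(struct page *page,
                                            struct vm_area_struct *vma,
                                            int target_nid)
        {
                /*
                 * Take the reference that migrate_misplaced_page() is
                 * documented to drop before returning, success or failure.
                 */
                get_page(page);
                migrate_misplaced_page(page, vma, target_nid);
                /* No put_page() here: our reference has been consumed. */
        }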
    2582             : #endif /* CONFIG_NUMA_BALANCING */
    2583             : #endif /* CONFIG_NUMA */
