LCOV - code coverage report
Current view: top level - mm - swap_state.c (source / functions)
Test: coverage.info | Lines: 5 of 331 hit (1.5 %) | Functions: 1 of 24 hit (4.2 %)
Date: 2023-08-24 13:40:31

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .dirty_folio    = noop_dirty_folio,
#ifdef CONFIG_MIGRATION
        .migrate_folio  = migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT       (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK       ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX        SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK        (~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)         ((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)          (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)         ((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)                                    \
        (((addr) & PAGE_MASK) |                                         \
         (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |            \
         ((hits) & SWAP_RA_HITS_MASK))
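
/*
 * Illustration of the packed layout (an editorial sketch, assuming
 * PAGE_SHIFT == 12 and hence SWAP_RA_WIN_SHIFT == 6): bits 0-5 carry
 * the hit count, bits 6-11 the readahead window, and bits 12 and up
 * the page-aligned fault address.  For example,
 * SWAP_RA_VAL(0x7f5a3000, 4, 2) packs to
 * 0x7f5a3000 | (4 << 6) | 2 == 0x7f5a3102.
 */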

/* The initial readahead hit count is 4, to start with a small window */
#define GET_SWAP_RA_VAL(vma)                                    \
        (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

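/*
 * A shadow entry is stored in the swap cache XArray as a "value" entry
 * (a tagged pointer, see xa_is_value()); it holds workingset eviction
 * information left behind by __delete_from_swap_cache().  A plain
 * pointer at the same index would be a real folio.
 */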
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        struct page *page;

        page = xa_load(&address_space->i_pages, idx);
        if (xa_is_value(page))
                return page;
        return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
                        gfp_t gfp, void **shadowp)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
        unsigned long i, nr = folio_nr_pages(folio);
        void *old;

        xas_set_update(&xas, workingset_update_node);

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

        folio_ref_add(folio, nr);
        folio_set_swapcache(folio);

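        /*
         * Retry idiom (editorial note): xas_create_range() can fail with
         * -ENOMEM while the lock is held; xas_nomem() then allocates a
         * node outside the lock and returns true to request another pass.
         */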
        do {
                xas_lock_irq(&xas);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < nr; i++) {
                        VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
                        old = xas_load(&xas);
                        if (xa_is_value(old)) {
                                if (shadowp)
                                        *shadowp = old;
                        }
                        set_page_private(folio_page(folio, i), entry.val + i);
                        xas_store(&xas, folio);
                        xas_next(&xas);
                }
                address_space->nrpages += nr;
                __node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
                __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (!xas_error(&xas))
                return 0;

        folio_clear_swapcache(folio);
        folio_ref_sub(folio, nr);
        return xas_error(&xas);
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
                        swp_entry_t entry, void *shadow)
{
        struct address_space *address_space = swap_address_space(entry);
        int i;
        long nr = folio_nr_pages(folio);
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);

        xas_set_update(&xas, workingset_update_node);

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

        for (i = 0; i < nr; i++) {
                void *entry = xas_store(&xas, shadow);
                VM_BUG_ON_PAGE(entry != folio, entry);
                set_page_private(folio_page(folio, i), 0);
                xas_next(&xas);
        }
        folio_clear_swapcache(folio);
        address_space->nrpages -= nr;
        __node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
        __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

        entry = folio_alloc_swap(folio);
        if (!entry.val)
                return false;

        /*
         * XArray node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(folio, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
        if (err)
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can
                 * safely clear the SWAP_HAS_CACHE flag.
                 */
                goto fail;
        /*
         * Normally the folio will be dirtied in unmap because its
         * pte should be dirty. A special case is an MADV_FREE page: its
         * pte could have the dirty bit cleared while the folio's
         * SwapBacked flag is still set, because clearing the dirty bit
         * and the SwapBacked flag is not done under a common lock. For
         * such a folio, unmap will not set the dirty bit, so folio
         * reclaim will not write the folio out. This can cause data
         * corruption when the folio is swapped in later. Always setting
         * the dirty flag for the folio solves the problem.
         */
        folio_mark_dirty(folio);

        return true;

fail:
        put_swap_folio(folio, entry);
        return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * as the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
        swp_entry_t entry = folio_swap_entry(folio);
        struct address_space *address_space = swap_address_space(entry);

        xa_lock_irq(&address_space->i_pages);
        __delete_from_swap_cache(folio, entry, NULL);
        xa_unlock_irq(&address_space->i_pages);

        put_swap_folio(folio, entry);
        folio_ref_sub(folio, folio_nr_pages(folio));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
                                unsigned long end)
{
        unsigned long curr = begin;
        void *old;

        for (;;) {
                swp_entry_t entry = swp_entry(type, curr);
                struct address_space *address_space = swap_address_space(entry);
                XA_STATE(xas, &address_space->i_pages, curr);

                xas_set_update(&xas, workingset_update_node);

                xa_lock_irq(&address_space->i_pages);
                xas_for_each(&xas, old, end) {
                        if (!xa_is_value(old))
                                continue;
                        xas_store(&xas, NULL);
                }
                xa_unlock_irq(&address_space->i_pages);

                /* Advance to the next swap address space until we pass end */
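                /*
                 * Illustration (assuming SWAP_ADDRESS_SPACE_SHIFT == 14,
                 * i.e. 16384 slots per swap address space): curr == 20000
                 * rounds up to ((20000 >> 14) + 1) << 14 == 32768, the
                 * start of the next chunk.
                 */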
                curr >>= SWAP_ADDRESS_SPACE_SHIFT;
                curr++;
                curr <<= SWAP_ADDRESS_SPACE_SHIFT;
                if (curr > end)
                        break;
        }
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
void free_swap_cache(struct page *page)
{
        struct folio *folio = page_folio(page);

        if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
            folio_trylock(folio)) {
                folio_free_swap(folio);
                folio_unlock(folio);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (!is_huge_zero_page(page))
                put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
        lru_add_drain();
        for (int i = 0; i < nr; i++)
                free_swap_cache(encoded_page_ptr(pages[i]));
        release_pages(pages, nr);
}

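/*
 * Editorial note: VMA-based readahead is used only when it is enabled
 * via sysfs and no rotational swap device is active (nr_rotate_swap
 * counts those); cluster readahead reads physically adjacent slots,
 * which rotating media strongly prefer.
 */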
static inline bool swap_use_vma_readahead(void)
{
        return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
                struct vm_area_struct *vma, unsigned long addr)
{
        struct folio *folio;

        folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
        if (!IS_ERR(folio)) {
                bool vma_ra = swap_use_vma_readahead();
                bool readahead;

                /*
                 * At the moment, we don't support PG_readahead for anon THP
                 * so let's bail out rather than confusing the readahead stat.
                 */
                if (unlikely(folio_test_large(folio)))
                        return folio;

                readahead = folio_test_clear_readahead(folio);
                if (vma && vma_ra) {
                        unsigned long ra_val;
                        int win, hits;

                        ra_val = GET_SWAP_RA_VAL(vma);
                        win = SWAP_RA_WIN(ra_val);
                        hits = SWAP_RA_HITS(ra_val);
                        if (readahead)
                                hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
                        atomic_long_set(&vma->swap_readahead_info,
                                        SWAP_RA_VAL(addr, win, hits));
                }

                if (readahead) {
                        count_vm_event(SWAP_RA_HIT);
                        if (!vma || !vma_ra)
                                atomic_inc(&swapin_readahead_hits);
                }
        } else {
                folio = NULL;
        }

        return folio;
}

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or an ERR_PTR() (%-ENOENT if nothing is found).
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
                pgoff_t index)
{
        swp_entry_t swp;
        struct swap_info_struct *si;
        struct folio *folio = filemap_get_entry(mapping, index);

        if (!folio)
                return ERR_PTR(-ENOENT);
        if (!xa_is_value(folio))
                return folio;
        if (!shmem_mapping(mapping))
                return ERR_PTR(-ENOENT);

        swp = radix_to_swp_entry(folio);
        /* There might be swapin error entries in shmem mapping. */
        if (non_swap_entry(swp))
                return ERR_PTR(-ENOENT);
        /* Prevent swapoff from happening to us */
        si = get_swap_device(swp);
        if (!si)
                return ERR_PTR(-ENOENT);
        index = swp_offset(swp);
        folio = filemap_get_folio(swap_address_space(swp), index);
        put_swap_device(si);
        return folio;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct swap_info_struct *si;
        struct folio *folio;
        struct page *page;
        void *shadow = NULL;

        *new_page_allocated = false;
        si = get_swap_device(entry);
        if (!si)
                return NULL;

        for (;;) {
                int err;
                /*
                 * First check the swap cache.  Since this is normally
                 * called after swap_cache_get_folio() failed, re-calling
                 * that would confuse statistics.
                 */
                folio = filemap_get_folio(swap_address_space(entry),
                                                swp_offset(entry));
                if (!IS_ERR(folio)) {
                        page = folio_file_page(folio, swp_offset(entry));
                        goto got_page;
                }

                /*
                 * Just skip readahead for an unused swap slot.
                 * During swapoff, when swap_slot_cache is disabled,
                 * we have to handle the race between putting a swap
                 * entry into the swap cache and marking the swap slot
                 * as SWAP_HAS_CACHE.  That's handled later in this
                 * function; otherwise swapoff would be aborted if we
                 * returned NULL here.
                 */
                if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
                        goto fail_put_swap;

                /*
                 * Get a new page to read into from swap.  Allocate it now,
                 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
                 * cause any racers to loop around until we add it to cache.
                 */
                folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
                if (!folio)
                        goto fail_put_swap;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (!err)
                        break;

                folio_put(folio);
                if (err != -EEXIST)
                        goto fail_put_swap;

                /*
                 * We might race against __delete_from_swap_cache(), and
                 * stumble across a swap_map entry whose SWAP_HAS_CACHE
                 * has not yet been cleared.  Or race against another
                 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
                 * in swap_map, but not yet added its page to swap cache.
                 */
                schedule_timeout_uninterruptible(1);
        }

        /*
         * The swap entry is ours to swap in. Prepare the new page.
         */

        __folio_set_locked(folio);
        __folio_set_swapbacked(folio);

        if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
                goto fail_unlock;

        /* May fail (-ENOMEM) if XArray node allocation failed. */
        if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
                goto fail_unlock;

        mem_cgroup_swapin_uncharge_swap(entry);

        if (shadow)
                workingset_refault(folio, shadow);

        /* Caller will initiate read into locked folio */
        folio_add_lru(folio);
        *new_page_allocated = true;
        page = &folio->page;
got_page:
        put_swap_device(si);
        return page;

fail_unlock:
        put_swap_folio(folio, entry);
        folio_unlock(folio);
        folio_put(folio);
fail_put_swap:
        put_swap_device(si);
        return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * get/put_swap_device() aren't needed to call this function, because
 * __read_swap_cache_async() calls them and swap_readpage() holds the
 * swap cache folio lock.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                                   struct vm_area_struct *vma,
                                   unsigned long addr, bool do_poll,
                                   struct swap_iocb **plug)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage, do_poll, plug);

        return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
                                      unsigned long offset,
                                      int hits,
                                      int max_pages,
                                      int prev_win)
{
        unsigned int pages, last_ra;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = hits + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = prev_win / 2;
        if (pages < last_ra)
                pages = last_ra;

        return pages;
}
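
/*
 * Worked example of the heuristic above (an illustration, not from the
 * source): with hits == 5, pages starts at 7 and is rounded up to the
 * next power of two, 8; with hits == 0 and an offset not adjacent to
 * prev_offset, the window collapses to a single page.  Whatever the hit
 * count, the window never drops below half of the previous window
 * (prev_win / 2) and never exceeds max_pages.
 */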

static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int hits, pages, max_pages;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        hits = atomic_xchg(&swapin_readahead_hits, 0);
        pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
                                  max_pages,
                                  atomic_read(&last_readahead_pages));
        if (!hits)
                WRITE_ONCE(prev_offset, offset);
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct swap_info_struct *si = swp_swap_info(entry);
        struct blk_plug plug;
        struct swap_iocb *splug = NULL;
        bool do_poll = true, page_allocated;
        struct vm_area_struct *vma = vmf->vma;
        unsigned long addr = vmf->address;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        do_poll = false;
        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
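        /*
         * For example (illustration only): offset == 0x123 with an
         * 8-page window (mask == 7) gives start_offset == 0x120 and
         * end_offset == 0x127, one aligned block of eight slots.
         */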
        if (!start_offset)      /* First page is swap header. */
                start_offset++;
        if (end_offset >= si->max)
                end_offset = si->max - 1;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = __read_swap_cache_async(
                        swp_entry(swp_type(entry), offset),
                        gfp_mask, vma, addr, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false, &splug);
                        if (offset != entry_offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);
        swap_read_unplug(splug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        /* The page was likely read above, so no need for plugging here */
        return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
}

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
        struct address_space *spaces, *space;
        unsigned int i, nr;

        nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
        spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
        if (!spaces)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
                space = spaces + i;
                xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
                atomic_set(&space->i_mmap_writable, 0);
                space->a_ops = &swap_aops;
                /* swap cache doesn't use writeback related tags */
                mapping_set_no_writeback_tags(space);
        }
        nr_swapper_spaces[type] = nr;
        swapper_spaces[type] = spaces;

        return 0;
}
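
/*
 * Sizing example (a sketch, assuming SWAP_ADDRESS_SPACE_PAGES == 16384,
 * i.e. 64 MiB of swap per address space with 4 KiB pages): a 1 GiB swap
 * device has 262144 slots and so gets DIV_ROUND_UP(262144, 16384) == 16
 * address spaces, spreading lock contention across the swap cache.
 */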

void exit_swap_address_space(unsigned int type)
{
        int i;
        struct address_space *spaces = swapper_spaces[type];

        for (i = 0; i < nr_swapper_spaces[type]; i++)
                VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
        kvfree(spaces);
        nr_swapper_spaces[type] = 0;
        swapper_spaces[type] = NULL;
}

#define SWAP_RA_ORDER_CEILING   5

struct vma_swap_readahead {
        unsigned short win;
        unsigned short offset;
        unsigned short nr_pte;
};

static void swap_ra_info(struct vm_fault *vmf,
                         struct vma_swap_readahead *ra_info)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long ra_val;
        unsigned long faddr, pfn, fpfn, lpfn, rpfn;
        unsigned long start, end;
        unsigned int max_win, hits, prev_win, win;

        max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
                             SWAP_RA_ORDER_CEILING);
        if (max_win == 1) {
                ra_info->win = 1;
                return;
        }

        faddr = vmf->address;
        fpfn = PFN_DOWN(faddr);
        ra_val = GET_SWAP_RA_VAL(vma);
        pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
        prev_win = SWAP_RA_WIN(ra_val);
        hits = SWAP_RA_HITS(ra_val);
        ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
                                               max_win, prev_win);
        atomic_long_set(&vma->swap_readahead_info,
                        SWAP_RA_VAL(faddr, win, 0));
        if (win == 1)
                return;

        if (fpfn == pfn + 1) {
                lpfn = fpfn;
                rpfn = fpfn + win;
        } else if (pfn == fpfn + 1) {
                lpfn = fpfn - win + 1;
                rpfn = fpfn + 1;
        } else {
                unsigned int left = (win - 1) / 2;

                lpfn = fpfn - left;
                rpfn = fpfn + win - left;
        }
        start = max3(lpfn, PFN_DOWN(vma->vm_start),
                     PFN_DOWN(faddr & PMD_MASK));
        end = min3(rpfn, PFN_DOWN(vma->vm_end),
                   PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));

        ra_info->nr_pte = end - start;
        ra_info->offset = fpfn - start;
}
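
/*
 * Placement illustration for swap_ra_info() (not from the source): with
 * win == 8 and a fault at fpfn == 100, a forward scan (previous fault at
 * pfn 99) yields the window [100, 108); a backward scan (pfn 101) yields
 * [93, 101); otherwise the window is roughly centred, [97, 105).  The
 * window is then clipped to the VMA and to the faulting PMD's range.
 */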

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
                                       struct vm_fault *vmf)
{
        struct blk_plug plug;
        struct swap_iocb *splug = NULL;
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;
        pte_t *pte = NULL, pentry;
        unsigned long addr;
        swp_entry_t entry;
        unsigned int i;
        bool page_allocated;
        struct vma_swap_readahead ra_info = {
                .win = 1,
        };

        swap_ra_info(vmf, &ra_info);
        if (ra_info.win == 1)
                goto skip;

        addr = vmf->address - (ra_info.offset * PAGE_SIZE);

        blk_start_plug(&plug);
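        /*
         * Editorial note on the loop below: pte starts out NULL, so
         * "!pte++" is true on the first pass and again after each
         * pte_unmap() (which resets pte to NULL); the PTE page is thus
         * mapped lazily, and always at the current addr.
         */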
        for (i = 0; i < ra_info.nr_pte; i++, addr += PAGE_SIZE) {
                if (!pte++) {
                        pte = pte_offset_map(vmf->pmd, addr);
                        if (!pte)
                                break;
                }
                pentry = ptep_get_lockless(pte);
                if (!is_swap_pte(pentry))
                        continue;
                entry = pte_to_swp_entry(pentry);
                if (unlikely(non_swap_entry(entry)))
                        continue;
                pte_unmap(pte);
                pte = NULL;
                page = __read_swap_cache_async(entry, gfp_mask, vma,
                                               addr, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false, &splug);
                        if (i != ra_info.offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        if (pte)
                pte_unmap(pte);
        blk_finish_plug(&plug);
        swap_read_unplug(splug);
        lru_add_drain();
skip:
        /* The page was likely read above, so no need for plugging here */
        return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
                                     ra_info.win == 1, NULL);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead either cluster-based (i.e. physical,
 * disk-based) or VMA-based (i.e. based on virtual addresses around the
 * faulting address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        return swap_use_vma_readahead() ?
                        swap_vma_readahead(entry, gfp_mask, vmf) :
                        swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%s\n",
                          enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
                                      struct kobj_attribute *attr,
                                      const char *buf, size_t count)
{
        ssize_t ret;

        ret = kstrtobool(buf, &enable_vma_readahead);
        if (ret)
                return ret;

        return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);
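
/*
 * Usage sketch: the attribute above is registered under the "swap"
 * kobject created in swap_init_sysfs() below, so it appears as
 * /sys/kernel/mm/swap/vma_ra_enabled and can be toggled with e.g.:
 *   echo false > /sys/kernel/mm/swap/vma_ra_enabled
 */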

static struct attribute *swap_attrs[] = {
        &vma_ra_enabled_attr.attr,
        NULL,
};

static const struct attribute_group swap_attr_group = {
        .attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
        int err;
        struct kobject *swap_kobj;

        swap_kobj = kobject_create_and_add("swap", mm_kobj);
        if (!swap_kobj) {
                pr_err("failed to create swap kobject\n");
                return -ENOMEM;
        }
        err = sysfs_create_group(swap_kobj, &swap_attr_group);
        if (err) {
                pr_err("failed to register swap group\n");
                goto delete_obj;
        }
        return 0;

delete_obj:
        kobject_put(swap_kobj);
        return err;
}
subsys_initcall(swap_init_sysfs);
#endif

Generated by: LCOV version 1.14