LCOV - code coverage report
Current view: top level - mm - swap_state.c (source / functions)
Test:         coverage.info
Date:         2023-07-19 18:55:55

                   Hit    Total    Coverage
      Lines:         5      325       1.5 %
      Functions:     1       24       4.2 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  *  linux/mm/swap_state.c
       4             :  *
       5             :  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
       6             :  *  Swap reorganised 29.12.95, Stephen Tweedie
       7             :  *
       8             :  *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
       9             :  */
      10             : #include <linux/mm.h>
      11             : #include <linux/gfp.h>
      12             : #include <linux/kernel_stat.h>
      13             : #include <linux/swap.h>
      14             : #include <linux/swapops.h>
      15             : #include <linux/init.h>
      16             : #include <linux/pagemap.h>
      17             : #include <linux/backing-dev.h>
      18             : #include <linux/blkdev.h>
      19             : #include <linux/pagevec.h>
      20             : #include <linux/migrate.h>
      21             : #include <linux/vmalloc.h>
      22             : #include <linux/swap_slots.h>
      23             : #include <linux/huge_mm.h>
      24             : #include <linux/shmem_fs.h>
      25             : #include "internal.h"
      26             : #include "swap.h"
      27             : 
      28             : /*
      29             :  * swapper_space is a fiction, retained to simplify the path through
      30             :  * vmscan's shrink_page_list.
      31             :  */
      32             : static const struct address_space_operations swap_aops = {
      33             :         .writepage      = swap_writepage,
      34             :         .dirty_folio    = noop_dirty_folio,
      35             : #ifdef CONFIG_MIGRATION
      36             :         .migrate_folio  = migrate_folio,
      37             : #endif
      38             : };
      39             : 
      40             : struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
      41             : static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
      42             : static bool enable_vma_readahead __read_mostly = true;
      43             : 
      44             : #define SWAP_RA_WIN_SHIFT       (PAGE_SHIFT / 2)
      45             : #define SWAP_RA_HITS_MASK       ((1UL << SWAP_RA_WIN_SHIFT) - 1)
      46             : #define SWAP_RA_HITS_MAX        SWAP_RA_HITS_MASK
      47             : #define SWAP_RA_WIN_MASK        (~PAGE_MASK & ~SWAP_RA_HITS_MASK)
      48             : 
      49             : #define SWAP_RA_HITS(v)         ((v) & SWAP_RA_HITS_MASK)
      50             : #define SWAP_RA_WIN(v)          (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
      51             : #define SWAP_RA_ADDR(v)         ((v) & PAGE_MASK)
      52             : 
      53             : #define SWAP_RA_VAL(addr, win, hits)                            \
      54             :         (((addr) & PAGE_MASK) |                                     \
      55             :          (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |  \
      56             :          ((hits) & SWAP_RA_HITS_MASK))
      57             : 
      58             : /* Initial readahead hits is 4 to start up with a small window */
      59             : #define GET_SWAP_RA_VAL(vma)                                    \
      60             :         (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
      61             : 
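These macros pack the per-VMA readahead state into a single unsigned long: the page-aligned fault address in the upper bits, the window size in the low in-page bits above the hit counter, and the hit counter at the very bottom. A user-space sketch of the same packing (a sketch only, assuming a 64-bit build with PAGE_SHIFT == 12):

    #include <assert.h>

    #define PAGE_SHIFT      12                      /* assumption: 4K pages */
    #define PAGE_MASK       (~((1UL << PAGE_SHIFT) - 1))
    #define RA_WIN_SHIFT    (PAGE_SHIFT / 2)        /* 6 */
    #define RA_HITS_MASK    ((1UL << RA_WIN_SHIFT) - 1)
    #define RA_WIN_MASK     (~PAGE_MASK & ~RA_HITS_MASK)

    int main(void)
    {
            /* Pack addr = 0x7f0000123000, win = 8, hits = 3 ... */
            unsigned long v = (0x7f0000123000UL & PAGE_MASK) |
                              ((8UL << RA_WIN_SHIFT) & RA_WIN_MASK) |
                              (3UL & RA_HITS_MASK);

            /* ... and unpack the three fields again. */
            assert((v & PAGE_MASK) == 0x7f0000123000UL);        /* addr */
            assert(((v & RA_WIN_MASK) >> RA_WIN_SHIFT) == 8);   /* win  */
            assert((v & RA_HITS_MASK) == 3);                    /* hits */
            return 0;
    }
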
      62             : static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
      63             : 
      64           0 : void show_swap_cache_info(void)
      65             : {
      66           0 :         printk("%lu pages in swap cache\n", total_swapcache_pages());
      67           0 :         printk("Free swap  = %ldkB\n",
      68             :                 get_nr_swap_pages() << (PAGE_SHIFT - 10));
      69           0 :         printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
      70           0 : }
      71             : 
      72           0 : void *get_shadow_from_swap_cache(swp_entry_t entry)
      73             : {
      74           0 :         struct address_space *address_space = swap_address_space(entry);
      75           0 :         pgoff_t idx = swp_offset(entry);
      76             :         struct page *page;
      77             : 
      78           0 :         page = xa_load(&address_space->i_pages, idx);
      79           0 :         if (xa_is_value(page))
      80             :                 return page;
      81           0 :         return NULL;
      82             : }
      83             : 
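get_shadow_from_swap_cache() returns the workingset shadow left behind at eviction: not a page pointer but an XArray "value entry", a small integer tagged in its low bit so it can never be confused with a pointer. A minimal user-space sketch of that encoding (assumed to mirror xa_mk_value(), xa_is_value() and xa_to_value() in <linux/xarray.h>; illustrative only):

    #include <assert.h>

    /* Value entries keep bit 0 set; real kernel pointers are at least
     * word-aligned, so their bit 0 is always clear. */
    static inline void *mk_value(unsigned long v)
    {
            return (void *)((v << 1) | 1);
    }

    static inline int is_value(const void *entry)
    {
            return (unsigned long)entry & 1;
    }

    static inline unsigned long to_value(const void *entry)
    {
            return (unsigned long)entry >> 1;
    }

    int main(void)
    {
            void *shadow = mk_value(42);    /* e.g. a packed eviction distance */

            assert(is_value(shadow));       /* recognised as a shadow entry */
            assert(to_value(shadow) == 42); /* and decodes back to 42       */
            return 0;
    }
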
      84             : /*
      85             :  * add_to_swap_cache resembles filemap_add_folio on swapper_space,
      86             :  * but sets SwapCache flag and private instead of mapping and index.
      87             :  */
      88           0 : int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
      89             :                         gfp_t gfp, void **shadowp)
      90             : {
      91           0 :         struct address_space *address_space = swap_address_space(entry);
      92           0 :         pgoff_t idx = swp_offset(entry);
      93           0 :         XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
      94           0 :         unsigned long i, nr = folio_nr_pages(folio);
      95             :         void *old;
      96             : 
      97           0 :         xas_set_update(&xas, workingset_update_node);
      98             : 
      99             :         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
     100             :         VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
     101             :         VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
     102             : 
     103           0 :         folio_ref_add(folio, nr);
     104             :         folio_set_swapcache(folio);
     105             : 
     106             :         do {
     107           0 :                 xas_lock_irq(&xas);
     108           0 :                 xas_create_range(&xas);
     109           0 :                 if (xas_error(&xas))
     110             :                         goto unlock;
     111           0 :                 for (i = 0; i < nr; i++) {
     112             :                         VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
     113           0 :                         old = xas_load(&xas);
     114           0 :                         if (xa_is_value(old)) {
     115           0 :                                 if (shadowp)
     116           0 :                                         *shadowp = old;
     117             :                         }
     118           0 :                         set_page_private(folio_page(folio, i), entry.val + i);
     119           0 :                         xas_store(&xas, folio);
     120           0 :                         xas_next(&xas);
     121             :                 }
     122           0 :                 address_space->nrpages += nr;
     123           0 :                 __node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
     124           0 :                 __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
     125             : unlock:
     126           0 :                 xas_unlock_irq(&xas);
     127           0 :         } while (xas_nomem(&xas, gfp));
     128             : 
     129           0 :         if (!xas_error(&xas))
     130             :                 return 0;
     131             : 
     132           0 :         folio_clear_swapcache(folio);
     133           0 :         folio_ref_sub(folio, nr);
     134           0 :         return xas_error(&xas);
     135             : }
     136             : 
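A detail worth noting in add_to_swap_cache() above: each subpage's ->private is set to entry.val + i, so consecutive subpages of a large folio map to consecutive swap slots. For an order-2 folio added at, say, entry.val == 0x400 (a made-up offset), its four subpages record 0x400, 0x401, 0x402 and 0x403 and can each be traced back to their own slot.
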
     137             : /*
     138             :  * This must be called only on folios that have
     139             :  * been verified to be in the swap cache.
     140             :  */
     141           0 : void __delete_from_swap_cache(struct folio *folio,
     142             :                         swp_entry_t entry, void *shadow)
     143             : {
     144           0 :         struct address_space *address_space = swap_address_space(entry);
     145             :         int i;
     146           0 :         long nr = folio_nr_pages(folio);
     147           0 :         pgoff_t idx = swp_offset(entry);
     148           0 :         XA_STATE(xas, &address_space->i_pages, idx);
     149             : 
     150           0 :         xas_set_update(&xas, workingset_update_node);
     151             : 
     152             :         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
     153             :         VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
     154             :         VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
     155             : 
     156           0 :         for (i = 0; i < nr; i++) {
     157           0 :                 void *entry = xas_store(&xas, shadow);
     158             :                 VM_BUG_ON_PAGE(entry != folio, entry);
     159           0 :                 set_page_private(folio_page(folio, i), 0);
     160           0 :                 xas_next(&xas);
     161             :         }
     162           0 :         folio_clear_swapcache(folio);
     163           0 :         address_space->nrpages -= nr;
     164           0 :         __node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
     165           0 :         __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
     166           0 : }
     167             : 
     168             : /**
     169             :  * add_to_swap - allocate swap space for a folio
     170             :  * @folio: folio we want to move to swap
     171             :  *
     172             :  * Allocate swap space for the folio and add the folio to the
     173             :  * swap cache.
     174             :  *
     175             :  * Context: Caller needs to hold the folio lock.
     176             :  * Return: Whether the folio was added to the swap cache.
     177             :  */
     178           0 : bool add_to_swap(struct folio *folio)
     179             : {
     180             :         swp_entry_t entry;
     181             :         int err;
     182             : 
     183             :         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
     184             :         VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
     185             : 
     186           0 :         entry = folio_alloc_swap(folio);
     187           0 :         if (!entry.val)
     188             :                 return false;
     189             : 
     190             :         /*
     191             :          * XArray node allocations from PF_MEMALLOC contexts could
     192             :          * completely exhaust the page allocator. __GFP_NOMEMALLOC
     193             :          * stops emergency reserves from being allocated.
     194             :          *
     195             :          * TODO: this could cause a theoretical memory reclaim
     196             :          * deadlock in the swap out path.
     197             :          */
     198             :         /*
     199             :          * Add it to the swap cache.
     200             :          */
     201           0 :         err = add_to_swap_cache(folio, entry,
     202             :                         __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
     203           0 :         if (err)
     204             :                 /*
     205             :                  * add_to_swap_cache() doesn't return -EEXIST, so we can safely
     206             :                  * clear SWAP_HAS_CACHE flag.
     207             :                  */
     208             :                 goto fail;
     209             :         /*
     210             :          * Normally the folio will be dirtied in unmap because its
      211             :  * pte should be dirty. A special case is an MADV_FREE page: its
      212             :  * pte may have the dirty bit cleared while the folio's
      213             :  * SwapBacked flag is still set, because clearing the dirty bit
      214             :  * and the SwapBacked flag is not done under a common lock. For
      215             :  * such a folio, unmap will not set the dirty bit, so reclaim will
     216             :          * not write the folio out. This can cause data corruption when
     217             :          * the folio is swapped in later. Always setting the dirty flag
     218             :          * for the folio solves the problem.
     219             :          */
     220           0 :         folio_mark_dirty(folio);
     221             : 
     222           0 :         return true;
     223             : 
     224             : fail:
     225           0 :         put_swap_folio(folio, entry);
     226           0 :         return false;
     227             : }
     228             : 
     229             : /*
     230             :  * This must be called only on folios that have
     231             :  * been verified to be in the swap cache and locked.
     232             :  * It will never put the folio into the free list,
     233             :  * the caller has a reference on the folio.
     234             :  */
     235           0 : void delete_from_swap_cache(struct folio *folio)
     236             : {
     237           0 :         swp_entry_t entry = folio_swap_entry(folio);
     238           0 :         struct address_space *address_space = swap_address_space(entry);
     239             : 
     240           0 :         xa_lock_irq(&address_space->i_pages);
     241           0 :         __delete_from_swap_cache(folio, entry, NULL);
     242           0 :         xa_unlock_irq(&address_space->i_pages);
     243             : 
     244           0 :         put_swap_folio(folio, entry);
     245           0 :         folio_ref_sub(folio, folio_nr_pages(folio));
     246           0 : }
     247             : 
     248           0 : void clear_shadow_from_swap_cache(int type, unsigned long begin,
     249             :                                 unsigned long end)
     250             : {
     251           0 :         unsigned long curr = begin;
     252             :         void *old;
     253             : 
     254           0 :         for (;;) {
     255           0 :                 swp_entry_t entry = swp_entry(type, curr);
     256           0 :                 struct address_space *address_space = swap_address_space(entry);
     257           0 :                 XA_STATE(xas, &address_space->i_pages, curr);
     258             : 
     259           0 :                 xas_set_update(&xas, workingset_update_node);
     260             : 
     261           0 :                 xa_lock_irq(&address_space->i_pages);
     262           0 :                 xas_for_each(&xas, old, end) {
     263           0 :                         if (!xa_is_value(old))
     264           0 :                                 continue;
     265           0 :                         xas_store(&xas, NULL);
     266             :                 }
     267           0 :                 xa_unlock_irq(&address_space->i_pages);
     268             : 
      269             :                 /* move on to the next swap address-space chunk until we pass end */
     270           0 :                 curr >>= SWAP_ADDRESS_SPACE_SHIFT;
     271           0 :                 curr++;
     272           0 :                 curr <<= SWAP_ADDRESS_SPACE_SHIFT;
     273           0 :                 if (curr > end)
     274             :                         break;
     275             :         }
     276           0 : }
     277             : 
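The three shift lines at the bottom of the loop round curr up to the start of the next swap address-space chunk, one chunk per iteration. A worked example, assuming SWAP_ADDRESS_SPACE_SHIFT == 14 (its value in mm/swap.h at the time of writing):

    unsigned long curr = 0x5432;
    curr >>= 14;    /* 0x1    - index of the 16384-slot chunk we are in */
    curr++;         /* 0x2    - index of the next chunk                 */
    curr <<= 14;    /* 0x8000 - first slot offset of that next chunk    */

Each iteration therefore clears shadows in one per-chunk XArray, then jumps to the next chunk until curr passes end.
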
     278             : /* 
     279             :  * If we are the only user, then try to free up the swap cache. 
     280             :  * 
      281             :  * It's OK to check the swapcache flag without the folio lock
     282             :  * here because we are going to recheck again inside
     283             :  * folio_free_swap() _with_ the lock.
     284             :  *                                      - Marcelo
     285             :  */
     286           0 : void free_swap_cache(struct page *page)
     287             : {
     288           0 :         struct folio *folio = page_folio(page);
     289             : 
     290           0 :         if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
     291           0 :             folio_trylock(folio)) {
     292           0 :                 folio_free_swap(folio);
     293           0 :                 folio_unlock(folio);
     294             :         }
     295           0 : }
     296             : 
     297             : /* 
     298             :  * Perform a free_page(), also freeing any swap cache associated with
     299             :  * this page if it is the last user of the page.
     300             :  */
     301           0 : void free_page_and_swap_cache(struct page *page)
     302             : {
     303           0 :         free_swap_cache(page);
     304           0 :         if (!is_huge_zero_page(page))
     305           0 :                 put_page(page);
     306           0 : }
     307             : 
     308             : /*
     309             :  * Passed an array of pages, drop them all from swapcache and then release
     310             :  * them.  They are removed from the LRU and freed if this is their last use.
     311             :  */
     312           0 : void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
     313             : {
     314           0 :         lru_add_drain();
     315           0 :         for (int i = 0; i < nr; i++)
     316           0 :                 free_swap_cache(encoded_page_ptr(pages[i]));
     317           0 :         release_pages(pages, nr);
     318           0 : }
     319             : 
     320             : static inline bool swap_use_vma_readahead(void)
     321             : {
     322           0 :         return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
     323             : }
     324             : 
     325             : /*
     326             :  * Lookup a swap entry in the swap cache. A found folio will be returned
     327             :  * unlocked and with its refcount incremented - we rely on the kernel
     328             :  * lock getting page table operations atomic even if we drop the folio
     329             :  * lock before returning.
     330             :  *
     331             :  * Caller must lock the swap device or hold a reference to keep it valid.
     332             :  */
     333           0 : struct folio *swap_cache_get_folio(swp_entry_t entry,
     334             :                 struct vm_area_struct *vma, unsigned long addr)
     335             : {
     336             :         struct folio *folio;
     337             : 
     338           0 :         folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
     339           0 :         if (!IS_ERR(folio)) {
     340           0 :                 bool vma_ra = swap_use_vma_readahead();
     341             :                 bool readahead;
     342             : 
     343             :                 /*
     344             :                  * At the moment, we don't support PG_readahead for anon THP
     345             :                  * so let's bail out rather than confusing the readahead stat.
     346             :                  */
     347           0 :                 if (unlikely(folio_test_large(folio)))
     348             :                         return folio;
     349             : 
     350           0 :                 readahead = folio_test_clear_readahead(folio);
     351           0 :                 if (vma && vma_ra) {
     352             :                         unsigned long ra_val;
     353             :                         int win, hits;
     354             : 
     355           0 :                         ra_val = GET_SWAP_RA_VAL(vma);
     356           0 :                         win = SWAP_RA_WIN(ra_val);
     357           0 :                         hits = SWAP_RA_HITS(ra_val);
     358           0 :                         if (readahead)
     359           0 :                                 hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
     360           0 :                         atomic_long_set(&vma->swap_readahead_info,
     361           0 :                                         SWAP_RA_VAL(addr, win, hits));
     362             :                 }
     363             : 
     364           0 :                 if (readahead) {
     365           0 :                         count_vm_event(SWAP_RA_HIT);
     366           0 :                         if (!vma || !vma_ra)
     367             :                                 atomic_inc(&swapin_readahead_hits);
     368             :                 }
     369             :         } else {
     370             :                 folio = NULL;
     371             :         }
     372             : 
     373             :         return folio;
     374             : }
     375             : 
     376             : /**
     377             :  * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
     378             :  * @mapping: The address_space to search.
     379             :  * @index: The page cache index.
     380             :  *
     381             :  * This differs from filemap_get_folio() in that it will also look for the
     382             :  * folio in the swap cache.
     383             :  *
     384             :  * Return: The found folio or %NULL.
     385             :  */
     386           0 : struct folio *filemap_get_incore_folio(struct address_space *mapping,
     387             :                 pgoff_t index)
     388             : {
     389             :         swp_entry_t swp;
     390             :         struct swap_info_struct *si;
     391           0 :         struct folio *folio = filemap_get_entry(mapping, index);
     392             : 
     393           0 :         if (!folio)
     394             :                 return ERR_PTR(-ENOENT);
     395           0 :         if (!xa_is_value(folio))
     396             :                 return folio;
     397           0 :         if (!shmem_mapping(mapping))
     398             :                 return ERR_PTR(-ENOENT);
     399             : 
     400           0 :         swp = radix_to_swp_entry(folio);
     401             :         /* There might be swapin error entries in shmem mapping. */
     402           0 :         if (non_swap_entry(swp))
     403             :                 return ERR_PTR(-ENOENT);
     404             :         /* Prevent swapoff from happening to us */
     405           0 :         si = get_swap_device(swp);
     406           0 :         if (!si)
     407             :                 return ERR_PTR(-ENOENT);
     408           0 :         index = swp_offset(swp);
     409           0 :         folio = filemap_get_folio(swap_address_space(swp), index);
     410           0 :         put_swap_device(si);
     411           0 :         return folio;
     412             : }
     413             : 
     414           0 : struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
     415             :                         struct vm_area_struct *vma, unsigned long addr,
     416             :                         bool *new_page_allocated)
     417             : {
     418             :         struct swap_info_struct *si;
     419             :         struct folio *folio;
     420           0 :         void *shadow = NULL;
     421             : 
     422           0 :         *new_page_allocated = false;
     423             : 
     424           0 :         for (;;) {
     425             :                 int err;
     426             :                 /*
     427             :                  * First check the swap cache.  Since this is normally
     428             :                  * called after swap_cache_get_folio() failed, re-calling
     429             :                  * that would confuse statistics.
     430             :                  */
     431           0 :                 si = get_swap_device(entry);
     432           0 :                 if (!si)
     433             :                         return NULL;
     434           0 :                 folio = filemap_get_folio(swap_address_space(entry),
     435             :                                                 swp_offset(entry));
     436           0 :                 put_swap_device(si);
     437           0 :                 if (!IS_ERR(folio))
     438           0 :                         return folio_file_page(folio, swp_offset(entry));
     439             : 
     440             :                 /*
     441             :                  * Just skip read ahead for unused swap slot.
     442             :                  * During swap_off when swap_slot_cache is disabled,
     443             :                  * we have to handle the race between putting
     444             :                  * swap entry in swap cache and marking swap slot
     445             :                  * as SWAP_HAS_CACHE.  That's done in later part of code or
     446             :                  * else swap_off will be aborted if we return NULL.
     447             :                  */
     448           0 :                 if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
     449             :                         return NULL;
     450             : 
     451             :                 /*
     452             :                  * Get a new page to read into from swap.  Allocate it now,
     453             :                  * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
     454             :                  * cause any racers to loop around until we add it to cache.
     455             :                  */
     456           0 :                 folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
     457           0 :                 if (!folio)
     458             :                         return NULL;
     459             : 
     460             :                 /*
     461             :                  * Swap entry may have been freed since our caller observed it.
     462             :                  */
     463           0 :                 err = swapcache_prepare(entry);
     464           0 :                 if (!err)
     465             :                         break;
     466             : 
     467           0 :                 folio_put(folio);
     468           0 :                 if (err != -EEXIST)
     469             :                         return NULL;
     470             : 
     471             :                 /*
     472             :                  * We might race against __delete_from_swap_cache(), and
     473             :                  * stumble across a swap_map entry whose SWAP_HAS_CACHE
     474             :                  * has not yet been cleared.  Or race against another
     475             :                  * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
     476             :                  * in swap_map, but not yet added its page to swap cache.
     477             :                  */
     478           0 :                 schedule_timeout_uninterruptible(1);
     479             :         }
     480             : 
     481             :         /*
     482             :          * The swap entry is ours to swap in. Prepare the new page.
     483             :          */
     484             : 
     485           0 :         __folio_set_locked(folio);
     486           0 :         __folio_set_swapbacked(folio);
     487             : 
     488           0 :         if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
     489             :                 goto fail_unlock;
     490             : 
     491             :         /* May fail (-ENOMEM) if XArray node allocation failed. */
     492           0 :         if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
     493             :                 goto fail_unlock;
     494             : 
     495           0 :         mem_cgroup_swapin_uncharge_swap(entry);
     496             : 
     497           0 :         if (shadow)
     498           0 :                 workingset_refault(folio, shadow);
     499             : 
     500             :         /* Caller will initiate read into locked folio */
     501           0 :         folio_add_lru(folio);
     502           0 :         *new_page_allocated = true;
     503           0 :         return &folio->page;
     504             : 
     505             : fail_unlock:
     506           0 :         put_swap_folio(folio, entry);
     507           0 :         folio_unlock(folio);
     508             :         folio_put(folio);
     509             :         return NULL;
     510             : }
     511             : 
     512             : /*
     513             :  * Locate a page of swap in physical memory, reserving swap cache space
     514             :  * and reading the disk if it is not already cached.
     515             :  * A failure return means that either the page allocation failed or that
     516             :  * the swap entry is no longer in use.
     517             :  */
     518           0 : struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
     519             :                                    struct vm_area_struct *vma,
     520             :                                    unsigned long addr, bool do_poll,
     521             :                                    struct swap_iocb **plug)
     522             : {
     523             :         bool page_was_allocated;
     524           0 :         struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
     525             :                         vma, addr, &page_was_allocated);
     526             : 
     527           0 :         if (page_was_allocated)
     528           0 :                 swap_readpage(retpage, do_poll, plug);
     529             : 
     530           0 :         return retpage;
     531             : }
     532             : 
     533             : static unsigned int __swapin_nr_pages(unsigned long prev_offset,
     534             :                                       unsigned long offset,
     535             :                                       int hits,
     536             :                                       int max_pages,
     537             :                                       int prev_win)
     538             : {
     539             :         unsigned int pages, last_ra;
     540             : 
     541             :         /*
     542             :          * This heuristic has been found to work well on both sequential and
     543             :          * random loads, swapping to hard disk or to SSD: please don't ask
     544             :          * what the "+ 2" means, it just happens to work well, that's all.
     545             :          */
     546           0 :         pages = hits + 2;
     547           0 :         if (pages == 2) {
     548             :                 /*
     549             :                  * We can have no readahead hits to judge by: but must not get
     550             :                  * stuck here forever, so check for an adjacent offset instead
     551             :                  * (and don't even bother to check whether swap type is same).
     552             :                  */
     553           0 :                 if (offset != prev_offset + 1 && offset != prev_offset - 1)
     554           0 :                         pages = 1;
     555             :         } else {
     556             :                 unsigned int roundup = 4;
     557           0 :                 while (roundup < pages)
     558           0 :                         roundup <<= 1;
     559             :                 pages = roundup;
     560             :         }
     561             : 
     562           0 :         if (pages > max_pages)
     563           0 :                 pages = max_pages;
     564             : 
     565             :         /* Don't shrink readahead too fast */
     566           0 :         last_ra = prev_win / 2;
     567           0 :         if (pages < last_ra)
     568           0 :                 pages = last_ra;
     569             : 
     570             :         return pages;
     571             : }
     572             : 
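The effect of the heuristic is easier to see with concrete numbers. Below is a user-space copy for experimentation (a sketch only, assumed faithful to the kernel logic above; the name ra_window() is made up):

    #include <assert.h>

    static unsigned int ra_window(unsigned long prev_offset, unsigned long offset,
                                  int hits, int max_pages, int prev_win)
    {
            unsigned int pages = hits + 2;

            if (pages == 2) {
                    /* No hits to judge by: guess 2 only for adjacent offsets. */
                    if (offset != prev_offset + 1 && offset != prev_offset - 1)
                            pages = 1;
            } else {
                    /* Round the window up to the next power of two (>= 4). */
                    unsigned int roundup = 4;

                    while (roundup < pages)
                            roundup <<= 1;
                    pages = roundup;
            }
            if (pages > max_pages)
                    pages = max_pages;
            if (pages < prev_win / 2)       /* don't shrink too fast */
                    pages = prev_win / 2;
            return pages;
    }

    int main(void)
    {
            assert(ra_window(100, 200, 0, 8, 0) == 1); /* cold, random access  */
            assert(ra_window(100, 101, 0, 8, 0) == 2); /* cold, but sequential */
            assert(ra_window(100, 200, 3, 8, 0) == 8); /* 3 hits: 5 -> 8       */
            assert(ra_window(100, 200, 9, 8, 0) == 8); /* clamped to max_pages */
            assert(ra_window(100, 200, 0, 8, 6) == 3); /* floor at prev_win/2  */
            return 0;
    }
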
     573           0 : static unsigned long swapin_nr_pages(unsigned long offset)
     574             : {
     575             :         static unsigned long prev_offset;
     576             :         unsigned int hits, pages, max_pages;
     577             :         static atomic_t last_readahead_pages;
     578             : 
     579           0 :         max_pages = 1 << READ_ONCE(page_cluster);
     580           0 :         if (max_pages <= 1)
     581             :                 return 1;
     582             : 
     583           0 :         hits = atomic_xchg(&swapin_readahead_hits, 0);
     584           0 :         pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
     585             :                                   max_pages,
     586             :                                   atomic_read(&last_readahead_pages));
     587           0 :         if (!hits)
     588           0 :                 WRITE_ONCE(prev_offset, offset);
     589           0 :         atomic_set(&last_readahead_pages, pages);
     590             : 
     591           0 :         return pages;
     592             : }
     593             : 
     594             : /**
     595             :  * swap_cluster_readahead - swap in pages in hope we need them soon
     596             :  * @entry: swap entry of this memory
     597             :  * @gfp_mask: memory allocation flags
     598             :  * @vmf: fault information
     599             :  *
     600             :  * Returns the struct page for entry and addr, after queueing swapin.
     601             :  *
     602             :  * Primitive swap readahead code. We simply read an aligned block of
     603             :  * (1 << page_cluster) entries in the swap area. This method is chosen
     604             :  * because it doesn't cost us any seek time.  We also make sure to queue
     605             :  * the 'original' request together with the readahead ones...
     606             :  *
     607             :  * This has been extended to use the NUMA policies from the mm triggering
     608             :  * the readahead.
     609             :  *
     610             :  * Caller must hold read mmap_lock if vmf->vma is not NULL.
     611             :  */
     612           0 : struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
     613             :                                 struct vm_fault *vmf)
     614             : {
     615             :         struct page *page;
     616           0 :         unsigned long entry_offset = swp_offset(entry);
     617           0 :         unsigned long offset = entry_offset;
     618             :         unsigned long start_offset, end_offset;
     619             :         unsigned long mask;
     620           0 :         struct swap_info_struct *si = swp_swap_info(entry);
     621             :         struct blk_plug plug;
     622           0 :         struct swap_iocb *splug = NULL;
     623           0 :         bool do_poll = true, page_allocated;
     624           0 :         struct vm_area_struct *vma = vmf->vma;
     625           0 :         unsigned long addr = vmf->address;
     626             : 
     627           0 :         mask = swapin_nr_pages(offset) - 1;
     628           0 :         if (!mask)
     629             :                 goto skip;
     630             : 
     631           0 :         do_poll = false;
     632             :         /* Read a page_cluster sized and aligned cluster around offset. */
     633           0 :         start_offset = offset & ~mask;
     634           0 :         end_offset = offset | mask;
     635           0 :         if (!start_offset)      /* First page is swap header. */
     636           0 :                 start_offset++;
     637           0 :         if (end_offset >= si->max)
     638           0 :                 end_offset = si->max - 1;
     639             : 
     640           0 :         blk_start_plug(&plug);
     641           0 :         for (offset = start_offset; offset <= end_offset ; offset++) {
     642             :                 /* Ok, do the async read-ahead now */
     643           0 :                 page = __read_swap_cache_async(
     644             :                         swp_entry(swp_type(entry), offset),
     645             :                         gfp_mask, vma, addr, &page_allocated);
     646           0 :                 if (!page)
     647           0 :                         continue;
     648           0 :                 if (page_allocated) {
     649           0 :                         swap_readpage(page, false, &splug);
     650           0 :                         if (offset != entry_offset) {
     651           0 :                                 SetPageReadahead(page);
     652           0 :                                 count_vm_event(SWAP_RA);
     653             :                         }
     654             :                 }
     655           0 :                 put_page(page);
     656             :         }
     657           0 :         blk_finish_plug(&plug);
     658           0 :         swap_read_unplug(splug);
     659             : 
     660           0 :         lru_add_drain();        /* Push any new pages onto the LRU now */
     661             : skip:
     662             :         /* The page was likely read above, so no need for plugging here */
     663           0 :         return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
     664             : }
     665             : 
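Since swapin_nr_pages() always returns a power of two, mask carves out an aligned window around the faulting offset. For example, if it returned 8 and the fault hit offset 0x123 (a made-up offset):

    mask         = 8 - 1        = 0x7;
    start_offset = 0x123 & ~0x7 = 0x120;
    end_offset   = 0x123 |  0x7 = 0x127;

i.e. the faulting entry is read together with the rest of its aligned 8-entry neighbourhood, subject to the swap-header and si->max adjustments above.
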
     666           0 : int init_swap_address_space(unsigned int type, unsigned long nr_pages)
     667             : {
     668             :         struct address_space *spaces, *space;
     669             :         unsigned int i, nr;
     670             : 
     671           0 :         nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
     672           0 :         spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
     673           0 :         if (!spaces)
     674             :                 return -ENOMEM;
     675           0 :         for (i = 0; i < nr; i++) {
     676           0 :                 space = spaces + i;
     677           0 :                 xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
     678           0 :                 atomic_set(&space->i_mmap_writable, 0);
     679           0 :                 space->a_ops = &swap_aops;
     680             :                 /* swap cache doesn't use writeback related tags */
     681           0 :                 mapping_set_no_writeback_tags(space);
     682             :         }
     683           0 :         nr_swapper_spaces[type] = nr;
     684           0 :         swapper_spaces[type] = spaces;
     685             : 
     686           0 :         return 0;
     687             : }
     688             : 
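Each swap device's cache is split into fixed-size address spaces so that xa_lock contention is spread across chunks. As a worked example, assuming SWAP_ADDRESS_SPACE_PAGES == 1 << 14 (its value in mm/swap.h at the time of writing): a 64 GiB swap device with 4 KiB pages has 16777216 slots, so nr = DIV_ROUND_UP(16777216, 16384) = 1024 address_space instances are allocated.
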
     689           0 : void exit_swap_address_space(unsigned int type)
     690             : {
     691             :         int i;
     692           0 :         struct address_space *spaces = swapper_spaces[type];
     693             : 
     694           0 :         for (i = 0; i < nr_swapper_spaces[type]; i++)
     695             :                 VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
     696           0 :         kvfree(spaces);
     697           0 :         nr_swapper_spaces[type] = 0;
     698           0 :         swapper_spaces[type] = NULL;
     699           0 : }
     700             : 
     701           0 : static void swap_ra_info(struct vm_fault *vmf,
     702             :                          struct vma_swap_readahead *ra_info)
     703             : {
     704           0 :         struct vm_area_struct *vma = vmf->vma;
     705             :         unsigned long ra_val;
     706             :         unsigned long faddr, pfn, fpfn, lpfn, rpfn;
     707             :         unsigned long start, end;
     708             :         pte_t *pte, *orig_pte;
     709             :         unsigned int max_win, hits, prev_win, win;
     710             : #ifndef CONFIG_64BIT
     711             :         pte_t *tpte;
     712             : #endif
     713             : 
     714           0 :         max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
     715             :                              SWAP_RA_ORDER_CEILING);
     716           0 :         if (max_win == 1) {
     717           0 :                 ra_info->win = 1;
     718           0 :                 return;
     719             :         }
     720             : 
     721           0 :         faddr = vmf->address;
     722           0 :         fpfn = PFN_DOWN(faddr);
     723           0 :         ra_val = GET_SWAP_RA_VAL(vma);
     724           0 :         pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
     725           0 :         prev_win = SWAP_RA_WIN(ra_val);
     726           0 :         hits = SWAP_RA_HITS(ra_val);
     727           0 :         ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
     728             :                                                max_win, prev_win);
     729           0 :         atomic_long_set(&vma->swap_readahead_info,
     730           0 :                         SWAP_RA_VAL(faddr, win, 0));
     731             : 
     732           0 :         if (win == 1)
     733             :                 return;
     734             : 
     735             :         /* Copy the PTEs because the page table may be unmapped */
     736           0 :         orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
     737           0 :         if (fpfn == pfn + 1) {
     738           0 :                 lpfn = fpfn;
     739           0 :                 rpfn = fpfn + win;
     740           0 :         } else if (pfn == fpfn + 1) {
     741           0 :                 lpfn = fpfn - win + 1;
     742           0 :                 rpfn = fpfn + 1;
     743             :         } else {
     744           0 :                 unsigned int left = (win - 1) / 2;
     745             : 
     746           0 :                 lpfn = fpfn - left;
     747           0 :                 rpfn = fpfn + win - left;
     748             :         }
     749           0 :         start = max3(lpfn, PFN_DOWN(vma->vm_start),
     750             :                      PFN_DOWN(faddr & PMD_MASK));
     751           0 :         end = min3(rpfn, PFN_DOWN(vma->vm_end),
     752             :                    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
     753             : 
     754           0 :         ra_info->nr_pte = end - start;
     755           0 :         ra_info->offset = fpfn - start;
     756           0 :         pte -= ra_info->offset;
     757             : #ifdef CONFIG_64BIT
     758           0 :         ra_info->ptes = pte;
     759             : #else
     760             :         tpte = ra_info->ptes;
     761             :         for (pfn = start; pfn != end; pfn++)
     762             :                 *tpte++ = *pte++;
     763             : #endif
     764             :         pte_unmap(orig_pte);
     765             : }
     766             : 
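The branches above place the readahead window according to the detected access direction. With win == 8, for instance (pfn is the previous fault's page frame, fpfn the current one):

    fpfn == pfn + 1  (forward streak)  ->  window [fpfn,     fpfn + 8)
    pfn == fpfn + 1  (backward streak) ->  window [fpfn - 7, fpfn + 1)
    otherwise        (no streak)       ->  window [fpfn - 3, fpfn + 5)

where the last case centres the window using left = (8 - 1) / 2 = 3; the result is then clamped to the VMA bounds and to the PMD page table covering faddr.
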
     767             : /**
     768             :  * swap_vma_readahead - swap in pages in hope we need them soon
     769             :  * @fentry: swap entry of this memory
     770             :  * @gfp_mask: memory allocation flags
     771             :  * @vmf: fault information
     772             :  *
     773             :  * Returns the struct page for entry and addr, after queueing swapin.
     774             :  *
     775             :  * Primitive swap readahead code. We simply read in a few pages whose
     776             :  * virtual addresses are around the fault address in the same vma.
     777             :  *
     778             :  * Caller must hold read mmap_lock if vmf->vma is not NULL.
     779             :  *
     780             :  */
     781           0 : static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
     782             :                                        struct vm_fault *vmf)
     783             : {
     784             :         struct blk_plug plug;
     785           0 :         struct swap_iocb *splug = NULL;
     786           0 :         struct vm_area_struct *vma = vmf->vma;
     787             :         struct page *page;
     788             :         pte_t *pte, pentry;
     789             :         swp_entry_t entry;
     790             :         unsigned int i;
     791             :         bool page_allocated;
     792           0 :         struct vma_swap_readahead ra_info = {
     793             :                 .win = 1,
     794             :         };
     795             : 
     796           0 :         swap_ra_info(vmf, &ra_info);
     797           0 :         if (ra_info.win == 1)
     798             :                 goto skip;
     799             : 
     800           0 :         blk_start_plug(&plug);
     801           0 :         for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
     802           0 :              i++, pte++) {
     803           0 :                 pentry = *pte;
     804           0 :                 if (!is_swap_pte(pentry))
     805           0 :                         continue;
     806           0 :                 entry = pte_to_swp_entry(pentry);
     807           0 :                 if (unlikely(non_swap_entry(entry)))
     808           0 :                         continue;
     809           0 :                 page = __read_swap_cache_async(entry, gfp_mask, vma,
     810             :                                                vmf->address, &page_allocated);
     811           0 :                 if (!page)
     812           0 :                         continue;
     813           0 :                 if (page_allocated) {
     814           0 :                         swap_readpage(page, false, &splug);
     815           0 :                         if (i != ra_info.offset) {
     816           0 :                                 SetPageReadahead(page);
     817           0 :                                 count_vm_event(SWAP_RA);
     818             :                         }
     819             :                 }
     820           0 :                 put_page(page);
     821             :         }
     822           0 :         blk_finish_plug(&plug);
     823           0 :         swap_read_unplug(splug);
     824           0 :         lru_add_drain();
     825             : skip:
     826             :         /* The page was likely read above, so no need for plugging here */
     827           0 :         return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
     828           0 :                                      ra_info.win == 1, NULL);
     829             : }
     830             : 
     831             : /**
     832             :  * swapin_readahead - swap in pages in hope we need them soon
     833             :  * @entry: swap entry of this memory
     834             :  * @gfp_mask: memory allocation flags
     835             :  * @vmf: fault information
     836             :  *
     837             :  * Returns the struct page for entry and addr, after queueing swapin.
     838             :  *
      839             :  * This is the main entry point for swap readahead. Depending on
      840             :  * configuration, it reads ahead either cluster-based (i.e. by physical
      841             :  * disk offset) or vma-based (i.e. by virtual address around the faulting address).
     842             :  */
     843           0 : struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
     844             :                                 struct vm_fault *vmf)
     845             : {
     846             :         return swap_use_vma_readahead() ?
     847           0 :                         swap_vma_readahead(entry, gfp_mask, vmf) :
     848             :                         swap_cluster_readahead(entry, gfp_mask, vmf);
     849             : }
     850             : 
     851             : #ifdef CONFIG_SYSFS
     852           0 : static ssize_t vma_ra_enabled_show(struct kobject *kobj,
     853             :                                      struct kobj_attribute *attr, char *buf)
     854             : {
     855           0 :         return sysfs_emit(buf, "%s\n",
     856           0 :                           enable_vma_readahead ? "true" : "false");
     857             : }
     858           0 : static ssize_t vma_ra_enabled_store(struct kobject *kobj,
     859             :                                       struct kobj_attribute *attr,
     860             :                                       const char *buf, size_t count)
     861             : {
     862             :         ssize_t ret;
     863             : 
     864           0 :         ret = kstrtobool(buf, &enable_vma_readahead);
     865           0 :         if (ret)
     866             :                 return ret;
     867             : 
     868           0 :         return count;
     869             : }
     870             : static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);
     871             : 
     872             : static struct attribute *swap_attrs[] = {
     873             :         &vma_ra_enabled_attr.attr,
     874             :         NULL,
     875             : };
     876             : 
     877             : static const struct attribute_group swap_attr_group = {
     878             :         .attrs = swap_attrs,
     879             : };
     880             : 
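Once swap_init_sysfs() below has registered this group, the knob appears under the mm kobject, so vma-based readahead can be toggled at runtime, e.g.:

    echo false > /sys/kernel/mm/swap/vma_ra_enabled

(the /sys/kernel/mm/swap path assumes the default mm_kobj location and the "swap" kobject created below).
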
     881           1 : static int __init swap_init_sysfs(void)
     882             : {
     883             :         int err;
     884             :         struct kobject *swap_kobj;
     885             : 
     886           1 :         swap_kobj = kobject_create_and_add("swap", mm_kobj);
     887           1 :         if (!swap_kobj) {
     888           0 :                 pr_err("failed to create swap kobject\n");
     889           0 :                 return -ENOMEM;
     890             :         }
     891           1 :         err = sysfs_create_group(swap_kobj, &swap_attr_group);
     892           1 :         if (err) {
     893           0 :                 pr_err("failed to register swap group\n");
     894             :                 goto delete_obj;
     895             :         }
     896             :         return 0;
     897             : 
     898             : delete_obj:
     899           0 :         kobject_put(swap_kobj);
     900           0 :         return err;
     901             : }
     902             : subsys_initcall(swap_init_sysfs);
     903             : #endif

Generated by: LCOV version 1.14