LCOV - code coverage report
Current view: top level - include/asm-generic - tlb.h (source / functions)
Test: coverage.info          Lines:       0 hit /  45 total  (0.0 %)
Date: 2023-03-27 20:00:47    Functions:   0 hit /   3 total  (0.0 %)

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0-or-later */
       2             : /* include/asm-generic/tlb.h
       3             :  *
       4             :  *      Generic TLB shootdown code
       5             :  *
       6             :  * Copyright 2001 Red Hat, Inc.
       7             :  * Based on code from mm/memory.c Copyright Linus Torvalds and others.
       8             :  *
       9             :  * Copyright 2011 Red Hat, Inc., Peter Zijlstra
      10             :  */
      11             : #ifndef _ASM_GENERIC__TLB_H
      12             : #define _ASM_GENERIC__TLB_H
      13             : 
      14             : #include <linux/mmu_notifier.h>
      15             : #include <linux/swap.h>
      16             : #include <linux/hugetlb_inline.h>
      17             : #include <asm/tlbflush.h>
      18             : #include <asm/cacheflush.h>
      19             : 
      20             : /*
      21             :  * Blindly accessing user memory from NMI context can be dangerous
      22             :  * if we're in the middle of switching the current user task or switching
      23             :  * the loaded mm.
      24             :  */
      25             : #ifndef nmi_uaccess_okay
      26             : # define nmi_uaccess_okay() true
      27             : #endif
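/*
 * Editorial sketch (not part of the header): an architecture that can tell
 * whether the mm currently loaded in hardware still matches current->mm
 * would typically override the default along these lines; the predicate
 * my_arch_loaded_mm_is_current() is hypothetical and only illustrates the
 * kind of check such an override performs.
 *
 *	# define nmi_uaccess_okay()	my_arch_loaded_mm_is_current()
 */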
      28             : 
      29             : #ifdef CONFIG_MMU
      30             : 
      31             : /*
      32             :  * Generic MMU-gather implementation.
      33             :  *
      34             :  * The mmu_gather data structure is used by the mm code to implement the
      35             :  * correct and efficient ordering of freeing pages and TLB invalidations.
      36             :  *
      37             :  * This correct ordering is:
      38             :  *
      39             :  *  1) unhook page
      40             :  *  2) TLB invalidate page
      41             :  *  3) free page
      42             :  *
      43             :  * That is, we must never free a page before we have ensured there are no live
      44             :  * translations left to it. Otherwise it might be possible to observe (or
      45             :  * worse, change) the page content after it has been reused.
      46             :  *
       47             :  * The mmu_gather API consists of (a short usage sketch follows this comment):
      48             :  *
      49             :  *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
      50             :  *
      51             :  *    start and finish a mmu_gather
      52             :  *
      53             :  *    Finish in particular will issue a (final) TLB invalidate and free
      54             :  *    all (remaining) queued pages.
      55             :  *
      56             :  *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
      57             :  *
      58             :  *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
       59             :  *    there are large holes between the VMAs.
      60             :  *
      61             :  *  - tlb_remove_table()
      62             :  *
      63             :  *    tlb_remove_table() is the basic primitive to free page-table directories
       64             :  *    (__p*_free_tlb()).  In its most primitive form it is an alias for
      65             :  *    tlb_remove_page() below, for when page directories are pages and have no
      66             :  *    additional constraints.
      67             :  *
      68             :  *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
      69             :  *
      70             :  *  - tlb_remove_page() / __tlb_remove_page()
      71             :  *  - tlb_remove_page_size() / __tlb_remove_page_size()
      72             :  *
      73             :  *    __tlb_remove_page_size() is the basic primitive that queues a page for
      74             :  *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
      75             :  *    boolean indicating if the queue is (now) full and a call to
      76             :  *    tlb_flush_mmu() is required.
      77             :  *
      78             :  *    tlb_remove_page() and tlb_remove_page_size() imply the call to
       79             :  *    tlb_flush_mmu() when required and have no return value.
      80             :  *
      81             :  *  - tlb_change_page_size()
      82             :  *
      83             :  *    call before __tlb_remove_page*() to set the current page-size; implies a
      84             :  *    possible tlb_flush_mmu() call.
      85             :  *
      86             :  *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
      87             :  *
      88             :  *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
      89             :  *                              related state, like the range)
      90             :  *
      91             :  *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
      92             :  *                      whatever pages are still batched.
      93             :  *
      94             :  *  - mmu_gather::fullmm
      95             :  *
      96             :  *    A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
      97             :  *    the entire mm; this allows a number of optimizations.
      98             :  *
       99             :  *    - We can ignore tlb_{start,end}_vma() because we don't
      100             :  *      care about ranges; everything will be shot down.
     101             :  *
     102             :  *    - (RISC) architectures that use ASIDs can cycle to a new ASID
     103             :  *      and delay the invalidation until ASID space runs out.
     104             :  *
     105             :  *  - mmu_gather::need_flush_all
     106             :  *
     107             :  *    A flag that can be set by the arch code if it wants to force
     108             :  *    flush the entire TLB irrespective of the range. For instance
     109             :  *    x86-PAE needs this when changing top-level entries.
     110             :  *
     111             :  * And allows the architecture to provide and implement tlb_flush():
     112             :  *
      113             :  * tlb_flush() may, in addition to the above-mentioned mmu_gather fields, make
     114             :  * use of:
     115             :  *
     116             :  *  - mmu_gather::start / mmu_gather::end
     117             :  *
     118             :  *    which provides the range that needs to be flushed to cover the pages to
     119             :  *    be freed.
     120             :  *
     121             :  *  - mmu_gather::freed_tables
     122             :  *
     123             :  *    set when we freed page table pages
     124             :  *
     125             :  *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
     126             :  *
     127             :  *    returns the smallest TLB entry size unmapped in this range.
     128             :  *
      129             :  * If an architecture does not provide tlb_flush(), a default implementation
     130             :  * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
     131             :  * specified, in which case we'll default to flush_tlb_mm().
     132             :  *
     133             :  * Additionally there are a few opt-in features:
     134             :  *
     135             :  *  MMU_GATHER_PAGE_SIZE
     136             :  *
     137             :  *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
     138             :  *  changes the size and provides mmu_gather::page_size to tlb_flush().
     139             :  *
     140             :  *  This might be useful if your architecture has size specific TLB
     141             :  *  invalidation instructions.
     142             :  *
     143             :  *  MMU_GATHER_TABLE_FREE
     144             :  *
     145             :  *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
      146             :  *  for page directories (__p*_free_tlb()).
     147             :  *
     148             :  *  Useful if your architecture has non-page page directories.
     149             :  *
     150             :  *  When used, an architecture is expected to provide __tlb_remove_table()
     151             :  *  which does the actual freeing of these pages.
     152             :  *
     153             :  *  MMU_GATHER_RCU_TABLE_FREE
     154             :  *
     155             :  *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
     156             :  *  comment below).
     157             :  *
     158             :  *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
     159             :  *  and therefore doesn't naturally serialize with software page-table walkers.
     160             :  *
     161             :  *  MMU_GATHER_NO_FLUSH_CACHE
     162             :  *
      163             :  *  Indicates the architecture has flush_cache_range() but it need *NOT* be called
     164             :  *  before unmapping a VMA.
     165             :  *
     166             :  *  NOTE: strictly speaking we shouldn't have this knob and instead rely on
     167             :  *        flush_cache_range() being a NOP, except Sparc64 seems to be
     168             :  *        different here.
     169             :  *
     170             :  *  MMU_GATHER_MERGE_VMAS
     171             :  *
     172             :  *  Indicates the architecture wants to merge ranges over VMAs; typical when
     173             :  *  multiple range invalidates are more expensive than a full invalidate.
     174             :  *
     175             :  *  MMU_GATHER_NO_RANGE
     176             :  *
     177             :  *  Use this if your architecture lacks an efficient flush_tlb_range(). This
     178             :  *  option implies MMU_GATHER_MERGE_VMAS above.
     179             :  *
     180             :  *  MMU_GATHER_NO_GATHER
     181             :  *
      182             :  *  If the option is set, the mmu_gather will not track individual pages for
      183             :  *  delayed page freeing anymore. A platform that enables the option needs to
     184             :  *  provide its own implementation of the __tlb_remove_page_size() function to
     185             :  *  free pages.
     186             :  *
     187             :  *  This is useful if your architecture already flushes TLB entries in the
     188             :  *  various ptep_get_and_clear() functions.
     189             :  */
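/*
 * Editorial usage sketch (not part of the header): a simplified illustration
 * of how the core mm drives the API above when tearing down mappings.  The
 * VMA/PTE iteration is elided to pseudo-code; only the mmu_gather calls are
 * real.  Real callers also handle locking, force-flush conditions and rmap,
 * all of which are omitted here.
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	for each vma in the range {                     // pseudo-code
 *		tlb_start_vma(&tlb, vma);
 *		for each present pte in the vma {       // pseudo-code
 *			pte = ptep_get_and_clear(mm, addr, ptep);  // 1) unhook page
 *			tlb_remove_tlb_entry(&tlb, ptep, addr);    // remember the range
 *			if (__tlb_remove_page(&tlb, page, 0))      // queue page for freeing
 *				tlb_flush_mmu(&tlb);               // batch full: flush + free
 *		}
 *		tlb_end_vma(&tlb, vma);                 // may issue a range TLB invalidate
 *	}
 *	tlb_finish_mmu(&tlb);                           // 2) final TLB invalidate, 3) free pages
 */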
     190             : 
     191             : #ifdef CONFIG_MMU_GATHER_TABLE_FREE
     192             : 
     193             : struct mmu_table_batch {
     194             : #ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
     195             :         struct rcu_head         rcu;
     196             : #endif
     197             :         unsigned int            nr;
     198             :         void                    *tables[];
     199             : };
     200             : 
     201             : #define MAX_TABLE_BATCH         \
     202             :         ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
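/*
 * Editorial worked example: on a typical 64-bit configuration with 4 KiB
 * pages, sizeof(struct mmu_table_batch) is 24 bytes with the rcu_head
 * (8 bytes without it), so MAX_TABLE_BATCH works out to roughly 510 table
 * pointers per batch page.
 */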
     203             : 
     204             : extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
     205             : 
      206             : #else /* !CONFIG_MMU_GATHER_TABLE_FREE */
     207             : 
     208             : /*
     209             :  * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
     210             :  * page directories and we can use the normal page batching to free them.
     211             :  */
     212             : #define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
     213             : 
     214             : #endif /* CONFIG_MMU_GATHER_TABLE_FREE */
     215             : 
     216             : #ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
     217             : /*
      218             :  * This allows an architecture whose hardware does not walk the Linux page
      219             :  * tables to skip the TLB invalidate when freeing page tables.
     220             :  */
     221             : #ifndef tlb_needs_table_invalidate
     222             : #define tlb_needs_table_invalidate() (true)
     223             : #endif
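/*
 * Editorial sketch: an architecture whose hardware walker does not read the
 * Linux page tables in a given mode could override this to elide the extra
 * invalidate, e.g. with a hypothetical predicate:
 *
 *	#define tlb_needs_table_invalidate()	my_arch_hw_walks_linux_ptes()
 */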
     224             : 
     225             : void tlb_remove_table_sync_one(void);
     226             : 
     227             : #else
     228             : 
     229             : #ifdef tlb_needs_table_invalidate
     230             : #error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
     231             : #endif
     232             : 
     233             : static inline void tlb_remove_table_sync_one(void) { }
     234             : 
     235             : #endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
     236             : 
     237             : 
     238             : #ifndef CONFIG_MMU_GATHER_NO_GATHER
     239             : /*
     240             :  * If we can't allocate a page to make a big batch of page pointers
     241             :  * to work on, then just handle a few from the on-stack structure.
     242             :  */
     243             : #define MMU_GATHER_BUNDLE       8
     244             : 
     245             : struct mmu_gather_batch {
     246             :         struct mmu_gather_batch *next;
     247             :         unsigned int            nr;
     248             :         unsigned int            max;
     249             :         struct encoded_page     *encoded_pages[];
     250             : };
     251             : 
     252             : #define MAX_GATHER_BATCH        \
     253             :         ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
     254             : 
     255             : /*
      256             :  * Limit the maximum number of mmu_gather batches to reduce the risk of soft
     257             :  * lockups for non-preemptible kernels on huge machines when a lot of memory
     258             :  * is zapped during unmapping.
     259             :  * 10K pages freed at once should be safe even without a preemption point.
     260             :  */
     261             : #define MAX_GATHER_BATCH_COUNT  (10000UL/MAX_GATHER_BATCH)
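/*
 * Editorial worked example: on a 64-bit build with 4 KiB pages the batch
 * header above is 16 bytes, so MAX_GATHER_BATCH = (4096 - 16) / 8 = 510
 * page pointers per batch and MAX_GATHER_BATCH_COUNT = 10000 / 510 = 19
 * batches, i.e. at most ~9.7K pages queued before the caller is told to
 * flush.
 */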
     262             : 
     263             : extern bool __tlb_remove_page_size(struct mmu_gather *tlb,
     264             :                                    struct encoded_page *page,
     265             :                                    int page_size);
     266             : 
     267             : #ifdef CONFIG_SMP
     268             : /*
      269             :  * This both sets 'delayed_rmap' and returns true. It would be an inline
     270             :  * function, except we define it before the 'struct mmu_gather'.
     271             :  */
     272             : #define tlb_delay_rmap(tlb) (((tlb)->delayed_rmap = 1), true)
     273             : extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
     274             : #endif
     275             : 
     276             : #endif
     277             : 
     278             : /*
     279             :  * We have a no-op version of the rmap removal that doesn't
     280             :  * delay anything. That is used on S390, which flushes remote
     281             :  * TLBs synchronously, and on UP, which doesn't have any
     282             :  * remote TLBs to flush and is not preemptible due to this
     283             :  * all happening under the page table lock.
     284             :  */
     285             : #ifndef tlb_delay_rmap
     286             : #define tlb_delay_rmap(tlb) (false)
     287             : static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
     288             : #endif
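/*
 * Editorial sketch of the intended calling pattern (simplified): when a page's
 * rmap removal has to wait until after the TLB flush, the caller marks the
 * gather and defers the rmap work:
 *
 *	flags = tlb_delay_rmap(tlb);            // sets tlb->delayed_rmap, returns true
 *	if (__tlb_remove_page(tlb, page, flags))
 *		... flush ...
 *	...
 *	tlb_flush_mmu_tlbonly(tlb);             // invalidate the TLB first
 *	tlb_flush_rmaps(tlb, vma);              // then do the deferred rmap removals
 */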
     289             : 
     290             : /*
     291             :  * struct mmu_gather is an opaque type used by the mm code for passing around
     292             :  * any data needed by arch specific code for tlb_remove_page.
     293             :  */
     294             : struct mmu_gather {
     295             :         struct mm_struct        *mm;
     296             : 
     297             : #ifdef CONFIG_MMU_GATHER_TABLE_FREE
     298             :         struct mmu_table_batch  *batch;
     299             : #endif
     300             : 
     301             :         unsigned long           start;
     302             :         unsigned long           end;
     303             :         /*
     304             :          * we are in the middle of an operation to clear
     305             :          * a full mm and can make some optimizations
     306             :          */
     307             :         unsigned int            fullmm : 1;
     308             : 
     309             :         /*
     310             :          * we have performed an operation which
     311             :          * requires a complete flush of the tlb
     312             :          */
     313             :         unsigned int            need_flush_all : 1;
     314             : 
     315             :         /*
     316             :          * we have removed page directories
     317             :          */
     318             :         unsigned int            freed_tables : 1;
     319             : 
     320             :         /*
     321             :          * Do we have pending delayed rmap removals?
     322             :          */
     323             :         unsigned int            delayed_rmap : 1;
     324             : 
     325             :         /*
     326             :          * at which levels have we cleared entries?
     327             :          */
     328             :         unsigned int            cleared_ptes : 1;
     329             :         unsigned int            cleared_pmds : 1;
     330             :         unsigned int            cleared_puds : 1;
     331             :         unsigned int            cleared_p4ds : 1;
     332             : 
     333             :         /*
      334             :          * tracks VM_EXEC | VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP in tlb_start_vma
     335             :          */
     336             :         unsigned int            vma_exec : 1;
     337             :         unsigned int            vma_huge : 1;
     338             :         unsigned int            vma_pfn  : 1;
     339             : 
     340             :         unsigned int            batch_count;
     341             : 
     342             : #ifndef CONFIG_MMU_GATHER_NO_GATHER
     343             :         struct mmu_gather_batch *active;
     344             :         struct mmu_gather_batch local;
     345             :         struct page             *__pages[MMU_GATHER_BUNDLE];
     346             : 
     347             : #ifdef CONFIG_MMU_GATHER_PAGE_SIZE
     348             :         unsigned int page_size;
     349             : #endif
     350             : #endif
     351             : };
     352             : 
     353             : void tlb_flush_mmu(struct mmu_gather *tlb);
     354             : 
     355             : static inline void __tlb_adjust_range(struct mmu_gather *tlb,
     356             :                                       unsigned long address,
     357             :                                       unsigned int range_size)
     358             : {
     359           0 :         tlb->start = min(tlb->start, address);
     360           0 :         tlb->end = max(tlb->end, address + range_size);
     361             : }
     362             : 
     363             : static inline void __tlb_reset_range(struct mmu_gather *tlb)
     364             : {
     365           0 :         if (tlb->fullmm) {
     366           0 :                 tlb->start = tlb->end = ~0;
     367             :         } else {
     368           0 :                 tlb->start = TASK_SIZE;
     369           0 :                 tlb->end = 0;
     370             :         }
     371           0 :         tlb->freed_tables = 0;
     372           0 :         tlb->cleared_ptes = 0;
     373           0 :         tlb->cleared_pmds = 0;
     374           0 :         tlb->cleared_puds = 0;
     375           0 :         tlb->cleared_p4ds = 0;
     376             :         /*
     377             :          * Do not reset mmu_gather::vma_* fields here, we do not
     378             :          * call into tlb_start_vma() again to set them if there is an
     379             :          * intermediate flush.
     380             :          */
     381             : }
     382             : 
     383             : #ifdef CONFIG_MMU_GATHER_NO_RANGE
     384             : 
     385             : #if defined(tlb_flush)
     386             : #error MMU_GATHER_NO_RANGE relies on default tlb_flush()
     387             : #endif
     388             : 
     389             : /*
      390             :  * When an architecture has no efficient way of flushing a range of TLB entries,
      391             :  * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
      392             :  * range small. We equally don't have to worry about page granularity or other
     393             :  * things.
     394             :  *
     395             :  * All we need to do is issue a full flush for any !0 range.
     396             :  */
     397             : static inline void tlb_flush(struct mmu_gather *tlb)
     398             : {
     399             :         if (tlb->end)
     400             :                 flush_tlb_mm(tlb->mm);
     401             : }
     402             : 
     403             : #else /* CONFIG_MMU_GATHER_NO_RANGE */
     404             : 
     405             : #ifndef tlb_flush
     406             : /*
     407             :  * When an architecture does not provide its own tlb_flush() implementation
      408             :  * but does have a reasonably efficient flush_tlb_range() implementation,
     409             :  * use that.
     410             :  */
     411           0 : static inline void tlb_flush(struct mmu_gather *tlb)
     412             : {
     413           0 :         if (tlb->fullmm || tlb->need_flush_all) {
     414           0 :                 flush_tlb_mm(tlb->mm);
     415           0 :         } else if (tlb->end) {
     416           0 :                 struct vm_area_struct vma = {
     417           0 :                         .vm_mm = tlb->mm,
     418           0 :                         .vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
     419           0 :                                     (tlb->vma_huge ? VM_HUGETLB : 0),
     420             :                 };
     421             : 
     422           0 :                 flush_tlb_range(&vma, tlb->start, tlb->end);
     423             :         }
     424           0 : }
     425             : #endif
     426             : 
     427             : #endif /* CONFIG_MMU_GATHER_NO_RANGE */
     428             : 
     429             : static inline void
     430             : tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
     431             : {
     432             :         /*
     433             :          * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
     434             :          * mips-4k) flush only large pages.
     435             :          *
     436             :          * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
     437             :          * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
     438             :          * range.
     439             :          *
     440             :          * We rely on tlb_end_vma() to issue a flush, such that when we reset
     441             :          * these values the batch is empty.
     442             :          */
     443           0 :         tlb->vma_huge = is_vm_hugetlb_page(vma);
     444           0 :         tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
     445           0 :         tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
     446             : }
     447             : 
     448           0 : static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
     449             : {
     450             :         /*
     451             :          * Anything calling __tlb_adjust_range() also sets at least one of
     452             :          * these bits.
     453             :          */
     454           0 :         if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
     455             :               tlb->cleared_puds || tlb->cleared_p4ds))
     456             :                 return;
     457             : 
     458           0 :         tlb_flush(tlb);
     459           0 :         mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
     460             :         __tlb_reset_range(tlb);
     461             : }
     462             : 
     463           0 : static inline void tlb_remove_page_size(struct mmu_gather *tlb,
     464             :                                         struct page *page, int page_size)
     465             : {
     466           0 :         if (__tlb_remove_page_size(tlb, encode_page(page, 0), page_size))
     467           0 :                 tlb_flush_mmu(tlb);
     468           0 : }
     469             : 
     470             : static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page, unsigned int flags)
     471             : {
     472           0 :         return __tlb_remove_page_size(tlb, encode_page(page, flags), PAGE_SIZE);
     473             : }
     474             : 
     475             : /* tlb_remove_page
     476             :  *      Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
     477             :  *      required.
     478             :  */
     479             : static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
     480             : {
     481           0 :         return tlb_remove_page_size(tlb, page, PAGE_SIZE);
     482             : }
     483             : 
     484             : static inline void tlb_change_page_size(struct mmu_gather *tlb,
     485             :                                                      unsigned int page_size)
     486             : {
     487             : #ifdef CONFIG_MMU_GATHER_PAGE_SIZE
     488             :         if (tlb->page_size && tlb->page_size != page_size) {
     489             :                 if (!tlb->fullmm && !tlb->need_flush_all)
     490             :                         tlb_flush_mmu(tlb);
     491             :         }
     492             : 
     493             :         tlb->page_size = page_size;
     494             : #endif
     495             : }
     496             : 
     497             : static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
     498             : {
     499             :         if (tlb->cleared_ptes)
     500             :                 return PAGE_SHIFT;
     501             :         if (tlb->cleared_pmds)
     502             :                 return PMD_SHIFT;
     503             :         if (tlb->cleared_puds)
     504             :                 return PUD_SHIFT;
     505             :         if (tlb->cleared_p4ds)
     506             :                 return P4D_SHIFT;
     507             : 
     508             :         return PAGE_SHIFT;
     509             : }
     510             : 
     511             : static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
     512             : {
     513             :         return 1UL << tlb_get_unmap_shift(tlb);
     514             : }
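/*
 * Editorial sketch: an architecture whose invalidate instruction takes a
 * stride could use these helpers from its own tlb_flush().  The helper
 * my_arch_flush_tlb_range_stride() is hypothetical; only the mmu_gather
 * fields and tlb_get_unmap_size() are real.
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		if (tlb->fullmm || tlb->need_flush_all)
 *			flush_tlb_mm(tlb->mm);
 *		else if (tlb->end)
 *			my_arch_flush_tlb_range_stride(tlb->mm, tlb->start, tlb->end,
 *						       tlb_get_unmap_size(tlb));
 *	}
 */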
     515             : 
     516             : /*
      517             :  * For tlb vma handling, we can optimise these away when we're doing a
      518             :  * full MM flush.  When we're doing a munmap, the vmas are adjusted to
      519             :  * only cover the region to be torn down.
     520             :  */
     521             : static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
     522             : {
     523           0 :         if (tlb->fullmm)
     524             :                 return;
     525             : 
     526           0 :         tlb_update_vma_flags(tlb, vma);
     527             : #ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
     528           0 :         flush_cache_range(vma, vma->vm_start, vma->vm_end);
     529             : #endif
     530             : }
     531             : 
     532             : static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
     533             : {
     534           0 :         if (tlb->fullmm)
     535             :                 return;
     536             : 
     537             :         /*
     538             :          * VM_PFNMAP is more fragile because the core mm will not track the
     539             :          * page mapcount -- there might not be page-frames for these PFNs after
     540             :          * all. Force flush TLBs for such ranges to avoid munmap() vs
     541             :          * unmap_mapping_range() races.
     542             :          */
     543             :         if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
     544             :                 /*
     545             :                  * Do a TLB flush and reset the range at VMA boundaries; this avoids
     546             :                  * the ranges growing with the unused space between consecutive VMAs.
     547             :                  */
     548           0 :                 tlb_flush_mmu_tlbonly(tlb);
     549             :         }
     550             : }
     551             : 
     552             : /*
     553             :  * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
     554             :  * and set corresponding cleared_*.
     555             :  */
     556             : static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
     557             :                                      unsigned long address, unsigned long size)
     558             : {
     559           0 :         __tlb_adjust_range(tlb, address, size);
     560           0 :         tlb->cleared_ptes = 1;
     561             : }
     562             : 
     563             : static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
     564             :                                      unsigned long address, unsigned long size)
     565             : {
     566           0 :         __tlb_adjust_range(tlb, address, size);
     567           0 :         tlb->cleared_pmds = 1;
     568             : }
     569             : 
     570             : static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
     571             :                                      unsigned long address, unsigned long size)
     572             : {
     573           0 :         __tlb_adjust_range(tlb, address, size);
     574           0 :         tlb->cleared_puds = 1;
     575             : }
     576             : 
     577             : static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
     578             :                                      unsigned long address, unsigned long size)
     579             : {
     580             :         __tlb_adjust_range(tlb, address, size);
     581             :         tlb->cleared_p4ds = 1;
     582             : }
     583             : 
     584             : #ifndef __tlb_remove_tlb_entry
     585             : #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
     586             : #endif
     587             : 
     588             : /**
     589             :  * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
     590             :  *
      591             :  * Record the fact that PTEs were really unmapped by updating the range,
      592             :  * so we can later optimise away the tlb invalidate.  This helps when
     593             :  * userspace is unmapping already-unmapped pages, which happens quite a lot.
     594             :  */
     595             : #define tlb_remove_tlb_entry(tlb, ptep, address)                \
     596             :         do {                                                    \
     597             :                 tlb_flush_pte_range(tlb, address, PAGE_SIZE);   \
     598             :                 __tlb_remove_tlb_entry(tlb, ptep, address);     \
     599             :         } while (0)
     600             : 
     601             : #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)        \
     602             :         do {                                                    \
     603             :                 unsigned long _sz = huge_page_size(h);          \
     604             :                 if (_sz >= P4D_SIZE)                         \
     605             :                         tlb_flush_p4d_range(tlb, address, _sz); \
     606             :                 else if (_sz >= PUD_SIZE)                    \
     607             :                         tlb_flush_pud_range(tlb, address, _sz); \
     608             :                 else if (_sz >= PMD_SIZE)                    \
     609             :                         tlb_flush_pmd_range(tlb, address, _sz); \
     610             :                 else                                            \
     611             :                         tlb_flush_pte_range(tlb, address, _sz); \
     612             :                 __tlb_remove_tlb_entry(tlb, ptep, address);     \
     613             :         } while (0)
     614             : 
     615             : /**
     616             :  * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
     617             :  * This is a nop so far, because only x86 needs it.
     618             :  */
     619             : #ifndef __tlb_remove_pmd_tlb_entry
     620             : #define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
     621             : #endif
     622             : 
     623             : #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)                    \
     624             :         do {                                                            \
     625             :                 tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);      \
     626             :                 __tlb_remove_pmd_tlb_entry(tlb, pmdp, address);         \
     627             :         } while (0)
     628             : 
     629             : /**
     630             :  * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
     631             :  * invalidation. This is a nop so far, because only x86 needs it.
     632             :  */
     633             : #ifndef __tlb_remove_pud_tlb_entry
     634             : #define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
     635             : #endif
     636             : 
     637             : #define tlb_remove_pud_tlb_entry(tlb, pudp, address)                    \
     638             :         do {                                                            \
     639             :                 tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);      \
     640             :                 __tlb_remove_pud_tlb_entry(tlb, pudp, address);         \
     641             :         } while (0)
     642             : 
     643             : /*
      644             :  * For things like page table caches (i.e. caching addresses "inside" the
     645             :  * page tables, like x86 does), for legacy reasons, flushing an
     646             :  * individual page had better flush the page table caches behind it. This
     647             :  * is definitely how x86 works, for example. And if you have an
     648             :  * architected non-legacy page table cache (which I'm not aware of
     649             :  * anybody actually doing), you're going to have some architecturally
     650             :  * explicit flushing for that, likely *separate* from a regular TLB entry
     651             :  * flush, and thus you'd need more than just some range expansion..
     652             :  *
      653             :  * So if we ever find an architecture that would want something that odd,
      654             :  * I think it is up to that architecture to do its own odd thing, not
      655             :  * cause pain for others:
      656             :  * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
      657             :  *
      658             :  * For now, w.r.t. the page table cache, mark the range_size as PAGE_SIZE.
     659             :  */
     660             : 
     661             : #ifndef pte_free_tlb
     662             : #define pte_free_tlb(tlb, ptep, address)                        \
     663             :         do {                                                    \
     664             :                 tlb_flush_pmd_range(tlb, address, PAGE_SIZE);   \
     665             :                 tlb->freed_tables = 1;                               \
     666             :                 __pte_free_tlb(tlb, ptep, address);             \
     667             :         } while (0)
     668             : #endif
     669             : 
     670             : #ifndef pmd_free_tlb
     671             : #define pmd_free_tlb(tlb, pmdp, address)                        \
     672             :         do {                                                    \
     673             :                 tlb_flush_pud_range(tlb, address, PAGE_SIZE);   \
     674             :                 tlb->freed_tables = 1;                               \
     675             :                 __pmd_free_tlb(tlb, pmdp, address);             \
     676             :         } while (0)
     677             : #endif
     678             : 
     679             : #ifndef pud_free_tlb
     680             : #define pud_free_tlb(tlb, pudp, address)                        \
     681             :         do {                                                    \
     682             :                 tlb_flush_p4d_range(tlb, address, PAGE_SIZE);   \
     683             :                 tlb->freed_tables = 1;                               \
     684             :                 __pud_free_tlb(tlb, pudp, address);             \
     685             :         } while (0)
     686             : #endif
     687             : 
     688             : #ifndef p4d_free_tlb
      689             : #define p4d_free_tlb(tlb, p4dp, address)                        \
     690             :         do {                                                    \
     691             :                 __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
     692             :                 tlb->freed_tables = 1;                               \
      693             :                 __p4d_free_tlb(tlb, p4dp, address);             \
     694             :         } while (0)
     695             : #endif
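/*
 * Editorial sketch: an architecture whose page-table pages are plain pages
 * might wire the hook up roughly like this (hypothetical, and ignoring the
 * page-table accounting/destructor calls a real architecture also performs):
 *
 *	#define __pte_free_tlb(tlb, ptep, address)	\
 *		tlb_remove_table((tlb), (ptep))
 */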
     696             : 
     697             : #ifndef pte_needs_flush
     698             : static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
     699             : {
     700             :         return true;
     701             : }
     702             : #endif
     703             : 
     704             : #ifndef huge_pmd_needs_flush
     705             : static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
     706             : {
     707             :         return true;
     708             : }
     709             : #endif
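/*
 * Editorial sketch: an architecture that can prove certain PTE transitions do
 * not require a flush may override these helpers.  A simplified, hypothetical
 * example that skips the flush when the old and new PTE differ only in the
 * accessed bit:
 *
 *	#define pte_needs_flush pte_needs_flush
 *	static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
 *	{
 *		return !pte_same(pte_mkyoung(oldpte), pte_mkyoung(newpte));
 *	}
 */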
     710             : 
     711             : #endif /* CONFIG_MMU */
     712             : 
     713             : #endif /* _ASM_GENERIC__TLB_H */

Generated by: LCOV version 1.14