/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT   0x001
#define _PAGE_NEWPAGE   0x002
#define _PAGE_NEWPROT   0x004
#define _PAGE_RW        0x020
#define _PAGE_USER      0x040
#define _PAGE_ACCESSED  0x080
#define _PAGE_DIRTY     0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE  0x010   /* if the user mapped it with PROT_NONE;
                                   pte_present gives true */

/* We borrow bit 10 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE     0x400
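
/*
 * Note: _PAGE_NEWPAGE and _PAGE_NEWPROT are UML-specific bookkeeping
 * bits rather than hardware-defined flags.  Roughly, _PAGE_NEWPAGE marks
 * a PTE whose page still needs to be mapped or unmapped in the host
 * address space, and _PAGE_NEWPROT marks a mapped page whose protection
 * changed and still has to be pushed out to the host; see the comment in
 * set_pte() below, where fix_range() is said to use _PAGE_NEWPAGE to
 * find pages to unmap.
 */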

#ifdef CONFIG_3_LEVEL_PGTABLES
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

/* Just an arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after
 * physical memory, before kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET  (__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#define VMALLOC_END     (FIXADDR_START-2*PAGE_SIZE)
#define MODULES_VADDR   VMALLOC_START
#define MODULES_END     VMALLOC_END
#define MODULES_LEN     (MODULES_END - MODULES_VADDR)
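
/*
 * Illustrative example (the numbers are made up; real values depend on
 * the configuration): with VMALLOC_OFFSET (__va_space) = 0x800000 (8MB)
 * and end_iomem = 0x30100000, VMALLOC_START becomes
 *
 *      (0x30100000 + 0x800000) & ~0x7fffff == 0x30800000
 *
 * i.e. end_iomem rounded up to the next 8MB boundary, which leaves the
 * "hole" described above.  The mask trick assumes VMALLOC_OFFSET is a
 * power of two.
 */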

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC                                              \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC        __pgprot(__PAGE_KERNEL_EXEC)
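
/*
 * For example, PAGE_COPY differs from PAGE_SHARED only in lacking
 * _PAGE_RW: a private mapping starts out read-only, so the first write
 * faults and the fault handler can copy the page (copy-on-write).
 */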

/*
 * The i386 can't do page protection for execute, and considers it the
 * same as read.  Also, write permissions imply read permissions.  This
 * is the closest we can get.
 */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm, addr, xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)     (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)  (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)   do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)  (pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)  (pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define p4d_newpage(x)  (p4d_val(x) & _PAGE_NEWPAGE)
#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x)  pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
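
/*
 * Note that pte_present() is also true for _PAGE_PROTNONE: a PROT_NONE
 * page is still resident, it just may not be accessed, so the real
 * present bit is kept clear (to make accesses fault) while
 * _PAGE_PROTNONE remembers that the page is actually there.
 */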

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
        return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_USER) &&
               !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_exec(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_USER) &&
               !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_write(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_RW) &&
               !pte_get_bits(pte, _PAGE_PROTNONE);
}

static inline int pte_dirty(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
        return pte_present(pte) && pte_get_bits(pte, _PAGE_NEWPROT);
}

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPROT);
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_DIRTY);
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_ACCESSED);
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        if (unlikely(!pte_get_bits(pte, _PAGE_RW)))
                return pte;
        pte_clear_bits(pte, _PAGE_RW);
        return pte_mknewprot(pte);
}
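
/*
 * Note that pte_wrprotect() only marks the PTE with _PAGE_NEWPROT when
 * it actually drops _PAGE_RW: an already read-only PTE needs no host
 * protection update, so it is returned unchanged.
 */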

static inline pte_t pte_mkread(pte_t pte)
{
        if (unlikely(pte_get_bits(pte, _PAGE_USER)))
                return pte;
        pte_set_bits(pte, _PAGE_USER);
        return pte_mknewprot(pte);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_set_bits(pte, _PAGE_DIRTY);
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_set_bits(pte, _PAGE_ACCESSED);
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        if (unlikely(pte_get_bits(pte, _PAGE_RW)))
                return pte;
        pte_set_bits(pte, _PAGE_RW);
        return pte_mknewprot(pte);
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_NEWPAGE);
        if (pte_present(pte))
                pte_clear_bits(pte, _PAGE_NEWPROT);
        return pte;
}
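
/*
 * "Up to date" appears to mean "in sync with the host mapping" here:
 * pte_mkuptodate() drops both bookkeeping bits, clearing _PAGE_NEWPROT
 * only for present PTEs since that bit has no meaning for swap PTEs
 * (see the comment in set_pte() below).
 */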

static inline pte_t pte_mknewpage(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPAGE);
        return pte;
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
        pte_copy(*pteptr, pteval);

        /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
         * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
         * mapped pages.
         */

        *pteptr = pte_mknewpage(*pteptr);
        if (pte_present(*pteptr))
                *pteptr = pte_mknewprot(*pteptr);
}
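
/*
 * Rough lifecycle of the bookkeeping bits (a sketch, not an exact call
 * chain): set_pte() marks the PTE _PAGE_NEWPAGE (and _PAGE_NEWPROT if it
 * is present); a later TLB flush walks the affected range and lets
 * fix_range() issue the corresponding host map/unmap/protect operations,
 * after which the PTE can be marked up to date again.
 */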

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *pteptr, pte_t pteval)
{
        set_pte(pteptr, pteval);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
        return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
}
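
/*
 * _PAGE_NEWPAGE is masked out of the comparison above: two PTEs that
 * differ only in host-synchronization state still map the same page
 * with the same protections, so they count as the same PTE.
 */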

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
        ({ pte_t pte;                                   \
                                                        \
        pte_set_val(pte, page_to_phys(page), (pgprot)); \
        if (pte_present(pte))                           \
                pte = pte_mknewprot(pte_mknewpage(pte)); \
        pte;})
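
/*
 * pte_mknewpage() and pte_mknewprot() take and return the PTE by value,
 * so their result must be assigned back, as above.  A present PTE fresh
 * out of mk_pte() is therefore marked new, so that installing it forces
 * the host mapping to be created.
 */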

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
        return pte;
}

/*
 * The pmd page can be thought of as an array like this:
 * pmd_t[PTRS_PER_PMD].  pmd_page_vaddr() returns the kernel virtual
 * address of the page table that the given pmd entry points to.
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma, address, ptep) do {} while (0)

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ----------------> E < type -> 0 0 0 1 0
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   _PAGE_NEWPAGE (bit 1) is always set to 1 in set_pte().
 */
#define __swp_type(x)                   (((x).val >> 5) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 11)

#define __swp_entry(type, offset) \
        ((swp_entry_t) { (((type) & 0x1f) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
        ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
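
/*
 * Worked example (purely illustrative): __swp_entry(5, 0x100) yields
 *
 *      (5 << 5) | (0x100 << 11) == 0xa0 | 0x80000 == 0x800a0
 *
 * and decoding gives back __swp_type() == (0x800a0 >> 5) & 0x1f == 5 and
 * __swp_offset() == 0x800a0 >> 11 == 0x100.  Bits 0-4 start out clear,
 * so the PTE is !pte_present() and is treated as a swap entry; set_pte()
 * will later set bit 1 (_PAGE_NEWPAGE), as noted above.
 */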

static inline int pte_swp_exclusive(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
        pte_set_bits(pte, _PAGE_SWP_EXCLUSIVE);
        return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_SWP_EXCLUSIVE);
        return pte;
}

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)           \
do {                                            \
        pte_clear(&init_mm, (vaddr), (ptep));   \
        __flush_tlb_one((vaddr));               \
} while (0)
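
/*
 * A typical (hypothetical) use, zapping a single kernel mapping:
 *
 *      pte_t *ptep = virt_to_pte(&init_mm, vaddr);
 *
 *      kpte_clear_flush(ptep, vaddr);
 *
 * which clears the PTE in init_mm and flushes the one TLB entry covering
 * vaddr.
 */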

#endif
