/* LCOV coverage report (LCOV 1.14, test: coverage.info, 2023-03-27): include/linux/vmalloc.h. Lines: 2/2 (100.0%), Functions: 0/0. */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP	0x00000400	/* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif
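
/*
 * Example (editor's sketch, not part of the kernel header): these flag
 * bits live in vm_struct::flags and can be tested after looking up an
 * area with find_vm_area(), declared further below. The function name
 * and the pr_info() output format are illustrative only.
 */
#if 0	/* illustrative snippet, not compiled */
static void report_vm_area_kind(const void *addr)
{
	struct vm_struct *area = find_vm_area(addr);

	if (!area)
		return;		/* addr is not a vmalloc/vmap address */
	if (area->flags & VM_ALLOC)
		pr_info("%p: vmalloc()ed\n", addr);
	else if (area->flags & VM_MAP)
		pr_info("%p: vmap()ed\n", addr);
	else if (area->flags & VM_IOREMAP)
		pr_info("%p: ioremap()ed\n", addr);
}
#endif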

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif
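
/*
 * Worked example (editor's note): with the default above and a 4 KiB
 * PAGE_SIZE (PAGE_SHIFT = 12), IOREMAP_MAX_ORDER is 7 + 12 = 19, i.e. a
 * maximum alignment of 2^19 bytes = 512 KiB = 128 pages, matching the
 * "128 pages" comment.
 */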

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};
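
/*
 * Example (editor's sketch): how the fields relate for a vmalloc'ed area.
 * 'pages' holds 'nr_pages' pointers to the backing physical pages, while
 * 'size' covers the virtual range including the trailing guard page (see
 * get_vm_area_size() below). The function name is illustrative.
 */
#if 0	/* illustrative snippet, not compiled */
static void dump_backing_pages(const struct vm_struct *area)
{
	unsigned int i;

	/* one entry per backing page; addr..addr+size also covers the guard page */
	for (i = 0; i < area->nr_pages; i++)
		pr_info("page %u -> pfn %lu\n", i, page_to_pfn(area->pages[i]));
}
#endif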

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following two fields can share a union because a vmap_area
	 * object is always in exactly one of two trees:
	 *    1) the "free" tree (root is free_vmap_area_root)
	 *    2) the "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
	};
	unsigned long flags;		/* mark type of vm_map_ram area */
};
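
/*
 * Editor's note: a minimal sketch of how the union is interpreted. Which
 * member is valid depends on which tree currently holds the vmap_area;
 * the 'in_free_tree' parameter below is hypothetical, for illustration.
 */
#if 0	/* illustrative snippet, not compiled */
static unsigned long vmap_area_union_value(const struct vmap_area *va,
					   bool in_free_tree)
{
	if (in_free_tree)
		return va->subtree_max_size;	/* augmented rbtree metadata */
	return (unsigned long)va->vm;		/* the area's vm_struct */
}
#endif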

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;
}
#endif
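
/*
 * Example (editor's sketch): how an architecture might override one of the
 * hooks above. It would define the macro and its own inline in
 * <asm/vmalloc.h>, which this header includes before the #ifndef tests.
 * The policy and CONFIG_SOME_DEBUG_OPTION below are hypothetical, not any
 * real architecture's implementation.
 */
#if 0	/* illustrative snippet, belongs in an arch's <asm/vmalloc.h> */
#define arch_vmap_pmd_supported arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	/* hypothetical: allow PMD-sized mappings unless a debug option forbids them */
	return !IS_ENABLED(CONFIG_SOME_DEBUG_OPTION);
}
#endif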

/*
 *	High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
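
/*
 * Example (editor's sketch): vm_map_ram()/vm_unmap_ram() give a transient
 * contiguous view of pages the caller already owns; 'count' must match
 * between the two calls. Page allocation and error details are elided.
 */
#if 0	/* illustrative snippet, not compiled */
static int use_pages_briefly(struct page **pages, unsigned int count)
{
	void *va = vm_map_ram(pages, count, NUMA_NO_NODE);

	if (!va)
		return -ENOMEM;
	/* ... access the pages through the contiguous mapping at va ... */
	vm_unmap_ram(va, count);
	return 0;
}
#endif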

#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size) __alloc_size(1);
extern void *vzalloc(unsigned long size) __alloc_size(1);
extern void *vmalloc_user(unsigned long size) __alloc_size(1);
extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vmalloc_32(unsigned long size) __alloc_size(1);
extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller) __alloc_size(1);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller) __alloc_size(1);
void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);

extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
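
/*
 * Example (editor's sketch): the classic allocate/use/free pattern.
 * vcalloc() zeroes the memory and guards the n * size multiplication
 * against overflow (see <linux/overflow.h>). The function name and sizes
 * are illustrative.
 */
#if 0	/* illustrative snippet, not compiled */
static int build_big_table(size_t nr_entries)
{
	u64 *table = vcalloc(nr_entries, sizeof(*table));

	if (!table)
		return -ENOMEM;
	/* ... fill and use the table; it is only virtually contiguous ... */
	vfree(table);
	return 0;
}
#endif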

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
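
/*
 * Example (editor's sketch): vmap() builds a long-lived contiguous mapping
 * over caller-provided pages; VM_MAP is the conventional flag for this use,
 * and the mapping persists until vunmap(). Assumes the caller keeps the
 * pages allocated for the mapping's lifetime.
 */
#if 0	/* illustrative snippet, not compiled */
static void *map_long_lived(struct page **pages, unsigned int count)
{
	return vmap(pages, count, VM_MAP, PAGE_KERNEL);	/* NULL on failure */
}
#endif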

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
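
/*
 * Example (editor's sketch): exporting a vmalloc_user() buffer to userspace
 * from a driver's ->mmap handler. The buffer must carry VM_USERMAP (as
 * vmalloc_user() arranges) for this to succeed; 'mydrv_mmap' and the use of
 * file->private_data are hypothetical.
 */
#if 0	/* illustrative snippet, not compiled */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	void *buf = file->private_data;	/* allocated with vmalloc_user() */

	return remap_vmalloc_range(vma, buf, 0);	/* map from buffer start */
}
#endif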

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(). It
 * relies on the compiler to optimize the calls out if
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
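
/*
 * Example (editor's sketch): the opt-in pattern an architecture would use.
 * It defines the mask in its page-table headers and provides the matching
 * function; generic code then calls it whenever a covered page-table level
 * changed. The mask value shown is illustrative, not any specific arch's.
 */
#if 0	/* illustrative snippet, belongs in arch code */
#define ARCH_PAGE_TABLE_SYNC_MASK	(PGTBL_PMD_MODIFIED | PGTBL_PGD_MODIFIED)

void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	/* propagate the kernel mapping change to all relevant page tables */
}
#endif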

/*
 *	Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}
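
/*
 * Worked example (editor's note): for a 16 KiB vmalloc() request with
 * 4 KiB pages, the area is created with size = 16 KiB + 4 KiB guard page,
 * so get_vm_area_size() returns 20 KiB - 4 KiB = 16 KiB, the usable size.
 * With VM_NO_GUARD there is no guard page and size is returned unchanged.
 */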

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);
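
/*
 * Example (editor's sketch): vm_struct::caller records who created an area
 * (this is what /proc/vmallocinfo prints), which makes find_vm_area()
 * handy when debugging a stray vmalloc address. Output format is
 * illustrative.
 */
#if 0	/* illustrative snippet, not compiled */
static void who_allocated(const void *addr)
{
	struct vm_struct *area = find_vm_area(addr);

	if (area)
		pr_info("%p belongs to area %p created by %pS\n",
			addr, area->addr, area->caller);
}
#endif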

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This may not tell with 100% accuracy whether the area is mapped
	 * with > PAGE_SIZE page table entries: if for some reason the
	 * architecture indicates larger sizes are available but decides not
	 * to use them, nothing prevents that. This only indicates the size
	 * of the physical pages allocated in the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}

#ifdef CONFIG_MMU
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
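
/*
 * Example (editor's sketch): the usual pattern for memory whose permissions
 * will be changed (e.g. made executable): mark it right after allocation so
 * that vfree() restores the direct map and flushes the TLB. The range and
 * alignment arguments to __vmalloc_node_range() are illustrative.
 */
#if 0	/* illustrative snippet, not compiled */
static void *alloc_exec_memory(unsigned long size)
{
	void *p = __vmalloc_node_range(size, PAGE_SIZE, VMALLOC_START,
				       VMALLOC_END, GFP_KERNEL,
				       PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				       __builtin_return_address(0));

	if (p)
		set_vm_flush_reset_perms(p);
	return p;
}
#endif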

/* for /proc/kcore */
extern long vread(char *buf, char *addr, unsigned long count);

/*
 *	Internals. Don't use.
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
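
/*
 * Example (editor's sketch): subscribing to vmap purge events with a
 * standard notifier_block. The callback name and its policy are
 * hypothetical; the registration API is the pair declared above.
 */
#if 0	/* illustrative snippet, not compiled */
static int mydrv_vmap_purge(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	/* e.g. drop driver-side caches of vmap'ed memory */
	return NOTIFY_OK;
}

static struct notifier_block mydrv_vmap_nb = {
	.notifier_call = mydrv_vmap_purge,
};

/* in init/exit paths:
 *	register_vmap_purge_notifier(&mydrv_vmap_nb);
 *	unregister_vmap_purge_notifier(&mydrv_vmap_nb);
 */
#endif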

#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */
