/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page:       Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. If no mapping slot is
 * available, the function blocks until a slot is released via kunmap().
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page:       Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);
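
/*
 * Illustrative kmap()/kunmap() usage sketch. The long-term consumer below
 * is an assumed example, not taken from a particular caller: the mapping
 * may be held across sleeps and handed to other contexts, but it must
 * eventually be released with kunmap() on the same page.
 *
 *   void *vaddr = kmap(page);          // may sleep waiting for a map slot
 *
 *   do_something_long_term(vaddr);     // hypothetical consumer of the mapping
 *
 *   kunmap(page);                      // note: takes the page, not vaddr
 */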

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr:       The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *                     remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context, including interrupts.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 * addr1 = kmap_local_page(page1);
 * addr2 = kmap_local_page(page2);
 * ...
 * kunmap_local(addr2);
 * kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Unlike kmap() mappings, the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While kmap_local_page() is significantly faster than kmap() for the
 * highmem case, it comes with restrictions on pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);
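
/*
 * A minimal kmap_local_page() usage sketch for a single page; "buf",
 * "offset" and "len" are assumed for illustration and offset + len must
 * not exceed PAGE_SIZE. The mapping is strictly local to the caller and
 * has to be released in the same context.
 *
 *   void *vaddr = kmap_local_page(page);
 *
 *   memcpy(buf, vaddr + offset, len);
 *
 *   kunmap_local(vaddr);
 */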

/**
 * kmap_local_folio - Map a page in this folio for temporary usage
 * @folio: The folio containing the page.
 * @offset: The byte offset within the folio which identifies the page.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation::
 *
 *   addr1 = kmap_local_folio(folio1, offset1);
 *   addr2 = kmap_local_folio(folio2, offset2);
 *   ...
 *   kunmap_local(addr2);
 *   kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Unlike kmap() mappings, the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case, it
 * comes with restrictions on pointer validity.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_folio() can rely on this side effect.
 *
 * Context: Can be invoked from any context.
 * Return: The virtual address of @offset.
 */
static inline void *kmap_local_folio(struct folio *folio, size_t offset);

/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page:       Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * In fact a wrapper around kmap_local_page() which also disables pagefaults
 * and, depending on PREEMPT_RT configuration, also CPU migration and
 * preemption. Therefore users should not count on the latter two side effects.
 *
 * Mappings should always be released by kunmap_atomic().
 *
 * Do not use in new code. Use kmap_local_page() instead.
 *
 * It is used in atomic context when code wants to access the contents of a
 * page that might be allocated from high memory (see __GFP_HIGHMEM), for
 * example a page in the pagecache.  The API has two functions, and they
 * can be used in a manner similar to the following::
 *
 *   // Find the page of interest.
 *   struct page *page = find_get_page(mapping, offset);
 *
 *   // Gain access to the contents of that page.
 *   void *vaddr = kmap_atomic(page);
 *
 *   // Do something to the contents of that page.
 *   memset(vaddr, 0, PAGE_SIZE);
 *
 *   // Unmap that page.
 *   kunmap_atomic(vaddr);
 *
 * Note that the kunmap_atomic() call takes the result of the kmap_atomic()
 * call, not the argument.
 *
 * If you need to map two pages because you want to copy from one page to
 * another, you need to keep the kmap_atomic calls strictly nested, like:
 *
 * vaddr1 = kmap_atomic(page1);
 * vaddr2 = kmap_atomic(page2);
 *
 * memcpy(vaddr1, vaddr2, PAGE_SIZE);
 *
 * kunmap_atomic(vaddr2);
 * kunmap_atomic(vaddr1);
 */
static inline void *kmap_atomic(struct page *page);
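
/*
 * Sketch of the conversion the comment above recommends: a deprecated
 * kmap_atomic()/kunmap_atomic() pair rewritten with kmap_local_page().
 * If the section relied on pagefaults or preemption being disabled, that
 * has to be done explicitly (e.g. pagefault_disable()/preempt_disable()),
 * since kmap_local_page() does not provide those side effects.
 *
 *   // Before (deprecated):
 *   vaddr = kmap_atomic(page);
 *   memset(vaddr, 0, PAGE_SIZE);
 *   kunmap_atomic(vaddr);
 *
 *   // After:
 *   vaddr = kmap_local_page(page);
 *   memset(vaddr, 0, PAGE_SIZE);
 *   kunmap_local(vaddr);
 */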

/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *addr = kmap_local_page(page);
        clear_user_page(addr, vaddr, page);
        kunmap_local(addr);
}
#endif

#ifndef vma_alloc_zeroed_movable_folio
/**
 * vma_alloc_zeroed_movable_folio - Allocate a zeroed page for a VMA.
 * @vma: The VMA the page is to be allocated for.
 * @vaddr: The virtual address the page will be inserted into.
 *
 * This function will allocate a page suitable for inserting into this
 * VMA at this virtual address.  It may be allocated from highmem or
 * the movable zone.  An architecture may provide its own implementation.
 *
 * Return: A folio containing one allocated and zeroed page or NULL if
 * we are out of memory.
 */
static inline
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
                                   unsigned long vaddr)
{
        struct folio *folio;

        folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
        if (folio)
                clear_user_highpage(&folio->page, vaddr);

        return folio;
}
#endif

static inline void clear_highpage(struct page *page)
{
        void *kaddr = kmap_local_page(page);
        clear_page(kaddr);
        kunmap_local(kaddr);
}

static inline void clear_highpage_kasan_tagged(struct page *page)
{
        u8 tag;

        tag = page_kasan_tag(page);
        page_kasan_tag_reset(page);
        clear_highpage(page);
        page_kasan_tag_set(page, tag);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif

/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
                unsigned start2, unsigned end2);
#else
static inline void zero_user_segments(struct page *page,
                unsigned start1, unsigned end1,
                unsigned start2, unsigned end2)
{
        void *kaddr = kmap_local_page(page);
        unsigned int i;

        BUG_ON(end1 > page_size(page) || end2 > page_size(page));

        if (end1 > start1)
                memset(kaddr + start1, 0, end1 - start1);

        if (end2 > start2)
                memset(kaddr + start2, 0, end2 - start2);

        kunmap_local(kaddr);
        for (i = 0; i < compound_nr(page); i++)
                flush_dcache_page(page + i);
}
#endif

static inline void zero_user_segment(struct page *page,
        unsigned start, unsigned end)
{
        zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
        unsigned start, unsigned size)
{
        zero_user_segments(page, start, start + size, 0, 0);
}
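
/*
 * Illustrative use of zero_user_segment(): zero the part of a page that
 * lies beyond end-of-file, a common pattern in filesystem code. "isize"
 * is an assumed loff_t holding the file size, and the page is assumed to
 * straddle it.
 *
 *   zero_user_segment(page, offset_in_page(isize), PAGE_SIZE);
 */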

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        char *vfrom, *vto;

        vfrom = kmap_local_page(from);
        vto = kmap_local_page(to);
        copy_user_page(vto, vfrom, vaddr, to);
        kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
        kunmap_local(vto);
        kunmap_local(vfrom);
}

#endif

#ifdef copy_mc_to_kernel
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
                                        unsigned long vaddr, struct vm_area_struct *vma)
{
        unsigned long ret;
        char *vfrom, *vto;

        vfrom = kmap_local_page(from);
        vto = kmap_local_page(to);
        ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
        if (!ret)
                kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
        kunmap_local(vto);
        kunmap_local(vfrom);

        return ret;
}
#else
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
                                        unsigned long vaddr, struct vm_area_struct *vma)
{
        copy_user_highpage(to, from, vaddr, vma);
        return 0;
}
#endif
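
/*
 * Usage sketch for copy_mc_user_highpage(): a non-zero return means the
 * copy was cut short because reading the source hit a machine-check style
 * hardware error, so the destination page must not be used. The error
 * code below is illustrative; callers pick whatever their API expects.
 *
 *   if (copy_mc_user_highpage(dst, src, addr, vma))
 *           return -EFAULT;
 */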

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
        char *vfrom, *vto;

        vfrom = kmap_local_page(from);
        vto = kmap_local_page(to);
        copy_page(vto, vfrom);
        kmsan_copy_page_meta(to, from);
        kunmap_local(vto);
        kunmap_local(vfrom);
}

#endif

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
                               struct page *src_page, size_t src_off,
                               size_t len)
{
        char *dst = kmap_local_page(dst_page);
        char *src = kmap_local_page(src_page);

        VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
        memcpy(dst + dst_off, src + src_off, len);
        kunmap_local(src);
        kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
                               size_t len)
{
        char *addr = kmap_local_page(page);

        VM_BUG_ON(offset + len > PAGE_SIZE);
        memset(addr + offset, val, len);
        kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
                                    size_t offset, size_t len)
{
        char *from = kmap_local_page(page);

        VM_BUG_ON(offset + len > PAGE_SIZE);
        memcpy(to, from + offset, len);
        kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
                                  const char *from, size_t len)
{
        char *to = kmap_local_page(page);

        VM_BUG_ON(offset + len > PAGE_SIZE);
        memcpy(to + offset, from, len);
        flush_dcache_page(page);
        kunmap_local(to);
}
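
/*
 * Small sketch of the memcpy_to_page()/memcpy_from_page() pair; "blob" and
 * "offset" are assumed for illustration and the range must fit within one
 * page.
 *
 *   memcpy_to_page(page, offset, blob, sizeof(blob));    // write + dcache flush
 *   ...
 *   memcpy_from_page(blob, page, offset, sizeof(blob));  // read it back
 */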

static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_local_page(page);

        VM_BUG_ON(offset + len > PAGE_SIZE);
        memset(addr + offset, 0, len);
        flush_dcache_page(page);
        kunmap_local(addr);
}

/**
 * memcpy_from_file_folio - Copy some bytes from a file folio.
 * @to: The destination buffer.
 * @folio: The folio to copy from.
 * @pos: The position in the file.
 * @len: The maximum number of bytes to copy.
 *
 * Copy up to @len bytes from this folio.  This may be limited by PAGE_SIZE
 * if the folio comes from HIGHMEM, and by the size of the folio.
 *
 * Return: The number of bytes copied from the folio.
 */
static inline size_t memcpy_from_file_folio(char *to, struct folio *folio,
                loff_t pos, size_t len)
{
        size_t offset = offset_in_folio(folio, pos);
        char *from = kmap_local_folio(folio, offset);

        if (folio_test_highmem(folio)) {
                offset = offset_in_page(offset);
                len = min_t(size_t, len, PAGE_SIZE - offset);
        } else
                len = min(len, folio_size(folio) - offset);

        memcpy(to, from, len);
        kunmap_local(from);

        return len;
}
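
/*
 * memcpy_from_file_folio() may copy fewer than @len bytes (at most one
 * page at a time when the folio is in highmem), so callers loop on the
 * return value. A sketch, assuming pos + len does not extend past the end
 * of @folio:
 *
 *   size_t copied = 0;
 *
 *   while (copied < len) {
 *           copied += memcpy_from_file_folio(to + copied, folio,
 *                                            pos + copied, len - copied);
 *   }
 */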

/**
 * folio_zero_segments() - Zero two byte ranges in a folio.
 * @folio: The folio to write to.
 * @start1: The first byte to zero.
 * @xend1: One more than the last byte in the first range.
 * @start2: The first byte to zero in the second range.
 * @xend2: One more than the last byte in the second range.
 */
static inline void folio_zero_segments(struct folio *folio,
                size_t start1, size_t xend1, size_t start2, size_t xend2)
{
        zero_user_segments(&folio->page, start1, xend1, start2, xend2);
}

/**
 * folio_zero_segment() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @xend: One more than the last byte to zero.
 */
static inline void folio_zero_segment(struct folio *folio,
                size_t start, size_t xend)
{
        zero_user_segments(&folio->page, start, xend, 0, 0);
}

/**
 * folio_zero_range() - Zero a byte range in a folio.
 * @folio: The folio to write to.
 * @start: The first byte to zero.
 * @length: The number of bytes to zero.
 */
static inline void folio_zero_range(struct folio *folio,
                size_t start, size_t length)
{
        zero_user_segments(&folio->page, start, start + length, 0, 0);
}
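
/*
 * Example use of folio_zero_segment(): zero everything in a folio from an
 * end-of-file position to the end of the folio, e.g. after a truncate.
 * "isize" is an assumed file size that falls within this folio.
 *
 *   size_t offset = offset_in_folio(folio, isize);
 *
 *   folio_zero_segment(folio, offset, folio_size(folio));
 */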

#endif /* _LINUX_HIGHMEM_H */
