kernel/iomem.c

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/device.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mm.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;
}
#endif

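/*
 * Illustrative sketch (not part of this file): an architecture opts out of
 * the weak/#ifndef defaults above by providing its own implementation and
 * defining the corresponding macro in its asm/io.h, e.g. for a hypothetical
 * arch "foo" that must veto direct-map reuse for some regions:
 *
 *	// in arch/foo/include/asm/io.h (hypothetical)
 *	bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
 *					 unsigned long flags);
 *	#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
 *
 * With the macro defined, the fallback above compiles out and memremap()
 * consults the architecture before returning a linear-map alias.
 */
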
static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *		  MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is
 * RAM, memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers) but the mapping
 * is otherwise uncached. Attempts to map System RAM with this mapping type
 * will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map.  Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size, flags);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping.  Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);

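/*
 * Illustrative sketch (not part of this file): a typical caller requests a
 * write-back mapping and lets memremap() fall back to write-through, in the
 * flag order documented above. The physical address and size here are
 * hypothetical; SZ_64K comes from <linux/sizes.h>.
 *
 *	void *nvram = memremap(0x80000000, SZ_64K,
 *			       MEMREMAP_WB | MEMREMAP_WT);
 *	if (!nvram)
 *		return -ENOMEM;	// memremap() returns NULL on failure
 *
 *	// access nvram like ordinary memory; no readl()/writel() needed
 *
 *	memunmap(nvram);
 */
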
void memunmap(void *addr)
{
	/* Direct-map pointers handed out by try_ram_remap() need no unmap */
	if (is_ioremap_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);

/* devres release callback: undo the memremap() done in devm_memremap() */
static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

/* devres match callback: find the entry holding the mapping @match_data */
static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

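/*
 * Illustrative sketch (not part of this file): the device-managed variant in
 * a driver probe path. The driver and function names are hypothetical. Note
 * that devm_memremap() returns ERR_PTR() on failure (unlike memremap(),
 * which returns NULL), and the mapping is torn down automatically by devres
 * when the driver detaches.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *		void *shmem;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		if (!res)
 *			return -ENODEV;
 *
 *		shmem = devm_memremap(&pdev->dev, res->start,
 *				      resource_size(res), MEMREMAP_WB);
 *		if (IS_ERR(shmem))
 *			return PTR_ERR(shmem);
 *
 *		// no explicit memunmap() needed; devres releases it
 *		return 0;
 *	}
 */
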
void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
