LCOV - code coverage report
Current view: top level - kernel/dma - direct.c (source / functions)
Test:         coverage.info
Date:         2023-07-19 18:55:55

                 Hit     Total    Coverage
Lines:             0       133       0.0 %
Functions:         0        19       0.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Copyright (C) 2018-2020 Christoph Hellwig.
       4             :  *
       5             :  * DMA operations that map physical memory directly without using an IOMMU.
       6             :  */
       7             : #include <linux/memblock.h> /* for max_pfn */
       8             : #include <linux/export.h>
       9             : #include <linux/mm.h>
      10             : #include <linux/dma-map-ops.h>
      11             : #include <linux/scatterlist.h>
      12             : #include <linux/pfn.h>
      13             : #include <linux/vmalloc.h>
      14             : #include <linux/set_memory.h>
      15             : #include <linux/slab.h>
      16             : #include "direct.h"
      17             : 
      18             : /*
      19             :  * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
      20             :  * it for entirely different regions. In that case the arch code needs to
      21             :  * override the variable below for dma-direct to work properly.
      22             :  */
      23             : unsigned int zone_dma_bits __ro_after_init = 24;
      24             : 
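As a hedged illustration of the override described in the comment above: architecture setup code (never a driver) can assign zone_dma_bits before DMA allocations start. The function name and the 30-bit value below are hypothetical; only zone_dma_bits itself comes from this file.

#include <linux/init.h>
#include <linux/dma-direct.h>   /* declares zone_dma_bits */

/* Hypothetical early arch hook: a sketch, not code from any real architecture. */
void __init example_arch_dma_zone_setup(void)
{
        /* On this imaginary platform, ZONE_DMA covers the low 30 address bits. */
        zone_dma_bits = 30;
}
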
      25             : static inline dma_addr_t phys_to_dma_direct(struct device *dev,
      26             :                 phys_addr_t phys)
      27             : {
      28           0 :         if (force_dma_unencrypted(dev))
      29             :                 return phys_to_dma_unencrypted(dev, phys);
      30           0 :         return phys_to_dma(dev, phys);
      31             : }
      32             : 
      33           0 : static inline struct page *dma_direct_to_page(struct device *dev,
      34             :                 dma_addr_t dma_addr)
      35             : {
      36           0 :         return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
      37             : }
      38             : 
      39           0 : u64 dma_direct_get_required_mask(struct device *dev)
      40             : {
      41           0 :         phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
      42           0 :         u64 max_dma = phys_to_dma_direct(dev, phys);
      43             : 
      44           0 :         return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
      45             : }
      46             : 
      47           0 : static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
      48             : {
      49           0 :         u64 dma_limit = min_not_zero(
      50             :                 dev->coherent_dma_mask,
      51             :                 dev->bus_dma_limit);
      52             : 
      53             :         /*
      54             :          * Optimistically try the zone that the physical address mask falls
       55             :          * into first.  If that returns memory that isn't actually addressable,
       56             :          * we will fall back to the next lower zone and try again.
      57             :          *
       58             :          * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
       59             :          * zones.
      60             :          */
      61           0 :         *phys_limit = dma_to_phys(dev, dma_limit);
      62           0 :         if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
      63             :                 return GFP_DMA;
      64           0 :         if (*phys_limit <= DMA_BIT_MASK(32))
      65             :                 return GFP_DMA32;
      66           0 :         return 0;
      67             : }
      68             : 
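The dma_limit computed above is derived from masks that a driver configures at probe time. A minimal, hypothetical probe fragment follows; only dma_set_mask_and_coherent() and DMA_BIT_MASK() are the real API.

#include <linux/dma-mapping.h>

/* Hypothetical probe-time helper: the coherent mask set here is one of the
 * inputs dma_direct_optimal_gfp_mask() folds into dma_limit above. */
static int example_set_dma_masks(struct device *dev)
{
        /* A device limited to 32-bit addressing will have its coherent
         * allocations steered below 4 GiB, typically via GFP_DMA32. */
        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
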
      69           0 : static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
      70             : {
      71           0 :         dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);
      72             : 
      73           0 :         if (dma_addr == DMA_MAPPING_ERROR)
      74             :                 return false;
      75           0 :         return dma_addr + size - 1 <=
      76           0 :                 min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
      77             : }
      78             : 
      79             : static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
      80             : {
      81           0 :         if (!force_dma_unencrypted(dev))
      82             :                 return 0;
      83             :         return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
      84             : }
      85             : 
      86             : static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
      87             : {
      88             :         int ret;
      89             : 
      90           0 :         if (!force_dma_unencrypted(dev))
      91             :                 return 0;
      92             :         ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
      93             :         if (ret)
      94             :                 pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
      95             :         return ret;
      96             : }
      97             : 
      98             : static void __dma_direct_free_pages(struct device *dev, struct page *page,
      99             :                                     size_t size)
     100             : {
     101           0 :         if (swiotlb_free(dev, page, size))
     102             :                 return;
     103           0 :         dma_free_contiguous(dev, page, size);
     104             : }
     105             : 
     106             : static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
     107             : {
     108             :         struct page *page = swiotlb_alloc(dev, size);
     109             : 
     110             :         if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
     111             :                 swiotlb_free(dev, page, size);
     112             :                 return NULL;
     113             :         }
     114             : 
     115             :         return page;
     116             : }
     117             : 
     118           0 : static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
     119             :                 gfp_t gfp, bool allow_highmem)
     120             : {
     121           0 :         int node = dev_to_node(dev);
     122           0 :         struct page *page = NULL;
     123             :         u64 phys_limit;
     124             : 
     125           0 :         WARN_ON_ONCE(!PAGE_ALIGNED(size));
     126             : 
     127           0 :         if (is_swiotlb_for_alloc(dev))
     128             :                 return dma_direct_alloc_swiotlb(dev, size);
     129             : 
     130           0 :         gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
     131           0 :         page = dma_alloc_contiguous(dev, size, gfp);
     132             :         if (page) {
     133             :                 if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
     134             :                     (!allow_highmem && PageHighMem(page))) {
     135             :                         dma_free_contiguous(dev, page, size);
     136             :                         page = NULL;
     137             :                 }
     138             :         }
     139             : again:
     140             :         if (!page)
     141           0 :                 page = alloc_pages_node(node, gfp, get_order(size));
     142           0 :         if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
     143           0 :                 dma_free_contiguous(dev, page, size);
     144           0 :                 page = NULL;
     145             : 
     146             :                 if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
     147             :                     phys_limit < DMA_BIT_MASK(64) &&
     148             :                     !(gfp & (GFP_DMA32 | GFP_DMA))) {
     149             :                         gfp |= GFP_DMA32;
     150             :                         goto again;
     151             :                 }
     152             : 
     153             :                 if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
     154             :                         gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
     155             :                         goto again;
     156             :                 }
     157             :         }
     158             : 
     159             :         return page;
     160             : }
     161             : 
     162             : /*
      163             :  * Check if a potentially blocking operation needs to dip into the atomic
     164             :  * pools for the given device/gfp.
     165             :  */
     166             : static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
     167             : {
     168             :         return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
     169             : }
     170             : 
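For context, the non-blocking case usually means a driver allocating coherent memory from atomic context. A hedged sketch of such a caller follows; the helper itself is hypothetical, dma_alloc_coherent() is the standard API.

#include <linux/dma-mapping.h>

/* Hypothetical atomic-context caller: GFP_ATOMIC forbids blocking, so for
 * non-coherent or force-unencrypted devices dma-direct may serve the request
 * from the pre-populated atomic pools instead of remapping or decrypting. */
static void *example_alloc_in_atomic_context(struct device *dev, size_t size,
                                             dma_addr_t *dma_handle)
{
        return dma_alloc_coherent(dev, size, dma_handle, GFP_ATOMIC);
}
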
     171             : static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
     172             :                 dma_addr_t *dma_handle, gfp_t gfp)
     173             : {
     174             :         struct page *page;
     175             :         u64 phys_limit;
     176             :         void *ret;
     177             : 
     178             :         if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
     179             :                 return NULL;
     180             : 
     181             :         gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
     182             :         page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
     183             :         if (!page)
     184             :                 return NULL;
     185             :         *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
     186             :         return ret;
     187             : }
     188             : 
     189           0 : static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
     190             :                 dma_addr_t *dma_handle, gfp_t gfp)
     191             : {
     192             :         struct page *page;
     193             : 
     194           0 :         page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
     195           0 :         if (!page)
     196             :                 return NULL;
     197             : 
     198             :         /* remove any dirty cache lines on the kernel alias */
     199           0 :         if (!PageHighMem(page))
     200             :                 arch_dma_prep_coherent(page, size);
     201             : 
     202             :         /* return the page pointer as the opaque cookie */
     203           0 :         *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
     204           0 :         return page;
     205             : }
     206             : 
     207           0 : void *dma_direct_alloc(struct device *dev, size_t size,
     208             :                 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
     209             : {
     210           0 :         bool remap = false, set_uncached = false;
     211             :         struct page *page;
     212             :         void *ret;
     213             : 
     214           0 :         size = PAGE_ALIGN(size);
     215           0 :         if (attrs & DMA_ATTR_NO_WARN)
     216           0 :                 gfp |= __GFP_NOWARN;
     217             : 
     218           0 :         if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
     219           0 :             !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
     220           0 :                 return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
     221             : 
     222           0 :         if (!dev_is_dma_coherent(dev)) {
     223             :                 /*
      224             :          * Fall back to the arch handler if it exists.  This should
     225             :                  * eventually go away.
     226             :                  */
     227             :                 if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
     228             :                     !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
     229             :                     !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
     230             :                     !is_swiotlb_for_alloc(dev))
     231             :                         return arch_dma_alloc(dev, size, dma_handle, gfp,
     232             :                                               attrs);
     233             : 
     234             :                 /*
     235             :                  * If there is a global pool, always allocate from it for
     236             :                  * non-coherent devices.
     237             :                  */
     238             :                 if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
     239             :                         return dma_alloc_from_global_coherent(dev, size,
     240             :                                         dma_handle);
     241             : 
     242             :                 /*
     243             :                  * Otherwise remap if the architecture is asking for it.  But
     244             :                  * given that remapping memory is a blocking operation we'll
     245             :                  * instead have to dip into the atomic pools.
     246             :                  */
     247             :                 remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
     248             :                 if (remap) {
     249             :                         if (dma_direct_use_pool(dev, gfp))
     250             :                                 return dma_direct_alloc_from_pool(dev, size,
     251             :                                                 dma_handle, gfp);
     252             :                 } else {
     253             :                         if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
     254             :                                 return NULL;
     255             :                         set_uncached = true;
     256             :                 }
     257             :         }
     258             : 
     259             :         /*
     260             :          * Decrypting memory may block, so allocate the memory from the atomic
     261             :          * pools if we can't block.
     262             :          */
     263           0 :         if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
     264             :                 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
     265             : 
     266             :         /* we always manually zero the memory once we are done */
     267           0 :         page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
     268           0 :         if (!page)
     269             :                 return NULL;
     270             : 
     271             :         /*
     272             :          * dma_alloc_contiguous can return highmem pages depending on a
      273             :          * combination of the cma= arguments and per-arch setup.  These need to be
     274             :          * remapped to return a kernel virtual address.
     275             :          */
     276           0 :         if (PageHighMem(page)) {
     277             :                 remap = true;
     278             :                 set_uncached = false;
     279             :         }
     280             : 
     281             :         if (remap) {
     282             :                 pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
     283             : 
     284             :                 if (force_dma_unencrypted(dev))
     285             :                         prot = pgprot_decrypted(prot);
     286             : 
     287             :                 /* remove any dirty cache lines on the kernel alias */
     288             :                 arch_dma_prep_coherent(page, size);
     289             : 
     290             :                 /* create a coherent mapping */
     291             :                 ret = dma_common_contiguous_remap(page, size, prot,
     292             :                                 __builtin_return_address(0));
     293             :                 if (!ret)
     294             :                         goto out_free_pages;
     295             :         } else {
     296           0 :                 ret = page_address(page);
     297           0 :                 if (dma_set_decrypted(dev, ret, size))
     298             :                         goto out_free_pages;
     299             :         }
     300             : 
     301           0 :         memset(ret, 0, size);
     302             : 
     303             :         if (set_uncached) {
     304             :                 arch_dma_prep_coherent(page, size);
     305             :                 ret = arch_dma_set_uncached(ret, size);
     306             :                 if (IS_ERR(ret))
     307             :                         goto out_encrypt_pages;
     308             :         }
     309             : 
     310           0 :         *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
     311           0 :         return ret;
     312             : 
     313             : out_encrypt_pages:
     314             :         if (dma_set_encrypted(dev, page_address(page), size))
     315             :                 return NULL;
     316             : out_free_pages:
     317             :         __dma_direct_free_pages(dev, page, size);
     318             :         return NULL;
     319             : }
     320             : 
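As a usage-level sketch of the DMA_ATTR_NO_KERNEL_MAPPING branch handled at the top of dma_direct_alloc(): a driver that only ever programs the buffer into hardware can skip the kernel mapping entirely. The helper below is hypothetical; dma_alloc_attrs() and the attribute are the standard API.

#include <linux/dma-mapping.h>

/* Hypothetical allocation helper: with DMA_ATTR_NO_KERNEL_MAPPING the value
 * returned by the DMA API is an opaque cookie (for dma-direct, a struct page
 * pointer), not a kernel virtual address, and must not be dereferenced. */
static void *example_alloc_device_only_buffer(struct device *dev, size_t size,
                                              dma_addr_t *dma_handle)
{
        return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
                               DMA_ATTR_NO_KERNEL_MAPPING);
}
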
     321           0 : void dma_direct_free(struct device *dev, size_t size,
     322             :                 void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
     323             : {
     324           0 :         unsigned int page_order = get_order(size);
     325             : 
     326           0 :         if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
     327           0 :             !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
     328             :                 /* cpu_addr is a struct page cookie, not a kernel address */
     329           0 :                 dma_free_contiguous(dev, cpu_addr, size);
     330           0 :                 return;
     331             :         }
     332             : 
     333             :         if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
     334             :             !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
     335             :             !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
     336           0 :             !dev_is_dma_coherent(dev) &&
     337             :             !is_swiotlb_for_alloc(dev)) {
     338             :                 arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
     339             :                 return;
     340             :         }
     341             : 
     342             :         if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
     343             :             !dev_is_dma_coherent(dev)) {
     344             :                 if (!dma_release_from_global_coherent(page_order, cpu_addr))
     345             :                         WARN_ON_ONCE(1);
     346             :                 return;
     347             :         }
     348             : 
     349             :         /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
     350             :         if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
     351             :             dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
     352             :                 return;
     353             : 
     354           0 :         if (is_vmalloc_addr(cpu_addr)) {
     355           0 :                 vunmap(cpu_addr);
     356             :         } else {
     357             :                 if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
     358             :                         arch_dma_clear_uncached(cpu_addr, size);
     359             :                 if (dma_set_encrypted(dev, cpu_addr, size))
     360             :                         return;
     361             :         }
     362             : 
     363           0 :         __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
     364             : }
     365             : 
     366           0 : struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
     367             :                 dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
     368             : {
     369             :         struct page *page;
     370             :         void *ret;
     371             : 
     372           0 :         if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
     373             :                 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
     374             : 
     375           0 :         page = __dma_direct_alloc_pages(dev, size, gfp, false);
     376           0 :         if (!page)
     377             :                 return NULL;
     378             : 
     379           0 :         ret = page_address(page);
     380           0 :         if (dma_set_decrypted(dev, ret, size))
     381             :                 goto out_free_pages;
     382           0 :         memset(ret, 0, size);
     383           0 :         *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
     384           0 :         return page;
     385             : out_free_pages:
     386             :         __dma_direct_free_pages(dev, page, size);
     387             :         return NULL;
     388             : }
     389             : 
     390           0 : void dma_direct_free_pages(struct device *dev, size_t size,
     391             :                 struct page *page, dma_addr_t dma_addr,
     392             :                 enum dma_data_direction dir)
     393             : {
     394           0 :         void *vaddr = page_address(page);
     395             : 
     396             :         /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
     397             :         if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
     398             :             dma_free_from_pool(dev, vaddr, size))
     399             :                 return;
     400             : 
     401           0 :         if (dma_set_encrypted(dev, vaddr, size))
     402             :                 return;
     403           0 :         __dma_direct_free_pages(dev, page, size);
     404             : }
     405             : 
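A hedged sketch of the page-based API that these two functions back for direct-mapped devices; the helper is hypothetical, dma_alloc_pages() is the standard call.

#include <linux/dma-mapping.h>

/* Hypothetical user of the page-level allocator: per the code above, the
 * returned pages are decrypted when the platform requires it, zeroed, and
 * addressable through page_address(). */
static struct page *example_alloc_dma_pages(struct device *dev, size_t size,
                                            dma_addr_t *dma_handle)
{
        return dma_alloc_pages(dev, size, dma_handle, DMA_BIDIRECTIONAL,
                               GFP_KERNEL);
}
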
     406             : #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
     407             :     defined(CONFIG_SWIOTLB)
     408             : void dma_direct_sync_sg_for_device(struct device *dev,
     409             :                 struct scatterlist *sgl, int nents, enum dma_data_direction dir)
     410             : {
     411             :         struct scatterlist *sg;
     412             :         int i;
     413             : 
     414             :         for_each_sg(sgl, sg, nents, i) {
     415             :                 phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
     416             : 
     417             :                 if (unlikely(is_swiotlb_buffer(dev, paddr)))
     418             :                         swiotlb_sync_single_for_device(dev, paddr, sg->length,
     419             :                                                        dir);
     420             : 
     421             :                 if (!dev_is_dma_coherent(dev))
     422             :                         arch_sync_dma_for_device(paddr, sg->length,
     423             :                                         dir);
     424             :         }
     425             : }
     426             : #endif
     427             : 
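A hedged sketch of the driver-facing calls whose scatterlist variants reach dma_direct_sync_sg_for_device() above and dma_direct_sync_sg_for_cpu() below on non-coherent, direct-mapped devices; the wrapper is hypothetical, the two dma_sync_sg_*() calls are the standard API.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical streaming-DMA round trip: hand a mapped scatterlist to the
 * device, then reclaim it for the CPU before reading what the device wrote. */
static void example_sync_sg_roundtrip(struct device *dev,
                                      struct scatterlist *sgl, int nents)
{
        dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
        /* ... device DMA into the scatterlist pages happens here ... */
        dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
}
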
     428             : #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
     429             :     defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
     430             :     defined(CONFIG_SWIOTLB)
     431             : void dma_direct_sync_sg_for_cpu(struct device *dev,
     432             :                 struct scatterlist *sgl, int nents, enum dma_data_direction dir)
     433             : {
     434             :         struct scatterlist *sg;
     435             :         int i;
     436             : 
     437             :         for_each_sg(sgl, sg, nents, i) {
     438             :                 phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
     439             : 
     440             :                 if (!dev_is_dma_coherent(dev))
     441             :                         arch_sync_dma_for_cpu(paddr, sg->length, dir);
     442             : 
     443             :                 if (unlikely(is_swiotlb_buffer(dev, paddr)))
     444             :                         swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
     445             :                                                     dir);
     446             : 
     447             :                 if (dir == DMA_FROM_DEVICE)
     448             :                         arch_dma_mark_clean(paddr, sg->length);
     449             :         }
     450             : 
     451             :         if (!dev_is_dma_coherent(dev))
     452             :                 arch_sync_dma_for_cpu_all();
     453             : }
     454             : 
     455             : /*
      456             :  * Unmaps segments, except for ones marked as pci_p2pdma, which do not
     457             :  * require any further action as they contain a bus address.
     458             :  */
     459             : void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
     460             :                 int nents, enum dma_data_direction dir, unsigned long attrs)
     461             : {
     462             :         struct scatterlist *sg;
     463             :         int i;
     464             : 
      465             :         for_each_sg(sgl, sg, nents, i) {
     466             :                 if (sg_is_dma_bus_address(sg))
     467             :                         sg_dma_unmark_bus_address(sg);
     468             :                 else
     469             :                         dma_direct_unmap_page(dev, sg->dma_address,
     470             :                                               sg_dma_len(sg), dir, attrs);
     471             :         }
     472             : }
     473             : #endif
     474             : 
     475           0 : int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
     476             :                 enum dma_data_direction dir, unsigned long attrs)
     477             : {
     478             :         struct pci_p2pdma_map_state p2pdma_state = {};
     479             :         enum pci_p2pdma_map_type map;
     480             :         struct scatterlist *sg;
     481             :         int i, ret;
     482             : 
     483           0 :         for_each_sg(sgl, sg, nents, i) {
     484           0 :                 if (is_pci_p2pdma_page(sg_page(sg))) {
     485             :                         map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
     486             :                         switch (map) {
     487             :                         case PCI_P2PDMA_MAP_BUS_ADDR:
     488             :                                 continue;
     489             :                         case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
     490             :                                 /*
     491             :                                  * Any P2P mapping that traverses the PCI
     492             :                                  * host bridge must be mapped with CPU physical
     493             :                                  * address and not PCI bus addresses. This is
     494             :                                  * done with dma_direct_map_page() below.
     495             :                                  */
     496             :                                 break;
     497             :                         default:
     498             :                                 ret = -EREMOTEIO;
     499             :                                 goto out_unmap;
     500             :                         }
     501             :                 }
     502             : 
     503           0 :                 sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
     504           0 :                                 sg->offset, sg->length, dir, attrs);
     505           0 :                 if (sg->dma_address == DMA_MAPPING_ERROR) {
     506             :                         ret = -EIO;
     507             :                         goto out_unmap;
     508             :                 }
     509             :                 sg_dma_len(sg) = sg->length;
     510             :         }
     511             : 
     512             :         return nents;
     513             : 
     514             : out_unmap:
     515             :         dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
     516             :         return ret;
     517             : }
     518             : 
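For orientation, a hedged sketch of the driver-side call that reaches dma_direct_map_sg() on direct-mapped devices; the scatterlist is assumed to be populated elsewhere, and only dma_map_sg()/dma_unmap_sg() are the real API.

#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: map a pre-built scatterlist for a device-bound
 * transfer.  dma_map_sg() returns the number of mapped entries, 0 on error. */
static int example_map_sgl_for_write(struct device *dev,
                                     struct scatterlist *sgl, int nents)
{
        int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

        if (!mapped)
                return -EIO;
        /* ... start the transfer; once it completes: ... */
        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
        return 0;
}
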
     519           0 : dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
     520             :                 size_t size, enum dma_data_direction dir, unsigned long attrs)
     521             : {
     522           0 :         dma_addr_t dma_addr = paddr;
     523             : 
     524           0 :         if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
     525           0 :                 dev_err_once(dev,
     526             :                              "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
     527             :                              &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
     528           0 :                 WARN_ON_ONCE(1);
     529             :                 return DMA_MAPPING_ERROR;
     530             :         }
     531             : 
     532             :         return dma_addr;
     533             : }
     534             : 
     535           0 : int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
     536             :                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
     537             :                 unsigned long attrs)
     538             : {
     539           0 :         struct page *page = dma_direct_to_page(dev, dma_addr);
     540             :         int ret;
     541             : 
     542           0 :         ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
     543           0 :         if (!ret)
     544           0 :                 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
     545           0 :         return ret;
     546             : }
     547             : 
     548           0 : bool dma_direct_can_mmap(struct device *dev)
     549             : {
     550           0 :         return dev_is_dma_coherent(dev) ||
     551             :                 IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
     552             : }
     553             : 
     554           0 : int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
     555             :                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
     556             :                 unsigned long attrs)
     557             : {
     558           0 :         unsigned long user_count = vma_pages(vma);
     559           0 :         unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
     560           0 :         unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
     561           0 :         int ret = -ENXIO;
     562             : 
     563           0 :         vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
     564           0 :         if (force_dma_unencrypted(dev))
     565             :                 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
     566             : 
     567             :         if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
     568             :                 return ret;
     569           0 :         if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
     570             :                 return ret;
     571             : 
     572           0 :         if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
     573             :                 return -ENXIO;
     574           0 :         return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
     575             :                         user_count << PAGE_SHIFT, vma->vm_page_prot);
     576             : }
     577             : 
     578           0 : int dma_direct_supported(struct device *dev, u64 mask)
     579             : {
     580           0 :         u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;
     581             : 
     582             :         /*
     583             :          * Because 32-bit DMA masks are so common we expect every architecture
     584             :          * to be able to satisfy them - either by not supporting more physical
     585             :          * memory, or by providing a ZONE_DMA32.  If neither is the case, the
     586             :          * architecture needs to use an IOMMU instead of the direct mapping.
     587             :          */
     588           0 :         if (mask >= DMA_BIT_MASK(32))
     589             :                 return 1;
     590             : 
     591             :         /*
     592             :          * This check needs to be against the actual bit mask value, so use
     593             :          * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
     594             :          * part of the check.
     595             :          */
     596             :         if (IS_ENABLED(CONFIG_ZONE_DMA))
     597             :                 min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
     598           0 :         return mask >= phys_to_dma_unencrypted(dev, min_mask);
     599             : }
     600             : 
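The 32-bit guarantee described in the comment above is what makes the long-standing driver pattern below safe; the probe helper is hypothetical, dma_set_mask() is the standard API.

#include <linux/dma-mapping.h>

/* Hypothetical mask negotiation: try the wide mask first and fall back to
 * 32 bits, which dma-direct is expected to always be able to satisfy. */
static int example_negotiate_dma_mask(struct device *dev)
{
        if (!dma_set_mask(dev, DMA_BIT_MASK(64)))
                return 0;
        return dma_set_mask(dev, DMA_BIT_MASK(32));
}
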
     601           0 : size_t dma_direct_max_mapping_size(struct device *dev)
     602             : {
     603             :         /* If SWIOTLB is active, use its maximum mapping size */
     604           0 :         if (is_swiotlb_active(dev) &&
     605             :             (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
     606             :                 return swiotlb_max_mapping_size(dev);
     607             :         return SIZE_MAX;
     608             : }
     609             : 
     610           0 : bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
     611             : {
     612           0 :         return !dev_is_dma_coherent(dev) ||
     613           0 :                is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr));
     614             : }
     615             : 
     616             : /**
     617             :  * dma_direct_set_offset - Assign scalar offset for a single DMA range.
      618             :  * @dev:        device pointer; needed to "own" the allocated memory.
     619             :  * @cpu_start:  beginning of memory region covered by this offset.
     620             :  * @dma_start:  beginning of DMA/PCI region covered by this offset.
     621             :  * @size:       size of the region.
     622             :  *
     623             :  * This is for the simple case of a uniform offset which cannot
     624             :  * be discovered by "dma-ranges".
     625             :  *
     626             :  * It returns -ENOMEM if out of memory, -EINVAL if a map
     627             :  * already exists, 0 otherwise.
     628             :  *
     629             :  * Note: any call to this from a driver is a bug.  The mapping needs
     630             :  * to be described by the device tree or other firmware interfaces.
     631             :  */
     632           0 : int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
     633             :                          dma_addr_t dma_start, u64 size)
     634             : {
     635             :         struct bus_dma_region *map;
     636           0 :         u64 offset = (u64)cpu_start - (u64)dma_start;
     637             : 
     638           0 :         if (dev->dma_range_map) {
     639           0 :                 dev_err(dev, "attempt to add DMA range to existing map\n");
     640           0 :                 return -EINVAL;
     641             :         }
     642             : 
     643           0 :         if (!offset)
     644             :                 return 0;
     645             : 
     646           0 :         map = kcalloc(2, sizeof(*map), GFP_KERNEL);
     647           0 :         if (!map)
     648             :                 return -ENOMEM;
     649           0 :         map[0].cpu_start = cpu_start;
     650           0 :         map[0].dma_start = dma_start;
     651           0 :         map[0].offset = offset;
     652           0 :         map[0].size = size;
     653           0 :         dev->dma_range_map = map;
     654           0 :         return 0;
     655             : }
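
A hedged sketch of how firmware-glue or platform setup code (never a driver, per the note above) might use this: the addresses, the window size, and the wrapper function are illustrative only.

#include <linux/dma-map-ops.h>

/* Hypothetical platform init: RAM that the CPU sees at 0x80000000 appears to
 * devices at bus address 0x0, so a single 1 GiB range with that offset is
 * registered before any DMA mapping takes place. */
static int example_platform_set_dma_offset(struct device *dev)
{
        return dma_direct_set_offset(dev, 0x80000000, 0x0, 0x40000000);
}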

Generated by: LCOV version 1.14