LCOV - code coverage report
Current view: top level - include/linux - dma-mapping.h (source / functions)
Test: coverage.info
Date: 2023-08-24 13:40:31
Coverage summary: Lines: 0 of 15 hit (0.0 %), Functions: 0 of 1 hit (0.0 %)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/cache.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute are defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING          (1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE          (1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING      (1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC          (1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS       (1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES     (1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN        (1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: Used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED             (1UL << 9)
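
/*
 * Illustrative sketch, not part of the upstream header: the attribute bits
 * above are plain flags that get OR-ed together and passed through the
 * *_attrs() interfaces.  The helper below is hypothetical; in a real driver
 * it would live in a .c file that includes this header, since
 * dma_alloc_attrs() is only declared further down.
 */
static inline void *example_alloc_contig_buffer(struct device *dev,
                size_t size, dma_addr_t *dma_handle, gfp_t gfp)
{
        /* Physically contiguous buffer; stay quiet if the allocation fails. */
        unsigned long attrs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN;

        return dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
}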

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.  It can
 * be given to a device to use as a DMA source or target.  It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed.  It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR               (~(dma_addr_t)0)
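
/*
 * Illustrative sketch, not part of the upstream header: the documented
 * error-handling pattern is to never compare against DMA_MAPPING_ERROR
 * directly but to call dma_mapping_error().  The buffer pointer and length
 * are hypothetical driver values; dma_map_single_attrs() and
 * dma_mapping_error() are declared/defined further down, so a real driver
 * would keep this in a .c file that includes this header.
 */
static inline int example_map_buffer(struct device *dev, void *buf,
                size_t len, dma_addr_t *out)
{
        dma_addr_t addr;

        addr = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, 0);
        if (dma_mapping_error(dev, addr))
                return -ENOMEM;         /* mapping failed, nothing to unmap */

        *out = addr;
        return 0;
}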

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
                unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
                dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
                unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        debug_dma_mapping_error(dev, dma_addr);

        if (unlikely(dma_addr == DMA_MAPPING_ERROR))
                return -ENOMEM;
        return 0;
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
                size_t offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                                      int nents, enum dma_data_direction dir,
                                      unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
                enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir, unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
                enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
                struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
                struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
                size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
                struct page *page, size_t offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
                struct scatterlist *sg, int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
        return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
                struct scatterlist *sg, int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
                enum dma_data_direction dir, unsigned long attrs)
{
        return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
                phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
        return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
                void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
                struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
                size_t size, unsigned long attrs)
{
        return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
        return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
        return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
        return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
        return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
        return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
        return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
        return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
        return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
                size_t size, enum dma_data_direction dir, gfp_t gfp,
                unsigned long attrs)
{
        return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
                struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
                struct sg_table *sgt)
{
        return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
                struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
        return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */
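
/*
 * Illustrative sketch, not part of the upstream header: a streaming
 * scatter-gather mapping cycle built on the interfaces declared above.  The
 * scatterlist is assumed to have been populated by a hypothetical caller.
 * Note that dma_map_sg_attrs() returns 0 on failure and that the unmap call
 * must be given the original nents, not the mapped count.
 */
static inline int example_sg_dma_cycle(struct device *dev,
                struct scatterlist *sgl, int nents)
{
        unsigned int mapped;

        /* Hand the buffer over to the 'device' domain. */
        mapped = dma_map_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
        if (mapped == 0)
                return -ENOMEM;

        /*
         * ... program the device with the 'mapped' entries and wait for the
         * transfer to complete ...
         */

        /* Transfer ownership back to the CPU before reading the data. */
        dma_unmap_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
        return 0;
}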

struct page *dma_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
                dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
                size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
        return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
        dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
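
/*
 * Illustrative sketch, not part of the upstream header: memory from
 * dma_alloc_noncoherent() is owned by the device until it is explicitly
 * synced, so the CPU brackets its accesses with the dma_sync_single_*()
 * calls declared earlier in this file.  GFP_KERNEL and the buffer size are
 * hypothetical caller choices.
 */
static inline void example_noncoherent_rx(struct device *dev, size_t size)
{
        dma_addr_t handle;
        void *buf;

        buf = dma_alloc_noncoherent(dev, size, &handle, DMA_FROM_DEVICE,
                                    GFP_KERNEL);
        if (!buf)
                return;

        /* ... let the device DMA into 'handle' ... */

        dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
        /* ... the CPU may now read 'buf' ... */
        dma_sync_single_for_device(dev, handle, size, DMA_FROM_DEVICE);

        dma_free_noncoherent(dev, size, buf, handle, DMA_FROM_DEVICE);
}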

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        /* DMA must never operate on areas that might be remapped. */
        if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
                          "rejecting DMA map of vmalloc memory\n"))
                return DMA_MAPPING_ERROR;
        debug_dma_map_single(dev, ptr, size);
        return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
                        size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t addr, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t addr, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
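
/*
 * Illustrative sketch, not part of the upstream header: a driver that reuses
 * one long-lived mapping (a hypothetical RX buffer) can use the range-sync
 * helpers above to sync just the portion the device actually wrote instead
 * of the whole buffer.
 */
static inline void example_sync_rx_slot(struct device *dev, dma_addr_t buf,
                unsigned long slot_off, size_t slot_len)
{
        /* Make the device-written slot visible to the CPU. */
        dma_sync_single_range_for_cpu(dev, buf, slot_off, slot_len,
                                      DMA_FROM_DEVICE);

        /* ... CPU processes the slot ... */

        /* Return the slot to the device for the next transfer. */
        dma_sync_single_range_for_device(dev, buf, slot_off, slot_len,
                                         DMA_FROM_DEVICE);
}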

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:        The device for which to perform the DMA operation
 * @sgt:        The sg_table object describing the buffer
 * @dir:        DMA direction
 * @attrs:      Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
                enum dma_data_direction dir, unsigned long attrs)
{
        dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:        The device for which to perform the DMA operation
 * @sgt:        The sg_table object describing the buffer
 * @dir:        DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:        The device for which to perform the DMA operation
 * @sgt:        The sg_table object describing the buffer
 * @dir:        DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
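
/*
 * Illustrative sketch, not part of the upstream header: the sg_table helpers
 * above tied together for a long-lived bidirectional mapping.  The sg_table
 * contents are assumed to have been set up by a hypothetical caller.
 */
static inline int example_sgtable_lifecycle(struct device *dev,
                struct sg_table *sgt)
{
        int ret;

        ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
        if (ret)
                return ret;

        /* ... device works on the buffer ... */

        /* CPU needs to look at the data: claim ownership, then give it back. */
        dma_sync_sgtable_for_cpu(dev, sgt, DMA_BIDIRECTIONAL);
        /* ... CPU reads/updates the buffer ... */
        dma_sync_sgtable_for_device(dev, sgt, DMA_BIDIRECTIONAL);

        /* ... more device work, then tear the mapping down ... */
        dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
        return 0;
}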

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        return dma_alloc_attrs(dev, size, dma_handle, gfp,
                        (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

static inline u64 dma_get_mask(struct device *dev)
{
        if (dev->dma_mask && *dev->dma_mask)
                return *dev->dma_mask;
        return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to the
 * same value as, or a smaller one than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
        int rc = dma_set_mask(dev, mask);
        if (rc == 0)
                dma_set_coherent_mask(dev, mask);
        return rc;
}
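
/*
 * Illustrative sketch, not part of the upstream header: the usual probe-time
 * pattern built on DMA_BIT_MASK() and dma_set_mask_and_coherent() -- try the
 * device's native addressing width and fall back to 32 bits.  The 64-bit
 * capability is a hypothetical property of the example device.
 */
static inline int example_set_dma_masks(struct device *dev)
{
        int ret;

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret)
                ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

        return ret;     /* non-zero means even 32-bit DMA is unusable */
}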

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
        dev->dma_mask = &dev->coherent_dma_mask;
        return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev:        device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false.  Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
        return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
                            dma_get_required_mask(dev);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->max_segment_size)
                return dev->dma_parms->max_segment_size;
        return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
        if (dev->dma_parms) {
                dev->dma_parms->max_segment_size = size;
                return 0;
        }
        return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
                return dev->dma_parms->segment_boundary_mask;
        return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
                unsigned int page_shift)
{
        if (!dev)
                return (U32_MAX >> page_shift) + 1;
        return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
        if (dev->dma_parms) {
                dev->dma_parms->segment_boundary_mask = mask;
                return 0;
        }
        return -EIO;
}
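
/*
 * Illustrative sketch, not part of the upstream header: both setters above
 * only succeed once dev->dma_parms points at storage, which is assumed to
 * have been provided by the bus code (or by the driver itself).  SZ_1M comes
 * from linux/sizes.h, which this header includes.
 */
static inline int example_configure_segments(struct device *dev)
{
        int ret;

        /* No single DMA segment longer than 1 MiB ... */
        ret = dma_set_max_seg_size(dev, SZ_1M);
        if (ret)
                return ret;

        /* ... and no segment may cross a 4 GiB boundary. */
        return dma_set_seg_boundary(dev, DMA_BIT_MASK(32));
}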

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
        if (dev->dma_parms)
                return dev->dma_parms->min_align_mask;
        return 0;
}

static inline int dma_set_min_align_mask(struct device *dev,
                unsigned int min_align_mask)
{
        if (WARN_ON_ONCE(!dev->dma_parms))
                return -EIO;
        dev->dma_parms->min_align_mask = min_align_mask;
        return 0;
}

#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_HAS_DMA_MINALIGN
        return ARCH_DMA_MINALIGN;
#endif
        return 1;
}
#endif

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        return dmam_alloc_attrs(dev, size, dma_handle, gfp,
                        (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t gfp)
{
        unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

        if (gfp & __GFP_NOWARN)
                attrs |= DMA_ATTR_NO_WARN;

        return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
                               void *cpu_addr, dma_addr_t dma_addr)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_addr,
                              DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
                              struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr,
                              size_t size)
{
        return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
                              DMA_ATTR_WRITE_COMBINE);
}
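
/*
 * Illustrative sketch, not part of the upstream header: the *_wc() wrappers
 * above are typically used together -- allocate a write-combined buffer
 * (a hypothetical frame buffer) at probe time, then map the same buffer into
 * user space from the driver's mmap() handler.  GFP_KERNEL is a hypothetical
 * caller choice.
 */
static inline void *example_alloc_framebuffer(struct device *dev, size_t size,
                dma_addr_t *dma_addr)
{
        return dma_alloc_wc(dev, size, dma_addr, GFP_KERNEL);
}

static inline int example_mmap_framebuffer(struct device *dev,
                struct vm_area_struct *vma, void *cpu_addr,
                dma_addr_t dma_addr, size_t size)
{
        /* Expose the buffer to user space with matching WC attributes. */
        return dma_mmap_wc(dev, vma, cpu_addr, dma_addr, size);
}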

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
#endif
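
/*
 * Illustrative sketch, not part of the upstream header: the macros above let
 * a driver keep unmap bookkeeping in its own structures at zero cost when
 * CONFIG_NEED_DMA_MAP_STATE is not set.  The structure and field names below
 * are hypothetical.
 */
struct example_tx_slot {
        void *data;
        DEFINE_DMA_UNMAP_ADDR(mapping);
        DEFINE_DMA_UNMAP_LEN(len);
};

static inline void example_record_mapping(struct example_tx_slot *slot,
                dma_addr_t addr, size_t size)
{
        dma_unmap_addr_set(slot, mapping, addr);
        dma_unmap_len_set(slot, len, size);
}

static inline void example_unmap_slot(struct device *dev,
                struct example_tx_slot *slot)
{
        dma_unmap_single(dev, dma_unmap_addr(slot, mapping),
                         dma_unmap_len(slot, len), DMA_TO_DEVICE);
}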

#endif /* _LINUX_DMA_MAPPING_H */

Generated by: LCOV version 1.14