/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
#include <linux/slab.h>

struct cma;

/*
 * Values for struct dma_map_ops.flags:
 *
 * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can
 * handle PCI P2PDMA pages in the map_sg/unmap_sg operation.
 */
#define DMA_F_PCI_P2PDMA_SUPPORTED	(1 << 0)

struct dma_map_ops {
	unsigned int flags;

	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	struct page *(*alloc_pages)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
			enum dma_data_direction dir, gfp_t gfp,
			unsigned long attrs);
	void (*free_noncontiguous)(struct device *dev, size_t size,
			struct sg_table *sgt, enum dma_data_direction dir);
	int (*mmap)(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg should return a negative error code on error. See
	 * dma_map_sgtable() for a list of appropriate error codes
	 * and their meanings.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	size_t (*opt_mapping_size)(void);
	unsigned long (*get_merge_boundary)(struct device *dev);
};
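
/*
 * Illustrative sketch, not part of this header: a minimal dma_map_ops
 * instance for a hypothetical bus whose devices see CPU physical
 * addresses 1:1.  All example_* names are made up for illustration; a
 * real implementation also needs coherent allocation, cache
 * synchronization and scatterlist handling as declared above.
 */
static dma_addr_t example_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	/* Assumed 1:1 mapping: the device uses the CPU physical address. */
	return (dma_addr_t)page_to_phys(page) + offset;
}

static void example_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* Nothing to tear down for a static 1:1 mapping. */
}

static int example_dma_supported(struct device *dev, u64 mask)
{
	/* Accept any mask that covers at least 32 bits of address space. */
	return mask >= DMA_BIT_MASK(32);
}

static const struct dma_map_ops example_dma_ops = {
	.map_page	= example_map_page,
	.unmap_page	= example_unmap_page,
	.dma_supported	= example_dma_supported,
};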

#ifdef CONFIG_DMA_OPS
#include <asm/dma-mapping.h>

/*
 * Return the per-device dma_map_ops if one has been set, otherwise fall
 * back to the architecture's default ops.
 */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops();
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */
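
/*
 * Illustrative sketch, not part of this header: bus or platform code
 * typically installs a custom dma_map_ops instance while binding a
 * device.  example_bus_probe is a hypothetical probe callback and
 * example_dma_ops refers to the sketch above.
 */
static int example_bus_probe(struct device *dev)
{
	/* Route all DMA API calls for this device through example_dma_ops. */
	set_dma_ops(dev, &example_dma_ops);
	return 0;
}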

#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	if (dev && dev->cma_area)
		return dev->cma_area;
	return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
		phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
		unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
		int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
		phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
		bool fixed)
{
	return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
		size_t count, unsigned int order, bool no_warn)
{
	return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
		struct page *pages, int count)
{
	return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	__free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
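
/*
 * Illustrative sketch, not part of this header: callers such as
 * dma-direct try the contiguous (CMA) allocator first and fall back to
 * the plain page allocator, which also matches the stub behaviour above
 * when CONFIG_DMA_CMA=n.  example_alloc_coherent_pages is a
 * hypothetical helper name.
 */
static struct page *example_alloc_coherent_pages(struct device *dev,
		size_t size, gfp_t gfp)
{
	struct page *page;

	page = dma_alloc_contiguous(dev, size, gfp);
	if (!page)	/* NULL when CMA is disabled or exhausted */
		page = alloc_pages(gfp, get_order(size));
	return page;
}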

#ifdef CONFIG_DMA_PERNUMA_CMA
void dma_pernuma_cma_reserve(void);
#else
static inline void dma_pernuma_cma_reserve(void) { }
#endif /* CONFIG_DMA_PERNUMA_CMA */

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
void dma_release_coherent_memory(struct device *dev);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);
#else
static inline int dma_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}

#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, size, ret) (0)
static inline void dma_release_coherent_memory(struct device *dev) { }
#endif /* CONFIG_DMA_DECLARE_COHERENT */

#ifdef CONFIG_DMA_GLOBAL_POOL
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);
int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
#else
static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */

/*
 * This is the actual return value from the ->alloc_noncontiguous method.
 * The users of the DMA API should only care about the sg_table, but to make
 * the DMA-API internal vmapping and freeing easier we stash away the page
 * array as well (except for the fallback case). This can go away any time,
 * e.g. when a vmap variant that takes a scatterlist comes along.
 */
struct dma_sgt_handle {
	struct sg_table sgt;
	struct page **pages;
};
#define sgt_handle(sgt) \
	container_of((sgt), struct dma_sgt_handle, sgt)
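
/*
 * Illustrative sketch, not part of this header: the vmap path for
 * noncontiguous allocations recovers the stashed page array from the
 * returned sg_table roughly like this.  example_vmap_noncontiguous is a
 * hypothetical name and assumes <linux/vmalloc.h> is available.
 */
static void *example_vmap_noncontiguous(struct sg_table *sgt, size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* container_of() back to the dma_sgt_handle that owns the pages. */
	return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
}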

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
		const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
extern bool dma_default_coherent;
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
#define dma_default_coherent true

static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_{DEVICE,CPU,CPU_ALL} */

/*
 * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
 */
static inline bool dma_kmalloc_safe(struct device *dev,
				    enum dma_data_direction dir)
{
	/*
	 * If DMA bouncing of kmalloc() buffers is disabled, the kmalloc()
	 * caches have already been aligned to a DMA-safe size.
	 */
	if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
		return true;

	/*
	 * kmalloc() buffers are DMA-safe irrespective of size if the device
	 * is coherent or the direction is DMA_TO_DEVICE (non-destructive
	 * cache maintenance and benign cache line evictions).
	 */
	if (dev_is_dma_coherent(dev) || dir == DMA_TO_DEVICE)
		return true;

	return false;
}

/*
 * Check whether the given size, assuming it is for a kmalloc()'ed buffer, is
 * sufficiently aligned for non-coherent DMA.
 */
static inline bool dma_kmalloc_size_aligned(size_t size)
{
	/*
	 * Larger kmalloc() sizes are guaranteed to be aligned to
	 * ARCH_DMA_MINALIGN.
	 */
	if (size >= 2 * ARCH_DMA_MINALIGN ||
	    IS_ALIGNED(kmalloc_size_roundup(size), dma_get_cache_alignment()))
		return true;

	return false;
}

/*
 * Check whether the given object size may have originated from a kmalloc()
 * buffer with a slab alignment below the DMA-safe alignment and needs
 * bouncing for non-coherent DMA. The pointer alignment is not considered and
 * in-structure DMA-safe offsets are the responsibility of the caller. Such
 * code should use the static ARCH_DMA_MINALIGN for compiler annotations.
 *
 * The heuristics can have false positives, bouncing unnecessarily, though the
 * buffers would be small. False negatives are theoretically possible if, for
 * example, multiple small kmalloc() buffers are coalesced into a larger
 * buffer that passes the alignment check. There are no such known constructs
 * in the kernel.
 */
static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
					    enum dma_data_direction dir)
{
	return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
}
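
/*
 * Illustrative sketch, not part of this header: a mapping path could use
 * the helper above to decide whether a small kmalloc() buffer must be
 * bounced (e.g. through SWIOTLB) before handing it to a non-coherent
 * device.  example_needs_bounce is a hypothetical helper; the PageSlab()
 * check is an assumed way to restrict the test to slab-backed memory.
 */
static bool example_needs_bounce(struct device *dev, struct page *page,
		size_t size, enum dma_data_direction dir)
{
	/* Only slab memory can come from an under-aligned kmalloc() cache. */
	return PageSlab(page) && dma_kmalloc_needs_bounce(dev, size, dir);
}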

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently. We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */
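
/*
 * Illustrative sketch, not part of this header: an mmap implementation
 * applies dma_pgprot() to the VMA protection before inserting pages, so
 * that non-coherent devices get an uncached (or otherwise DMA-safe) user
 * mapping.  example_mmap_coherent is a hypothetical helper; it assumes a
 * physically contiguous buffer at @phys and that <linux/mm.h> is
 * available for remap_pfn_range().
 */
static int example_mmap_coherent(struct device *dev,
		struct vm_area_struct *vma, phys_addr_t phys, size_t size,
		unsigned long attrs)
{
	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	return remap_pfn_range(vma, vma->vm_start, PHYS_PFN(phys),
			       size, vma->vm_page_prot);
}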

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */

/* Hooks for architectures that can provide an uncached kernel alias. */
void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
#else
#define arch_dma_map_page_direct(d, a)		(false)
#define arch_dma_unmap_page_direct(d, a)	(false)
#define arch_dma_map_sg_direct(d, s, n)		(false)
#define arch_dma_unmap_sg_direct(d, s, n)	(false)
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;

enum pci_p2pdma_map_type {
	/*
	 * PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping
	 * type hasn't been calculated yet. Functions that return this enum
	 * never return this value.
	 */
	PCI_P2PDMA_MAP_UNKNOWN = 0,

	/*
	 * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
	 * traverse the host bridge and the host bridge is not in the
	 * allowlist. DMA mapping routines should return an error when
	 * this is returned.
	 */
	PCI_P2PDMA_MAP_NOT_SUPPORTED,

	/*
	 * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
	 * each other directly through a PCI switch and the transaction will
	 * not traverse the host bridge. Such a mapping should program
	 * the DMA engine with PCI bus addresses.
	 */
	PCI_P2PDMA_MAP_BUS_ADDR,

	/*
	 * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
	 * to each other, but the transaction traverses a host bridge on the
	 * allowlist. In this case, a normal mapping either with CPU physical
	 * addresses (in the case of dma-direct) or IOVA addresses (in the
	 * case of IOMMUs) should be used to program the DMA engine.
	 */
	PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
};

struct pci_p2pdma_map_state {
	struct dev_pagemap *pgmap;
	int map;
	u64 bus_off;
};

#ifdef CONFIG_PCI_P2PDMA
enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		       struct scatterlist *sg);
#else /* CONFIG_PCI_P2PDMA */
static inline enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		       struct scatterlist *sg)
{
	return PCI_P2PDMA_MAP_NOT_SUPPORTED;
}
#endif /* CONFIG_PCI_P2PDMA */
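
/*
 * Illustrative sketch, not part of this header: a map_sg implementation
 * that advertises DMA_F_PCI_P2PDMA_SUPPORTED consults
 * pci_p2pdma_map_segment() for P2PDMA segments and acts on the returned
 * mapping type roughly as follows.  example_map_sg_segment is a
 * hypothetical helper; the bus-address case is assumed to be handled by
 * pci_p2pdma_map_segment() rewriting the segment itself.
 */
static int example_map_sg_segment(struct pci_p2pdma_map_state *state,
		struct device *dev, struct scatterlist *sg)
{
	switch (pci_p2pdma_map_segment(state, dev, sg)) {
	case PCI_P2PDMA_MAP_BUS_ADDR:
		return 0;	/* segment already carries a PCI bus address */
	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
		return 1;	/* map normally (dma-direct or IOMMU) */
	default:
		return -EREMOTEIO;	/* peer-to-peer DMA not supported */
	}
}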

#endif /* _LINUX_DMA_MAP_OPS_H */