// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
bool dma_default_coherent = IS_ENABLED(CONFIG_ARCH_DMA_DEFAULT_COHERENT);
#endif

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs(). Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
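
/*
 * Example (illustrative sketch only, not part of this file): a hypothetical
 * driver using the managed API from its probe routine.  The allocation is
 * released automatically when the driver detaches, so no explicit unwind
 * path is needed.  "foo_probe" and the 4 KiB size are made up for
 * illustration.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct device *dev = &pdev->dev;
 *		dma_addr_t dma_handle;
 *		void *vaddr;
 *
 *		vaddr = dmam_alloc_attrs(dev, SZ_4K, &dma_handle, GFP_KERNEL, 0);
 *		if (!vaddr)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */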

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (likely(!ops))
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}

/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	kmsan_handle_dma(page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_page_direct(dev, addr + size))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);

static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_sg_direct(dev, sg, nents))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);

	if (ents > 0) {
		kmsan_handle_dma_sg(sg, nents, dir);
		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
				ents != -EIO && ents != -EREMOTEIO)) {
		return -EIO;
	}

	return ents;
}

/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sg: The scatterlist describing the buffer
 * @nents: Number of entries to map
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than nents)
 * on success. Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int ret;

	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	if (ret < 0)
		return 0;
	return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);
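
/*
 * Example (illustrative sketch only): mapping a driver-built scatterlist and
 * walking the mapped segments.  The returned count may be smaller than the
 * original nents, but dma_unmap_sg() must still be called with the original
 * nents.  "sgl", "nents" and "foo_queue_desc" are made up for illustration.
 *
 *	struct scatterlist *sg;
 *	int mapped, i;
 *
 *	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, mapped, i)
 *		foo_queue_desc(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */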

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or a negative error code on error. The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL	An invalid argument, unaligned access or other error
 *		in usage. Will not succeed if retried.
 *   -ENOMEM	Insufficient resources (like memory or IOVA space) to
 *		complete the mapping. Should succeed if retried later.
 *   -EIO	Legacy error code with an unknown meaning, e.g. this is
 *		returned if a lower level call returned
 *		DMA_MAPPING_ERROR.
 *   -EREMOTEIO	The DMA device cannot access P2PDMA memory specified
 *		in the sg_table. Will not succeed if retried.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents < 0)
		return nents;
	sgt->nents = nents;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);
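
/*
 * Example (illustrative sketch only): mapping an sg_table and acting on the
 * documented error codes.  Only -ENOMEM is worth retrying; the other codes
 * indicate a permanent failure.  On success the device-visible entries are
 * described by the first sgt->nents segments.  "foo_setup_descs" is made up
 * for illustration.
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret == -ENOMEM)
 *		goto retry_later;
 *	if (ret)
 *		return ret;
 *
 *	foo_setup_descs(sgt);
 *	...
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */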

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_sg_direct(dev, sg, nents))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts an
 * sg_table. This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
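
/*
 * Example (illustrative sketch only): exposing a coherent buffer to user
 * space from a hypothetical driver's ->mmap() file operation, using the
 * dma_mmap_coherent() wrapper around dma_mmap_attrs().  "struct foo" and its
 * fields are made up; the buffer must stay allocated for the lifetime of the
 * user mapping.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo *p = file->private_data;
 *
 *		return dma_mmap_coherent(p->dev, vma, p->cpu_addr,
 *					 p->dma_handle, p->size);
 *	}
 */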

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
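
/*
 * Example (illustrative sketch only): a driver that supports both 32-bit and
 * 64-bit descriptor formats can consult the required mask to avoid the more
 * expensive format when all memory is addressable with 32 bits.
 * "use_64bit_descs" is made up for illustration.
 *
 *	if (dma_get_required_mask(dev) > DMA_BIT_MASK(32))
 *		use_64bit_descs = true;
 */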

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	/*
	 * DMA allocations can never be turned back into a page pointer, so
	 * requesting compound pages doesn't make sense (and can't even be
	 * supported at all by various backends).
	 */
	if (WARN_ON_ONCE(flag & __GFP_COMP))
		return NULL;

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
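
/*
 * Example (illustrative sketch only): an unmanaged coherent allocation paired
 * with the matching free.  dma_alloc_coherent() with GFP_KERNEL may sleep,
 * and dma_free_attrs()/dma_free_coherent() must not be called with IRQs
 * disabled (see the WARN_ON(irqs_disabled()) above).  "FOO_RING_SIZE" is made
 * up for illustration.
 *
 *	ring = dma_alloc_coherent(dev, FOO_RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, FOO_RING_SIZE, ring, ring_dma);
 */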

static struct page *__dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (!ops->alloc_pages)
		return NULL;
	return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	if (page)
		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(page) + vma->vm_pgoff,
			       vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);

static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp)
{
	struct sg_table *sgt;
	struct page *page;

	sgt = kmalloc(sizeof(*sgt), gfp);
	if (!sgt)
		return NULL;
	if (sg_alloc_table(sgt, 1, gfp))
		goto out_free_sgt;
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
	if (!page)
		goto out_free_table;
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
	return sgt;
out_free_table:
	sg_free_table(sgt);
out_free_sgt:
	kfree(sgt);
	return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct sg_table *sgt;

	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	if (ops && ops->alloc_noncontiguous)
		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
	else
		sgt = alloc_single_sgt(dev, size, dir, gfp);

	if (sgt) {
		sgt->nents = 1;
		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
	}
	return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);

static void free_single_sgt(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			 dir);
	sg_free_table(sgt);
	kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (ops && ops->free_noncontiguous)
		ops->free_noncontiguous(dev, size, sgt, dir);
	else
		free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (ops && ops->alloc_noncontiguous)
		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
	return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous)
		vunmap(vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous) {
		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		if (vma->vm_pgoff >= count ||
		    vma_pages(vma) > count - vma->vm_pgoff)
			return -ENXIO;
		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
	}
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);

static int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped.
	 */
	if (!ops)
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

bool dma_pci_p2pdma_supported(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/* if ops is not set, dma direct will be used which supports P2PDMA */
	if (!ops)
		return true;

	/*
	 * Note: dma_ops_bypass is not checked here because P2PDMA should
	 * not be used with dma mapping ops that do not have support even
	 * if the specific device is bypassing them.
	 */

	return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
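
/*
 * Example (illustrative sketch only): the usual probe-time pattern sets the
 * streaming and coherent masks together via dma_set_mask_and_coherent() and
 * falls back to 32 bits if the wider mask is not supported.
 *
 *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */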

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

size_t dma_opt_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

	return min(dma_max_mapping_size(dev), size);
}
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);