// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

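/*
 * Translate a CPU physical address into a device DMA address. If the
 * device has to use unencrypted memory (e.g. under memory encryption
 * schemes such as AMD SME/SEV), use the variant that leaves the
 * encryption bit out of the returned address.
 */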
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

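/* Look up the struct page backing a dma-direct DMA handle. */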
static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

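/*
 * Report the smallest mask of the form 2^n - 1 that covers the highest
 * directly mappable DMA address. For example, if the start of the last
 * page of memory translates to DMA address 0x40000000, fls64() returns 31
 * and the result is (1ULL << 30) * 2 - 1 == 0x7fffffff, i.e. a 31-bit mask.
 */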
u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first. If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

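/*
 * Check that the whole [phys, phys + size) range is reachable through the
 * device's coherent DMA mask and bus limit.
 */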
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

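/*
 * Mark the kernel mapping of a DMA buffer as decrypted/encrypted for
 * devices that must not use encrypted memory. Both helpers are no-ops
 * unless force_dma_unencrypted() is true for the device.
 */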
static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	if (!force_dma_unencrypted(dev))
		return 0;
	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
	if (ret)
		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
	return ret;
}

static void __dma_direct_free_pages(struct device *dev, struct page *page,
		size_t size)
{
	if (swiotlb_free(dev, page, size))
		return;
	dma_free_contiguous(dev, page, size);
}

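/*
 * Allocate from the device's (e.g. restricted) swiotlb pool and verify
 * that the result is actually addressable by the device.
 */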
static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		swiotlb_free(dev, page, size);
		return NULL;
	}

	return page;
}

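/*
 * Core page allocation for dma-direct: prefer CMA/contiguous memory, then
 * fall back to the page allocator, retrying with GFP_DMA32 and finally
 * GFP_DMA if the pages that came back are not addressable by the device.
 */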
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, bool allow_highmem)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_swiotlb(dev, size);

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page) {
		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
		    (!allow_highmem && PageHighMem(page))) {
			dma_free_contiguous(dev, page, size);
			page = NULL;
		}
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

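/*
 * Allocate from the pre-populated atomic pools for callers that cannot
 * block, returning both the kernel address and the DMA handle.
 */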
static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_mask;
	void *ret;

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
		return NULL;

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_mask);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}

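/*
 * DMA_ATTR_NO_KERNEL_MAPPING allocation: no kernel virtual address is set
 * up, the struct page pointer is handed back as an opaque cookie that
 * dma_direct_free() later takes apart again.
 */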
static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	if (!PageHighMem(page))
		arch_dma_prep_coherent(page, size);

	/* return the page pointer as the opaque cookie */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
}

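/*
 * Main coherent allocation entry point. Depending on device coherency,
 * kernel configuration and the passed attributes this returns plain
 * pages, dips into the atomic pools, remaps the buffer with a coherent
 * pgprot, or switches the kernel alias to uncached.
 */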
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	bool remap = false, set_uncached = false;
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

	if (!dev_is_dma_coherent(dev)) {
		/*
		 * Fallback to the arch handler if it exists. This should
		 * eventually go away.
		 */
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
		    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
		    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
		    !is_swiotlb_for_alloc(dev))
			return arch_dma_alloc(dev, size, dma_handle, gfp,
					      attrs);

		/*
		 * If there is a global pool, always allocate from it for
		 * non-coherent devices.
		 */
		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
			return dma_alloc_from_global_coherent(dev, size,
					dma_handle);

		/*
		 * Otherwise remap if the architecture is asking for it. But
		 * given that remapping memory is a blocking operation we'll
		 * instead have to dip into the atomic pools.
		 */
		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
		if (remap) {
			if (dma_direct_use_pool(dev, gfp))
				return dma_direct_alloc_from_pool(dev, size,
						dma_handle, gfp);
		} else {
			if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
				return NULL;
			set_uncached = true;
		}
	}

	/*
	 * Decrypting memory may block, so allocate the memory from the atomic
	 * pools if we can't block.
	 */
	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/*
	 * dma_alloc_contiguous can return highmem pages depending on a
	 * combination of the cma= arguments and per-arch setup. These need
	 * to be remapped to return a kernel virtual address.
	 */
	if (PageHighMem(page)) {
		remap = true;
		set_uncached = false;
	}

	if (remap) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		if (force_dma_unencrypted(dev))
			prot = pgprot_decrypted(prot);

		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size, prot,
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
	} else {
		ret = page_address(page);
		if (dma_set_decrypted(dev, ret, size))
			goto out_free_pages;
	}

	memset(ret, 0, size);

	if (set_uncached) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}

	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (dma_set_encrypted(dev, page_address(page), size))
		return NULL;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
}

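/*
 * Undo dma_direct_alloc(): work out which allocation strategy was used
 * (page cookie, arch allocator, global pool, atomic pool, remap or
 * uncached alias) and release the memory accordingly.
 */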
void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev) &&
	    !is_swiotlb_for_alloc(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev)) {
		if (!dma_release_from_global_coherent(page_order, cpu_addr))
			WARN_ON_ONCE(1);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		vunmap(cpu_addr);
	} else {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
			arch_dma_clear_uncached(cpu_addr, size);
		if (dma_set_encrypted(dev, cpu_addr, size))
			return;
	}

	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}

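/*
 * Backend for dma_alloc_pages()/dma_free_pages(): always hands out real,
 * lowmem pages with a kernel direct-map address, zeroed and decrypted if
 * necessary, but never remapped or switched to uncached.
 */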
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp, false);
	if (!page)
		return NULL;

	ret = page_address(page);
	if (dma_set_decrypted(dev, ret, size))
		goto out_free_pages;
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (dma_set_encrypted(dev, vaddr, size))
		return;
	__dma_direct_free_pages(dev, page, size);
}

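/*
 * Scatterlist cache maintenance for handing a buffer to the device: copy
 * the data out to any swiotlb bounce buffer and let the architecture do
 * its writeback/invalidate before the device touches the memory.
 */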
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_device(dev, paddr, sg->length,
						       dir);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
						 dir);
	}
}
#endif

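/*
 * The CPU-ownership counterparts: architecture cache maintenance, copying
 * back from any swiotlb bounce buffer, and the helper that tears
 * scatterlist mappings down again.
 */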
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
						    dir);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

/*
 * Unmaps segments, except for ones marked as pci_p2pdma, which do not
 * require any further action as they contain a bus address.
 */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_is_dma_bus_address(sg))
			sg_dma_unmark_bus_address(sg);
		else
			dma_direct_unmap_page(dev, sg->dma_address,
					      sg_dma_len(sg), dir, attrs);
	}
}
#endif

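/*
 * Map a scatterlist for DMA. PCI peer-to-peer segments that can use the
 * bus address directly are marked as such and skipped, everything else is
 * mapped via dma_direct_map_page(). Returns the number of mapped entries
 * or a negative errno, in which case all work done so far is unwound.
 */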
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	struct scatterlist *sg;
	int i, ret;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Any P2P mapping that traverses the PCI
				 * host bridge must be mapped with CPU physical
				 * address and not PCI bus addresses. This is
				 * done with dma_direct_map_page() below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_unmap;
			}
		}

		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR) {
			ret = -EIO;
			goto out_unmap;
		}
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return ret;
}

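/*
 * "Map" an MMIO resource: dma-direct has nothing to set up here, so only
 * check that the physical address is reachable by the device; note that
 * the address is passed through without any dma-ranges translation.
 */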
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

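/*
 * mmap() a coherent allocation into userspace by remapping the physical
 * pages behind the DMA handle with a pgprot matching the way the kernel
 * mapping was set up.
 */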
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	if (force_dma_unencrypted(dev))
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32. If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active(dev) &&
	    (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
	       is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev: device pointer; needed to "own" the allocated memory.
 * @cpu_start: beginning of memory region covered by this offset.
 * @dma_start: beginning of DMA/PCI region covered by this offset.
 * @size: size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug. The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			  dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].offset = offset;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}