Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * Framework for buffer objects that can be shared across devices/subsystems.
4 : *
5 : * Copyright(C) 2011 Linaro Limited. All rights reserved.
6 : * Author: Sumit Semwal <sumit.semwal@ti.com>
7 : *
8 : * Many thanks to the linaro-mm-sig list, and especially
9 : * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10 : * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11 : * refining of this idea.
12 : */
13 :
14 : #include <linux/fs.h>
15 : #include <linux/slab.h>
16 : #include <linux/dma-buf.h>
17 : #include <linux/dma-fence.h>
18 : #include <linux/dma-fence-unwrap.h>
19 : #include <linux/anon_inodes.h>
20 : #include <linux/export.h>
21 : #include <linux/debugfs.h>
22 : #include <linux/module.h>
23 : #include <linux/seq_file.h>
24 : #include <linux/sync_file.h>
25 : #include <linux/poll.h>
26 : #include <linux/dma-resv.h>
27 : #include <linux/mm.h>
28 : #include <linux/mount.h>
29 : #include <linux/pseudo_fs.h>
30 :
31 : #include <uapi/linux/dma-buf.h>
32 : #include <uapi/linux/magic.h>
33 :
34 : #include "dma-buf-sysfs-stats.h"
35 :
36 : static inline int is_dma_buf_file(struct file *);
37 :
38 : struct dma_buf_list {
39 : struct list_head head;
40 : struct mutex lock;
41 : };
42 :
43 : static struct dma_buf_list db_list;
44 :
45 0 : static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
46 : {
47 : struct dma_buf *dmabuf;
48 : char name[DMA_BUF_NAME_LEN];
49 0 : size_t ret = 0;
50 :
51 0 : dmabuf = dentry->d_fsdata;
52 0 : spin_lock(&dmabuf->name_lock);
53 0 : if (dmabuf->name)
54 0 : ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
55 0 : spin_unlock(&dmabuf->name_lock);
56 :
57 0 : return dynamic_dname(buffer, buflen, "/%s:%s",
58 : dentry->d_name.name, ret > 0 ? name : "");
59 : }
60 :
61 0 : static void dma_buf_release(struct dentry *dentry)
62 : {
63 : struct dma_buf *dmabuf;
64 :
65 0 : dmabuf = dentry->d_fsdata;
66 0 : if (unlikely(!dmabuf))
67 : return;
68 :
69 0 : BUG_ON(dmabuf->vmapping_counter);
70 :
71 : /*
72 : * If you hit this BUG() it could mean:
73 : * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
74 : * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
75 : */
76 0 : BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
77 :
78 0 : dma_buf_stats_teardown(dmabuf);
79 0 : dmabuf->ops->release(dmabuf);
80 :
81 0 : if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
82 0 : dma_resv_fini(dmabuf->resv);
83 :
84 0 : WARN_ON(!list_empty(&dmabuf->attachments));
85 0 : module_put(dmabuf->owner);
86 0 : kfree(dmabuf->name);
87 0 : kfree(dmabuf);
88 : }
89 :
90 0 : static int dma_buf_file_release(struct inode *inode, struct file *file)
91 : {
92 : struct dma_buf *dmabuf;
93 :
94 0 : if (!is_dma_buf_file(file))
95 : return -EINVAL;
96 :
97 0 : dmabuf = file->private_data;
98 0 : if (dmabuf) {
99 0 : mutex_lock(&db_list.lock);
100 0 : list_del(&dmabuf->list_node);
101 0 : mutex_unlock(&db_list.lock);
102 : }
103 :
104 : return 0;
105 : }
106 :
107 : static const struct dentry_operations dma_buf_dentry_ops = {
108 : .d_dname = dmabuffs_dname,
109 : .d_release = dma_buf_release,
110 : };
111 :
112 : static struct vfsmount *dma_buf_mnt;
113 :
114 1 : static int dma_buf_fs_init_context(struct fs_context *fc)
115 : {
116 : struct pseudo_fs_context *ctx;
117 :
118 1 : ctx = init_pseudo(fc, DMA_BUF_MAGIC);
119 1 : if (!ctx)
120 : return -ENOMEM;
121 1 : ctx->dops = &dma_buf_dentry_ops;
122 1 : return 0;
123 : }
124 :
125 : static struct file_system_type dma_buf_fs_type = {
126 : .name = "dmabuf",
127 : .init_fs_context = dma_buf_fs_init_context,
128 : .kill_sb = kill_anon_super,
129 : };
130 :
131 0 : static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
132 : {
133 : struct dma_buf *dmabuf;
134 : int ret;
135 :
136 0 : if (!is_dma_buf_file(file))
137 : return -EINVAL;
138 :
139 0 : dmabuf = file->private_data;
140 :
141 : /* check if buffer supports mmap */
142 0 : if (!dmabuf->ops->mmap)
143 : return -EINVAL;
144 :
145 : /* check for overflowing the buffer's size */
146 0 : if (vma->vm_pgoff + vma_pages(vma) >
147 0 : dmabuf->size >> PAGE_SHIFT)
148 : return -EINVAL;
149 :
150 0 : dma_resv_lock(dmabuf->resv, NULL);
151 0 : ret = dmabuf->ops->mmap(dmabuf, vma);
152 0 : dma_resv_unlock(dmabuf->resv);
153 :
154 0 : return ret;
155 : }
156 :
157 0 : static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
158 : {
159 : struct dma_buf *dmabuf;
160 : loff_t base;
161 :
162 0 : if (!is_dma_buf_file(file))
163 : return -EBADF;
164 :
165 0 : dmabuf = file->private_data;
166 :
167 : /* only support discovering the end of the buffer,
168 : but also allow SEEK_SET(0) so the idiomatic size-discovery
169 : pattern (SEEK_END(0) followed by SEEK_SET(0)) keeps working */
170 0 : if (whence == SEEK_END)
171 0 : base = dmabuf->size;
172 0 : else if (whence == SEEK_SET)
173 : base = 0;
174 : else
175 : return -EINVAL;
176 :
177 0 : if (offset != 0)
178 : return -EINVAL;
179 :
180 0 : return base + offset;
181 : }
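
/*
 * Illustrative sketch (not part of this file): discovering a dma-buf's size
 * from userspace with the llseek support above. "dmabuf_fd" is assumed to
 * be a valid dma-buf file descriptor.
 */
#if 0
#include <sys/types.h>
#include <unistd.h>

static off_t dmabuf_get_size(int dmabuf_fd)
{
	off_t size = lseek(dmabuf_fd, 0, SEEK_END);

	if (size < 0)
		return -1;
	/* Rewind so the fd offset is back at the start of the buffer */
	if (lseek(dmabuf_fd, 0, SEEK_SET) < 0)
		return -1;
	return size;
}
#endif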
182 :
183 : /**
184 : * DOC: implicit fence polling
185 : *
186 : * To support cross-device and cross-driver synchronization of buffer access,
187 : * implicit fences (represented internally in the kernel with &struct dma_fence)
188 : * can be attached to a &dma_buf. The glue for that and a few related things are
189 : * provided in the &dma_resv structure.
190 : *
191 : * Userspace can query the state of these implicitly tracked fences using poll()
192 : * and related system calls:
193 : *
194 : * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
195 : * most recent write or exclusive fence.
196 : *
197 : * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
198 : * all attached fences, shared and exclusive ones.
199 : *
200 : * Note that this only signals the completion of the respective fences, i.e. the
201 : * DMA transfers are complete. Cache flushing and any other necessary
202 : * preparations before CPU access can begin still need to happen.
203 : *
204 : * As an alternative to poll(), the set of fences on a DMA buffer can be
205 : * exported as a &sync_file using &dma_buf_sync_file_export.
206 : */
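
/*
 * Illustrative sketch (not part of this file): waiting from userspace for
 * all fences on a dma-buf to signal, using the poll() semantics described
 * above. "dmabuf_fd" is assumed to be a valid dma-buf file descriptor.
 */
#if 0
#include <poll.h>

static int dmabuf_wait_idle(int dmabuf_fd)
{
	struct pollfd pfd = {
		.fd = dmabuf_fd,
		.events = POLLOUT,	/* all fences, shared and exclusive */
	};

	/* Blocks until every attached fence has signalled */
	return poll(&pfd, 1, -1) == 1 ? 0 : -1;
}
#endif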
207 :
208 0 : static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
209 : {
210 0 : struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
211 0 : struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
212 : unsigned long flags;
213 :
214 0 : spin_lock_irqsave(&dcb->poll->lock, flags);
215 0 : wake_up_locked_poll(dcb->poll, dcb->active);
216 0 : dcb->active = 0;
217 0 : spin_unlock_irqrestore(&dcb->poll->lock, flags);
218 0 : dma_fence_put(fence);
219 : /* Paired with get_file in dma_buf_poll */
220 0 : fput(dmabuf->file);
221 0 : }
222 :
223 0 : static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
224 : struct dma_buf_poll_cb_t *dcb)
225 : {
226 : struct dma_resv_iter cursor;
227 : struct dma_fence *fence;
228 : int r;
229 :
230 0 : dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
231 : fence) {
232 0 : dma_fence_get(fence);
233 0 : r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
234 0 : if (!r)
235 : return true;
236 0 : dma_fence_put(fence);
237 : }
238 :
239 : return false;
240 : }
241 :
242 0 : static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
243 : {
244 : struct dma_buf *dmabuf;
245 : struct dma_resv *resv;
246 : __poll_t events;
247 :
248 0 : dmabuf = file->private_data;
249 0 : if (!dmabuf || !dmabuf->resv)
250 : return EPOLLERR;
251 :
252 0 : resv = dmabuf->resv;
253 :
254 0 : poll_wait(file, &dmabuf->poll, poll);
255 :
256 0 : events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
257 0 : if (!events)
258 : return 0;
259 :
260 0 : dma_resv_lock(resv, NULL);
261 :
262 0 : if (events & EPOLLOUT) {
263 0 : struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
264 :
265 : /* Check that callback isn't busy */
266 0 : spin_lock_irq(&dmabuf->poll.lock);
267 0 : if (dcb->active)
268 0 : events &= ~EPOLLOUT;
269 : else
270 0 : dcb->active = EPOLLOUT;
271 0 : spin_unlock_irq(&dmabuf->poll.lock);
272 :
273 0 : if (events & EPOLLOUT) {
274 : /* Paired with fput in dma_buf_poll_cb */
275 0 : get_file(dmabuf->file);
276 :
277 0 : if (!dma_buf_poll_add_cb(resv, true, dcb))
278 : /* No callback queued, wake up any other waiters */
279 0 : dma_buf_poll_cb(NULL, &dcb->cb);
280 : else
281 0 : events &= ~EPOLLOUT;
282 : }
283 : }
284 :
285 0 : if (events & EPOLLIN) {
286 0 : struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
287 :
288 : /* Check that callback isn't busy */
289 0 : spin_lock_irq(&dmabuf->poll.lock);
290 0 : if (dcb->active)
291 0 : events &= ~EPOLLIN;
292 : else
293 0 : dcb->active = EPOLLIN;
294 0 : spin_unlock_irq(&dmabuf->poll.lock);
295 :
296 0 : if (events & EPOLLIN) {
297 : /* Paired with fput in dma_buf_poll_cb */
298 0 : get_file(dmabuf->file);
299 :
300 0 : if (!dma_buf_poll_add_cb(resv, false, dcb))
301 : /* No callback queued, wake up any other waiters */
302 0 : dma_buf_poll_cb(NULL, &dcb->cb);
303 : else
304 0 : events &= ~EPOLLIN;
305 : }
306 : }
307 :
308 0 : dma_resv_unlock(resv);
309 0 : return events;
310 : }
311 :
312 : /**
313 : * dma_buf_set_name - Set a name on a dma_buf to track its usage.
314 : * The name can be changed later, e.g. when the same piece of memory is
315 : * reused for a different purpose on a different device.
316 : *
317 : * @dmabuf: [in] dmabuf buffer that will be renamed.
318 : * @buf: [in] A piece of userspace memory that contains the name of
319 : * the dma-buf.
320 : *
321 : * Returns 0 on success, or a negative error code if the name cannot be
322 : * copied from userspace.
323 : *
324 : */
325 0 : static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
326 : {
327 0 : char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
328 :
329 0 : if (IS_ERR(name))
330 0 : return PTR_ERR(name);
331 :
332 0 : spin_lock(&dmabuf->name_lock);
333 0 : kfree(dmabuf->name);
334 0 : dmabuf->name = name;
335 0 : spin_unlock(&dmabuf->name_lock);
336 :
337 : return 0;
338 : }
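
/*
 * Illustrative sketch (not part of this file): naming a dma-buf from
 * userspace with the DMA_BUF_SET_NAME_B ioctl so it is easier to identify
 * in debug output. "dmabuf_fd" is assumed valid.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

static int dmabuf_set_name(int dmabuf_fd, const char *name)
{
	/* The kernel copies at most DMA_BUF_NAME_LEN bytes of the string */
	return ioctl(dmabuf_fd, DMA_BUF_SET_NAME_B, name);
}
#endif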
339 :
340 : #if IS_ENABLED(CONFIG_SYNC_FILE)
341 0 : static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
342 : void __user *user_data)
343 : {
344 : struct dma_buf_export_sync_file arg;
345 : enum dma_resv_usage usage;
346 0 : struct dma_fence *fence = NULL;
347 : struct sync_file *sync_file;
348 : int fd, ret;
349 :
350 0 : if (copy_from_user(&arg, user_data, sizeof(arg)))
351 : return -EFAULT;
352 :
353 0 : if (arg.flags & ~DMA_BUF_SYNC_RW)
354 : return -EINVAL;
355 :
356 0 : if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
357 : return -EINVAL;
358 :
359 0 : fd = get_unused_fd_flags(O_CLOEXEC);
360 0 : if (fd < 0)
361 0 : return fd;
362 :
363 0 : usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
364 0 : ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
365 0 : if (ret)
366 : goto err_put_fd;
367 :
368 0 : if (!fence)
369 0 : fence = dma_fence_get_stub();
370 :
371 0 : sync_file = sync_file_create(fence);
372 :
373 0 : dma_fence_put(fence);
374 :
375 0 : if (!sync_file) {
376 : ret = -ENOMEM;
377 : goto err_put_fd;
378 : }
379 :
380 0 : arg.fd = fd;
381 0 : if (copy_to_user(user_data, &arg, sizeof(arg))) {
382 0 : ret = -EFAULT;
383 : goto err_put_file;
384 : }
385 :
386 0 : fd_install(fd, sync_file->file);
387 :
388 : return 0;
389 :
390 : err_put_file:
391 0 : fput(sync_file->file);
392 : err_put_fd:
393 0 : put_unused_fd(fd);
394 0 : return ret;
395 : }
396 :
397 0 : static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
398 : const void __user *user_data)
399 : {
400 : struct dma_buf_import_sync_file arg;
401 : struct dma_fence *fence, *f;
402 : enum dma_resv_usage usage;
403 : struct dma_fence_unwrap iter;
404 : unsigned int num_fences;
405 0 : int ret = 0;
406 :
407 0 : if (copy_from_user(&arg, user_data, sizeof(arg)))
408 : return -EFAULT;
409 :
410 0 : if (arg.flags & ~DMA_BUF_SYNC_RW)
411 : return -EINVAL;
412 :
413 0 : if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
414 : return -EINVAL;
415 :
416 0 : fence = sync_file_get_fence(arg.fd);
417 0 : if (!fence)
418 : return -EINVAL;
419 :
420 0 : usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
421 : DMA_RESV_USAGE_READ;
422 :
423 0 : num_fences = 0;
424 0 : dma_fence_unwrap_for_each(f, &iter, fence)
425 0 : ++num_fences;
426 :
427 0 : if (num_fences > 0) {
428 0 : dma_resv_lock(dmabuf->resv, NULL);
429 :
430 0 : ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
431 0 : if (!ret) {
432 0 : dma_fence_unwrap_for_each(f, &iter, fence)
433 0 : dma_resv_add_fence(dmabuf->resv, f, usage);
434 : }
435 :
436 0 : dma_resv_unlock(dmabuf->resv);
437 : }
438 :
439 0 : dma_fence_put(fence);
440 :
441 0 : return ret;
442 : }
443 : #endif
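
/*
 * Illustrative sketch (not part of this file): driving the two ioctls above
 * from userspace to exchange fences between a dma-buf and sync_file fds.
 * "dmabuf_fd" and "sync_fd" are assumed valid.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

static int dmabuf_export_fences(int dmabuf_fd)
{
	struct dma_buf_export_sync_file arg = {
		.flags = DMA_BUF_SYNC_RW,	/* snapshot read and write fences */
		.fd = -1,
	};

	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg))
		return -1;
	return arg.fd;	/* new sync_file fd, signals when the DMA is done */
}

static int dmabuf_import_fence(int dmabuf_fd, int sync_fd)
{
	struct dma_buf_import_sync_file arg = {
		.flags = DMA_BUF_SYNC_WRITE,	/* add as a write fence */
		.fd = sync_fd,
	};

	return ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &arg);
}
#endif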
444 :
445 0 : static long dma_buf_ioctl(struct file *file,
446 : unsigned int cmd, unsigned long arg)
447 : {
448 : struct dma_buf *dmabuf;
449 : struct dma_buf_sync sync;
450 : enum dma_data_direction direction;
451 : int ret;
452 :
453 0 : dmabuf = file->private_data;
454 :
455 0 : switch (cmd) {
456 : case DMA_BUF_IOCTL_SYNC:
457 0 : if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
458 : return -EFAULT;
459 :
460 0 : if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
461 : return -EINVAL;
462 :
463 0 : switch (sync.flags & DMA_BUF_SYNC_RW) {
464 : case DMA_BUF_SYNC_READ:
465 : direction = DMA_FROM_DEVICE;
466 : break;
467 : case DMA_BUF_SYNC_WRITE:
468 0 : direction = DMA_TO_DEVICE;
469 0 : break;
470 : case DMA_BUF_SYNC_RW:
471 0 : direction = DMA_BIDIRECTIONAL;
472 0 : break;
473 : default:
474 : return -EINVAL;
475 : }
476 :
477 0 : if (sync.flags & DMA_BUF_SYNC_END)
478 0 : ret = dma_buf_end_cpu_access(dmabuf, direction);
479 : else
480 0 : ret = dma_buf_begin_cpu_access(dmabuf, direction);
481 :
482 0 : return ret;
483 :
484 : case DMA_BUF_SET_NAME_A:
485 : case DMA_BUF_SET_NAME_B:
486 0 : return dma_buf_set_name(dmabuf, (const char __user *)arg);
487 :
488 : #if IS_ENABLED(CONFIG_SYNC_FILE)
489 : case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
490 0 : return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
491 : case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
492 0 : return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
493 : #endif
494 :
495 : default:
496 : return -ENOTTY;
497 : }
498 : }
499 :
500 0 : static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
501 : {
502 0 : struct dma_buf *dmabuf = file->private_data;
503 :
504 0 : seq_printf(m, "size:\t%zu\n", dmabuf->size);
505 : /* Don't count the temporary reference taken inside procfs seq_show */
506 0 : seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
507 0 : seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
508 0 : spin_lock(&dmabuf->name_lock);
509 0 : if (dmabuf->name)
510 0 : seq_printf(m, "name:\t%s\n", dmabuf->name);
511 0 : spin_unlock(&dmabuf->name_lock);
512 0 : }
513 :
514 : static const struct file_operations dma_buf_fops = {
515 : .release = dma_buf_file_release,
516 : .mmap = dma_buf_mmap_internal,
517 : .llseek = dma_buf_llseek,
518 : .poll = dma_buf_poll,
519 : .unlocked_ioctl = dma_buf_ioctl,
520 : .compat_ioctl = compat_ptr_ioctl,
521 : .show_fdinfo = dma_buf_show_fdinfo,
522 : };
523 :
524 : /*
525 : * is_dma_buf_file - Check if struct file* is associated with dma_buf
526 : */
527 : static inline int is_dma_buf_file(struct file *file)
528 : {
529 : return file->f_op == &dma_buf_fops;
530 : }
531 :
532 0 : static struct file *dma_buf_getfile(size_t size, int flags)
533 : {
534 : static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
535 0 : struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
536 : struct file *file;
537 :
538 0 : if (IS_ERR(inode))
539 : return ERR_CAST(inode);
540 :
541 0 : inode->i_size = size;
542 0 : inode_set_bytes(inode, size);
543 :
544 : /*
545 : * The ->i_ino acquired from get_next_ino() is not unique and thus
546 : * not suitable for use as the dentry name by dmabuf stats.
547 : * Override ->i_ino with a unique, dmabuffs-specific
548 : * value.
549 : */
550 0 : inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
551 0 : flags &= O_ACCMODE | O_NONBLOCK;
552 0 : file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
553 : flags, &dma_buf_fops);
554 0 : if (IS_ERR(file))
555 : goto err_alloc_file;
556 :
557 : return file;
558 :
559 : err_alloc_file:
560 0 : iput(inode);
561 0 : return file;
562 : }
563 :
564 : /**
565 : * DOC: dma buf device access
566 : *
567 : * For device DMA access to a shared DMA buffer the usual sequence of operations
568 : * is fairly simple:
569 : *
570 : * 1. The exporter defines its exporter instance using
571 : * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
572 : * buffer object into a &dma_buf. It then exports that &dma_buf to userspace
573 : * as a file descriptor by calling dma_buf_fd().
574 : *
575 : * 2. Userspace passes this file descriptor to all drivers it wants this buffer
576 : * to share with: First the file descriptor is converted to a &dma_buf using
577 : * dma_buf_get(). Then the buffer is attached to the device using
578 : * dma_buf_attach().
579 : *
580 : * Up to this stage the exporter is still free to migrate or reallocate the
581 : * backing storage.
582 : *
583 : * 3. Once the buffer is attached to all devices userspace can initiate DMA
584 : * access to the shared buffer. In the kernel this is done by calling
585 : * dma_buf_map_attachment() and dma_buf_unmap_attachment().
586 : *
587 : * 4. Once a driver is done with a shared buffer it needs to call
588 : * dma_buf_detach() (after cleaning up any mappings) and then release the
589 : * reference acquired with dma_buf_get() by calling dma_buf_put().
590 : *
591 : * For the detailed semantics exporters are expected to implement see
592 : * &dma_buf_ops.
593 : */
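
/*
 * Illustrative sketch (not part of this file): the importer-side flow from
 * the steps above in a hypothetical driver. "my_dev" is an assumed
 * struct device and the device programming step is elided.
 */
#if 0
static int my_driver_import(struct device *my_dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int ret;

	/* Step 2: convert the fd and attach the device */
	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, my_dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto out_put;
	}

	/* Step 3: map for DMA (the _unlocked variant takes the resv lock) */
	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto out_detach;
	}

	/* ... program the device with the DMA addresses in sgt ... */

	/* Step 4: tear everything down again */
	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
	ret = 0;
out_detach:
	dma_buf_detach(dmabuf, attach);
out_put:
	dma_buf_put(dmabuf);
	return ret;
}
#endif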
594 :
595 : /**
596 : * dma_buf_export - Creates a new dma_buf, and associates an anon file
597 : * with this buffer, so it can be exported.
598 : * Also connects the allocator-specific data and ops to the buffer.
599 : * Additionally, provides a name string for the exporter; useful in debugging.
600 : *
601 : * @exp_info: [in] holds all the export related information provided
602 : * by the exporter. see &struct dma_buf_export_info
603 : * for further details.
604 : *
605 : * Returns, on success, a newly created struct dma_buf object, which wraps the
606 : * supplied private data and operations for &struct dma_buf_ops. On failure,
607 : * e.g. missing ops or an allocation error, it returns a negative error code
608 : * wrapped in an ERR_PTR.
609 : *
610 : * For most cases the easiest way to create @exp_info is through the
611 : * %DEFINE_DMA_BUF_EXPORT_INFO macro.
612 : */
613 0 : struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
614 : {
615 : struct dma_buf *dmabuf;
616 0 : struct dma_resv *resv = exp_info->resv;
617 : struct file *file;
618 0 : size_t alloc_size = sizeof(struct dma_buf);
619 : int ret;
620 :
621 0 : if (WARN_ON(!exp_info->priv || !exp_info->ops
622 : || !exp_info->ops->map_dma_buf
623 : || !exp_info->ops->unmap_dma_buf
624 : || !exp_info->ops->release))
625 : return ERR_PTR(-EINVAL);
626 :
627 0 : if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
628 : (exp_info->ops->pin || exp_info->ops->unpin)))
629 : return ERR_PTR(-EINVAL);
630 :
631 0 : if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
632 : return ERR_PTR(-EINVAL);
633 :
634 0 : if (!try_module_get(exp_info->owner))
635 : return ERR_PTR(-ENOENT);
636 :
637 0 : file = dma_buf_getfile(exp_info->size, exp_info->flags);
638 0 : if (IS_ERR(file)) {
639 0 : ret = PTR_ERR(file);
640 0 : goto err_module;
641 : }
642 :
643 0 : if (!exp_info->resv)
644 : alloc_size += sizeof(struct dma_resv);
645 : else
646 : /* prevent &dma_buf[1] == dma_buf->resv */
647 0 : alloc_size += 1;
648 0 : dmabuf = kzalloc(alloc_size, GFP_KERNEL);
649 0 : if (!dmabuf) {
650 0 : ret = -ENOMEM;
651 : goto err_file;
652 : }
653 :
654 0 : dmabuf->priv = exp_info->priv;
655 0 : dmabuf->ops = exp_info->ops;
656 0 : dmabuf->size = exp_info->size;
657 0 : dmabuf->exp_name = exp_info->exp_name;
658 0 : dmabuf->owner = exp_info->owner;
659 0 : spin_lock_init(&dmabuf->name_lock);
660 0 : init_waitqueue_head(&dmabuf->poll);
661 0 : dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
662 0 : dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
663 0 : INIT_LIST_HEAD(&dmabuf->attachments);
664 :
665 0 : if (!resv) {
666 0 : dmabuf->resv = (struct dma_resv *)&dmabuf[1];
667 0 : dma_resv_init(dmabuf->resv);
668 : } else {
669 0 : dmabuf->resv = resv;
670 : }
671 :
672 0 : ret = dma_buf_stats_setup(dmabuf, file);
673 : if (ret)
674 : goto err_dmabuf;
675 :
676 0 : file->private_data = dmabuf;
677 0 : file->f_path.dentry->d_fsdata = dmabuf;
678 0 : dmabuf->file = file;
679 :
680 0 : mutex_lock(&db_list.lock);
681 0 : list_add(&dmabuf->list_node, &db_list.head);
682 0 : mutex_unlock(&db_list.lock);
683 :
684 0 : return dmabuf;
685 :
686 : err_dmabuf:
687 : if (!resv)
688 : dma_resv_fini(dmabuf->resv);
689 : kfree(dmabuf);
690 : err_file:
691 0 : fput(file);
692 : err_module:
693 0 : module_put(exp_info->owner);
694 0 : return ERR_PTR(ret);
695 : }
696 : EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
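
/*
 * Illustrative sketch (not part of this file): a hypothetical exporter
 * wrapping its private buffer "my_buf" (with assumed ops "my_dmabuf_ops")
 * and handing the result to userspace via dma_buf_fd().
 */
#if 0
static int my_export_to_fd(struct my_buffer *my_buf)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int fd;

	exp_info.ops = &my_dmabuf_ops;	/* must provide map/unmap/release */
	exp_info.size = my_buf->size;
	exp_info.flags = O_RDWR | O_CLOEXEC;
	exp_info.priv = my_buf;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);	/* drops the file reference again */
	return fd;
}
#endif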
697 :
698 : /**
699 : * dma_buf_fd - returns a file descriptor for the given struct dma_buf
700 : * @dmabuf: [in] pointer to dma_buf for which fd is required.
701 : * @flags: [in] flags to give to fd
702 : *
703 : * On success, returns an associated 'fd'. Else, returns a negative error code.
704 : */
705 0 : int dma_buf_fd(struct dma_buf *dmabuf, int flags)
706 : {
707 : int fd;
708 :
709 0 : if (!dmabuf || !dmabuf->file)
710 : return -EINVAL;
711 :
712 0 : fd = get_unused_fd_flags(flags);
713 0 : if (fd < 0)
714 : return fd;
715 :
716 0 : fd_install(fd, dmabuf->file);
717 :
718 0 : return fd;
719 : }
720 : EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
721 :
722 : /**
723 : * dma_buf_get - returns the struct dma_buf related to an fd
724 : * @fd: [in] fd associated with the struct dma_buf to be returned
725 : *
726 : * On success, returns the struct dma_buf associated with an fd; uses
727 : * the file's refcounting done by fget() to increase the refcount. Returns
728 : * an ERR_PTR otherwise.
729 : */
730 0 : struct dma_buf *dma_buf_get(int fd)
731 : {
732 : struct file *file;
733 :
734 0 : file = fget(fd);
735 :
736 0 : if (!file)
737 : return ERR_PTR(-EBADF);
738 :
739 0 : if (!is_dma_buf_file(file)) {
740 0 : fput(file);
741 0 : return ERR_PTR(-EINVAL);
742 : }
743 :
744 0 : return file->private_data;
745 : }
746 : EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
747 :
748 : /**
749 : * dma_buf_put - decreases refcount of the buffer
750 : * @dmabuf: [in] buffer to reduce refcount of
751 : *
752 : * Uses file's refcounting done implicitly by fput().
753 : *
754 : * If, as a result of this call, the refcount becomes 0, the 'release' file
755 : * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
756 : * in turn, and frees the memory allocated for dmabuf when exported.
757 : */
758 0 : void dma_buf_put(struct dma_buf *dmabuf)
759 : {
760 0 : if (WARN_ON(!dmabuf || !dmabuf->file))
761 : return;
762 :
763 0 : fput(dmabuf->file);
764 : }
765 : EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
766 :
767 : static void mangle_sg_table(struct sg_table *sg_table)
768 : {
769 : #ifdef CONFIG_DMABUF_DEBUG
770 : int i;
771 : struct scatterlist *sg;
772 :
773 : /* To catch abuse of the underlying struct page by importers, mix
774 : * up the bits, but take care to preserve the low SG_ bits to
775 : * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
776 : * before passing the sgt back to the exporter. */
777 : for_each_sgtable_sg(sg_table, sg, i)
778 : sg->page_link ^= ~0xffUL;
779 : #endif
780 : }
781 :
782 0 : static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
783 : enum dma_data_direction direction)
784 : {
785 : struct sg_table *sg_table;
786 : signed long ret;
787 :
788 0 : sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
789 0 : if (IS_ERR_OR_NULL(sg_table))
790 : return sg_table;
791 :
792 0 : if (!dma_buf_attachment_is_dynamic(attach)) {
793 0 : ret = dma_resv_wait_timeout(attach->dmabuf->resv,
794 : DMA_RESV_USAGE_KERNEL, true,
795 : MAX_SCHEDULE_TIMEOUT);
796 0 : if (ret < 0) {
797 0 : attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
798 : direction);
799 0 : return ERR_PTR(ret);
800 : }
801 : }
802 :
803 : mangle_sg_table(sg_table);
804 : return sg_table;
805 : }
806 :
807 : /**
808 : * DOC: locking convention
809 : *
810 : * In order to avoid deadlock situations between dma-buf exports and importers,
811 : * all dma-buf API users must follow the common dma-buf locking convention.
812 : *
813 : * Convention for importers
814 : *
815 : * 1. Importers must hold the dma-buf reservation lock when calling these
816 : * functions:
817 : *
818 : * - dma_buf_pin()
819 : * - dma_buf_unpin()
820 : * - dma_buf_map_attachment()
821 : * - dma_buf_unmap_attachment()
822 : * - dma_buf_vmap()
823 : * - dma_buf_vunmap()
824 : *
825 : * 2. Importers must not hold the dma-buf reservation lock when calling these
826 : * functions:
827 : *
828 : * - dma_buf_attach()
829 : * - dma_buf_dynamic_attach()
830 : * - dma_buf_detach()
831 : * - dma_buf_export()
832 : * - dma_buf_fd()
833 : * - dma_buf_get()
834 : * - dma_buf_put()
835 : * - dma_buf_mmap()
836 : * - dma_buf_begin_cpu_access()
837 : * - dma_buf_end_cpu_access()
838 : * - dma_buf_map_attachment_unlocked()
839 : * - dma_buf_unmap_attachment_unlocked()
840 : * - dma_buf_vmap_unlocked()
841 : * - dma_buf_vunmap_unlocked()
842 : *
843 : * Convention for exporters
844 : *
845 : * 1. These &dma_buf_ops callbacks are invoked with the dma-buf reservation
846 : * unlocked, so the exporter can take the lock:
847 : *
848 : * - &dma_buf_ops.attach()
849 : * - &dma_buf_ops.detach()
850 : * - &dma_buf_ops.release()
851 : * - &dma_buf_ops.begin_cpu_access()
852 : * - &dma_buf_ops.end_cpu_access()
853 : *
854 : * 2. These &dma_buf_ops callbacks are invoked with the dma-buf reservation
855 : * locked, so the exporter cannot take the lock again:
856 : *
857 : * - &dma_buf_ops.pin()
858 : * - &dma_buf_ops.unpin()
859 : * - &dma_buf_ops.map_dma_buf()
860 : * - &dma_buf_ops.unmap_dma_buf()
861 : * - &dma_buf_ops.mmap()
862 : * - &dma_buf_ops.vmap()
863 : * - &dma_buf_ops.vunmap()
864 : *
865 : * 3. Exporters must hold the dma-buf reservation lock when calling these
866 : * functions:
867 : *
868 : * - dma_buf_move_notify()
869 : */
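
/*
 * Illustrative sketch (not part of this file): the two importer call styles
 * permitted by the convention above, for an assumed attachment "attach".
 */
#if 0
static struct sg_table *my_map(struct dma_buf_attachment *attach)
{
	struct sg_table *sgt;

	/* Either take the reservation lock around the locked variant ... */
	dma_resv_lock(attach->dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	dma_resv_unlock(attach->dmabuf->resv);

	/* ... or call dma_buf_map_attachment_unlocked() without holding it. */
	return sgt;
}
#endif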
870 :
871 : /**
872 : * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
873 : * @dmabuf: [in] buffer to attach device to.
874 : * @dev: [in] device to be attached.
875 : * @importer_ops: [in] importer operations for the attachment
876 : * @importer_priv: [in] importer private pointer for the attachment
877 : *
878 : * Returns struct dma_buf_attachment pointer for this attachment. Attachments
879 : * must be cleaned up by calling dma_buf_detach().
880 : *
881 : * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
882 : * functionality.
883 : *
884 : * Returns:
885 : *
886 : * A pointer to newly created &dma_buf_attachment on success, or a negative
887 : * error code wrapped into a pointer on failure.
888 : *
889 : * Note that this can fail if the backing storage of @dmabuf is in a place not
890 : * accessible to @dev, and cannot be moved to a more suitable place. This is
891 : * indicated with the error code -EBUSY.
892 : */
893 : struct dma_buf_attachment *
894 0 : dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
895 : const struct dma_buf_attach_ops *importer_ops,
896 : void *importer_priv)
897 : {
898 : struct dma_buf_attachment *attach;
899 : int ret;
900 :
901 0 : if (WARN_ON(!dmabuf || !dev))
902 : return ERR_PTR(-EINVAL);
903 :
904 0 : if (WARN_ON(importer_ops && !importer_ops->move_notify))
905 : return ERR_PTR(-EINVAL);
906 :
907 0 : attach = kzalloc(sizeof(*attach), GFP_KERNEL);
908 0 : if (!attach)
909 : return ERR_PTR(-ENOMEM);
910 :
911 0 : attach->dev = dev;
912 0 : attach->dmabuf = dmabuf;
913 0 : if (importer_ops)
914 0 : attach->peer2peer = importer_ops->allow_peer2peer;
915 0 : attach->importer_ops = importer_ops;
916 0 : attach->importer_priv = importer_priv;
917 :
918 0 : if (dmabuf->ops->attach) {
919 0 : ret = dmabuf->ops->attach(dmabuf, attach);
920 0 : if (ret)
921 : goto err_attach;
922 : }
923 0 : dma_resv_lock(dmabuf->resv, NULL);
924 0 : list_add(&attach->node, &dmabuf->attachments);
925 0 : dma_resv_unlock(dmabuf->resv);
926 :
927 : /* When either the importer or the exporter can't handle dynamic
928 : * mappings we cache the mapping here to avoid issues with the
929 : * reservation object lock.
930 : */
931 0 : if (dma_buf_attachment_is_dynamic(attach) !=
932 0 : dma_buf_is_dynamic(dmabuf)) {
933 : struct sg_table *sgt;
934 :
935 0 : dma_resv_lock(attach->dmabuf->resv, NULL);
936 0 : if (dma_buf_is_dynamic(attach->dmabuf)) {
937 0 : ret = dmabuf->ops->pin(attach);
938 0 : if (ret)
939 : goto err_unlock;
940 : }
941 :
942 0 : sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
943 0 : if (!sgt)
944 0 : sgt = ERR_PTR(-ENOMEM);
945 0 : if (IS_ERR(sgt)) {
946 0 : ret = PTR_ERR(sgt);
947 : goto err_unpin;
948 : }
949 0 : dma_resv_unlock(attach->dmabuf->resv);
950 0 : attach->sgt = sgt;
951 0 : attach->dir = DMA_BIDIRECTIONAL;
952 : }
953 :
954 : return attach;
955 :
956 : err_attach:
957 0 : kfree(attach);
958 0 : return ERR_PTR(ret);
959 :
960 : err_unpin:
961 0 : if (dma_buf_is_dynamic(attach->dmabuf))
962 0 : dmabuf->ops->unpin(attach);
963 :
964 : err_unlock:
965 0 : dma_resv_unlock(attach->dmabuf->resv);
966 :
967 0 : dma_buf_detach(dmabuf, attach);
968 0 : return ERR_PTR(ret);
969 : }
970 : EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
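
/*
 * Illustrative sketch (not part of this file): a hypothetical dynamic
 * importer. move_notify is mandatory for dynamic attachments;
 * "my_invalidate_mappings" is an assumed driver helper.
 */
#if 0
static void my_move_notify(struct dma_buf_attachment *attach)
{
	/* Called with the reservation lock held: drop any cached mappings */
	my_invalidate_mappings(attach->importer_priv);
}

static const struct dma_buf_attach_ops my_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = my_move_notify,
};

static struct dma_buf_attachment *my_attach(struct dma_buf *dmabuf,
					    struct device *dev, void *priv)
{
	return dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, priv);
}
#endif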
971 :
972 : /**
973 : * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
974 : * @dmabuf: [in] buffer to attach device to.
975 : * @dev: [in] device to be attached.
976 : *
977 : * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
978 : * mapping.
979 : */
980 0 : struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
981 : struct device *dev)
982 : {
983 0 : return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
984 : }
985 : EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
986 :
987 : static void __unmap_dma_buf(struct dma_buf_attachment *attach,
988 : struct sg_table *sg_table,
989 : enum dma_data_direction direction)
990 : {
991 : /* uses XOR, hence this unmangles */
992 0 : mangle_sg_table(sg_table);
993 :
994 0 : attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
995 : }
996 :
997 : /**
998 : * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
999 : * @dmabuf: [in] buffer to detach from.
1000 : * @attach: [in] attachment to be detached; is free'd after this call.
1001 : *
1002 : * Clean up a device attachment obtained by calling dma_buf_attach().
1003 : *
1004 : * Optionally this calls &dma_buf_ops.detach for device-specific detach.
1005 : */
1006 0 : void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
1007 : {
1008 0 : if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
1009 : return;
1010 :
1011 0 : dma_resv_lock(dmabuf->resv, NULL);
1012 :
1013 0 : if (attach->sgt) {
1014 :
1015 0 : __unmap_dma_buf(attach, attach->sgt, attach->dir);
1016 :
1017 0 : if (dma_buf_is_dynamic(attach->dmabuf))
1018 0 : dmabuf->ops->unpin(attach);
1019 : }
1020 0 : list_del(&attach->node);
1021 :
1022 0 : dma_resv_unlock(dmabuf->resv);
1023 :
1024 0 : if (dmabuf->ops->detach)
1025 0 : dmabuf->ops->detach(dmabuf, attach);
1026 :
1027 0 : kfree(attach);
1028 : }
1029 : EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
1030 :
1031 : /**
1032 : * dma_buf_pin - Lock down the DMA-buf
1033 : * @attach: [in] attachment which should be pinned
1034 : *
1035 : * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
1036 : * call this, and only for limited use cases like scanout and not for temporary
1037 : * pin operations. It is not permitted to allow userspace to pin arbitrary
1038 : * amounts of buffers through this interface.
1039 : *
1040 : * Buffers must be unpinned by calling dma_buf_unpin().
1041 : *
1042 : * Returns:
1043 : * 0 on success, negative error code on failure.
1044 : */
1045 0 : int dma_buf_pin(struct dma_buf_attachment *attach)
1046 : {
1047 0 : struct dma_buf *dmabuf = attach->dmabuf;
1048 0 : int ret = 0;
1049 :
1050 0 : WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1051 :
1052 : dma_resv_assert_held(dmabuf->resv);
1053 :
1054 0 : if (dmabuf->ops->pin)
1055 0 : ret = dmabuf->ops->pin(attach);
1056 :
1057 0 : return ret;
1058 : }
1059 : EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
1060 :
1061 : /**
1062 : * dma_buf_unpin - Unpin a DMA-buf
1063 : * @attach: [in] attachment which should be unpinned
1064 : *
1065 : * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
1066 : * any mapping of @attach again and inform the importer through
1067 : * &dma_buf_attach_ops.move_notify.
1068 : */
1069 0 : void dma_buf_unpin(struct dma_buf_attachment *attach)
1070 : {
1071 0 : struct dma_buf *dmabuf = attach->dmabuf;
1072 :
1073 0 : WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1074 :
1075 : dma_resv_assert_held(dmabuf->resv);
1076 :
1077 0 : if (dmabuf->ops->unpin)
1078 0 : dmabuf->ops->unpin(attach);
1079 0 : }
1080 : EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
1081 :
1082 : /**
1083 : * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
1084 : * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1085 : * dma_buf_ops.
1086 : * @attach: [in] attachment whose scatterlist is to be returned
1087 : * @direction: [in] direction of DMA transfer
1088 : *
1089 : * Returns the sg_table containing the scatterlist of the buffer, or an
1090 : * ERR_PTR on error. May return -EINTR if it is interrupted by a signal.
1091 : *
1092 : * On success, the DMA addresses and lengths in the returned scatterlist are
1093 : * PAGE_SIZE aligned.
1094 : *
1095 : * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
1096 : * the underlying backing storage is pinned for as long as a mapping exists,
1097 : * therefore users/importers should not hold onto a mapping for undue amounts of
1098 : * time.
1099 : *
1100 : * Important: Dynamic importers must wait for the exclusive fence of the struct
1101 : * dma_resv attached to the DMA-BUF first.
1102 : */
1103 0 : struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
1104 : enum dma_data_direction direction)
1105 : {
1106 : struct sg_table *sg_table;
1107 : int r;
1108 :
1109 : might_sleep();
1110 :
1111 0 : if (WARN_ON(!attach || !attach->dmabuf))
1112 : return ERR_PTR(-EINVAL);
1113 :
1114 0 : dma_resv_assert_held(attach->dmabuf->resv);
1115 :
1116 0 : if (attach->sgt) {
1117 : /*
1118 : * Two mappings with different directions for the same
1119 : * attachment are not allowed.
1120 : */
1121 0 : if (attach->dir != direction &&
1122 : attach->dir != DMA_BIDIRECTIONAL)
1123 : return ERR_PTR(-EBUSY);
1124 :
1125 0 : return attach->sgt;
1126 : }
1127 :
1128 0 : if (dma_buf_is_dynamic(attach->dmabuf)) {
1129 : if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
1130 0 : r = attach->dmabuf->ops->pin(attach);
1131 0 : if (r)
1132 0 : return ERR_PTR(r);
1133 : }
1134 : }
1135 :
1136 0 : sg_table = __map_dma_buf(attach, direction);
1137 0 : if (!sg_table)
1138 0 : sg_table = ERR_PTR(-ENOMEM);
1139 :
1140 0 : if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
1141 : !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1142 0 : attach->dmabuf->ops->unpin(attach);
1143 :
1144 0 : if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
1145 0 : attach->sgt = sg_table;
1146 0 : attach->dir = direction;
1147 : }
1148 :
1149 : #ifdef CONFIG_DMA_API_DEBUG
1150 : if (!IS_ERR(sg_table)) {
1151 : struct scatterlist *sg;
1152 : u64 addr;
1153 : int len;
1154 : int i;
1155 :
1156 : for_each_sgtable_dma_sg(sg_table, sg, i) {
1157 : addr = sg_dma_address(sg);
1158 : len = sg_dma_len(sg);
1159 : if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
1160 : pr_debug("%s: addr %llx or len %x is not page aligned!\n",
1161 : __func__, addr, len);
1162 : }
1163 : }
1164 : }
1165 : #endif /* CONFIG_DMA_API_DEBUG */
1166 : return sg_table;
1167 : }
1168 : EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
1169 :
1170 : /**
1171 : * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
1172 : * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1173 : * dma_buf_ops.
1174 : * @attach: [in] attachment whose scatterlist is to be returned
1175 : * @direction: [in] direction of DMA transfer
1176 : *
1177 : * Unlocked variant of dma_buf_map_attachment().
1178 : */
1179 : struct sg_table *
1180 0 : dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
1181 : enum dma_data_direction direction)
1182 : {
1183 : struct sg_table *sg_table;
1184 :
1185 : might_sleep();
1186 :
1187 0 : if (WARN_ON(!attach || !attach->dmabuf))
1188 : return ERR_PTR(-EINVAL);
1189 :
1190 0 : dma_resv_lock(attach->dmabuf->resv, NULL);
1191 0 : sg_table = dma_buf_map_attachment(attach, direction);
1192 0 : dma_resv_unlock(attach->dmabuf->resv);
1193 :
1194 0 : return sg_table;
1195 : }
1196 : EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
1197 :
1198 : /**
1199 : * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
1200 : * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
1201 : * dma_buf_ops.
1202 : * @attach: [in] attachment to unmap buffer from
1203 : * @sg_table: [in] scatterlist info of the buffer to unmap
1204 : * @direction: [in] direction of DMA transfer
1205 : *
1206 : * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1207 : */
1208 0 : void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
1209 : struct sg_table *sg_table,
1210 : enum dma_data_direction direction)
1211 : {
1212 : might_sleep();
1213 :
1214 0 : if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1215 : return;
1216 :
1217 0 : dma_resv_assert_held(attach->dmabuf->resv);
1218 :
1219 0 : if (attach->sgt == sg_table)
1220 : return;
1221 :
1222 0 : __unmap_dma_buf(attach, sg_table, direction);
1223 :
1224 0 : if (dma_buf_is_dynamic(attach->dmabuf) &&
1225 : !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1226 0 : dma_buf_unpin(attach);
1227 : }
1228 : EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
1229 :
1230 : /**
1231 : * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
1232 : * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
1233 : * dma_buf_ops.
1234 : * @attach: [in] attachment to unmap buffer from
1235 : * @sg_table: [in] scatterlist info of the buffer to unmap
1236 : * @direction: [in] direction of DMA transfer
1237 : *
1238 : * Unlocked variant of dma_buf_unmap_attachment().
1239 : */
1240 0 : void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
1241 : struct sg_table *sg_table,
1242 : enum dma_data_direction direction)
1243 : {
1244 : might_sleep();
1245 :
1246 0 : if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1247 : return;
1248 :
1249 0 : dma_resv_lock(attach->dmabuf->resv, NULL);
1250 0 : dma_buf_unmap_attachment(attach, sg_table, direction);
1251 0 : dma_resv_unlock(attach->dmabuf->resv);
1252 : }
1253 : EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
1254 :
1255 : /**
1256 : * dma_buf_move_notify - notify attachments that DMA-buf is moving
1257 : *
1258 : * @dmabuf: [in] buffer which is moving
1259 : *
1260 : * Informs all attachments that they need to destroy and recreate all their
1261 : * mappings.
1262 : */
1263 0 : void dma_buf_move_notify(struct dma_buf *dmabuf)
1264 : {
1265 : struct dma_buf_attachment *attach;
1266 :
1267 : dma_resv_assert_held(dmabuf->resv);
1268 :
1269 0 : list_for_each_entry(attach, &dmabuf->attachments, node)
1270 0 : if (attach->importer_ops)
1271 0 : attach->importer_ops->move_notify(attach);
1272 0 : }
1273 : EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
1274 :
1275 : /**
1276 : * DOC: cpu access
1277 : *
1278 : * There are multiple reasons for supporting CPU access to a dma buffer object:
1279 : *
1280 : * - Fallback operations in the kernel, for example when a device is connected
1281 : * over USB and the kernel needs to shuffle the data around first before
1282 : * sending it away. Cache coherency is handled by bracketing any transactions
1283 : * with calls to dma_buf_begin_cpu_access() and
1284 : * dma_buf_end_cpu_access().
1285 : *
1286 : * Since most kernel-internal dma-buf accesses need the entire buffer, a
1287 : * vmap interface is introduced. Note that on very old 32-bit architectures
1288 : * vmalloc space might be limited and result in vmap calls failing.
1289 : *
1290 : * Interfaces::
1291 : *
1292 : * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1293 : * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1294 : *
1295 : * The vmap call can fail if there is no vmap support in the exporter, or if
1296 : * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1297 : * count for all vmap access and calls down into the exporter's vmap function
1298 : * only when no vmapping exists, and only unmaps it once. Protection against
1299 : * concurrent vmap/vunmap calls is provided by the dma-buf reservation lock.
1300 : *
1301 : * - For full compatibility on the importer side with existing userspace
1302 : * interfaces, which might already support mmap'ing buffers. This is needed in
1303 : * many processing pipelines (e.g. feeding a software rendered image into a
1304 : * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
1305 : * framework already supported this, and mmap support was needed for DMA
1306 : * buffer file descriptors to replace ION buffers.
1307 : *
1308 : * There are no special interfaces; userspace simply calls mmap on the dma-buf
1309 : * fd. But like for CPU access there's a need to bracket the actual access,
1310 : * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
1311 : * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1312 : * be restarted.
1313 : *
1314 : * Some systems might need some sort of cache coherency management e.g. when
1315 : * CPU and GPU domains are being accessed through dma-buf at the same time.
1316 : * To circumvent this problem there are begin/end coherency markers, that
1317 : * forward directly to existing dma-buf device drivers vfunc hooks. Userspace
1318 : * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
1319 : * sequence would be used like following:
1320 : *
1321 : * - mmap dma-buf fd
1322 : * - for each drawing/upload cycle on the CPU: 1. SYNC_START ioctl, 2. read/write
1323 : * to the mmap'ed area, 3. SYNC_END ioctl. This can be repeated as often as you
1324 : * want (with the new data being consumed by say the GPU or the scanout
1325 : * device)
1326 : * - munmap once you don't need the buffer any more
1327 : *
1328 : * For correctness and optimal performance, it is always required to use
1329 : * SYNC_START and SYNC_END before and after, respectively, when accessing the
1330 : * mapped address. Userspace cannot rely on coherent access, even when there
1331 : * are systems where it just works without calling these ioctls.
1332 : *
1333 : * - And as a CPU fallback in userspace processing pipelines.
1334 : *
1335 : * Similar to the motivation for kernel cpu access it is again important that
1336 : * the userspace code of a given importing subsystem can use the same
1337 : * interfaces with an imported dma-buf buffer object as with a native buffer
1338 : * object. This is especially important for drm where the userspace part of
1339 : * contemporary OpenGL, X, and other drivers is huge, and reworking them to
1340 : * use a different way to mmap a buffer would be rather invasive.
1341 : *
1342 : * The assumption in the current dma-buf interfaces is that redirecting the
1343 : * initial mmap is all that's needed. A survey of some of the existing
1344 : * subsystems shows that no driver seems to do any nefarious thing like
1345 : * syncing up with outstanding asynchronous processing on the device or
1346 : * allocating special resources at fault time. So hopefully this is good
1347 : * enough, since adding interfaces to intercept pagefaults and allow pte
1348 : * shootdowns would increase the complexity quite a bit.
1349 : *
1350 : * Interface::
1351 : *
1352 : * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1353 : * unsigned long);
1354 : *
1355 : * If the importing subsystem simply provides a special-purpose mmap call to
1356 : * set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1357 : * equally achieve that for a dma-buf object.
1358 : */
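
/*
 * Illustrative sketch (not part of this file): the mmap + DMA_BUF_IOCTL_SYNC
 * bracketing sequence described above, from userspace. "dmabuf_fd" and
 * "size" are assumed valid.
 */
#if 0
#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

static int dmabuf_sync(int fd, __u64 flags)
{
	struct dma_buf_sync sync = { .flags = flags };
	int ret;

	/* DMA_BUF_IOCTL_SYNC may fail with EAGAIN/EINTR and must be retried */
	do {
		ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
	} while (ret == -1 && (errno == EAGAIN || errno == EINTR));

	return ret;
}

static int dmabuf_cpu_fill(int dmabuf_fd, size_t size, int byte)
{
	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 dmabuf_fd, 0);

	if (map == MAP_FAILED)
		return -1;

	dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE);
	memset(map, byte, size);	/* the actual CPU access */
	dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE);

	munmap(map, size);
	return 0;
}
#endif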
1359 :
1360 : static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1361 : enum dma_data_direction direction)
1362 : {
1363 0 : bool write = (direction == DMA_BIDIRECTIONAL ||
1364 : direction == DMA_TO_DEVICE);
1365 0 : struct dma_resv *resv = dmabuf->resv;
1366 : long ret;
1367 :
1368 : /* Wait on any implicit rendering fences */
1369 0 : ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
1370 : true, MAX_SCHEDULE_TIMEOUT);
1371 0 : if (ret < 0)
1372 0 : return ret;
1373 :
1374 : return 0;
1375 : }
1376 :
1377 : /**
1378 : * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1379 : * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1380 : * preparations. Coherency is only guaranteed in the specified range for the
1381 : * specified access direction.
1382 : * @dmabuf: [in] buffer to prepare cpu access for.
1383 : * @direction: [in] direction of access.
1384 : *
1385 : * After the cpu access is complete the caller should call
1386 : * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
1387 : * it guaranteed to be coherent with other DMA access.
1388 : *
1389 : * This function will also wait for any DMA transactions tracked through
1390 : * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1391 : * synchronization this function will only ensure cache coherency, callers must
1392 : * ensure synchronization with such DMA transactions on their own.
1393 : *
1394 : * Can return negative error values, returns 0 on success.
1395 : */
1396 0 : int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1397 : enum dma_data_direction direction)
1398 : {
1399 0 : int ret = 0;
1400 :
1401 0 : if (WARN_ON(!dmabuf))
1402 : return -EINVAL;
1403 :
1404 : might_lock(&dmabuf->resv->lock.base);
1405 :
1406 0 : if (dmabuf->ops->begin_cpu_access)
1407 0 : ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1408 :
1409 : /* Ensure that all fences are waited upon - but we first allow
1410 : * the native handler the chance to do so more efficiently if it
1411 : * chooses. A double invocation here will be a reasonably cheap no-op.
1412 : */
1413 0 : if (ret == 0)
1414 0 : ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1415 :
1416 : return ret;
1417 : }
1418 : EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
1419 :
1420 : /**
1421 : * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1422 : * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1423 : * actions. Coherency is only guaranteed in the specified range for the
1424 : * specified access direction.
1425 : * @dmabuf: [in] buffer to complete cpu access for.
1426 : * @direction: [in] direction of access.
1427 : *
1428 : * This terminates CPU access started with dma_buf_begin_cpu_access().
1429 : *
1430 : * Can return negative error values, returns 0 on success.
1431 : */
1432 0 : int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1433 : enum dma_data_direction direction)
1434 : {
1435 0 : int ret = 0;
1436 :
1437 0 : WARN_ON(!dmabuf);
1438 :
1439 : might_lock(&dmabuf->resv->lock.base);
1440 :
1441 0 : if (dmabuf->ops->end_cpu_access)
1442 0 : ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1443 :
1444 0 : return ret;
1445 : }
1446 : EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
1447 :
1448 :
1449 : /**
1450 : * dma_buf_mmap - Set up a userspace mmap with the given vma
1451 : * @dmabuf: [in] buffer that should back the vma
1452 : * @vma: [in] vma for the mmap
1453 : * @pgoff: [in] offset in pages where this mmap should start within the
1454 : * dma-buf buffer.
1455 : *
1456 : * This function adjusts the passed in vma so that it points at the file of the
1457 : * dma_buf operation. It also adjusts the starting pgoff and does bounds
1458 : * checking on the size of the vma. Then it calls the exporter's mmap function to
1459 : * set up the mapping.
1460 : *
1461 : * Can return negative error values, returns 0 on success.
1462 : */
1463 0 : int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1464 : unsigned long pgoff)
1465 : {
1466 : int ret;
1467 :
1468 0 : if (WARN_ON(!dmabuf || !vma))
1469 : return -EINVAL;
1470 :
1471 : /* check if buffer supports mmap */
1472 0 : if (!dmabuf->ops->mmap)
1473 : return -EINVAL;
1474 :
1475 : /* check for offset overflow */
1476 0 : if (pgoff + vma_pages(vma) < pgoff)
1477 : return -EOVERFLOW;
1478 :
1479 : /* check for overflowing the buffer's size */
1480 0 : if (pgoff + vma_pages(vma) >
1481 0 : dmabuf->size >> PAGE_SHIFT)
1482 : return -EINVAL;
1483 :
1484 : /* readjust the vma */
1485 0 : vma_set_file(vma, dmabuf->file);
1486 0 : vma->vm_pgoff = pgoff;
1487 :
1488 0 : dma_resv_lock(dmabuf->resv, NULL);
1489 0 : ret = dmabuf->ops->mmap(dmabuf, vma);
1490 0 : dma_resv_unlock(dmabuf->resv);
1491 :
1492 0 : return ret;
1493 : }
1494 : EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
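
/*
 * Illustrative sketch (not part of this file): a hypothetical importing
 * driver forwarding its own mmap file operation to the exporter.
 * "my_obj_from_file" is an assumed driver helper.
 */
#if 0
static int my_driver_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_object *obj = my_obj_from_file(file);

	/* Re-points the vma at the dma-buf file and maps from page offset 0 */
	return dma_buf_mmap(obj->dmabuf, vma, 0);
}
#endif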
1495 :
1496 : /**
1497 : * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1498 : * address space. Same restrictions as for vmap and friends apply.
1499 : * @dmabuf: [in] buffer to vmap
1500 : * @map: [out] returns the vmap pointer
1501 : *
1502 : * This call may fail due to lack of virtual mapping address space.
1503 : * These calls are optional in drivers. The intended use for them
1504 : * is mapping objects linearly into kernel address space for heavily used objects.
1505 : *
1506 : * To ensure coherency users must call dma_buf_begin_cpu_access() and
1507 : * dma_buf_end_cpu_access() around any cpu access performed through this
1508 : * mapping.
1509 : *
1510 : * Returns 0 on success, or a negative errno code otherwise.
1511 : */
1512 0 : int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
1513 : {
1514 : struct iosys_map ptr;
1515 : int ret;
1516 :
1517 0 : iosys_map_clear(map);
1518 :
1519 0 : if (WARN_ON(!dmabuf))
1520 : return -EINVAL;
1521 :
1522 : dma_resv_assert_held(dmabuf->resv);
1523 :
1524 0 : if (!dmabuf->ops->vmap)
1525 : return -EINVAL;
1526 :
1527 0 : if (dmabuf->vmapping_counter) {
1528 0 : dmabuf->vmapping_counter++;
1529 0 : BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1530 0 : *map = dmabuf->vmap_ptr;
1531 0 : return 0;
1532 : }
1533 :
1534 0 : BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
1535 :
1536 0 : ret = dmabuf->ops->vmap(dmabuf, &ptr);
1537 0 : if (WARN_ON_ONCE(ret))
1538 : return ret;
1539 :
1540 0 : dmabuf->vmap_ptr = ptr;
1541 0 : dmabuf->vmapping_counter = 1;
1542 :
1543 0 : *map = dmabuf->vmap_ptr;
1544 :
1545 0 : return 0;
1546 : }
1547 : EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
1548 :
1549 : /**
1550 : * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
1551 : * address space. Same restrictions as for vmap and friends apply.
1552 : * @dmabuf: [in] buffer to vmap
1553 : * @map: [out] returns the vmap pointer
1554 : *
1555 : * Unlocked version of dma_buf_vmap()
1556 : *
1557 : * Returns 0 on success, or a negative errno code otherwise.
1558 : */
1559 0 : int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1560 : {
1561 : int ret;
1562 :
1563 0 : iosys_map_clear(map);
1564 :
1565 0 : if (WARN_ON(!dmabuf))
1566 : return -EINVAL;
1567 :
1568 0 : dma_resv_lock(dmabuf->resv, NULL);
1569 0 : ret = dma_buf_vmap(dmabuf, map);
1570 0 : dma_resv_unlock(dmabuf->resv);
1571 :
1572 0 : return ret;
1573 : }
1574 : EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
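
/*
 * Illustrative sketch (not part of this file): a kernel-internal CPU write
 * into a dma-buf combining vmap with the begin/end CPU-access bracket.
 * "src" and "len" are assumed to fit inside the buffer.
 */
#if 0
static int my_cpu_write(struct dma_buf *dmabuf, const void *src, size_t len)
{
	struct iosys_map map;
	int ret;

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
	if (ret)
		return ret;

	ret = dma_buf_vmap_unlocked(dmabuf, &map);
	if (!ret) {
		/* iosys_map handles both I/O and system memory mappings */
		iosys_map_memcpy_to(&map, 0, src, len);
		dma_buf_vunmap_unlocked(dmabuf, &map);
	}

	dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
	return ret;
}
#endif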
1575 :
1576 : /**
1577 : * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1578 : * @dmabuf: [in] buffer to vunmap
1579 : * @map: [in] vmap pointer to vunmap
1580 : */
1581 0 : void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
1582 : {
1583 0 : if (WARN_ON(!dmabuf))
1584 : return;
1585 :
1586 : dma_resv_assert_held(dmabuf->resv);
1587 :
1588 0 : BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1589 0 : BUG_ON(dmabuf->vmapping_counter == 0);
1590 0 : BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
1591 :
1592 0 : if (--dmabuf->vmapping_counter == 0) {
1593 0 : if (dmabuf->ops->vunmap)
1594 0 : dmabuf->ops->vunmap(dmabuf, map);
1595 0 : iosys_map_clear(&dmabuf->vmap_ptr);
1596 : }
1597 : }
1598 : EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
1599 :
1600 : /**
1601 : * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
1602 : * @dmabuf: [in] buffer to vunmap
1603 : * @map: [in] vmap pointer to vunmap
1604 : */
1605 0 : void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1606 : {
1607 0 : if (WARN_ON(!dmabuf))
1608 : return;
1609 :
1610 0 : dma_resv_lock(dmabuf->resv, NULL);
1611 0 : dma_buf_vunmap(dmabuf, map);
1612 0 : dma_resv_unlock(dmabuf->resv);
1613 : }
1614 : EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
1615 :
1616 : #ifdef CONFIG_DEBUG_FS
1617 : static int dma_buf_debug_show(struct seq_file *s, void *unused)
1618 : {
1619 : struct dma_buf *buf_obj;
1620 : struct dma_buf_attachment *attach_obj;
1621 : int count = 0, attach_count;
1622 : size_t size = 0;
1623 : int ret;
1624 :
1625 : ret = mutex_lock_interruptible(&db_list.lock);
1626 :
1627 : if (ret)
1628 : return ret;
1629 :
1630 : seq_puts(s, "\nDma-buf Objects:\n");
1631 : seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
1632 : "size", "flags", "mode", "count", "ino");
1633 :
1634 : list_for_each_entry(buf_obj, &db_list.head, list_node) {
1635 :
1636 : ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1637 : if (ret)
1638 : goto error_unlock;
1639 :
1640 :
1641 : spin_lock(&buf_obj->name_lock);
1642 : seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1643 : buf_obj->size,
1644 : buf_obj->file->f_flags, buf_obj->file->f_mode,
1645 : file_count(buf_obj->file),
1646 : buf_obj->exp_name,
1647 : file_inode(buf_obj->file)->i_ino,
1648 : buf_obj->name ?: "<none>");
1649 : spin_unlock(&buf_obj->name_lock);
1650 :
1651 : dma_resv_describe(buf_obj->resv, s);
1652 :
1653 : seq_puts(s, "\tAttached Devices:\n");
1654 : attach_count = 0;
1655 :
1656 : list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1657 : seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1658 : attach_count++;
1659 : }
1660 : dma_resv_unlock(buf_obj->resv);
1661 :
1662 : seq_printf(s, "Total %d devices attached\n\n",
1663 : attach_count);
1664 :
1665 : count++;
1666 : size += buf_obj->size;
1667 : }
1668 :
1669 : seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1670 :
1671 : mutex_unlock(&db_list.lock);
1672 : return 0;
1673 :
1674 : error_unlock:
1675 : mutex_unlock(&db_list.lock);
1676 : return ret;
1677 : }
1678 :
1679 : DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1680 :
1681 : static struct dentry *dma_buf_debugfs_dir;
1682 :
1683 : static int dma_buf_init_debugfs(void)
1684 : {
1685 : struct dentry *d;
1686 : int err = 0;
1687 :
1688 : d = debugfs_create_dir("dma_buf", NULL);
1689 : if (IS_ERR(d))
1690 : return PTR_ERR(d);
1691 :
1692 : dma_buf_debugfs_dir = d;
1693 :
1694 : d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1695 : NULL, &dma_buf_debug_fops);
1696 : if (IS_ERR(d)) {
1697 : pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1698 : debugfs_remove_recursive(dma_buf_debugfs_dir);
1699 : dma_buf_debugfs_dir = NULL;
1700 : err = PTR_ERR(d);
1701 : }
1702 :
1703 : return err;
1704 : }
1705 :
1706 : static void dma_buf_uninit_debugfs(void)
1707 : {
1708 : debugfs_remove_recursive(dma_buf_debugfs_dir);
1709 : }
1710 : #else
1711 : static inline int dma_buf_init_debugfs(void)
1712 : {
1713 : return 0;
1714 : }
1715 : static inline void dma_buf_uninit_debugfs(void)
1716 : {
1717 : }
1718 : #endif
1719 :
1720 1 : static int __init dma_buf_init(void)
1721 : {
1722 : int ret;
1723 :
1724 1 : ret = dma_buf_init_sysfs_statistics();
1725 : if (ret)
1726 : return ret;
1727 :
1728 1 : dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1729 2 : if (IS_ERR(dma_buf_mnt))
1730 0 : return PTR_ERR(dma_buf_mnt);
1731 :
1732 1 : mutex_init(&db_list.lock);
1733 1 : INIT_LIST_HEAD(&db_list.head);
1734 : dma_buf_init_debugfs();
1735 1 : return 0;
1736 : }
1737 : subsys_initcall(dma_buf_init);
1738 :
1739 0 : static void __exit dma_buf_deinit(void)
1740 : {
1741 : dma_buf_uninit_debugfs();
1742 0 : kern_unmount(dma_buf_mnt);
1743 : dma_buf_uninit_sysfs_statistics();
1744 0 : }
1745 : __exitcall(dma_buf_deinit);
|