LCOV - code coverage report
Current view: top level - drivers/dma-buf - dma-buf.c (source / functions)
Test: coverage.info
Date: 2023-08-24 13:40:31

             Hit    Total    Coverage
Lines:        12      435       2.8 %
Functions:     2       39       5.1 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /*
       3             :  * Framework for buffer objects that can be shared across devices/subsystems.
       4             :  *
       5             :  * Copyright(C) 2011 Linaro Limited. All rights reserved.
       6             :  * Author: Sumit Semwal <sumit.semwal@ti.com>
       7             :  *
       8             :  * Many thanks to the linaro-mm-sig list, and especially
       9             :  * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
      10             :  * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
      11             :  * refining of this idea.
      12             :  */
      13             : 
      14             : #include <linux/fs.h>
      15             : #include <linux/slab.h>
      16             : #include <linux/dma-buf.h>
      17             : #include <linux/dma-fence.h>
      18             : #include <linux/dma-fence-unwrap.h>
      19             : #include <linux/anon_inodes.h>
      20             : #include <linux/export.h>
      21             : #include <linux/debugfs.h>
      22             : #include <linux/module.h>
      23             : #include <linux/seq_file.h>
      24             : #include <linux/sync_file.h>
      25             : #include <linux/poll.h>
      26             : #include <linux/dma-resv.h>
      27             : #include <linux/mm.h>
      28             : #include <linux/mount.h>
      29             : #include <linux/pseudo_fs.h>
      30             : 
      31             : #include <uapi/linux/dma-buf.h>
      32             : #include <uapi/linux/magic.h>
      33             : 
      34             : #include "dma-buf-sysfs-stats.h"
      35             : 
      36             : static inline int is_dma_buf_file(struct file *);
      37             : 
      38             : struct dma_buf_list {
      39             :         struct list_head head;
      40             :         struct mutex lock;
      41             : };
      42             : 
      43             : static struct dma_buf_list db_list;
      44             : 
      45           0 : static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
      46             : {
      47             :         struct dma_buf *dmabuf;
      48             :         char name[DMA_BUF_NAME_LEN];
      49           0 :         size_t ret = 0;
      50             : 
      51           0 :         dmabuf = dentry->d_fsdata;
      52           0 :         spin_lock(&dmabuf->name_lock);
      53           0 :         if (dmabuf->name)
      54           0 :                 ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
      55           0 :         spin_unlock(&dmabuf->name_lock);
      56             : 
      57           0 :         return dynamic_dname(buffer, buflen, "/%s:%s",
      58             :                              dentry->d_name.name, ret > 0 ? name : "");
      59             : }
      60             : 
      61           0 : static void dma_buf_release(struct dentry *dentry)
      62             : {
      63             :         struct dma_buf *dmabuf;
      64             : 
      65           0 :         dmabuf = dentry->d_fsdata;
      66           0 :         if (unlikely(!dmabuf))
      67             :                 return;
      68             : 
      69           0 :         BUG_ON(dmabuf->vmapping_counter);
      70             : 
      71             :         /*
      72             :          * If you hit this BUG() it could mean:
      73             :          * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
      74             :          * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
      75             :          */
      76           0 :         BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
      77             : 
      78           0 :         dma_buf_stats_teardown(dmabuf);
      79           0 :         dmabuf->ops->release(dmabuf);
      80             : 
      81           0 :         if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
      82           0 :                 dma_resv_fini(dmabuf->resv);
      83             : 
      84           0 :         WARN_ON(!list_empty(&dmabuf->attachments));
      85           0 :         module_put(dmabuf->owner);
      86           0 :         kfree(dmabuf->name);
      87           0 :         kfree(dmabuf);
      88             : }
      89             : 
      90           0 : static int dma_buf_file_release(struct inode *inode, struct file *file)
      91             : {
      92             :         struct dma_buf *dmabuf;
      93             : 
      94           0 :         if (!is_dma_buf_file(file))
      95             :                 return -EINVAL;
      96             : 
      97           0 :         dmabuf = file->private_data;
      98           0 :         if (dmabuf) {
      99           0 :                 mutex_lock(&db_list.lock);
     100           0 :                 list_del(&dmabuf->list_node);
     101           0 :                 mutex_unlock(&db_list.lock);
     102             :         }
     103             : 
     104             :         return 0;
     105             : }
     106             : 
     107             : static const struct dentry_operations dma_buf_dentry_ops = {
     108             :         .d_dname = dmabuffs_dname,
     109             :         .d_release = dma_buf_release,
     110             : };
     111             : 
     112             : static struct vfsmount *dma_buf_mnt;
     113             : 
     114           1 : static int dma_buf_fs_init_context(struct fs_context *fc)
     115             : {
     116             :         struct pseudo_fs_context *ctx;
     117             : 
     118           1 :         ctx = init_pseudo(fc, DMA_BUF_MAGIC);
     119           1 :         if (!ctx)
     120             :                 return -ENOMEM;
     121           1 :         ctx->dops = &dma_buf_dentry_ops;
     122           1 :         return 0;
     123             : }
     124             : 
     125             : static struct file_system_type dma_buf_fs_type = {
     126             :         .name = "dmabuf",
     127             :         .init_fs_context = dma_buf_fs_init_context,
     128             :         .kill_sb = kill_anon_super,
     129             : };
     130             : 
     131           0 : static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
     132             : {
     133             :         struct dma_buf *dmabuf;
     134             : 
     135           0 :         if (!is_dma_buf_file(file))
     136             :                 return -EINVAL;
     137             : 
     138           0 :         dmabuf = file->private_data;
     139             : 
     140             :         /* check if buffer supports mmap */
     141           0 :         if (!dmabuf->ops->mmap)
     142             :                 return -EINVAL;
     143             : 
     144             :         /* check for overflowing the buffer's size */
     145           0 :         if (vma->vm_pgoff + vma_pages(vma) >
     146           0 :             dmabuf->size >> PAGE_SHIFT)
     147             :                 return -EINVAL;
     148             : 
     149           0 :         return dmabuf->ops->mmap(dmabuf, vma);
     150             : }
     151             : 
     152           0 : static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
     153             : {
     154             :         struct dma_buf *dmabuf;
     155             :         loff_t base;
     156             : 
     157           0 :         if (!is_dma_buf_file(file))
     158             :                 return -EBADF;
     159             : 
     160           0 :         dmabuf = file->private_data;
     161             : 
      162             :         /* only support discovering the end of the buffer,
      163             :            but also allow SEEK_SET to maintain the idiomatic
      164             :            SEEK_END(0), SEEK_SET(0) pattern */
     165           0 :         if (whence == SEEK_END)
     166           0 :                 base = dmabuf->size;
     167           0 :         else if (whence == SEEK_SET)
     168             :                 base = 0;
     169             :         else
     170             :                 return -EINVAL;
     171             : 
     172           0 :         if (offset != 0)
     173             :                 return -EINVAL;
     174             : 
     175           0 :         return base + offset;
     176             : }
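
The SEEK_END(0)/SEEK_SET(0) idiom mentioned above is how userspace discovers a buffer's size. A minimal userspace sketch (error handling trimmed; dmabuf_size() is a hypothetical helper):

        #include <unistd.h>

        /* Query the size of a dma-buf fd; only offset 0 is accepted. */
        static off_t dmabuf_size(int fd)
        {
                off_t size = lseek(fd, 0, SEEK_END);    /* returns dmabuf->size */

                if (size >= 0)
                        lseek(fd, 0, SEEK_SET);         /* rewind, returns 0 */
                return size;
        }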
     177             : 
     178             : /**
     179             :  * DOC: implicit fence polling
     180             :  *
     181             :  * To support cross-device and cross-driver synchronization of buffer access
     182             :  * implicit fences (represented internally in the kernel with &struct dma_fence)
     183             :  * can be attached to a &dma_buf. The glue for that and a few related things are
     184             :  * provided in the &dma_resv structure.
     185             :  *
     186             :  * Userspace can query the state of these implicitly tracked fences using poll()
     187             :  * and related system calls:
     188             :  *
      189             :  * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
     190             :  *   most recent write or exclusive fence.
     191             :  *
     192             :  * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
     193             :  *   all attached fences, shared and exclusive ones.
     194             :  *
     195             :  * Note that this only signals the completion of the respective fences, i.e. the
     196             :  * DMA transfers are complete. Cache flushing and any other necessary
     197             :  * preparations before CPU access can begin still need to happen.
     198             :  *
      199             :  * As an alternative to poll(), the set of fences on a DMA buffer can be
      200             :  * exported as a &sync_file using the DMA_BUF_IOCTL_EXPORT_SYNC_FILE ioctl.
     201             :  */
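
A hedged userspace sketch of the polling interface described above (wait_for_writers() is a hypothetical helper; error handling trimmed):

        #include <poll.h>

        /* Block until the most recent write/exclusive fence has signaled.
         * POLLOUT instead would wait for all attached fences. */
        static int wait_for_writers(int dmabuf_fd)
        {
                struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };

                /* DMA is done on return; cache flushing may still be needed. */
                return poll(&pfd, 1, -1);
        }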
     202             : 
     203           0 : static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
     204             : {
     205           0 :         struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
     206           0 :         struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
     207             :         unsigned long flags;
     208             : 
     209           0 :         spin_lock_irqsave(&dcb->poll->lock, flags);
     210           0 :         wake_up_locked_poll(dcb->poll, dcb->active);
     211           0 :         dcb->active = 0;
     212           0 :         spin_unlock_irqrestore(&dcb->poll->lock, flags);
     213           0 :         dma_fence_put(fence);
     214             :         /* Paired with get_file in dma_buf_poll */
     215           0 :         fput(dmabuf->file);
     216           0 : }
     217             : 
     218           0 : static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
     219             :                                 struct dma_buf_poll_cb_t *dcb)
     220             : {
     221             :         struct dma_resv_iter cursor;
     222             :         struct dma_fence *fence;
     223             :         int r;
     224             : 
     225           0 :         dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
     226             :                                 fence) {
     227           0 :                 dma_fence_get(fence);
     228           0 :                 r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
     229           0 :                 if (!r)
     230             :                         return true;
     231           0 :                 dma_fence_put(fence);
     232             :         }
     233             : 
     234             :         return false;
     235             : }
     236             : 
     237           0 : static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
     238             : {
     239             :         struct dma_buf *dmabuf;
     240             :         struct dma_resv *resv;
     241             :         __poll_t events;
     242             : 
     243           0 :         dmabuf = file->private_data;
     244           0 :         if (!dmabuf || !dmabuf->resv)
     245             :                 return EPOLLERR;
     246             : 
     247           0 :         resv = dmabuf->resv;
     248             : 
     249           0 :         poll_wait(file, &dmabuf->poll, poll);
     250             : 
     251           0 :         events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
     252           0 :         if (!events)
     253             :                 return 0;
     254             : 
     255           0 :         dma_resv_lock(resv, NULL);
     256             : 
     257           0 :         if (events & EPOLLOUT) {
     258           0 :                 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
     259             : 
     260             :                 /* Check that callback isn't busy */
     261           0 :                 spin_lock_irq(&dmabuf->poll.lock);
     262           0 :                 if (dcb->active)
     263           0 :                         events &= ~EPOLLOUT;
     264             :                 else
     265           0 :                         dcb->active = EPOLLOUT;
     266           0 :                 spin_unlock_irq(&dmabuf->poll.lock);
     267             : 
     268           0 :                 if (events & EPOLLOUT) {
     269             :                         /* Paired with fput in dma_buf_poll_cb */
     270           0 :                         get_file(dmabuf->file);
     271             : 
     272           0 :                         if (!dma_buf_poll_add_cb(resv, true, dcb))
     273             :                                 /* No callback queued, wake up any other waiters */
     274           0 :                                 dma_buf_poll_cb(NULL, &dcb->cb);
     275             :                         else
     276           0 :                                 events &= ~EPOLLOUT;
     277             :                 }
     278             :         }
     279             : 
     280           0 :         if (events & EPOLLIN) {
     281           0 :                 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
     282             : 
     283             :                 /* Check that callback isn't busy */
     284           0 :                 spin_lock_irq(&dmabuf->poll.lock);
     285           0 :                 if (dcb->active)
     286           0 :                         events &= ~EPOLLIN;
     287             :                 else
     288           0 :                         dcb->active = EPOLLIN;
     289           0 :                 spin_unlock_irq(&dmabuf->poll.lock);
     290             : 
     291           0 :                 if (events & EPOLLIN) {
     292             :                         /* Paired with fput in dma_buf_poll_cb */
     293           0 :                         get_file(dmabuf->file);
     294             : 
     295           0 :                         if (!dma_buf_poll_add_cb(resv, false, dcb))
     296             :                                 /* No callback queued, wake up any other waiters */
     297           0 :                                 dma_buf_poll_cb(NULL, &dcb->cb);
     298             :                         else
     299           0 :                                 events &= ~EPOLLIN;
     300             :                 }
     301             :         }
     302             : 
     303           0 :         dma_resv_unlock(resv);
     304           0 :         return events;
     305             : }
     306             : 
     307             : /**
      308             :  * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
      309             :  * The name of a dma-buf can be changed if the same piece of memory is
      310             :  * used for multiple purposes by different devices.
      311             :  *
      312             :  * @dmabuf: [in]     dmabuf buffer that will be renamed.
      313             :  * @buf:    [in]     A piece of userspace memory that contains the name of
      314             :  *                   the dma-buf.
      315             :  *
      316             :  * Returns 0 on success, or a negative error code if copying the name
      317             :  * from userspace fails.
      318             :  *
      319             :  */
     320           0 : static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
     321             : {
     322           0 :         char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
     323             : 
     324           0 :         if (IS_ERR(name))
     325           0 :                 return PTR_ERR(name);
     326             : 
     327           0 :         spin_lock(&dmabuf->name_lock);
     328           0 :         kfree(dmabuf->name);
     329           0 :         dmabuf->name = name;
     330           0 :         spin_unlock(&dmabuf->name_lock);
     331             : 
     332             :         return 0;
     333             : }
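
Userspace reaches dma_buf_set_name() through the DMA_BUF_SET_NAME ioctls defined in <linux/dma-buf.h>. A minimal sketch (dmabuf_set_name() is a hypothetical helper; the name is capped at DMA_BUF_NAME_LEN):

        #include <sys/ioctl.h>
        #include <linux/dma-buf.h>

        /* Label a buffer for debugging; the name shows up in fdinfo. */
        static int dmabuf_set_name(int fd, const char *name)
        {
                return ioctl(fd, DMA_BUF_SET_NAME_B, name);
        }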
     334             : 
     335             : #if IS_ENABLED(CONFIG_SYNC_FILE)
     336           0 : static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
     337             :                                      void __user *user_data)
     338             : {
     339             :         struct dma_buf_export_sync_file arg;
     340             :         enum dma_resv_usage usage;
     341           0 :         struct dma_fence *fence = NULL;
     342             :         struct sync_file *sync_file;
     343             :         int fd, ret;
     344             : 
     345           0 :         if (copy_from_user(&arg, user_data, sizeof(arg)))
     346             :                 return -EFAULT;
     347             : 
     348           0 :         if (arg.flags & ~DMA_BUF_SYNC_RW)
     349             :                 return -EINVAL;
     350             : 
     351           0 :         if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
     352             :                 return -EINVAL;
     353             : 
     354           0 :         fd = get_unused_fd_flags(O_CLOEXEC);
     355           0 :         if (fd < 0)
     356           0 :                 return fd;
     357             : 
     358           0 :         usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
     359           0 :         ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
     360           0 :         if (ret)
     361             :                 goto err_put_fd;
     362             : 
     363           0 :         if (!fence)
     364           0 :                 fence = dma_fence_get_stub();
     365             : 
     366           0 :         sync_file = sync_file_create(fence);
     367             : 
     368           0 :         dma_fence_put(fence);
     369             : 
     370           0 :         if (!sync_file) {
     371             :                 ret = -ENOMEM;
     372             :                 goto err_put_fd;
     373             :         }
     374             : 
     375           0 :         arg.fd = fd;
     376           0 :         if (copy_to_user(user_data, &arg, sizeof(arg))) {
     377           0 :                 ret = -EFAULT;
     378             :                 goto err_put_file;
     379             :         }
     380             : 
     381           0 :         fd_install(fd, sync_file->file);
     382             : 
     383             :         return 0;
     384             : 
     385             : err_put_file:
     386           0 :         fput(sync_file->file);
     387             : err_put_fd:
     388           0 :         put_unused_fd(fd);
     389           0 :         return ret;
     390             : }
     391             : 
     392           0 : static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
     393             :                                      const void __user *user_data)
     394             : {
     395             :         struct dma_buf_import_sync_file arg;
     396             :         struct dma_fence *fence, *f;
     397             :         enum dma_resv_usage usage;
     398             :         struct dma_fence_unwrap iter;
     399             :         unsigned int num_fences;
     400           0 :         int ret = 0;
     401             : 
     402           0 :         if (copy_from_user(&arg, user_data, sizeof(arg)))
     403             :                 return -EFAULT;
     404             : 
     405           0 :         if (arg.flags & ~DMA_BUF_SYNC_RW)
     406             :                 return -EINVAL;
     407             : 
     408           0 :         if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
     409             :                 return -EINVAL;
     410             : 
     411           0 :         fence = sync_file_get_fence(arg.fd);
     412           0 :         if (!fence)
     413             :                 return -EINVAL;
     414             : 
     415           0 :         usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
     416             :                                                    DMA_RESV_USAGE_READ;
     417             : 
     418           0 :         num_fences = 0;
     419           0 :         dma_fence_unwrap_for_each(f, &iter, fence)
     420           0 :                 ++num_fences;
     421             : 
     422           0 :         if (num_fences > 0) {
     423           0 :                 dma_resv_lock(dmabuf->resv, NULL);
     424             : 
     425           0 :                 ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
     426           0 :                 if (!ret) {
     427           0 :                         dma_fence_unwrap_for_each(f, &iter, fence)
     428           0 :                                 dma_resv_add_fence(dmabuf->resv, f, usage);
     429             :                 }
     430             : 
     431           0 :                 dma_resv_unlock(dmabuf->resv);
     432             :         }
     433             : 
     434           0 :         dma_fence_put(fence);
     435             : 
     436           0 :         return ret;
     437             : }
     438             : #endif
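
A userspace sketch of the export path above (dmabuf_export_fences() is a hypothetical helper; the import direction is symmetric, passing an existing sync_file fd in @fd):

        #include <sys/ioctl.h>
        #include <linux/dma-buf.h>

        /* Snapshot the fences a reader would have to wait on as a sync_file. */
        static int dmabuf_export_fences(int dmabuf_fd)
        {
                struct dma_buf_export_sync_file arg = {
                        .flags = DMA_BUF_SYNC_READ,
                };

                if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg) < 0)
                        return -1;
                return arg.fd;          /* O_CLOEXEC sync_file fd */
        }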
     439             : 
     440           0 : static long dma_buf_ioctl(struct file *file,
     441             :                           unsigned int cmd, unsigned long arg)
     442             : {
     443             :         struct dma_buf *dmabuf;
     444             :         struct dma_buf_sync sync;
     445             :         enum dma_data_direction direction;
     446             :         int ret;
     447             : 
     448           0 :         dmabuf = file->private_data;
     449             : 
     450           0 :         switch (cmd) {
     451             :         case DMA_BUF_IOCTL_SYNC:
     452           0 :                 if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
     453             :                         return -EFAULT;
     454             : 
     455           0 :                 if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
     456             :                         return -EINVAL;
     457             : 
     458           0 :                 switch (sync.flags & DMA_BUF_SYNC_RW) {
     459             :                 case DMA_BUF_SYNC_READ:
     460             :                         direction = DMA_FROM_DEVICE;
     461             :                         break;
     462             :                 case DMA_BUF_SYNC_WRITE:
     463           0 :                         direction = DMA_TO_DEVICE;
     464           0 :                         break;
     465             :                 case DMA_BUF_SYNC_RW:
     466           0 :                         direction = DMA_BIDIRECTIONAL;
     467           0 :                         break;
     468             :                 default:
     469             :                         return -EINVAL;
     470             :                 }
     471             : 
     472           0 :                 if (sync.flags & DMA_BUF_SYNC_END)
     473           0 :                         ret = dma_buf_end_cpu_access(dmabuf, direction);
     474             :                 else
     475           0 :                         ret = dma_buf_begin_cpu_access(dmabuf, direction);
     476             : 
     477           0 :                 return ret;
     478             : 
     479             :         case DMA_BUF_SET_NAME_A:
     480             :         case DMA_BUF_SET_NAME_B:
     481           0 :                 return dma_buf_set_name(dmabuf, (const char __user *)arg);
     482             : 
     483             : #if IS_ENABLED(CONFIG_SYNC_FILE)
     484             :         case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
     485           0 :                 return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
     486             :         case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
     487           0 :                 return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
     488             : #endif
     489             : 
     490             :         default:
     491             :                 return -ENOTTY;
     492             :         }
     493             : }
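
The DMA_BUF_IOCTL_SYNC case above maps onto dma_buf_begin_cpu_access() and dma_buf_end_cpu_access(). A hedged userspace sketch bracketing a CPU read of an mmap()ed buffer (error handling trimmed):

        #include <sys/ioctl.h>
        #include <linux/dma-buf.h>

        static void cpu_read_bracket(int dmabuf_fd)
        {
                struct dma_buf_sync sync;

                sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ;
                ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);  /* begin access */

                /* ... CPU reads from the mmap()ed range go here ... */

                sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
                ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);  /* end access */
        }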
     494             : 
     495           0 : static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
     496             : {
     497           0 :         struct dma_buf *dmabuf = file->private_data;
     498             : 
     499           0 :         seq_printf(m, "size:\t%zu\n", dmabuf->size);
     500             :         /* Don't count the temporary reference taken inside procfs seq_show */
     501           0 :         seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
     502           0 :         seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
     503           0 :         spin_lock(&dmabuf->name_lock);
     504           0 :         if (dmabuf->name)
     505           0 :                 seq_printf(m, "name:\t%s\n", dmabuf->name);
     506           0 :         spin_unlock(&dmabuf->name_lock);
     507           0 : }
     508             : 
     509             : static const struct file_operations dma_buf_fops = {
     510             :         .release        = dma_buf_file_release,
     511             :         .mmap           = dma_buf_mmap_internal,
     512             :         .llseek         = dma_buf_llseek,
     513             :         .poll           = dma_buf_poll,
     514             :         .unlocked_ioctl = dma_buf_ioctl,
     515             :         .compat_ioctl   = compat_ptr_ioctl,
     516             :         .show_fdinfo    = dma_buf_show_fdinfo,
     517             : };
     518             : 
     519             : /*
     520             :  * is_dma_buf_file - Check if struct file* is associated with dma_buf
     521             :  */
     522             : static inline int is_dma_buf_file(struct file *file)
     523             : {
     524             :         return file->f_op == &dma_buf_fops;
     525             : }
     526             : 
     527           0 : static struct file *dma_buf_getfile(size_t size, int flags)
     528             : {
     529             :         static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
     530           0 :         struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
     531             :         struct file *file;
     532             : 
     533           0 :         if (IS_ERR(inode))
     534             :                 return ERR_CAST(inode);
     535             : 
     536           0 :         inode->i_size = size;
     537           0 :         inode_set_bytes(inode, size);
     538             : 
     539             :         /*
      540             :          * The ->i_ino acquired from get_next_ino() is not unique, and thus
      541             :          * not suitable for use as the dentry name by dmabuf stats.
      542             :          * Override ->i_ino with a unique, dmabuffs-specific
      543             :          * value.
     544             :          */
     545           0 :         inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
     546           0 :         flags &= O_ACCMODE | O_NONBLOCK;
     547           0 :         file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
     548             :                                  flags, &dma_buf_fops);
     549           0 :         if (IS_ERR(file))
     550             :                 goto err_alloc_file;
     551             : 
     552             :         return file;
     553             : 
     554             : err_alloc_file:
     555           0 :         iput(inode);
     556           0 :         return file;
     557             : }
     558             : 
     559             : /**
     560             :  * DOC: dma buf device access
     561             :  *
     562             :  * For device DMA access to a shared DMA buffer the usual sequence of operations
     563             :  * is fairly simple:
     564             :  *
      565             :  * 1. The exporter defines its exporter instance using
     566             :  *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
     567             :  *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
     568             :  *    as a file descriptor by calling dma_buf_fd().
     569             :  *
      570             :  * 2. Userspace passes this file descriptor to all drivers it wants this buffer
      571             :  *    to be shared with: First the file descriptor is converted to a &dma_buf using
     572             :  *    dma_buf_get(). Then the buffer is attached to the device using
     573             :  *    dma_buf_attach().
     574             :  *
     575             :  *    Up to this stage the exporter is still free to migrate or reallocate the
     576             :  *    backing storage.
     577             :  *
     578             :  * 3. Once the buffer is attached to all devices userspace can initiate DMA
     579             :  *    access to the shared buffer. In the kernel this is done by calling
     580             :  *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
     581             :  *
     582             :  * 4. Once a driver is done with a shared buffer it needs to call
     583             :  *    dma_buf_detach() (after cleaning up any mappings) and then release the
     584             :  *    reference acquired with dma_buf_get() by calling dma_buf_put().
     585             :  *
     586             :  * For the detailed semantics exporters are expected to implement see
     587             :  * &dma_buf_ops.
     588             :  */
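
A hedged in-kernel sketch of step 1 (my_buf, my_dma_buf_ops and my_export() are hypothetical placeholders for an exporter's own types):

        #include <linux/dma-buf.h>

        static struct dma_buf *my_export(struct my_buf *buf)
        {
                DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

                exp_info.ops = &my_dma_buf_ops; /* needs (un)map_dma_buf + release */
                exp_info.size = buf->size;
                exp_info.flags = O_RDWR;
                exp_info.priv = buf;

                /* On success, hand the buffer to userspace with dma_buf_fd(). */
                return dma_buf_export(&exp_info);
        }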
     589             : 
     590             : /**
     591             :  * dma_buf_export - Creates a new dma_buf, and associates an anon file
     592             :  * with this buffer, so it can be exported.
     593             :  * Also connect the allocator specific data and ops to the buffer.
     594             :  * Additionally, provide a name string for exporter; useful in debugging.
     595             :  *
     596             :  * @exp_info:   [in]    holds all the export related information provided
     597             :  *                      by the exporter. see &struct dma_buf_export_info
     598             :  *                      for further details.
     599             :  *
      600             :  * On success, returns a newly created struct dma_buf object, which wraps the
      601             :  * supplied private data and the operations in struct dma_buf_ops. On missing
      602             :  * ops, or on an error allocating the struct dma_buf, returns a negative error
      603             :  * wrapped in an ERR_PTR.
     604             :  *
     605             :  * For most cases the easiest way to create @exp_info is through the
     606             :  * %DEFINE_DMA_BUF_EXPORT_INFO macro.
     607             :  */
     608           0 : struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
     609             : {
     610             :         struct dma_buf *dmabuf;
     611           0 :         struct dma_resv *resv = exp_info->resv;
     612             :         struct file *file;
     613           0 :         size_t alloc_size = sizeof(struct dma_buf);
     614             :         int ret;
     615             : 
     616           0 :         if (WARN_ON(!exp_info->priv || !exp_info->ops
     617             :                     || !exp_info->ops->map_dma_buf
     618             :                     || !exp_info->ops->unmap_dma_buf
     619             :                     || !exp_info->ops->release))
     620             :                 return ERR_PTR(-EINVAL);
     621             : 
     622           0 :         if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
     623             :                     (exp_info->ops->pin || exp_info->ops->unpin)))
     624             :                 return ERR_PTR(-EINVAL);
     625             : 
     626           0 :         if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
     627             :                 return ERR_PTR(-EINVAL);
     628             : 
     629           0 :         if (!try_module_get(exp_info->owner))
     630             :                 return ERR_PTR(-ENOENT);
     631             : 
     632           0 :         file = dma_buf_getfile(exp_info->size, exp_info->flags);
     633           0 :         if (IS_ERR(file)) {
     634           0 :                 ret = PTR_ERR(file);
     635           0 :                 goto err_module;
     636             :         }
     637             : 
     638           0 :         if (!exp_info->resv)
     639             :                 alloc_size += sizeof(struct dma_resv);
     640             :         else
     641             :                 /* prevent &dma_buf[1] == dma_buf->resv */
     642           0 :                 alloc_size += 1;
     643           0 :         dmabuf = kzalloc(alloc_size, GFP_KERNEL);
     644           0 :         if (!dmabuf) {
     645           0 :                 ret = -ENOMEM;
     646             :                 goto err_file;
     647             :         }
     648             : 
     649           0 :         dmabuf->priv = exp_info->priv;
     650           0 :         dmabuf->ops = exp_info->ops;
     651           0 :         dmabuf->size = exp_info->size;
     652           0 :         dmabuf->exp_name = exp_info->exp_name;
     653           0 :         dmabuf->owner = exp_info->owner;
     654           0 :         spin_lock_init(&dmabuf->name_lock);
     655           0 :         init_waitqueue_head(&dmabuf->poll);
     656           0 :         dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
     657           0 :         dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
     658           0 :         INIT_LIST_HEAD(&dmabuf->attachments);
     659             : 
     660           0 :         if (!resv) {
     661           0 :                 dmabuf->resv = (struct dma_resv *)&dmabuf[1];
     662           0 :                 dma_resv_init(dmabuf->resv);
     663             :         } else {
     664           0 :                 dmabuf->resv = resv;
     665             :         }
     666             : 
     667           0 :         ret = dma_buf_stats_setup(dmabuf, file);
     668             :         if (ret)
     669             :                 goto err_dmabuf;
     670             : 
     671           0 :         file->private_data = dmabuf;
     672           0 :         file->f_path.dentry->d_fsdata = dmabuf;
     673           0 :         dmabuf->file = file;
     674             : 
     675           0 :         mutex_lock(&db_list.lock);
     676           0 :         list_add(&dmabuf->list_node, &db_list.head);
     677           0 :         mutex_unlock(&db_list.lock);
     678             : 
     679           0 :         return dmabuf;
     680             : 
     681             : err_dmabuf:
     682             :         if (!resv)
     683             :                 dma_resv_fini(dmabuf->resv);
     684             :         kfree(dmabuf);
     685             : err_file:
     686           0 :         fput(file);
     687             : err_module:
     688           0 :         module_put(exp_info->owner);
     689           0 :         return ERR_PTR(ret);
     690             : }
     691             : EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
     692             : 
     693             : /**
     694             :  * dma_buf_fd - returns a file descriptor for the given struct dma_buf
     695             :  * @dmabuf:     [in]    pointer to dma_buf for which fd is required.
     696             :  * @flags:      [in]    flags to give to fd
     697             :  *
      698             :  * On success, returns the newly installed file descriptor. Otherwise, returns a negative error code.
     699             :  */
     700           0 : int dma_buf_fd(struct dma_buf *dmabuf, int flags)
     701             : {
     702             :         int fd;
     703             : 
     704           0 :         if (!dmabuf || !dmabuf->file)
     705             :                 return -EINVAL;
     706             : 
     707           0 :         fd = get_unused_fd_flags(flags);
     708           0 :         if (fd < 0)
     709             :                 return fd;
     710             : 
     711           0 :         fd_install(fd, dmabuf->file);
     712             : 
     713           0 :         return fd;
     714             : }
     715             : EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
     716             : 
     717             : /**
     718             :  * dma_buf_get - returns the struct dma_buf related to an fd
     719             :  * @fd: [in]    fd associated with the struct dma_buf to be returned
     720             :  *
      721             :  * On success, returns the struct dma_buf associated with @fd; the file's
      722             :  * refcount is increased via fget(). Returns an ERR_PTR
      723             :  * otherwise.
     724             :  */
     725           0 : struct dma_buf *dma_buf_get(int fd)
     726             : {
     727             :         struct file *file;
     728             : 
     729           0 :         file = fget(fd);
     730             : 
     731           0 :         if (!file)
     732             :                 return ERR_PTR(-EBADF);
     733             : 
     734           0 :         if (!is_dma_buf_file(file)) {
     735           0 :                 fput(file);
     736           0 :                 return ERR_PTR(-EINVAL);
     737             :         }
     738             : 
     739           0 :         return file->private_data;
     740             : }
     741             : EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
     742             : 
     743             : /**
     744             :  * dma_buf_put - decreases refcount of the buffer
     745             :  * @dmabuf:     [in]    buffer to reduce refcount of
     746             :  *
     747             :  * Uses file's refcounting done implicitly by fput().
     748             :  *
     749             :  * If, as a result of this call, the refcount becomes 0, the 'release' file
     750             :  * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
     751             :  * in turn, and frees the memory allocated for dmabuf when exported.
     752             :  */
     753           0 : void dma_buf_put(struct dma_buf *dmabuf)
     754             : {
     755           0 :         if (WARN_ON(!dmabuf || !dmabuf->file))
     756             :                 return;
     757             : 
     758           0 :         fput(dmabuf->file);
     759             : }
     760             : EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
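
Together, dma_buf_get() and dma_buf_put() bracket an importer's use of a userspace fd. A hedged in-kernel sketch (my_import() is a hypothetical helper):

        #include <linux/dma-buf.h>

        static int my_import(int fd)
        {
                struct dma_buf *dmabuf = dma_buf_get(fd);  /* takes a file ref */

                if (IS_ERR(dmabuf))
                        return PTR_ERR(dmabuf);

                /* ... dma_buf_attach(), DMA, dma_buf_detach() ... */

                dma_buf_put(dmabuf);  /* dropping the last ref calls ->release() */
                return 0;
        }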
     761             : 
     762             : static void mangle_sg_table(struct sg_table *sg_table)
     763             : {
     764             : #ifdef CONFIG_DMABUF_DEBUG
     765             :         int i;
     766             :         struct scatterlist *sg;
     767             : 
      768             :         /* To catch abuse of the underlying struct page by importers, mix
      769             :          * up the bits, but take care to preserve the low SG_ bits to
      770             :          * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
      771             :          * before passing the sgt back to the exporter. */
     772             :         for_each_sgtable_sg(sg_table, sg, i)
     773             :                 sg->page_link ^= ~0xffUL;
     774             : #endif
      775             : }
      776             : 
      777           0 : static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
     778             :                                        enum dma_data_direction direction)
     779             : {
     780             :         struct sg_table *sg_table;
     781             :         signed long ret;
     782             : 
     783           0 :         sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
     784           0 :         if (IS_ERR_OR_NULL(sg_table))
     785             :                 return sg_table;
     786             : 
     787           0 :         if (!dma_buf_attachment_is_dynamic(attach)) {
     788           0 :                 ret = dma_resv_wait_timeout(attach->dmabuf->resv,
     789             :                                             DMA_RESV_USAGE_KERNEL, true,
     790             :                                             MAX_SCHEDULE_TIMEOUT);
     791           0 :                 if (ret < 0) {
     792           0 :                         attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
     793             :                                                            direction);
     794           0 :                         return ERR_PTR(ret);
     795             :                 }
     796             :         }
     797             : 
     798             :         mangle_sg_table(sg_table);
     799             :         return sg_table;
     800             : }
     801             : 
     802             : /**
     803             :  * DOC: locking convention
     804             :  *
      805             :  * In order to avoid deadlock situations between dma-buf exporters and importers,
     806             :  * all dma-buf API users must follow the common dma-buf locking convention.
     807             :  *
     808             :  * Convention for importers
     809             :  *
     810             :  * 1. Importers must hold the dma-buf reservation lock when calling these
     811             :  *    functions:
     812             :  *
     813             :  *     - dma_buf_pin()
     814             :  *     - dma_buf_unpin()
     815             :  *     - dma_buf_map_attachment()
     816             :  *     - dma_buf_unmap_attachment()
     817             :  *     - dma_buf_vmap()
     818             :  *     - dma_buf_vunmap()
     819             :  *
     820             :  * 2. Importers must not hold the dma-buf reservation lock when calling these
     821             :  *    functions:
     822             :  *
     823             :  *     - dma_buf_attach()
     824             :  *     - dma_buf_dynamic_attach()
     825             :  *     - dma_buf_detach()
     826             :  *     - dma_buf_export()
     827             :  *     - dma_buf_fd()
     828             :  *     - dma_buf_get()
     829             :  *     - dma_buf_put()
     830             :  *     - dma_buf_mmap()
     831             :  *     - dma_buf_begin_cpu_access()
     832             :  *     - dma_buf_end_cpu_access()
     833             :  *     - dma_buf_map_attachment_unlocked()
     834             :  *     - dma_buf_unmap_attachment_unlocked()
     835             :  *     - dma_buf_vmap_unlocked()
     836             :  *     - dma_buf_vunmap_unlocked()
     837             :  *
     838             :  * Convention for exporters
     839             :  *
      840             :  * 1. These &dma_buf_ops callbacks are invoked with the dma-buf reservation
      841             :  *    lock not held, so the exporter can take the lock:
     842             :  *
     843             :  *     - &dma_buf_ops.attach()
     844             :  *     - &dma_buf_ops.detach()
     845             :  *     - &dma_buf_ops.release()
     846             :  *     - &dma_buf_ops.begin_cpu_access()
     847             :  *     - &dma_buf_ops.end_cpu_access()
     848             :  *     - &dma_buf_ops.mmap()
     849             :  *
      850             :  * 2. These &dma_buf_ops callbacks are invoked with the dma-buf reservation
      851             :  *    lock held, so the exporter can't take the lock:
     852             :  *
     853             :  *     - &dma_buf_ops.pin()
     854             :  *     - &dma_buf_ops.unpin()
     855             :  *     - &dma_buf_ops.map_dma_buf()
     856             :  *     - &dma_buf_ops.unmap_dma_buf()
     857             :  *     - &dma_buf_ops.vmap()
     858             :  *     - &dma_buf_ops.vunmap()
     859             :  *
     860             :  * 3. Exporters must hold the dma-buf reservation lock when calling these
     861             :  *    functions:
     862             :  *
     863             :  *     - dma_buf_move_notify()
     864             :  */
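
A hedged importer-side sketch of importer rule 1 above (my_map() is a hypothetical helper; dma_buf_map_attachment_unlocked() wraps the same pattern):

        #include <linux/dma-buf.h>
        #include <linux/dma-resv.h>

        static struct sg_table *my_map(struct dma_buf_attachment *attach)
        {
                struct sg_table *sgt;

                /* The reservation lock must be held around the mapping call. */
                dma_resv_lock(attach->dmabuf->resv, NULL);
                sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
                dma_resv_unlock(attach->dmabuf->resv);

                return sgt;
        }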
     865             : 
     866             : /**
     867             :  * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
     868             :  * @dmabuf:             [in]    buffer to attach device to.
     869             :  * @dev:                [in]    device to be attached.
     870             :  * @importer_ops:       [in]    importer operations for the attachment
     871             :  * @importer_priv:      [in]    importer private pointer for the attachment
     872             :  *
      873             :  * Returns a struct dma_buf_attachment pointer for this attachment. Attachments
      874             :  * must be cleaned up by calling dma_buf_detach().
     875             :  *
     876             :  * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
     877             :  * functionality.
     878             :  *
     879             :  * Returns:
     880             :  *
      881             :  * A pointer to the newly created &dma_buf_attachment on success, or a negative
     882             :  * error code wrapped into a pointer on failure.
     883             :  *
     884             :  * Note that this can fail if the backing storage of @dmabuf is in a place not
     885             :  * accessible to @dev, and cannot be moved to a more suitable place. This is
     886             :  * indicated with the error code -EBUSY.
     887             :  */
     888             : struct dma_buf_attachment *
     889           0 : dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
     890             :                        const struct dma_buf_attach_ops *importer_ops,
     891             :                        void *importer_priv)
     892             : {
     893             :         struct dma_buf_attachment *attach;
     894             :         int ret;
     895             : 
     896           0 :         if (WARN_ON(!dmabuf || !dev))
     897             :                 return ERR_PTR(-EINVAL);
     898             : 
     899           0 :         if (WARN_ON(importer_ops && !importer_ops->move_notify))
     900             :                 return ERR_PTR(-EINVAL);
     901             : 
     902           0 :         attach = kzalloc(sizeof(*attach), GFP_KERNEL);
     903           0 :         if (!attach)
     904             :                 return ERR_PTR(-ENOMEM);
     905             : 
     906           0 :         attach->dev = dev;
     907           0 :         attach->dmabuf = dmabuf;
     908           0 :         if (importer_ops)
     909           0 :                 attach->peer2peer = importer_ops->allow_peer2peer;
     910           0 :         attach->importer_ops = importer_ops;
     911           0 :         attach->importer_priv = importer_priv;
     912             : 
     913           0 :         if (dmabuf->ops->attach) {
     914           0 :                 ret = dmabuf->ops->attach(dmabuf, attach);
     915           0 :                 if (ret)
     916             :                         goto err_attach;
     917             :         }
     918           0 :         dma_resv_lock(dmabuf->resv, NULL);
     919           0 :         list_add(&attach->node, &dmabuf->attachments);
     920           0 :         dma_resv_unlock(dmabuf->resv);
     921             : 
     922             :         /* When either the importer or the exporter can't handle dynamic
     923             :          * mappings we cache the mapping here to avoid issues with the
     924             :          * reservation object lock.
     925             :          */
     926           0 :         if (dma_buf_attachment_is_dynamic(attach) !=
     927           0 :             dma_buf_is_dynamic(dmabuf)) {
     928             :                 struct sg_table *sgt;
     929             : 
     930           0 :                 dma_resv_lock(attach->dmabuf->resv, NULL);
     931           0 :                 if (dma_buf_is_dynamic(attach->dmabuf)) {
     932           0 :                         ret = dmabuf->ops->pin(attach);
     933           0 :                         if (ret)
     934             :                                 goto err_unlock;
     935             :                 }
     936             : 
     937           0 :                 sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
     938           0 :                 if (!sgt)
     939           0 :                         sgt = ERR_PTR(-ENOMEM);
     940           0 :                 if (IS_ERR(sgt)) {
     941           0 :                         ret = PTR_ERR(sgt);
     942             :                         goto err_unpin;
     943             :                 }
     944           0 :                 dma_resv_unlock(attach->dmabuf->resv);
     945           0 :                 attach->sgt = sgt;
     946           0 :                 attach->dir = DMA_BIDIRECTIONAL;
     947             :         }
     948             : 
     949             :         return attach;
     950             : 
     951             : err_attach:
     952           0 :         kfree(attach);
     953           0 :         return ERR_PTR(ret);
     954             : 
     955             : err_unpin:
     956           0 :         if (dma_buf_is_dynamic(attach->dmabuf))
     957           0 :                 dmabuf->ops->unpin(attach);
     958             : 
     959             : err_unlock:
     960           0 :         dma_resv_unlock(attach->dmabuf->resv);
     961             : 
     962           0 :         dma_buf_detach(dmabuf, attach);
     963           0 :         return ERR_PTR(ret);
     964             : }
     965             : EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
     966             : 
     967             : /**
     968             :  * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
     969             :  * @dmabuf:     [in]    buffer to attach device to.
     970             :  * @dev:        [in]    device to be attached.
     971             :  *
     972             :  * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
     973             :  * mapping.
     974             :  */
     975           0 : struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
     976             :                                           struct device *dev)
     977             : {
     978           0 :         return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
     979             : }
     980             : EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
     981             : 
     982             : static void __unmap_dma_buf(struct dma_buf_attachment *attach,
     983             :                             struct sg_table *sg_table,
     984             :                             enum dma_data_direction direction)
     985             : {
     986             :         /* uses XOR, hence this unmangles */
     987           0 :         mangle_sg_table(sg_table);
     988             : 
     989           0 :         attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
     990             : }
     991             : 
     992             : /**
     993             :  * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
     994             :  * @dmabuf:     [in]    buffer to detach from.
     995             :  * @attach:     [in]    attachment to be detached; is free'd after this call.
     996             :  *
     997             :  * Clean up a device attachment obtained by calling dma_buf_attach().
     998             :  *
     999             :  * Optionally this calls &dma_buf_ops.detach for device-specific detach.
    1000             :  */
    1001           0 : void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
    1002             : {
    1003           0 :         if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
    1004             :                 return;
    1005             : 
    1006           0 :         dma_resv_lock(dmabuf->resv, NULL);
    1007             : 
    1008           0 :         if (attach->sgt) {
    1009             : 
    1010           0 :                 __unmap_dma_buf(attach, attach->sgt, attach->dir);
    1011             : 
    1012           0 :                 if (dma_buf_is_dynamic(attach->dmabuf))
    1013           0 :                         dmabuf->ops->unpin(attach);
    1014             :         }
    1015           0 :         list_del(&attach->node);
    1016             : 
    1017           0 :         dma_resv_unlock(dmabuf->resv);
    1018             : 
    1019           0 :         if (dmabuf->ops->detach)
    1020           0 :                 dmabuf->ops->detach(dmabuf, attach);
    1021             : 
    1022           0 :         kfree(attach);
    1023             : }
    1024             : EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
    1025             : 
    1026             : /**
    1027             :  * dma_buf_pin - Lock down the DMA-buf
    1028             :  * @attach:     [in]    attachment which should be pinned
    1029             :  *
    1030             :  * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
    1031             :  * call this, and only for limited use cases like scanout and not for temporary
    1032             :  * pin operations. It is not permitted to allow userspace to pin arbitrary
    1033             :  * amounts of buffers through this interface.
    1034             :  *
    1035             :  * Buffers must be unpinned by calling dma_buf_unpin().
    1036             :  *
    1037             :  * Returns:
    1038             :  * 0 on success, negative error code on failure.
    1039             :  */
    1040           0 : int dma_buf_pin(struct dma_buf_attachment *attach)
    1041             : {
    1042           0 :         struct dma_buf *dmabuf = attach->dmabuf;
    1043           0 :         int ret = 0;
    1044             : 
    1045           0 :         WARN_ON(!dma_buf_attachment_is_dynamic(attach));
    1046             : 
    1047             :         dma_resv_assert_held(dmabuf->resv);
    1048             : 
    1049           0 :         if (dmabuf->ops->pin)
    1050           0 :                 ret = dmabuf->ops->pin(attach);
    1051             : 
    1052           0 :         return ret;
    1053             : }
    1054             : EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
    1055             : 
    1056             : /**
    1057             :  * dma_buf_unpin - Unpin a DMA-buf
    1058             :  * @attach:     [in]    attachment which should be unpinned
    1059             :  *
    1060             :  * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
    1061             :  * any mapping of @attach again and inform the importer through
    1062             :  * &dma_buf_attach_ops.move_notify.
    1063             :  */
    1064           0 : void dma_buf_unpin(struct dma_buf_attachment *attach)
    1065             : {
    1066           0 :         struct dma_buf *dmabuf = attach->dmabuf;
    1067             : 
    1068           0 :         WARN_ON(!dma_buf_attachment_is_dynamic(attach));
    1069             : 
    1070             :         dma_resv_assert_held(dmabuf->resv);
    1071             : 
    1072           0 :         if (dmabuf->ops->unpin)
    1073           0 :                 dmabuf->ops->unpin(attach);
    1074           0 : }
    1075             : EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
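                     : 
                     : /*
                     :  * Illustrative sketch, not part of the original file: pinning a
                     :  * dynamically attached buffer for scanout. dma_buf_pin() and
                     :  * dma_buf_unpin() require dmabuf->resv to be held, so this
                     :  * hypothetical helper takes the reservation lock around the call.
                     :  */
                     : static int my_pin_for_scanout(struct dma_buf_attachment *attach)
                     : {
                     :         int ret;
                     : 
                     :         dma_resv_lock(attach->dmabuf->resv, NULL);
                     :         ret = dma_buf_pin(attach);
                     :         dma_resv_unlock(attach->dmabuf->resv);
                     : 
                     :         return ret;
                     : }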
    1076             : 
    1077             : /**
    1078             :  * dma_buf_map_attachment - Returns the scatterlist table of the attachment,
    1079             :  * mapped into _device_ address space. This is a wrapper for the map_dma_buf()
    1080             :  * operation of the dma_buf_ops.
    1081             :  * @attach:     [in]    attachment whose scatterlist is to be returned
    1082             :  * @direction:  [in]    direction of DMA transfer
    1083             :  *
    1084             :  * Returns the sg_table containing the scatterlist for DMA, or an ERR_PTR
    1085             :  * on error. May return -EINTR if it is interrupted by a signal.
    1086             :  *
    1087             :  * On success, the DMA addresses and lengths in the returned scatterlist are
    1088             :  * PAGE_SIZE aligned.
    1089             :  *
    1090             :  * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
    1091             :  * the underlying backing storage is pinned for as long as a mapping exists;
    1092             :  * therefore users/importers should not hold onto a mapping for undue amounts of
    1093             :  * time.
    1094             :  *
    1095             :  * Important: Dynamic importers must wait for the exclusive fence of the struct
    1096             :  * dma_resv attached to the DMA-BUF first.
    1097             :  */
    1098           0 : struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
    1099             :                                         enum dma_data_direction direction)
    1100             : {
    1101             :         struct sg_table *sg_table;
    1102             :         int r;
    1103             : 
    1104             :         might_sleep();
    1105             : 
    1106           0 :         if (WARN_ON(!attach || !attach->dmabuf))
    1107             :                 return ERR_PTR(-EINVAL);
    1108             : 
    1109           0 :         dma_resv_assert_held(attach->dmabuf->resv);
    1110             : 
    1111           0 :         if (attach->sgt) {
    1112             :                 /*
    1113             :                  * Two mappings with different directions for the same
    1114             :                  * attachment are not allowed.
    1115             :                  */
    1116           0 :                 if (attach->dir != direction &&
    1117             :                     attach->dir != DMA_BIDIRECTIONAL)
    1118             :                         return ERR_PTR(-EBUSY);
    1119             : 
    1120           0 :                 return attach->sgt;
    1121             :         }
    1122             : 
    1123           0 :         if (dma_buf_is_dynamic(attach->dmabuf)) {
    1124             :                 if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
    1125           0 :                         r = attach->dmabuf->ops->pin(attach);
    1126           0 :                         if (r)
    1127           0 :                                 return ERR_PTR(r);
    1128             :                 }
    1129             :         }
    1130             : 
    1131           0 :         sg_table = __map_dma_buf(attach, direction);
    1132           0 :         if (!sg_table)
    1133           0 :                 sg_table = ERR_PTR(-ENOMEM);
    1134             : 
    1135           0 :         if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
    1136             :              !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
    1137           0 :                 attach->dmabuf->ops->unpin(attach);
    1138             : 
    1139           0 :         if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
    1140           0 :                 attach->sgt = sg_table;
    1141           0 :                 attach->dir = direction;
    1142             :         }
    1143             : 
    1144             : #ifdef CONFIG_DMA_API_DEBUG
    1145             :         if (!IS_ERR(sg_table)) {
    1146             :                 struct scatterlist *sg;
    1147             :                 u64 addr;
    1148             :                 int len;
    1149             :                 int i;
    1150             : 
    1151             :                 for_each_sgtable_dma_sg(sg_table, sg, i) {
    1152             :                         addr = sg_dma_address(sg);
    1153             :                         len = sg_dma_len(sg);
    1154             :                         if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
    1155             :                                 pr_debug("%s: addr %llx or len %x is not page aligned!\n",
    1156             :                                          __func__, addr, len);
    1157             :                         }
    1158             :                 }
    1159             :         }
    1160             : #endif /* CONFIG_DMA_API_DEBUG */
    1161             :         return sg_table;
    1162             : }
    1163             : EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
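                     : 
                     : /*
                     :  * Illustrative sketch, not part of the original file: a dynamic
                     :  * importer maps while holding the reservation lock and, per the note
                     :  * above, waits for the kernel fences before using the mapping.
                     :  * "my_dynamic_map" is hypothetical and keeps error handling minimal.
                     :  */
                     : static struct sg_table *my_dynamic_map(struct dma_buf_attachment *attach)
                     : {
                     :         struct dma_buf *dmabuf = attach->dmabuf;
                     :         struct sg_table *sgt;
                     :         long ret;
                     : 
                     :         dma_resv_lock(dmabuf->resv, NULL);
                     : 
                     :         sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
                     :         if (IS_ERR(sgt))
                     :                 goto out;
                     : 
                     :         /* Wait for moves/clears tracked in the reservation object. */
                     :         ret = dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_KERNEL,
                     :                                     true, MAX_SCHEDULE_TIMEOUT);
                     :         if (ret < 0) {
                     :                 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
                     :                 sgt = ERR_PTR(ret);
                     :         }
                     : out:
                     :         dma_resv_unlock(dmabuf->resv);
                     :         return sgt;
                     : }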
    1164             : 
    1165             : /**
    1166             :  * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the
    1167             :  * attachment, mapped into _device_ address space. This is a wrapper for the
    1168             :  * map_dma_buf() operation of the dma_buf_ops.
    1169             :  * @attach:     [in]    attachment whose scatterlist is to be returned
    1170             :  * @direction:  [in]    direction of DMA transfer
    1171             :  *
    1172             :  * Unlocked variant of dma_buf_map_attachment().
    1173             :  */
    1174             : struct sg_table *
    1175           0 : dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
    1176             :                                 enum dma_data_direction direction)
    1177             : {
    1178             :         struct sg_table *sg_table;
    1179             : 
    1180             :         might_sleep();
    1181             : 
    1182           0 :         if (WARN_ON(!attach || !attach->dmabuf))
    1183             :                 return ERR_PTR(-EINVAL);
    1184             : 
    1185           0 :         dma_resv_lock(attach->dmabuf->resv, NULL);
    1186           0 :         sg_table = dma_buf_map_attachment(attach, direction);
    1187           0 :         dma_resv_unlock(attach->dmabuf->resv);
    1188             : 
    1189           0 :         return sg_table;
    1190             : }
    1191             : EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
    1192             : 
    1193             : /**
    1194             :  * dma_buf_unmap_attachment - unmaps and decreases the usecount of the buffer;
    1195             :  * might deallocate the associated scatterlist. This is a wrapper for the
    1196             :  * unmap_dma_buf() operation of the dma_buf_ops.
    1197             :  * @attach:     [in]    attachment to unmap buffer from
    1198             :  * @sg_table:   [in]    scatterlist info of the buffer to unmap
    1199             :  * @direction:  [in]    direction of DMA transfer
    1200             :  *
    1201             :  * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
    1202             :  */
    1203           0 : void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
    1204             :                                 struct sg_table *sg_table,
    1205             :                                 enum dma_data_direction direction)
    1206             : {
    1207             :         might_sleep();
    1208             : 
    1209           0 :         if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
    1210             :                 return;
    1211             : 
    1212           0 :         dma_resv_assert_held(attach->dmabuf->resv);
    1213             : 
    1214           0 :         if (attach->sgt == sg_table)
    1215             :                 return;
    1216             : 
    1217           0 :         __unmap_dma_buf(attach, sg_table, direction);
    1218             : 
    1219           0 :         if (dma_buf_is_dynamic(attach->dmabuf) &&
    1220             :             !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
    1221           0 :                 dma_buf_unpin(attach);
    1222             : }
    1223             : EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
    1224             : 
    1225             : /**
    1226             :  * dma_buf_unmap_attachment_unlocked - unmaps and decreases the usecount of the
    1227             :  * buffer; might deallocate the associated scatterlist. This is a wrapper for
    1228             :  * the unmap_dma_buf() operation of the dma_buf_ops.
    1229             :  * @attach:     [in]    attachment to unmap buffer from
    1230             :  * @sg_table:   [in]    scatterlist info of the buffer to unmap
    1231             :  * @direction:  [in]    direction of DMA transfer
    1232             :  *
    1233             :  * Unlocked variant of dma_buf_unmap_attachment().
    1234             :  */
    1235           0 : void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
    1236             :                                        struct sg_table *sg_table,
    1237             :                                        enum dma_data_direction direction)
    1238             : {
    1239             :         might_sleep();
    1240             : 
    1241           0 :         if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
    1242             :                 return;
    1243             : 
    1244           0 :         dma_resv_lock(attach->dmabuf->resv, NULL);
    1245           0 :         dma_buf_unmap_attachment(attach, sg_table, direction);
    1246           0 :         dma_resv_unlock(attach->dmabuf->resv);
    1247             : }
    1248             : EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
    1249             : 
    1250             : /**
    1251             :  * dma_buf_move_notify - notify attachments that DMA-buf is moving
    1252             :  *
    1253             :  * @dmabuf:     [in]    buffer which is moving
    1254             :  *
    1255             :  * Informs all attachments that they need to destroy and recreate all their
    1256             :  * mappings.
    1257             :  */
    1258           0 : void dma_buf_move_notify(struct dma_buf *dmabuf)
    1259             : {
    1260             :         struct dma_buf_attachment *attach;
    1261             : 
    1262             :         dma_resv_assert_held(dmabuf->resv);
    1263             : 
    1264           0 :         list_for_each_entry(attach, &dmabuf->attachments, node)
    1265           0 :                 if (attach->importer_ops)
    1266           0 :                         attach->importer_ops->move_notify(attach);
    1267           0 : }
    1268             : EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
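                     : 
                     : /*
                     :  * Illustrative sketch, not part of the original file: an exporter
                     :  * about to move a buffer informs all dynamic importers so they can
                     :  * drop their mappings; dmabuf->resv must already be held here.
                     :  * "my_evict" is hypothetical.
                     :  */
                     : static void my_evict(struct dma_buf *dmabuf)
                     : {
                     :         dma_resv_assert_held(dmabuf->resv);
                     : 
                     :         dma_buf_move_notify(dmabuf);
                     : 
                     :         /* ... now actually move or evict the backing storage ... */
                     : }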
    1269             : 
    1270             : /**
    1271             :  * DOC: cpu access
    1272             :  *
    1273             :  * There are multiple reasons for supporting CPU access to a dma buffer object:
    1274             :  *
    1275             :  * - Fallback operations in the kernel, for example when a device is connected
    1276             :  *   over USB and the kernel needs to shuffle the data around first before
    1277             :  *   sending it away. Cache coherency is handled by bracketing any transactions
    1278             :  *   with calls to dma_buf_begin_cpu_access() and
    1279             :  *   dma_buf_end_cpu_access().
    1280             :  *
    1281             :  *   Since most kernel-internal dma-buf accesses need the entire buffer, a
    1282             :  *   vmap interface is introduced. Note that on very old 32-bit architectures
    1283             :  *   vmalloc space might be limited and result in vmap calls failing.
    1284             :  *
    1285             :  *   Interfaces::
    1286             :  *
    1287             :  *      int dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
    1288             :  *      void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
    1289             :  *
    1290             :  *   The vmap call can fail if there is no vmap support in the exporter, or if
    1291             :  *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
    1292             :  *   count for all vmap access and calls down into the exporter's vmap function
    1293             :  *   only when no vmapping exists, and only unmaps it once. Protection against
    1294             :  *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
    1295             :  *
    1296             :  * - For full compatibility on the importer side with existing userspace
    1297             :  *   interfaces, which might already support mmap'ing buffers. This is needed in
    1298             :  *   many processing pipelines (e.g. feeding a software rendered image into a
    1299             :  *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
    1300             :  *   framework already supported this, and mmap support was needed for DMA
    1301             :  *   buffer file descriptors to replace ION buffers.
    1302             :  *
    1303             :  *   There are no special interfaces; userspace simply calls mmap on the dma-buf
    1304             :  *   fd. But as with CPU access in the kernel, the actual access must be
    1305             :  *   bracketed, which is handled by the DMA_BUF_IOCTL_SYNC ioctl. Note that
    1306             :  *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
    1307             :  *   be restarted.
    1308             :  *
    1309             :  *   Some systems might need cache coherency management, e.g. when CPU and GPU
    1310             :  *   domains are accessed through dma-buf at the same time. To handle this
    1311             :  *   there are begin/end coherency markers that forward directly to the
    1312             :  *   existing vfunc hooks of the dma-buf device drivers. Userspace can make
    1313             :  *   use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. A typical
    1314             :  *   sequence looks like the following:
    1315             :  *
    1316             :  *     - mmap dma-buf fd
    1317             :  *     - for each CPU drawing/upload cycle: 1. SYNC_START ioctl, 2. read/write
    1318             :  *       to the mmap area, 3. SYNC_END ioctl. This can be repeated as often as
    1319             :  *       needed (with the new data being consumed by, say, the GPU or the
    1320             :  *       scanout device)
    1321             :  *     - munmap once you don't need the buffer any more
    1322             :  *
    1323             :  *   For correctness and optimal performance, always use SYNC_START and
    1324             :  *   SYNC_END before and after, respectively, when accessing the mapped
    1325             :  *   address; a minimal userspace sketch follows this comment block. Userspace
    1326             :  *   cannot rely on coherent access even where it works without these ioctls.
    1327             :  *
    1328             :  * - And as a CPU fallback in userspace processing pipelines.
    1329             :  *
    1330             :  *   Similar to the motivation for kernel cpu access it is again important that
    1331             :  *   the userspace code of a given importing subsystem can use the same
    1332             :  *   interfaces with an imported dma-buf buffer object as with a native buffer
    1333             :  *   object. This is especially important for drm, where the userspace part of
    1334             :  *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
    1335             :  *   use a different way to mmap a buffer would be rather invasive.
    1336             :  *
    1337             :  *   The assumption in the current dma-buf interfaces is that redirecting the
    1338             :  *   initial mmap is all that's needed. A survey of some of the existing
    1339             :  *   subsystems shows that no driver seems to do any nefarious thing like
    1340             :  *   syncing up with outstanding asynchronous processing on the device or
    1341             :  *   allocating special resources at fault time. So hopefully this is good
    1342             :  *   enough, since adding interfaces to intercept pagefaults and allow pte
    1343             :  *   shootdowns would increase the complexity quite a bit.
    1344             :  *
    1345             :  *   Interface::
    1346             :  *
    1347             :  *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
    1348             :  *                       unsigned long);
    1349             :  *
    1350             :  *   If the importing subsystem simply provides a special-purpose mmap call to
    1351             :  *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
    1352             :  *   equally achieve that for a dma-buf object.
    1353             :  */
    1354             : 
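                     : /*
                     :  * Illustrative userspace sketch, not part of the original file, for
                     :  * the SYNC_START/SYNC_END sequence described above. "fd" is an
                     :  * already received dma-buf file descriptor and "len" its size; both
                     :  * are assumed. DMA_BUF_IOCTL_SYNC is restarted on EAGAIN/EINTR as
                     :  * required.
                     :  */
                     : #include <errno.h>
                     : #include <string.h>
                     : #include <sys/ioctl.h>
                     : #include <sys/mman.h>
                     : #include <linux/dma-buf.h>
                     : 
                     : static int cpu_fill(int fd, size_t len)
                     : {
                     :         struct dma_buf_sync sync = { .flags = 0 };
                     :         void *ptr;
                     :         int ret;
                     : 
                     :         ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
                     :         if (ptr == MAP_FAILED)
                     :                 return -errno;
                     : 
                     :         sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
                     :         do {
                     :                 ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
                     :         } while (ret && (errno == EAGAIN || errno == EINTR));
                     : 
                     :         memset(ptr, 0, len);    /* the actual CPU access */
                     : 
                     :         sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
                     :         do {
                     :                 ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
                     :         } while (ret && (errno == EAGAIN || errno == EINTR));
                     : 
                     :         munmap(ptr, len);
                     :         return 0;
                     : }
                     : 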
    1355             : static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
    1356             :                                       enum dma_data_direction direction)
    1357             : {
    1358           0 :         bool write = (direction == DMA_BIDIRECTIONAL ||
    1359             :                       direction == DMA_TO_DEVICE);
    1360           0 :         struct dma_resv *resv = dmabuf->resv;
    1361             :         long ret;
    1362             : 
    1363             :         /* Wait on any implicit rendering fences */
    1364           0 :         ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
    1365             :                                     true, MAX_SCHEDULE_TIMEOUT);
    1366           0 :         if (ret < 0)
    1367           0 :                 return ret;
    1368             : 
    1369             :         return 0;
    1370             : }
    1371             : 
    1372             : /**
    1373             :  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
    1374             :  * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
    1375             :  * preparations. Coherency is only guaranteed in the specified range for the
    1376             :  * specified access direction.
    1377             :  * @dmabuf:     [in]    buffer to prepare cpu access for.
    1378             :  * @direction:  [in]    direction of access.
    1379             :  *
    1380             :  * After the cpu access is complete the caller should call
    1381             :  * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
    1382             :  * it guaranteed to be coherent with other DMA access.
    1383             :  *
    1384             :  * This function will also wait for any DMA transactions tracked through
    1385             :  * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
    1386             :  * synchronization this function will only ensure cache coherency, callers must
    1387             :  * ensure synchronization with such DMA transactions on their own.
    1388             :  *
    1389             :  * Can return negative error values, returns 0 on success.
    1390             :  */
    1391           0 : int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
    1392             :                              enum dma_data_direction direction)
    1393             : {
    1394           0 :         int ret = 0;
    1395             : 
    1396           0 :         if (WARN_ON(!dmabuf))
    1397             :                 return -EINVAL;
    1398             : 
    1399             :         might_lock(&dmabuf->resv->lock.base);
    1400             : 
    1401           0 :         if (dmabuf->ops->begin_cpu_access)
    1402           0 :                 ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
    1403             : 
    1404             :         /* Ensure that all fences are waited upon - but we first allow
    1405             :          * the native handler the chance to do so more efficiently if it
    1406             :          * chooses. A double invocation here will be a reasonably cheap no-op.
    1407             :          */
    1408           0 :         if (ret == 0)
    1409           0 :                 ret = __dma_buf_begin_cpu_access(dmabuf, direction);
    1410             : 
    1411             :         return ret;
    1412             : }
    1413             : EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
    1414             : 
    1415             : /**
    1416             :  * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
    1417             :  * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
    1418             :  * actions. Coherency is only guaranteed in the specified range for the
    1419             :  * specified access direction.
    1420             :  * @dmabuf:     [in]    buffer to complete cpu access for.
    1421             :  * @direction:  [in]    direction of access.
    1422             :  *
    1423             :  * This terminates CPU access started with dma_buf_begin_cpu_access().
    1424             :  *
    1425             :  * Can return negative error values, returns 0 on success.
    1426             :  */
    1427           0 : int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
    1428             :                            enum dma_data_direction direction)
    1429             : {
    1430           0 :         int ret = 0;
    1431             : 
    1432           0 :         WARN_ON(!dmabuf);
    1433             : 
    1434             :         might_lock(&dmabuf->resv->lock.base);
    1435             : 
    1436           0 :         if (dmabuf->ops->end_cpu_access)
    1437           0 :                 ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
    1438             : 
    1439           0 :         return ret;
    1440             : }
    1441             : EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
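                     : 
                     : /*
                     :  * Illustrative sketch, not part of the original file: kernel CPU
                     :  * access bracketed by dma_buf_begin_cpu_access() and
                     :  * dma_buf_end_cpu_access(), reading through a vmap of the whole
                     :  * buffer. "my_cpu_read" is hypothetical.
                     :  */
                     : static int my_cpu_read(struct dma_buf *dmabuf, void *dst, size_t len)
                     : {
                     :         struct iosys_map map;
                     :         int ret;
                     : 
                     :         ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
                     :         if (ret)
                     :                 return ret;
                     : 
                     :         ret = dma_buf_vmap_unlocked(dmabuf, &map);
                     :         if (!ret) {
                     :                 /* Only system memory is handled in this sketch. */
                     :                 if (!map.is_iomem)
                     :                         memcpy(dst, map.vaddr, len);
                     :                 dma_buf_vunmap_unlocked(dmabuf, &map);
                     :         }
                     : 
                     :         dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
                     :         return ret;
                     : }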
    1442             : 
    1443             : 
    1444             : /**
    1445             :  * dma_buf_mmap - Set up a userspace mmap with the given vma
    1446             :  * @dmabuf:     [in]    buffer that should back the vma
    1447             :  * @vma:        [in]    vma for the mmap
    1448             :  * @pgoff:      [in]    offset in pages where this mmap should start within the
    1449             :  *                      dma-buf buffer.
    1450             :  *
    1451             :  * This function adjusts the passed-in vma so that it points at the file of the
    1452             :  * dma_buf operation. It also adjusts the starting pgoff and does bounds
    1453             :  * checking on the size of the vma. Then it calls the exporter's mmap function to
    1454             :  * set up the mapping.
    1455             :  *
    1456             :  * Can return negative error values, returns 0 on success.
    1457             :  */
    1458           0 : int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
    1459             :                  unsigned long pgoff)
    1460             : {
    1461           0 :         if (WARN_ON(!dmabuf || !vma))
    1462             :                 return -EINVAL;
    1463             : 
    1464             :         /* check if buffer supports mmap */
    1465           0 :         if (!dmabuf->ops->mmap)
    1466             :                 return -EINVAL;
    1467             : 
    1468             :         /* check for offset overflow */
    1469           0 :         if (pgoff + vma_pages(vma) < pgoff)
    1470             :                 return -EOVERFLOW;
    1471             : 
    1472             :         /* check for overflowing the buffer's size */
    1473           0 :         if (pgoff + vma_pages(vma) >
    1474           0 :             dmabuf->size >> PAGE_SHIFT)
    1475             :                 return -EINVAL;
    1476             : 
    1477             :         /* readjust the vma */
    1478           0 :         vma_set_file(vma, dmabuf->file);
    1479           0 :         vma->vm_pgoff = pgoff;
    1480             : 
    1481           0 :         return dmabuf->ops->mmap(dmabuf, vma);
    1482             : }
    1483             : EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
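                     : 
                     : /*
                     :  * Illustrative sketch, not part of the original file: an importing
                     :  * driver forwarding its own mmap file operation to the dma-buf.
                     :  * "struct my_obj" and "my_mmap" are hypothetical.
                     :  */
                     : struct my_obj {
                     :         struct dma_buf *dmabuf;
                     : };
                     : 
                     : static int my_mmap(struct file *file, struct vm_area_struct *vma)
                     : {
                     :         struct my_obj *obj = file->private_data;
                     : 
                     :         /* Map the whole buffer, starting at page offset 0. */
                     :         return dma_buf_mmap(obj->dmabuf, vma, 0);
                     : }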
    1484             : 
    1485             : /**
    1486             :  * dma_buf_vmap - Create a virtual mapping of the buffer object in kernel
    1487             :  * address space. Same restrictions as for vmap and friends apply.
    1488             :  * @dmabuf:     [in]    buffer to vmap
    1489             :  * @map:        [out]   returns the vmap pointer
    1490             :  *
    1491             :  * This call may fail due to lack of virtual mapping address space.
    1492             :  * These calls are optional in drivers. The intended use is to map
    1493             :  * objects linearly into kernel space for frequently accessed objects.
    1494             :  *
    1495             :  * To ensure coherency users must call dma_buf_begin_cpu_access() and
    1496             :  * dma_buf_end_cpu_access() around any cpu access performed through this
    1497             :  * mapping.
    1498             :  *
    1499             :  * Returns 0 on success, or a negative errno code otherwise.
    1500             :  */
    1501           0 : int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
    1502             : {
    1503             :         struct iosys_map ptr;
    1504             :         int ret;
    1505             : 
    1506           0 :         iosys_map_clear(map);
    1507             : 
    1508           0 :         if (WARN_ON(!dmabuf))
    1509             :                 return -EINVAL;
    1510             : 
    1511             :         dma_resv_assert_held(dmabuf->resv);
    1512             : 
    1513           0 :         if (!dmabuf->ops->vmap)
    1514             :                 return -EINVAL;
    1515             : 
    1516           0 :         if (dmabuf->vmapping_counter) {
    1517           0 :                 dmabuf->vmapping_counter++;
    1518           0 :                 BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
    1519           0 :                 *map = dmabuf->vmap_ptr;
    1520           0 :                 return 0;
    1521             :         }
    1522             : 
    1523           0 :         BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
    1524             : 
    1525           0 :         ret = dmabuf->ops->vmap(dmabuf, &ptr);
    1526           0 :         if (WARN_ON_ONCE(ret))
    1527             :                 return ret;
    1528             : 
    1529           0 :         dmabuf->vmap_ptr = ptr;
    1530           0 :         dmabuf->vmapping_counter = 1;
    1531             : 
    1532           0 :         *map = dmabuf->vmap_ptr;
    1533             : 
    1534           0 :         return 0;
    1535             : }
    1536             : EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
    1537             : 
    1538             : /**
    1539             :  * dma_buf_vmap_unlocked - Create a virtual mapping of the buffer object in
    1540             :  * kernel address space. Same restrictions as for vmap and friends apply.
    1541             :  * @dmabuf:     [in]    buffer to vmap
    1542             :  * @map:        [out]   returns the vmap pointer
    1543             :  *
    1544             :  * Unlocked version of dma_buf_vmap().
    1545             :  *
    1546             :  * Returns 0 on success, or a negative errno code otherwise.
    1547             :  */
    1548           0 : int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
    1549             : {
    1550             :         int ret;
    1551             : 
    1552           0 :         iosys_map_clear(map);
    1553             : 
    1554           0 :         if (WARN_ON(!dmabuf))
    1555             :                 return -EINVAL;
    1556             : 
    1557           0 :         dma_resv_lock(dmabuf->resv, NULL);
    1558           0 :         ret = dma_buf_vmap(dmabuf, map);
    1559           0 :         dma_resv_unlock(dmabuf->resv);
    1560             : 
    1561           0 :         return ret;
    1562             : }
    1563             : EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
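                     : 
                     : /*
                     :  * Illustrative sketch, not part of the original file: clearing a
                     :  * buffer through the unlocked vmap helpers. The iosys_map union
                     :  * distinguishes I/O from system memory; "my_clear_buffer" is
                     :  * hypothetical.
                     :  */
                     : static int my_clear_buffer(struct dma_buf *dmabuf)
                     : {
                     :         struct iosys_map map;
                     :         int ret;
                     : 
                     :         ret = dma_buf_vmap_unlocked(dmabuf, &map);
                     :         if (ret)
                     :                 return ret;
                     : 
                     :         if (map.is_iomem)
                     :                 memset_io(map.vaddr_iomem, 0, dmabuf->size);
                     :         else
                     :                 memset(map.vaddr, 0, dmabuf->size);
                     : 
                     :         dma_buf_vunmap_unlocked(dmabuf, &map);
                     :         return 0;
                     : }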
    1564             : 
    1565             : /**
    1566             :  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
    1567             :  * @dmabuf:     [in]    buffer to vunmap
    1568             :  * @map:        [in]    vmap pointer to vunmap
    1569             :  */
    1570           0 : void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
    1571             : {
    1572           0 :         if (WARN_ON(!dmabuf))
    1573             :                 return;
    1574             : 
    1575             :         dma_resv_assert_held(dmabuf->resv);
    1576             : 
    1577           0 :         BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
    1578           0 :         BUG_ON(dmabuf->vmapping_counter == 0);
    1579           0 :         BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
    1580             : 
    1581           0 :         if (--dmabuf->vmapping_counter == 0) {
    1582           0 :                 if (dmabuf->ops->vunmap)
    1583           0 :                         dmabuf->ops->vunmap(dmabuf, map);
    1584           0 :                 iosys_map_clear(&dmabuf->vmap_ptr);
    1585             :         }
    1586             : }
    1587             : EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
    1588             : 
    1589             : /**
    1590             :  * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
    1591             :  * @dmabuf:     [in]    buffer to vunmap
    1592             :  * @map:        [in]    vmap pointer to vunmap
    1593             :  */
    1594           0 : void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
    1595             : {
    1596           0 :         if (WARN_ON(!dmabuf))
    1597             :                 return;
    1598             : 
    1599           0 :         dma_resv_lock(dmabuf->resv, NULL);
    1600           0 :         dma_buf_vunmap(dmabuf, map);
    1601           0 :         dma_resv_unlock(dmabuf->resv);
    1602             : }
    1603             : EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
    1604             : 
    1605             : #ifdef CONFIG_DEBUG_FS
    1606             : static int dma_buf_debug_show(struct seq_file *s, void *unused)
    1607             : {
    1608             :         struct dma_buf *buf_obj;
    1609             :         struct dma_buf_attachment *attach_obj;
    1610             :         int count = 0, attach_count;
    1611             :         size_t size = 0;
    1612             :         int ret;
    1613             : 
    1614             :         ret = mutex_lock_interruptible(&db_list.lock);
    1615             : 
    1616             :         if (ret)
    1617             :                 return ret;
    1618             : 
    1619             :         seq_puts(s, "\nDma-buf Objects:\n");
    1620             :         seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
    1621             :                    "size", "flags", "mode", "count", "ino");
    1622             : 
    1623             :         list_for_each_entry(buf_obj, &db_list.head, list_node) {
    1624             : 
    1625             :                 ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
    1626             :                 if (ret)
    1627             :                         goto error_unlock;
    1628             : 
    1629             : 
    1630             :                 spin_lock(&buf_obj->name_lock);
    1631             :                 seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
    1632             :                                 buf_obj->size,
    1633             :                                 buf_obj->file->f_flags, buf_obj->file->f_mode,
    1634             :                                 file_count(buf_obj->file),
    1635             :                                 buf_obj->exp_name,
    1636             :                                 file_inode(buf_obj->file)->i_ino,
    1637             :                                 buf_obj->name ?: "<none>");
    1638             :                 spin_unlock(&buf_obj->name_lock);
    1639             : 
    1640             :                 dma_resv_describe(buf_obj->resv, s);
    1641             : 
    1642             :                 seq_puts(s, "\tAttached Devices:\n");
    1643             :                 attach_count = 0;
    1644             : 
    1645             :                 list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
    1646             :                         seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
    1647             :                         attach_count++;
    1648             :                 }
    1649             :                 dma_resv_unlock(buf_obj->resv);
    1650             : 
    1651             :                 seq_printf(s, "Total %d devices attached\n\n",
    1652             :                                 attach_count);
    1653             : 
    1654             :                 count++;
    1655             :                 size += buf_obj->size;
    1656             :         }
    1657             : 
    1658             :         seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
    1659             : 
    1660             :         mutex_unlock(&db_list.lock);
    1661             :         return 0;
    1662             : 
    1663             : error_unlock:
    1664             :         mutex_unlock(&db_list.lock);
    1665             :         return ret;
    1666             : }
    1667             : 
    1668             : DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
    1669             : 
    1670             : static struct dentry *dma_buf_debugfs_dir;
    1671             : 
    1672             : static int dma_buf_init_debugfs(void)
    1673             : {
    1674             :         struct dentry *d;
    1675             :         int err = 0;
    1676             : 
    1677             :         d = debugfs_create_dir("dma_buf", NULL);
    1678             :         if (IS_ERR(d))
    1679             :                 return PTR_ERR(d);
    1680             : 
    1681             :         dma_buf_debugfs_dir = d;
    1682             : 
    1683             :         d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
    1684             :                                 NULL, &dma_buf_debug_fops);
    1685             :         if (IS_ERR(d)) {
    1686             :                 pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
    1687             :                 debugfs_remove_recursive(dma_buf_debugfs_dir);
    1688             :                 dma_buf_debugfs_dir = NULL;
    1689             :                 err = PTR_ERR(d);
    1690             :         }
    1691             : 
    1692             :         return err;
    1693             : }
    1694             : 
    1695             : static void dma_buf_uninit_debugfs(void)
    1696             : {
    1697             :         debugfs_remove_recursive(dma_buf_debugfs_dir);
    1698             : }
    1699             : #else
    1700             : static inline int dma_buf_init_debugfs(void)
    1701             : {
    1702             :         return 0;
    1703             : }
    1704             : static inline void dma_buf_uninit_debugfs(void)
    1705             : {
    1706             : }
    1707             : #endif
    1708             : 
    1709           1 : static int __init dma_buf_init(void)
    1710             : {
    1711             :         int ret;
    1712             : 
    1713           1 :         ret = dma_buf_init_sysfs_statistics();
    1714             :         if (ret)
    1715             :                 return ret;
    1716             : 
    1717           1 :         dma_buf_mnt = kern_mount(&dma_buf_fs_type);
    1718           2 :         if (IS_ERR(dma_buf_mnt))
    1719           0 :                 return PTR_ERR(dma_buf_mnt);
    1720             : 
    1721           1 :         mutex_init(&db_list.lock);
    1722           1 :         INIT_LIST_HEAD(&db_list.head);
    1723             :         dma_buf_init_debugfs();
    1724           1 :         return 0;
    1725             : }
    1726             : subsys_initcall(dma_buf_init);
    1727             : 
    1728           0 : static void __exit dma_buf_deinit(void)
    1729             : {
    1730             :         dma_buf_uninit_debugfs();
    1731           0 :         kern_unmount(dma_buf_mnt);
    1732             :         dma_buf_uninit_sysfs_statistics();
    1733           0 : }
    1734             : __exitcall(dma_buf_deinit);

Generated by: LCOV version 1.14