LCOV - code coverage report
Current view: top level - drivers/gpu/drm - drm_prime.c (source / functions)
Test: coverage.info
Date: 2023-04-06 08:38:28
Coverage:   Lines: 0 / 293 (0.0 %)   Functions: 0 / 27 (0.0 %)

          Line data    Source code
       1             : /*
       2             :  * Copyright © 2012 Red Hat
       3             :  *
       4             :  * Permission is hereby granted, free of charge, to any person obtaining a
       5             :  * copy of this software and associated documentation files (the "Software"),
       6             :  * to deal in the Software without restriction, including without limitation
       7             :  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
       8             :  * and/or sell copies of the Software, and to permit persons to whom the
       9             :  * Software is furnished to do so, subject to the following conditions:
      10             :  *
      11             :  * The above copyright notice and this permission notice (including the next
      12             :  * paragraph) shall be included in all copies or substantial portions of the
      13             :  * Software.
      14             :  *
      15             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      16             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      17             :  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
      18             :  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      19             :  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
      20             :  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
      21             :  * IN THE SOFTWARE.
      22             :  *
      23             :  * Authors:
      24             :  *      Dave Airlie <airlied@redhat.com>
      25             :  *      Rob Clark <rob.clark@linaro.org>
      26             :  *
      27             :  */
      28             : 
      29             : #include <linux/export.h>
      30             : #include <linux/dma-buf.h>
      31             : #include <linux/rbtree.h>
      32             : #include <linux/module.h>
      33             : 
      34             : #include <drm/drm.h>
      35             : #include <drm/drm_drv.h>
      36             : #include <drm/drm_file.h>
      37             : #include <drm/drm_framebuffer.h>
      38             : #include <drm/drm_gem.h>
      39             : #include <drm/drm_prime.h>
      40             : 
      41             : #include "drm_internal.h"
      42             : 
      43             : MODULE_IMPORT_NS(DMA_BUF);
      44             : 
      45             : /**
      46             :  * DOC: overview and lifetime rules
      47             :  *
      48             :  * Similar to GEM global names, PRIME file descriptors are also used to share
      49             :  * buffer objects across processes. They offer additional security: as file
      50             :  * descriptors must be explicitly sent over UNIX domain sockets to be shared
      51             :  * between applications, they can't be guessed like the globally unique GEM
      52             :  * names.
      53             :  *
      54             :  * Drivers that support the PRIME API implement the
      55             :  * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations.
      56             :  * GEM based drivers must use drm_gem_prime_handle_to_fd() and
      57             :  * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
       58             :  * actual driver interface is provided through the &drm_gem_object_funcs.export
      59             :  * and &drm_driver.gem_prime_import hooks.
      60             :  *
      61             :  * &dma_buf_ops implementations for GEM drivers are all individually exported
       62             :  * for drivers that need to override or reimplement some of them.
      63             :  *
      64             :  * Reference Counting for GEM Drivers
      65             :  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      66             :  *
       67             :  * On export, the &dma_buf holds a reference to the exported buffer object,
      68             :  * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
      69             :  * IOCTL, when it first calls &drm_gem_object_funcs.export
      70             :  * and stores the exporting GEM object in the &dma_buf.priv field. This
      71             :  * reference needs to be released when the final reference to the &dma_buf
      72             :  * itself is dropped and its &dma_buf_ops.release function is called.  For
      73             :  * GEM-based drivers, the &dma_buf should be exported using
      74             :  * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
      75             :  *
      76             :  * Thus the chain of references always flows in one direction, avoiding loops:
       77             :  * importing GEM object -> dma-buf -> exported GEM bo. Further complications
      78             :  * are the lookup caches for import and export. These are required to guarantee
      79             :  * that any given object will always have only one unique userspace handle. This
      80             :  * is required to allow userspace to detect duplicated imports, since some GEM
      81             :  * drivers do fail command submissions if a given buffer object is listed more
      82             :  * than once. These import and export caches in &drm_prime_file_private only
      83             :  * retain a weak reference, which is cleaned up when the corresponding object is
      84             :  * released.
      85             :  *
      86             :  * Self-importing: If userspace is using PRIME as a replacement for flink then
      87             :  * it will get a fd->handle request for a GEM object that it created.  Drivers
       88             :  * should detect this situation and return the underlying object from the
       89             :  * dma-buf private. For GEM-based drivers this is handled in
      90             :  * drm_gem_prime_import() already.
      91             :  */
      92             : 
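/*
 * Illustrative sketch, not part of drm_prime.c: a GEM-based driver would
 * typically wire up the PRIME entry points described above roughly like this.
 * The "foo_driver" name is hypothetical; the callbacks assigned are the
 * helpers implemented in this file.
 */
static const struct drm_driver foo_driver = {
        .driver_features        = DRIVER_GEM,
        /* export: GEM handle -> dma-buf fd */
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        /* import: dma-buf fd -> GEM handle */
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        /* optional, drm_gem_prime_import() is used when this is NULL */
        .gem_prime_import       = drm_gem_prime_import,
        .gem_prime_mmap         = drm_gem_prime_mmap,
        /* ... name, fops, ioctl table, etc. ... */
};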
      93             : struct drm_prime_member {
      94             :         struct dma_buf *dma_buf;
      95             :         uint32_t handle;
      96             : 
      97             :         struct rb_node dmabuf_rb;
      98             :         struct rb_node handle_rb;
      99             : };
     100             : 
     101           0 : static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
     102             :                                     struct dma_buf *dma_buf, uint32_t handle)
     103             : {
     104             :         struct drm_prime_member *member;
     105             :         struct rb_node **p, *rb;
     106             : 
     107           0 :         member = kmalloc(sizeof(*member), GFP_KERNEL);
     108           0 :         if (!member)
     109             :                 return -ENOMEM;
     110             : 
     111           0 :         get_dma_buf(dma_buf);
     112           0 :         member->dma_buf = dma_buf;
     113           0 :         member->handle = handle;
     114             : 
     115           0 :         rb = NULL;
     116           0 :         p = &prime_fpriv->dmabufs.rb_node;
     117           0 :         while (*p) {
     118             :                 struct drm_prime_member *pos;
     119             : 
     120           0 :                 rb = *p;
     121           0 :                 pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
     122           0 :                 if (dma_buf > pos->dma_buf)
     123           0 :                         p = &rb->rb_right;
     124             :                 else
     125           0 :                         p = &rb->rb_left;
     126             :         }
     127           0 :         rb_link_node(&member->dmabuf_rb, rb, p);
     128           0 :         rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
     129             : 
     130           0 :         rb = NULL;
     131           0 :         p = &prime_fpriv->handles.rb_node;
     132           0 :         while (*p) {
     133             :                 struct drm_prime_member *pos;
     134             : 
     135           0 :                 rb = *p;
     136           0 :                 pos = rb_entry(rb, struct drm_prime_member, handle_rb);
     137           0 :                 if (handle > pos->handle)
     138           0 :                         p = &rb->rb_right;
     139             :                 else
     140           0 :                         p = &rb->rb_left;
     141             :         }
     142           0 :         rb_link_node(&member->handle_rb, rb, p);
     143           0 :         rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
     144             : 
     145           0 :         return 0;
     146             : }
     147             : 
     148             : static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
     149             :                                                       uint32_t handle)
     150             : {
     151             :         struct rb_node *rb;
     152             : 
     153           0 :         rb = prime_fpriv->handles.rb_node;
     154           0 :         while (rb) {
     155             :                 struct drm_prime_member *member;
     156             : 
     157           0 :                 member = rb_entry(rb, struct drm_prime_member, handle_rb);
     158           0 :                 if (member->handle == handle)
     159           0 :                         return member->dma_buf;
     160           0 :                 else if (member->handle < handle)
     161           0 :                         rb = rb->rb_right;
     162             :                 else
     163           0 :                         rb = rb->rb_left;
     164             :         }
     165             : 
     166             :         return NULL;
     167             : }
     168             : 
     169             : static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
     170             :                                        struct dma_buf *dma_buf,
     171             :                                        uint32_t *handle)
     172             : {
     173             :         struct rb_node *rb;
     174             : 
     175           0 :         rb = prime_fpriv->dmabufs.rb_node;
     176           0 :         while (rb) {
     177             :                 struct drm_prime_member *member;
     178             : 
     179           0 :                 member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
     180           0 :                 if (member->dma_buf == dma_buf) {
     181           0 :                         *handle = member->handle;
     182             :                         return 0;
     183           0 :                 } else if (member->dma_buf < dma_buf) {
     184           0 :                         rb = rb->rb_right;
     185             :                 } else {
     186           0 :                         rb = rb->rb_left;
     187             :                 }
     188             :         }
     189             : 
     190             :         return -ENOENT;
     191             : }
     192             : 
     193           0 : void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
     194             :                                  uint32_t handle)
     195             : {
     196             :         struct rb_node *rb;
     197             : 
     198           0 :         mutex_lock(&prime_fpriv->lock);
     199             : 
     200           0 :         rb = prime_fpriv->handles.rb_node;
     201           0 :         while (rb) {
     202             :                 struct drm_prime_member *member;
     203             : 
     204           0 :                 member = rb_entry(rb, struct drm_prime_member, handle_rb);
     205           0 :                 if (member->handle == handle) {
     206           0 :                         rb_erase(&member->handle_rb, &prime_fpriv->handles);
     207           0 :                         rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
     208             : 
     209           0 :                         dma_buf_put(member->dma_buf);
     210           0 :                         kfree(member);
     211           0 :                         break;
     212           0 :                 } else if (member->handle < handle) {
     213           0 :                         rb = rb->rb_right;
     214             :                 } else {
     215           0 :                         rb = rb->rb_left;
     216             :                 }
     217             :         }
     218             : 
     219           0 :         mutex_unlock(&prime_fpriv->lock);
     220           0 : }
     221             : 
     222           0 : void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
     223             : {
     224           0 :         mutex_init(&prime_fpriv->lock);
     225           0 :         prime_fpriv->dmabufs = RB_ROOT;
     226           0 :         prime_fpriv->handles = RB_ROOT;
     227           0 : }
     228             : 
     229           0 : void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
     230             : {
     231             :         /* by now drm_gem_release should've made sure the list is empty */
     232           0 :         WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
     233           0 : }
     234             : 
     235             : /**
     236             :  * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
     237             :  * @dev: parent device for the exported dmabuf
     238             :  * @exp_info: the export information used by dma_buf_export()
     239             :  *
     240             :  * This wraps dma_buf_export() for use by generic GEM drivers that are using
     241             :  * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
     242             :  * a reference to the &drm_device and the exported &drm_gem_object (stored in
      243             :  * &dma_buf_export_info.priv); both are released by drm_gem_dmabuf_release().
     244             :  *
     245             :  * Returns the new dmabuf.
     246             :  */
     247           0 : struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
     248             :                                       struct dma_buf_export_info *exp_info)
     249             : {
     250           0 :         struct drm_gem_object *obj = exp_info->priv;
     251             :         struct dma_buf *dma_buf;
     252             : 
     253           0 :         dma_buf = dma_buf_export(exp_info);
     254           0 :         if (IS_ERR(dma_buf))
     255             :                 return dma_buf;
     256             : 
     257           0 :         drm_dev_get(dev);
     258           0 :         drm_gem_object_get(obj);
     259           0 :         dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
     260             : 
     261           0 :         return dma_buf;
     262             : }
     263             : EXPORT_SYMBOL(drm_gem_dmabuf_export);
     264             : 
     265             : /**
     266             :  * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
     267             :  * @dma_buf: buffer to be released
     268             :  *
     269             :  * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
     270             :  * must use this in their &dma_buf_ops structure as the release callback.
     271             :  * drm_gem_dmabuf_release() should be used in conjunction with
     272             :  * drm_gem_dmabuf_export().
     273             :  */
     274           0 : void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
     275             : {
     276           0 :         struct drm_gem_object *obj = dma_buf->priv;
     277           0 :         struct drm_device *dev = obj->dev;
     278             : 
     279             :         /* drop the reference on the export fd holds */
     280           0 :         drm_gem_object_put(obj);
     281             : 
     282           0 :         drm_dev_put(dev);
     283           0 : }
     284             : EXPORT_SYMBOL(drm_gem_dmabuf_release);
     285             : 
     286             : /**
     287             :  * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
     288             :  * @dev: drm_device to import into
     289             :  * @file_priv: drm file-private structure
     290             :  * @prime_fd: fd id of the dma-buf which should be imported
     291             :  * @handle: pointer to storage for the handle of the imported buffer object
     292             :  *
      293             :  * This is the PRIME import function which GEM drivers must use to ensure
      294             :  * correct lifetime management of the underlying GEM object.
      295             :  * The actual importing of the GEM object from the dma-buf is done through the
     296             :  * &drm_driver.gem_prime_import driver callback.
     297             :  *
     298             :  * Returns 0 on success or a negative error code on failure.
     299             :  */
     300           0 : int drm_gem_prime_fd_to_handle(struct drm_device *dev,
     301             :                                struct drm_file *file_priv, int prime_fd,
     302             :                                uint32_t *handle)
     303             : {
     304             :         struct dma_buf *dma_buf;
     305             :         struct drm_gem_object *obj;
     306             :         int ret;
     307             : 
     308           0 :         dma_buf = dma_buf_get(prime_fd);
     309           0 :         if (IS_ERR(dma_buf))
     310           0 :                 return PTR_ERR(dma_buf);
     311             : 
     312           0 :         mutex_lock(&file_priv->prime.lock);
     313             : 
     314           0 :         ret = drm_prime_lookup_buf_handle(&file_priv->prime,
     315             :                         dma_buf, handle);
     316           0 :         if (ret == 0)
     317             :                 goto out_put;
     318             : 
     319             :         /* never seen this one, need to import */
     320           0 :         mutex_lock(&dev->object_name_lock);
     321           0 :         if (dev->driver->gem_prime_import)
     322           0 :                 obj = dev->driver->gem_prime_import(dev, dma_buf);
     323             :         else
     324           0 :                 obj = drm_gem_prime_import(dev, dma_buf);
     325           0 :         if (IS_ERR(obj)) {
     326           0 :                 ret = PTR_ERR(obj);
     327             :                 goto out_unlock;
     328             :         }
     329             : 
     330           0 :         if (obj->dma_buf) {
     331           0 :                 WARN_ON(obj->dma_buf != dma_buf);
     332             :         } else {
     333           0 :                 obj->dma_buf = dma_buf;
     334           0 :                 get_dma_buf(dma_buf);
     335             :         }
     336             : 
     337             :         /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
     338           0 :         ret = drm_gem_handle_create_tail(file_priv, obj, handle);
     339           0 :         drm_gem_object_put(obj);
     340           0 :         if (ret)
     341             :                 goto out_put;
     342             : 
     343           0 :         ret = drm_prime_add_buf_handle(&file_priv->prime,
     344             :                         dma_buf, *handle);
     345           0 :         mutex_unlock(&file_priv->prime.lock);
     346           0 :         if (ret)
     347             :                 goto fail;
     348             : 
     349           0 :         dma_buf_put(dma_buf);
     350             : 
     351           0 :         return 0;
     352             : 
     353             : fail:
     354             :         /* hmm, if driver attached, we are relying on the free-object path
     355             :          * to detach.. which seems ok..
     356             :          */
     357           0 :         drm_gem_handle_delete(file_priv, *handle);
     358           0 :         dma_buf_put(dma_buf);
     359           0 :         return ret;
     360             : 
     361             : out_unlock:
     362           0 :         mutex_unlock(&dev->object_name_lock);
     363             : out_put:
     364           0 :         mutex_unlock(&file_priv->prime.lock);
     365           0 :         dma_buf_put(dma_buf);
     366           0 :         return ret;
     367             : }
     368             : EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
     369             : 
     370           0 : int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
     371             :                                  struct drm_file *file_priv)
     372             : {
     373           0 :         struct drm_prime_handle *args = data;
     374             : 
     375           0 :         if (!dev->driver->prime_fd_to_handle)
     376             :                 return -ENOSYS;
     377             : 
     378           0 :         return dev->driver->prime_fd_to_handle(dev, file_priv,
     379           0 :                         args->fd, &args->handle);
     380             : }
     381             : 
     382           0 : static struct dma_buf *export_and_register_object(struct drm_device *dev,
     383             :                                                   struct drm_gem_object *obj,
     384             :                                                   uint32_t flags)
     385             : {
     386             :         struct dma_buf *dmabuf;
     387             : 
     388             :         /* prevent races with concurrent gem_close. */
     389           0 :         if (obj->handle_count == 0) {
     390             :                 dmabuf = ERR_PTR(-ENOENT);
     391             :                 return dmabuf;
     392             :         }
     393             : 
     394           0 :         if (obj->funcs && obj->funcs->export)
     395           0 :                 dmabuf = obj->funcs->export(obj, flags);
     396             :         else
     397           0 :                 dmabuf = drm_gem_prime_export(obj, flags);
     398           0 :         if (IS_ERR(dmabuf)) {
     399             :                 /* normally the created dma-buf takes ownership of the ref,
     400             :                  * but if that fails then drop the ref
     401             :                  */
     402             :                 return dmabuf;
     403             :         }
     404             : 
     405             :         /*
     406             :          * Note that callers do not need to clean up the export cache
     407             :          * since the check for obj->handle_count guarantees that someone
     408             :          * will clean it up.
     409             :          */
     410           0 :         obj->dma_buf = dmabuf;
     411           0 :         get_dma_buf(obj->dma_buf);
     412             : 
     413             :         return dmabuf;
     414             : }
     415             : 
     416             : /**
     417             :  * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
     418             :  * @dev: dev to export the buffer from
     419             :  * @file_priv: drm file-private structure
     420             :  * @handle: buffer handle to export
     421             :  * @flags: flags like DRM_CLOEXEC
      422             :  * @prime_fd: pointer to storage for the fd id of the created dma-buf
     423             :  *
      424             :  * This is the PRIME export function which GEM drivers must use to ensure
      425             :  * correct lifetime management of the underlying GEM object.
     426             :  * The actual exporting from GEM object to a dma-buf is done through the
     427             :  * &drm_gem_object_funcs.export callback.
     428             :  */
     429           0 : int drm_gem_prime_handle_to_fd(struct drm_device *dev,
     430             :                                struct drm_file *file_priv, uint32_t handle,
     431             :                                uint32_t flags,
     432             :                                int *prime_fd)
     433             : {
     434             :         struct drm_gem_object *obj;
     435           0 :         int ret = 0;
     436             :         struct dma_buf *dmabuf;
     437             : 
     438           0 :         mutex_lock(&file_priv->prime.lock);
     439           0 :         obj = drm_gem_object_lookup(file_priv, handle);
     440           0 :         if (!obj)  {
     441             :                 ret = -ENOENT;
     442             :                 goto out_unlock;
     443             :         }
     444             : 
     445           0 :         dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
     446           0 :         if (dmabuf) {
     447             :                 get_dma_buf(dmabuf);
     448             :                 goto out_have_handle;
     449             :         }
     450             : 
     451           0 :         mutex_lock(&dev->object_name_lock);
     452             :         /* re-export the original imported object */
     453           0 :         if (obj->import_attach) {
     454           0 :                 dmabuf = obj->import_attach->dmabuf;
     455           0 :                 get_dma_buf(dmabuf);
     456             :                 goto out_have_obj;
     457             :         }
     458             : 
     459           0 :         if (obj->dma_buf) {
     460           0 :                 get_dma_buf(obj->dma_buf);
     461           0 :                 dmabuf = obj->dma_buf;
     462           0 :                 goto out_have_obj;
     463             :         }
     464             : 
     465           0 :         dmabuf = export_and_register_object(dev, obj, flags);
     466           0 :         if (IS_ERR(dmabuf)) {
     467             :                 /* normally the created dma-buf takes ownership of the ref,
     468             :                  * but if that fails then drop the ref
     469             :                  */
     470           0 :                 ret = PTR_ERR(dmabuf);
     471           0 :                 mutex_unlock(&dev->object_name_lock);
     472           0 :                 goto out;
     473             :         }
     474             : 
     475             : out_have_obj:
     476             :         /*
     477             :          * If we've exported this buffer then cheat and add it to the import list
     478             :          * so we get the correct handle back. We must do this under the
     479             :          * protection of dev->object_name_lock to ensure that a racing gem close
      480             :  * ioctl doesn't fail to remove this buffer handle from the cache.
     481             :          */
     482           0 :         ret = drm_prime_add_buf_handle(&file_priv->prime,
     483             :                                        dmabuf, handle);
     484           0 :         mutex_unlock(&dev->object_name_lock);
     485           0 :         if (ret)
     486             :                 goto fail_put_dmabuf;
     487             : 
     488             : out_have_handle:
     489           0 :         ret = dma_buf_fd(dmabuf, flags);
     490             :         /*
     491             :          * We must _not_ remove the buffer from the handle cache since the newly
     492             :          * created dma buf is already linked in the global obj->dma_buf pointer,
     493             :          * and that is invariant as long as a userspace gem handle exists.
     494             :          * Closing the handle will clean out the cache anyway, so we don't leak.
     495             :          */
     496           0 :         if (ret < 0) {
     497             :                 goto fail_put_dmabuf;
     498             :         } else {
     499           0 :                 *prime_fd = ret;
     500           0 :                 ret = 0;
     501             :         }
     502             : 
     503           0 :         goto out;
     504             : 
     505             : fail_put_dmabuf:
     506           0 :         dma_buf_put(dmabuf);
     507             : out:
     508             :         drm_gem_object_put(obj);
     509             : out_unlock:
     510           0 :         mutex_unlock(&file_priv->prime.lock);
     511             : 
     512           0 :         return ret;
     513             : }
     514             : EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
     515             : 
     516           0 : int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
     517             :                                  struct drm_file *file_priv)
     518             : {
     519           0 :         struct drm_prime_handle *args = data;
     520             : 
     521           0 :         if (!dev->driver->prime_handle_to_fd)
     522             :                 return -ENOSYS;
     523             : 
     524             :         /* check flags are valid */
     525           0 :         if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
     526             :                 return -EINVAL;
     527             : 
     528           0 :         return dev->driver->prime_handle_to_fd(dev, file_priv,
     529           0 :                         args->handle, args->flags, &args->fd);
     530             : }
     531             : 
     532             : /**
     533             :  * DOC: PRIME Helpers
     534             :  *
     535             :  * Drivers can implement &drm_gem_object_funcs.export and
     536             :  * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
     537             :  * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
     538             :  * implement dma-buf support in terms of some lower-level helpers, which are
     539             :  * again exported for drivers to use individually:
     540             :  *
     541             :  * Exporting buffers
     542             :  * ~~~~~~~~~~~~~~~~~
     543             :  *
     544             :  * Optional pinning of buffers is handled at dma-buf attach and detach time in
     545             :  * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
      546             :  * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
     547             :  * &drm_gem_object_funcs.get_sg_table.
     548             :  *
     549             :  * For kernel-internal access there's drm_gem_dmabuf_vmap() and
     550             :  * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
     551             :  * drm_gem_dmabuf_mmap().
     552             :  *
     553             :  * Note that these export helpers can only be used if the underlying backing
     554             :  * storage is fully coherent and either permanently pinned, or it is safe to pin
     555             :  * it indefinitely.
     556             :  *
     557             :  * FIXME: The underlying helper functions are named rather inconsistently.
     558             :  *
     559             :  * Importing buffers
     560             :  * ~~~~~~~~~~~~~~~~~
     561             :  *
     562             :  * Importing dma-bufs using drm_gem_prime_import() relies on
     563             :  * &drm_driver.gem_prime_import_sg_table.
     564             :  *
      565             :  * Note that, similarly to the export helpers, this permanently pins the
      566             :  * underlying backing storage. That is fine for scanout, but is not the best
     567             :  * option for sharing lots of buffers for rendering.
     568             :  */
     569             : 
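/*
 * Illustrative sketch, not part of drm_prime.c: the per-object hooks that the
 * export helpers described above call into. The "foo_gem_*" functions are
 * hypothetical driver code; the helper that uses each hook is noted per field.
 */
static const struct drm_gem_object_funcs foo_gem_object_funcs = {
        .free           = foo_gem_free_object,  /* calls drm_prime_gem_destroy() for imports */
        .export         = drm_gem_prime_export, /* the default, shown for completeness */
        .pin            = foo_gem_pin,          /* called via drm_gem_map_attach() */
        .unpin          = foo_gem_unpin,        /* called via drm_gem_map_detach() */
        .get_sg_table   = foo_gem_get_sg_table, /* called via drm_gem_map_dma_buf() */
        .vmap           = foo_gem_vmap,         /* called via drm_gem_dmabuf_vmap() */
        .vunmap         = foo_gem_vunmap,       /* called via drm_gem_dmabuf_vunmap() */
};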
     570             : /**
     571             :  * drm_gem_map_attach - dma_buf attach implementation for GEM
     572             :  * @dma_buf: buffer to attach device to
     573             :  * @attach: buffer attachment data
     574             :  *
     575             :  * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
     576             :  * used as the &dma_buf_ops.attach callback. Must be used together with
     577             :  * drm_gem_map_detach().
     578             :  *
     579             :  * Returns 0 on success, negative error code on failure.
     580             :  */
     581           0 : int drm_gem_map_attach(struct dma_buf *dma_buf,
     582             :                        struct dma_buf_attachment *attach)
     583             : {
     584           0 :         struct drm_gem_object *obj = dma_buf->priv;
     585             : 
     586           0 :         return drm_gem_pin(obj);
     587             : }
     588             : EXPORT_SYMBOL(drm_gem_map_attach);
     589             : 
     590             : /**
     591             :  * drm_gem_map_detach - dma_buf detach implementation for GEM
     592             :  * @dma_buf: buffer to detach from
     593             :  * @attach: attachment to be detached
     594             :  *
      595             :  * Calls &drm_gem_object_funcs.unpin for device specific handling. Cleans up
     596             :  * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
     597             :  * &dma_buf_ops.detach callback.
     598             :  */
     599           0 : void drm_gem_map_detach(struct dma_buf *dma_buf,
     600             :                         struct dma_buf_attachment *attach)
     601             : {
     602           0 :         struct drm_gem_object *obj = dma_buf->priv;
     603             : 
     604           0 :         drm_gem_unpin(obj);
     605           0 : }
     606             : EXPORT_SYMBOL(drm_gem_map_detach);
     607             : 
     608             : /**
     609             :  * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
     610             :  * @attach: attachment whose scatterlist is to be returned
     611             :  * @dir: direction of DMA transfer
     612             :  *
     613             :  * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
     614             :  * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
     615             :  * with drm_gem_unmap_dma_buf().
     616             :  *
      617             :  * Returns: the sg_table containing the scatterlist to be returned, or an
      618             :  * ERR_PTR on error. May return ERR_PTR(-EINTR) if interrupted by a signal.
     619             :  */
     620           0 : struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
     621             :                                      enum dma_data_direction dir)
     622             : {
     623           0 :         struct drm_gem_object *obj = attach->dmabuf->priv;
     624             :         struct sg_table *sgt;
     625             :         int ret;
     626             : 
     627           0 :         if (WARN_ON(dir == DMA_NONE))
     628             :                 return ERR_PTR(-EINVAL);
     629             : 
     630           0 :         if (WARN_ON(!obj->funcs->get_sg_table))
     631             :                 return ERR_PTR(-ENOSYS);
     632             : 
     633           0 :         sgt = obj->funcs->get_sg_table(obj);
     634           0 :         if (IS_ERR(sgt))
     635             :                 return sgt;
     636             : 
     637           0 :         ret = dma_map_sgtable(attach->dev, sgt, dir,
     638             :                               DMA_ATTR_SKIP_CPU_SYNC);
     639           0 :         if (ret) {
     640           0 :                 sg_free_table(sgt);
     641           0 :                 kfree(sgt);
     642           0 :                 sgt = ERR_PTR(ret);
     643             :         }
     644             : 
     645             :         return sgt;
     646             : }
     647             : EXPORT_SYMBOL(drm_gem_map_dma_buf);
     648             : 
     649             : /**
     650             :  * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
     651             :  * @attach: attachment to unmap buffer from
     652             :  * @sgt: scatterlist info of the buffer to unmap
     653             :  * @dir: direction of DMA transfer
     654             :  *
     655             :  * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
     656             :  */
     657           0 : void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
     658             :                            struct sg_table *sgt,
     659             :                            enum dma_data_direction dir)
     660             : {
     661           0 :         if (!sgt)
     662             :                 return;
     663             : 
     664           0 :         dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
     665           0 :         sg_free_table(sgt);
     666           0 :         kfree(sgt);
     667             : }
     668             : EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
     669             : 
     670             : /**
     671             :  * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
     672             :  * @dma_buf: buffer to be mapped
     673             :  * @map: the virtual address of the buffer
     674             :  *
     675             :  * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
     676             :  * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
     677             :  * The kernel virtual address is returned in map.
     678             :  *
     679             :  * Returns 0 on success or a negative errno code otherwise.
     680             :  */
     681           0 : int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
     682             : {
     683           0 :         struct drm_gem_object *obj = dma_buf->priv;
     684             : 
     685           0 :         return drm_gem_vmap(obj, map);
     686             : }
     687             : EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
     688             : 
     689             : /**
     690             :  * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
     691             :  * @dma_buf: buffer to be unmapped
     692             :  * @map: the virtual address of the buffer
     693             :  *
     694             :  * Releases a kernel virtual mapping. This can be used as the
     695             :  * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
     696             :  */
     697           0 : void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
     698             : {
     699           0 :         struct drm_gem_object *obj = dma_buf->priv;
     700             : 
     701           0 :         drm_gem_vunmap(obj, map);
     702           0 : }
     703             : EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
     704             : 
     705             : /**
     706             :  * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
     707             :  * @obj: GEM object
     708             :  * @vma: Virtual address range
     709             :  *
     710             :  * This function sets up a userspace mapping for PRIME exported buffers using
     711             :  * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
     712             :  * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
     713             :  * called to set up the mapping.
     714             :  *
     715             :  * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
     716             :  */
     717           0 : int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
     718             : {
     719             :         struct drm_file *priv;
     720             :         struct file *fil;
     721             :         int ret;
     722             : 
     723             :         /* Add the fake offset */
     724           0 :         vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
     725             : 
     726           0 :         if (obj->funcs && obj->funcs->mmap) {
     727           0 :                 vma->vm_ops = obj->funcs->vm_ops;
     728             : 
     729           0 :                 drm_gem_object_get(obj);
     730           0 :                 ret = obj->funcs->mmap(obj, vma);
     731           0 :                 if (ret) {
     732             :                         drm_gem_object_put(obj);
     733             :                         return ret;
     734             :                 }
     735           0 :                 vma->vm_private_data = obj;
     736           0 :                 return 0;
     737             :         }
     738             : 
     739           0 :         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
     740           0 :         fil = kzalloc(sizeof(*fil), GFP_KERNEL);
     741           0 :         if (!priv || !fil) {
     742             :                 ret = -ENOMEM;
     743             :                 goto out;
     744             :         }
     745             : 
     746             :         /* Used by drm_gem_mmap() to lookup the GEM object */
     747           0 :         priv->minor = obj->dev->primary;
     748           0 :         fil->private_data = priv;
     749             : 
     750           0 :         ret = drm_vma_node_allow(&obj->vma_node, priv);
     751           0 :         if (ret)
     752             :                 goto out;
     753             : 
     754           0 :         ret = obj->dev->driver->fops->mmap(fil, vma);
     755             : 
     756           0 :         drm_vma_node_revoke(&obj->vma_node, priv);
     757             : out:
     758           0 :         kfree(priv);
     759           0 :         kfree(fil);
     760             : 
     761           0 :         return ret;
     762             : }
     763             : EXPORT_SYMBOL(drm_gem_prime_mmap);
     764             : 
     765             : /**
     766             :  * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
     767             :  * @dma_buf: buffer to be mapped
     768             :  * @vma: virtual address range
     769             :  *
     770             :  * Provides memory mapping for the buffer. This can be used as the
     771             :  * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
     772             :  * which should be set to drm_gem_prime_mmap().
     773             :  *
     774             :  * FIXME: There's really no point to this wrapper, drivers which need anything
     775             :  * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
     776             :  *
     777             :  * Returns 0 on success or a negative error code on failure.
     778             :  */
     779           0 : int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
     780             : {
     781           0 :         struct drm_gem_object *obj = dma_buf->priv;
     782           0 :         struct drm_device *dev = obj->dev;
     783             : 
     784             :         dma_resv_assert_held(dma_buf->resv);
     785             : 
     786           0 :         if (!dev->driver->gem_prime_mmap)
     787             :                 return -ENOSYS;
     788             : 
     789           0 :         return dev->driver->gem_prime_mmap(obj, vma);
     790             : }
     791             : EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
     792             : 
     793             : static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
     794             :         .cache_sgt_mapping = true,
     795             :         .attach = drm_gem_map_attach,
     796             :         .detach = drm_gem_map_detach,
     797             :         .map_dma_buf = drm_gem_map_dma_buf,
     798             :         .unmap_dma_buf = drm_gem_unmap_dma_buf,
     799             :         .release = drm_gem_dmabuf_release,
     800             :         .mmap = drm_gem_dmabuf_mmap,
     801             :         .vmap = drm_gem_dmabuf_vmap,
     802             :         .vunmap = drm_gem_dmabuf_vunmap,
     803             : };
     804             : 
     805             : /**
     806             :  * drm_prime_pages_to_sg - converts a page array into an sg list
     807             :  * @dev: DRM device
     808             :  * @pages: pointer to the array of page pointers to convert
     809             :  * @nr_pages: length of the page vector
     810             :  *
      811             :  * This helper creates an sg table object from a set of pages;
      812             :  * the driver is responsible for mapping the pages into the
      813             :  * importer's address space for use with dma_buf itself.
     814             :  *
     815             :  * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
     816             :  */
     817           0 : struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
     818             :                                        struct page **pages, unsigned int nr_pages)
     819             : {
     820             :         struct sg_table *sg;
     821           0 :         size_t max_segment = 0;
     822             :         int err;
     823             : 
     824           0 :         sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
     825           0 :         if (!sg)
     826             :                 return ERR_PTR(-ENOMEM);
     827             : 
     828           0 :         if (dev)
     829           0 :                 max_segment = dma_max_mapping_size(dev->dev);
     830           0 :         if (max_segment == 0)
     831           0 :                 max_segment = UINT_MAX;
     832           0 :         err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
     833           0 :                                                 nr_pages << PAGE_SHIFT,
     834             :                                                 max_segment, GFP_KERNEL);
     835           0 :         if (err) {
     836           0 :                 kfree(sg);
     837           0 :                 sg = ERR_PTR(err);
     838             :         }
     839             :         return sg;
     840             : }
     841             : EXPORT_SYMBOL(drm_prime_pages_to_sg);
     842             : 
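/*
 * Illustrative sketch, not part of drm_prime.c: a driver that keeps a page
 * array per buffer object could implement &drm_gem_object_funcs.get_sg_table
 * on top of drm_prime_pages_to_sg() roughly like this. "struct foo_gem_object"
 * and its "base"/"pages" members are hypothetical.
 */
static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj)
{
        struct foo_gem_object *bo = container_of(obj, struct foo_gem_object, base);

        /* One entry per page; segments are merged up to the device DMA limit. */
        return drm_prime_pages_to_sg(obj->dev, bo->pages, obj->size >> PAGE_SHIFT);
}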
     843             : /**
     844             :  * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
     845             :  * @sgt: sg_table describing the buffer to check
     846             :  *
     847             :  * This helper calculates the contiguous size in the DMA address space
     848             :  * of the buffer described by the provided sg_table.
     849             :  *
     850             :  * This is useful for implementing
      851             :  * &drm_driver.gem_prime_import_sg_table.
     852             :  */
     853           0 : unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
     854             : {
     855           0 :         dma_addr_t expected = sg_dma_address(sgt->sgl);
     856             :         struct scatterlist *sg;
     857           0 :         unsigned long size = 0;
     858             :         int i;
     859             : 
     860           0 :         for_each_sgtable_dma_sg(sgt, sg, i) {
     861           0 :                 unsigned int len = sg_dma_len(sg);
     862             : 
     863           0 :                 if (!len)
     864             :                         break;
     865           0 :                 if (sg_dma_address(sg) != expected)
     866             :                         break;
     867           0 :                 expected += len;
     868           0 :                 size += len;
     869             :         }
     870           0 :         return size;
     871             : }
     872             : EXPORT_SYMBOL(drm_prime_get_contiguous_size);
     873             : 
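/*
 * Illustrative sketch, not part of drm_prime.c: a driver that can only handle
 * contiguous buffers can reject fragmented imports in its
 * &drm_driver.gem_prime_import_sg_table hook. The "foo_*" names are
 * hypothetical.
 */
static struct drm_gem_object *
foo_gem_prime_import_sg_table(struct drm_device *dev,
                              struct dma_buf_attachment *attach,
                              struct sg_table *sgt)
{
        /* The whole buffer must be contiguous in DMA address space. */
        if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
                return ERR_PTR(-EINVAL);

        return foo_gem_wrap_sgt(dev, attach->dmabuf->size, sgt);
}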
     874             : /**
     875             :  * drm_gem_prime_export - helper library implementation of the export callback
     876             :  * @obj: GEM object to export
     877             :  * @flags: flags like DRM_CLOEXEC and DRM_RDWR
     878             :  *
      879             :  * This is the implementation of the &drm_gem_object_funcs.export callback for GEM drivers
     880             :  * using the PRIME helpers. It is used as the default in
     881             :  * drm_gem_prime_handle_to_fd().
     882             :  */
     883           0 : struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
     884             :                                      int flags)
     885             : {
     886           0 :         struct drm_device *dev = obj->dev;
     887           0 :         struct dma_buf_export_info exp_info = {
     888             :                 .exp_name = KBUILD_MODNAME, /* white lie for debug */
     889           0 :                 .owner = dev->driver->fops->owner,
     890             :                 .ops = &drm_gem_prime_dmabuf_ops,
     891           0 :                 .size = obj->size,
     892             :                 .flags = flags,
     893             :                 .priv = obj,
     894           0 :                 .resv = obj->resv,
     895             :         };
     896             : 
     897           0 :         return drm_gem_dmabuf_export(dev, &exp_info);
     898             : }
     899             : EXPORT_SYMBOL(drm_gem_prime_export);
     900             : 
     901             : /**
     902             :  * drm_gem_prime_import_dev - core implementation of the import callback
     903             :  * @dev: drm_device to import into
     904             :  * @dma_buf: dma-buf object to import
     905             :  * @attach_dev: struct device to dma_buf attach
     906             :  *
     907             :  * This is the core of drm_gem_prime_import(). It's designed to be called by
      908             :  * drivers that want to use a different device structure than &drm_device.dev for
     909             :  * attaching via dma_buf. This function calls
     910             :  * &drm_driver.gem_prime_import_sg_table internally.
     911             :  *
     912             :  * Drivers must arrange to call drm_prime_gem_destroy() from their
     913             :  * &drm_gem_object_funcs.free hook when using this function.
     914             :  */
     915           0 : struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
     916             :                                             struct dma_buf *dma_buf,
     917             :                                             struct device *attach_dev)
     918             : {
     919             :         struct dma_buf_attachment *attach;
     920             :         struct sg_table *sgt;
     921             :         struct drm_gem_object *obj;
     922             :         int ret;
     923             : 
     924           0 :         if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
     925           0 :                 obj = dma_buf->priv;
     926           0 :                 if (obj->dev == dev) {
     927             :                         /*
     928             :                          * Importing dmabuf exported from our own gem increases
     929             :                          * refcount on gem itself instead of f_count of dmabuf.
     930             :                          */
     931           0 :                         drm_gem_object_get(obj);
     932           0 :                         return obj;
     933             :                 }
     934             :         }
     935             : 
     936           0 :         if (!dev->driver->gem_prime_import_sg_table)
     937             :                 return ERR_PTR(-EINVAL);
     938             : 
     939           0 :         attach = dma_buf_attach(dma_buf, attach_dev);
     940           0 :         if (IS_ERR(attach))
     941             :                 return ERR_CAST(attach);
     942             : 
     943           0 :         get_dma_buf(dma_buf);
     944             : 
     945           0 :         sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
     946           0 :         if (IS_ERR(sgt)) {
     947           0 :                 ret = PTR_ERR(sgt);
     948           0 :                 goto fail_detach;
     949             :         }
     950             : 
     951           0 :         obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
     952           0 :         if (IS_ERR(obj)) {
     953           0 :                 ret = PTR_ERR(obj);
     954             :                 goto fail_unmap;
     955             :         }
     956             : 
     957           0 :         obj->import_attach = attach;
     958           0 :         obj->resv = dma_buf->resv;
     959             : 
     960           0 :         return obj;
     961             : 
     962             : fail_unmap:
     963           0 :         dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
     964             : fail_detach:
     965           0 :         dma_buf_detach(dma_buf, attach);
     966           0 :         dma_buf_put(dma_buf);
     967             : 
     968           0 :         return ERR_PTR(ret);
     969             : }
     970             : EXPORT_SYMBOL(drm_gem_prime_import_dev);
     971             : 
     972             : /**
     973             :  * drm_gem_prime_import - helper library implementation of the import callback
     974             :  * @dev: drm_device to import into
     975             :  * @dma_buf: dma-buf object to import
     976             :  *
      977             :  * This is the implementation of the gem_prime_import callback for GEM drivers
     978             :  * using the PRIME helpers. Drivers can use this as their
     979             :  * &drm_driver.gem_prime_import implementation. It is used as the default
     980             :  * implementation in drm_gem_prime_fd_to_handle().
     981             :  *
     982             :  * Drivers must arrange to call drm_prime_gem_destroy() from their
     983             :  * &drm_gem_object_funcs.free hook when using this function.
     984             :  */
     985           0 : struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
     986             :                                             struct dma_buf *dma_buf)
     987             : {
     988           0 :         return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
     989             : }
     990             : EXPORT_SYMBOL(drm_gem_prime_import);
     991             : 
     992             : /**
     993             :  * drm_prime_sg_to_page_array - convert an sg table into a page array
     994             :  * @sgt: scatter-gather table to convert
     995             :  * @pages: array of page pointers to store the pages in
     996             :  * @max_entries: size of the passed-in array
     997             :  *
     998             :  * Exports an sg table into an array of pages.
     999             :  *
     1000             :  * This function is deprecated and its use is strongly discouraged.
    1001             :  * The page array is only useful for page faults and those can corrupt fields
    1002             :  * in the struct page if they are not handled by the exporting driver.
    1003             :  */
    1004           0 : int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
    1005             :                                             struct page **pages,
    1006             :                                             int max_entries)
    1007             : {
    1008             :         struct sg_page_iter page_iter;
    1009           0 :         struct page **p = pages;
    1010             : 
    1011           0 :         for_each_sgtable_page(sgt, &page_iter, 0) {
    1012           0 :                 if (WARN_ON(p - pages >= max_entries))
    1013             :                         return -1;
    1014           0 :                 *p++ = sg_page_iter_page(&page_iter);
    1015             :         }
    1016             :         return 0;
    1017             : }
    1018             : EXPORT_SYMBOL(drm_prime_sg_to_page_array);
    1019             : 
    1020             : /**
    1021             :  * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
    1022             :  * @sgt: scatter-gather table to convert
    1023             :  * @addrs: array to store the dma bus address of each page
    1024             :  * @max_entries: size of both the passed-in arrays
    1025             :  *
    1026             :  * Exports an sg table into an array of addresses.
    1027             :  *
    1028             :  * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
    1029             :  * implementation.
    1030             :  */
    1031           0 : int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
    1032             :                                    int max_entries)
    1033             : {
    1034             :         struct sg_dma_page_iter dma_iter;
    1035           0 :         dma_addr_t *a = addrs;
    1036             : 
    1037           0 :         for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
    1038           0 :                 if (WARN_ON(a - addrs >= max_entries))
    1039             :                         return -1;
    1040           0 :                 *a++ = sg_page_iter_dma_address(&dma_iter);
    1041             :         }
    1042             :         return 0;
    1043             : }
    1044             : EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
    1045             : 
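/*
 * Illustrative sketch, not part of drm_prime.c: filling a per-object DMA
 * address array from an imported sg table, e.g. for a driver that programs
 * per-page entries into its own GPU page tables. The "foo_*" names are
 * hypothetical.
 */
static int foo_gem_populate_dma_addrs(struct drm_gem_object *obj,
                                      struct sg_table *sgt,
                                      dma_addr_t *addrs)
{
        int npages = obj->size >> PAGE_SHIFT;

        /* Returns -1 if the table describes more pages than fit into addrs[]. */
        return drm_prime_sg_to_dma_addr_array(sgt, addrs, npages);
}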
    1046             : /**
    1047             :  * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
    1048             :  * @obj: GEM object which was created from a dma-buf
    1049             :  * @sg: the sg-table which was pinned at import time
    1050             :  *
     1051             :  * This is the cleanup function which GEM drivers need to call when they use
    1052             :  * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
    1053             :  */
    1054           0 : void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
    1055             : {
    1056             :         struct dma_buf_attachment *attach;
    1057             :         struct dma_buf *dma_buf;
    1058             : 
    1059           0 :         attach = obj->import_attach;
    1060           0 :         if (sg)
    1061           0 :                 dma_buf_unmap_attachment_unlocked(attach, sg, DMA_BIDIRECTIONAL);
    1062           0 :         dma_buf = attach->dmabuf;
    1063           0 :         dma_buf_detach(attach->dmabuf, attach);
    1064             :         /* remove the reference */
    1065           0 :         dma_buf_put(dma_buf);
    1066           0 : }
    1067             : EXPORT_SYMBOL(drm_prime_gem_destroy);
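/*
 * Illustrative sketch, not part of drm_prime.c: a &drm_gem_object_funcs.free
 * hook for a driver that imports via drm_gem_prime_import(). The "foo_*"
 * names and the cached "sgt" member are hypothetical.
 */
static void foo_gem_free_object(struct drm_gem_object *obj)
{
        struct foo_gem_object *bo = container_of(obj, struct foo_gem_object, base);

        if (obj->import_attach)
                /* Unmaps the cached sg table, detaches and drops the dma-buf reference. */
                drm_prime_gem_destroy(obj, bo->sgt);
        else
                foo_gem_free_backing_storage(bo);

        drm_gem_object_release(obj);
        kfree(bo);
}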

Generated by: LCOV version 1.14