LCOV - code coverage report
Current view: top level - drivers/gpu/drm - drm_prime.c (source / functions)
Test: coverage.info
Date: 2023-07-19 18:55:55
Coverage: Lines: 0 / 295 (0.0 %) | Functions: 0 / 27 (0.0 %)

          Line data    Source code
       1             : /*
       2             :  * Copyright © 2012 Red Hat
       3             :  *
       4             :  * Permission is hereby granted, free of charge, to any person obtaining a
       5             :  * copy of this software and associated documentation files (the "Software"),
       6             :  * to deal in the Software without restriction, including without limitation
       7             :  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
       8             :  * and/or sell copies of the Software, and to permit persons to whom the
       9             :  * Software is furnished to do so, subject to the following conditions:
      10             :  *
      11             :  * The above copyright notice and this permission notice (including the next
      12             :  * paragraph) shall be included in all copies or substantial portions of the
      13             :  * Software.
      14             :  *
      15             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      16             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      17             :  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
      18             :  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
      19             :  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
      20             :  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
      21             :  * IN THE SOFTWARE.
      22             :  *
      23             :  * Authors:
      24             :  *      Dave Airlie <airlied@redhat.com>
      25             :  *      Rob Clark <rob.clark@linaro.org>
      26             :  *
      27             :  */
      28             : 
      29             : #include <linux/export.h>
      30             : #include <linux/dma-buf.h>
      31             : #include <linux/rbtree.h>
      32             : #include <linux/module.h>
      33             : 
      34             : #include <drm/drm.h>
      35             : #include <drm/drm_drv.h>
      36             : #include <drm/drm_file.h>
      37             : #include <drm/drm_framebuffer.h>
      38             : #include <drm/drm_gem.h>
      39             : #include <drm/drm_prime.h>
      40             : 
      41             : #include "drm_internal.h"
      42             : 
      43             : MODULE_IMPORT_NS(DMA_BUF);
      44             : 
      45             : /**
      46             :  * DOC: overview and lifetime rules
      47             :  *
      48             :  * Similar to GEM global names, PRIME file descriptors are also used to share
      49             :  * buffer objects across processes. They offer additional security: as file
      50             :  * descriptors must be explicitly sent over UNIX domain sockets to be shared
      51             :  * between applications, they can't be guessed like the globally unique GEM
      52             :  * names.
      53             :  *
       54             :  * Drivers that support the PRIME API implement the &drm_gem_object_funcs.export
       55             :  * and &drm_driver.gem_prime_import hooks. The &dma_buf_ops implementations used
       56             :  * by these helpers are all individually exported for drivers that need to
       57             :  * override or reimplement some of them.
      58             :  *
      59             :  * Reference Counting for GEM Drivers
      60             :  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      61             :  *
       62             :  * On export, the &dma_buf holds a reference to the exported buffer object,
       63             :  * usually a &drm_gem_object. This reference is taken in the PRIME_HANDLE_TO_FD
       64             :  * IOCTL, when &drm_gem_object_funcs.export is first called, and the exporting
       65             :  * GEM object is stored in the &dma_buf.priv field. This reference needs to be
       66             :  * released when the final reference to the &dma_buf itself is dropped and its
       67             :  * &dma_buf_ops.release function is called. For GEM-based drivers, the &dma_buf
       68             :  * should be exported using drm_gem_dmabuf_export() and then released by
       69             :  * drm_gem_dmabuf_release().
      70             :  *
       71             :  * Thus the chain of references always flows in one direction, avoiding loops:
       72             :  * importing GEM object -> dma-buf -> exported GEM bo. A further complication
       73             :  * is the pair of lookup caches for import and export. These are required to
       74             :  * guarantee that any given object will always have only one unique userspace
       75             :  * handle, which allows userspace to detect duplicated imports, since some GEM
       76             :  * drivers fail command submissions if a given buffer object is listed more
       77             :  * than once. The import and export caches in &drm_prime_file_private only
       78             :  * retain a weak reference, which is cleaned up when the corresponding object
       79             :  * is released.
      80             :  *
       81             :  * Self-importing: if userspace is using PRIME as a replacement for flink, it
       82             :  * will get an fd->handle request for a GEM object that it created itself.
       83             :  * Drivers should detect this situation and return the underlying object from
       84             :  * the dma-buf private. For GEM-based drivers this is already handled by
       85             :  * drm_gem_prime_import().
      86             :  */
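
As a userspace-side illustration of the lifetime rules above, the sketch below turns a GEM handle into a dma-buf fd on the exporting DRM fd and back into a handle on the importing one. This is a minimal sketch against the DRM UAPI only, assuming the header is installed as <drm/drm.h>; error handling, drmIoctl()-style EINTR retries and the SCM_RIGHTS socket transfer of the fd between processes are elided.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Exporting process: GEM handle -> dma-buf fd via DRM_IOCTL_PRIME_HANDLE_TO_FD. */
static int example_export(int drm_fd, uint32_t handle)
{
        struct drm_prime_handle args = {
                .handle = handle,
                .flags = DRM_CLOEXEC | DRM_RDWR,
        };

        if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) < 0)
                return -1;

        return args.fd; /* sent to the other process over a UNIX domain socket */
}

/* Importing process: dma-buf fd -> GEM handle via DRM_IOCTL_PRIME_FD_TO_HANDLE. */
static int example_import(int drm_fd, int prime_fd, uint32_t *handle)
{
        struct drm_prime_handle args = { .fd = prime_fd };

        if (ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args) < 0)
                return -1;

        *handle = args.handle;
        return 0;
}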
      87             : 
      88             : struct drm_prime_member {
      89             :         struct dma_buf *dma_buf;
      90             :         uint32_t handle;
      91             : 
      92             :         struct rb_node dmabuf_rb;
      93             :         struct rb_node handle_rb;
      94             : };
      95             : 
      96           0 : static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
      97             :                                     struct dma_buf *dma_buf, uint32_t handle)
      98             : {
      99             :         struct drm_prime_member *member;
     100             :         struct rb_node **p, *rb;
     101             : 
     102           0 :         member = kmalloc(sizeof(*member), GFP_KERNEL);
     103           0 :         if (!member)
     104             :                 return -ENOMEM;
     105             : 
     106           0 :         get_dma_buf(dma_buf);
     107           0 :         member->dma_buf = dma_buf;
     108           0 :         member->handle = handle;
     109             : 
     110           0 :         rb = NULL;
     111           0 :         p = &prime_fpriv->dmabufs.rb_node;
     112           0 :         while (*p) {
     113             :                 struct drm_prime_member *pos;
     114             : 
     115           0 :                 rb = *p;
     116           0 :                 pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
     117           0 :                 if (dma_buf > pos->dma_buf)
     118           0 :                         p = &rb->rb_right;
     119             :                 else
     120           0 :                         p = &rb->rb_left;
     121             :         }
     122           0 :         rb_link_node(&member->dmabuf_rb, rb, p);
     123           0 :         rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
     124             : 
     125           0 :         rb = NULL;
     126           0 :         p = &prime_fpriv->handles.rb_node;
     127           0 :         while (*p) {
     128             :                 struct drm_prime_member *pos;
     129             : 
     130           0 :                 rb = *p;
     131           0 :                 pos = rb_entry(rb, struct drm_prime_member, handle_rb);
     132           0 :                 if (handle > pos->handle)
     133           0 :                         p = &rb->rb_right;
     134             :                 else
     135           0 :                         p = &rb->rb_left;
     136             :         }
     137           0 :         rb_link_node(&member->handle_rb, rb, p);
     138           0 :         rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
     139             : 
     140           0 :         return 0;
     141             : }
     142             : 
     143             : static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
     144             :                                                       uint32_t handle)
     145             : {
     146             :         struct rb_node *rb;
     147             : 
     148           0 :         rb = prime_fpriv->handles.rb_node;
     149           0 :         while (rb) {
     150             :                 struct drm_prime_member *member;
     151             : 
     152           0 :                 member = rb_entry(rb, struct drm_prime_member, handle_rb);
     153           0 :                 if (member->handle == handle)
     154           0 :                         return member->dma_buf;
     155           0 :                 else if (member->handle < handle)
     156           0 :                         rb = rb->rb_right;
     157             :                 else
     158           0 :                         rb = rb->rb_left;
     159             :         }
     160             : 
     161             :         return NULL;
     162             : }
     163             : 
     164             : static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
     165             :                                        struct dma_buf *dma_buf,
     166             :                                        uint32_t *handle)
     167             : {
     168             :         struct rb_node *rb;
     169             : 
     170           0 :         rb = prime_fpriv->dmabufs.rb_node;
     171           0 :         while (rb) {
     172             :                 struct drm_prime_member *member;
     173             : 
     174           0 :                 member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
     175           0 :                 if (member->dma_buf == dma_buf) {
     176           0 :                         *handle = member->handle;
     177             :                         return 0;
     178           0 :                 } else if (member->dma_buf < dma_buf) {
     179           0 :                         rb = rb->rb_right;
     180             :                 } else {
     181           0 :                         rb = rb->rb_left;
     182             :                 }
     183             :         }
     184             : 
     185             :         return -ENOENT;
     186             : }
     187             : 
     188           0 : void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
     189             :                                  uint32_t handle)
     190             : {
     191             :         struct rb_node *rb;
     192             : 
     193           0 :         mutex_lock(&prime_fpriv->lock);
     194             : 
     195           0 :         rb = prime_fpriv->handles.rb_node;
     196           0 :         while (rb) {
     197             :                 struct drm_prime_member *member;
     198             : 
     199           0 :                 member = rb_entry(rb, struct drm_prime_member, handle_rb);
     200           0 :                 if (member->handle == handle) {
     201           0 :                         rb_erase(&member->handle_rb, &prime_fpriv->handles);
     202           0 :                         rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
     203             : 
     204           0 :                         dma_buf_put(member->dma_buf);
     205           0 :                         kfree(member);
     206           0 :                         break;
     207           0 :                 } else if (member->handle < handle) {
     208           0 :                         rb = rb->rb_right;
     209             :                 } else {
     210           0 :                         rb = rb->rb_left;
     211             :                 }
     212             :         }
     213             : 
     214           0 :         mutex_unlock(&prime_fpriv->lock);
     215           0 : }
     216             : 
     217           0 : void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
     218             : {
     219           0 :         mutex_init(&prime_fpriv->lock);
     220           0 :         prime_fpriv->dmabufs = RB_ROOT;
     221           0 :         prime_fpriv->handles = RB_ROOT;
     222           0 : }
     223             : 
     224           0 : void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
     225             : {
     226             :         /* by now drm_gem_release should've made sure the list is empty */
     227           0 :         WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
     228           0 : }
     229             : 
     230             : /**
     231             :  * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
     232             :  * @dev: parent device for the exported dmabuf
     233             :  * @exp_info: the export information used by dma_buf_export()
     234             :  *
     235             :  * This wraps dma_buf_export() for use by generic GEM drivers that are using
     236             :  * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
     237             :  * a reference to the &drm_device and the exported &drm_gem_object (stored in
     238             :  * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
     239             :  *
     240             :  * Returns the new dmabuf.
     241             :  */
     242           0 : struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
     243             :                                       struct dma_buf_export_info *exp_info)
     244             : {
     245           0 :         struct drm_gem_object *obj = exp_info->priv;
     246             :         struct dma_buf *dma_buf;
     247             : 
     248           0 :         dma_buf = dma_buf_export(exp_info);
     249           0 :         if (IS_ERR(dma_buf))
     250             :                 return dma_buf;
     251             : 
     252           0 :         drm_dev_get(dev);
     253           0 :         drm_gem_object_get(obj);
     254           0 :         dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
     255             : 
     256           0 :         return dma_buf;
     257             : }
     258             : EXPORT_SYMBOL(drm_gem_dmabuf_export);
     259             : 
     260             : /**
     261             :  * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
     262             :  * @dma_buf: buffer to be released
     263             :  *
     264             :  * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
     265             :  * must use this in their &dma_buf_ops structure as the release callback.
     266             :  * drm_gem_dmabuf_release() should be used in conjunction with
     267             :  * drm_gem_dmabuf_export().
     268             :  */
     269           0 : void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
     270             : {
     271           0 :         struct drm_gem_object *obj = dma_buf->priv;
     272           0 :         struct drm_device *dev = obj->dev;
     273             : 
      274             :         /* drop the reference the exported fd holds */
     275           0 :         drm_gem_object_put(obj);
     276             : 
     277           0 :         drm_dev_put(dev);
     278           0 : }
     279             : EXPORT_SYMBOL(drm_gem_dmabuf_release);
     280             : 
     281             : /*
     282             :  * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
     283             :  * @dev: drm_device to import into
     284             :  * @file_priv: drm file-private structure
     285             :  * @prime_fd: fd id of the dma-buf which should be imported
     286             :  * @handle: pointer to storage for the handle of the imported buffer object
     287             :  *
      288             :  * This is the PRIME import function which must be used by GEM drivers to
      289             :  * ensure correct lifetime management of the underlying GEM object. The actual
      290             :  * importing of the GEM object from the dma-buf is done through the
      291             :  * &drm_driver.gem_prime_import driver callback.
     292             :  *
     293             :  * Returns 0 on success or a negative error code on failure.
     294             :  */
     295           0 : static int drm_gem_prime_fd_to_handle(struct drm_device *dev,
     296             :                                       struct drm_file *file_priv, int prime_fd,
     297             :                                       uint32_t *handle)
     298             : {
     299             :         struct dma_buf *dma_buf;
     300             :         struct drm_gem_object *obj;
     301             :         int ret;
     302             : 
     303           0 :         dma_buf = dma_buf_get(prime_fd);
     304           0 :         if (IS_ERR(dma_buf))
     305           0 :                 return PTR_ERR(dma_buf);
     306             : 
     307           0 :         mutex_lock(&file_priv->prime.lock);
     308             : 
     309           0 :         ret = drm_prime_lookup_buf_handle(&file_priv->prime,
     310             :                         dma_buf, handle);
     311           0 :         if (ret == 0)
     312             :                 goto out_put;
     313             : 
     314             :         /* never seen this one, need to import */
     315           0 :         mutex_lock(&dev->object_name_lock);
     316           0 :         if (dev->driver->gem_prime_import)
     317           0 :                 obj = dev->driver->gem_prime_import(dev, dma_buf);
     318             :         else
     319           0 :                 obj = drm_gem_prime_import(dev, dma_buf);
     320           0 :         if (IS_ERR(obj)) {
     321           0 :                 ret = PTR_ERR(obj);
     322             :                 goto out_unlock;
     323             :         }
     324             : 
     325           0 :         if (obj->dma_buf) {
     326           0 :                 WARN_ON(obj->dma_buf != dma_buf);
     327             :         } else {
     328           0 :                 obj->dma_buf = dma_buf;
     329           0 :                 get_dma_buf(dma_buf);
     330             :         }
     331             : 
     332             :         /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
     333           0 :         ret = drm_gem_handle_create_tail(file_priv, obj, handle);
     334           0 :         drm_gem_object_put(obj);
     335           0 :         if (ret)
     336             :                 goto out_put;
     337             : 
     338           0 :         ret = drm_prime_add_buf_handle(&file_priv->prime,
     339             :                         dma_buf, *handle);
     340           0 :         mutex_unlock(&file_priv->prime.lock);
     341           0 :         if (ret)
     342             :                 goto fail;
     343             : 
     344           0 :         dma_buf_put(dma_buf);
     345             : 
     346           0 :         return 0;
     347             : 
     348             : fail:
      349             :         /* If the driver is already attached, we rely on the free-object path
      350             :          * to detach, which seems acceptable.
      351             :          */
     352           0 :         drm_gem_handle_delete(file_priv, *handle);
     353           0 :         dma_buf_put(dma_buf);
     354           0 :         return ret;
     355             : 
     356             : out_unlock:
     357           0 :         mutex_unlock(&dev->object_name_lock);
     358             : out_put:
     359           0 :         mutex_unlock(&file_priv->prime.lock);
     360           0 :         dma_buf_put(dma_buf);
     361           0 :         return ret;
     362             : }
     363             : 
     364           0 : int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
     365             :                                  struct drm_file *file_priv)
     366             : {
     367           0 :         struct drm_prime_handle *args = data;
     368             : 
     369           0 :         if (dev->driver->prime_fd_to_handle) {
     370           0 :                 return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd,
     371           0 :                                                        &args->handle);
     372             :         }
     373             : 
     374           0 :         return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle);
     375             : }
     376             : 
     377           0 : static struct dma_buf *export_and_register_object(struct drm_device *dev,
     378             :                                                   struct drm_gem_object *obj,
     379             :                                                   uint32_t flags)
     380             : {
     381             :         struct dma_buf *dmabuf;
     382             : 
     383             :         /* prevent races with concurrent gem_close. */
     384           0 :         if (obj->handle_count == 0) {
     385             :                 dmabuf = ERR_PTR(-ENOENT);
     386             :                 return dmabuf;
     387             :         }
     388             : 
     389           0 :         if (obj->funcs && obj->funcs->export)
     390           0 :                 dmabuf = obj->funcs->export(obj, flags);
     391             :         else
     392           0 :                 dmabuf = drm_gem_prime_export(obj, flags);
     393           0 :         if (IS_ERR(dmabuf)) {
     394             :                 /* normally the created dma-buf takes ownership of the ref,
     395             :                  * but if that fails then drop the ref
     396             :                  */
     397             :                 return dmabuf;
     398             :         }
     399             : 
     400             :         /*
     401             :          * Note that callers do not need to clean up the export cache
     402             :          * since the check for obj->handle_count guarantees that someone
     403             :          * will clean it up.
     404             :          */
     405           0 :         obj->dma_buf = dmabuf;
     406           0 :         get_dma_buf(obj->dma_buf);
     407             : 
     408             :         return dmabuf;
     409             : }
     410             : 
     411             : /*
     412             :  * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
     413             :  * @dev: dev to export the buffer from
     414             :  * @file_priv: drm file-private structure
     415             :  * @handle: buffer handle to export
     416             :  * @flags: flags like DRM_CLOEXEC
      417             :  * @prime_fd: pointer to storage for the fd id of the created dma-buf
      418             :  *
      419             :  * This is the PRIME export function which must be used by GEM drivers to
      420             :  * ensure correct lifetime management of the underlying GEM object. The actual
      421             :  * exporting from a GEM object to a dma-buf is done through the
      422             :  * &drm_gem_object_funcs.export callback.
     423             :  */
     424           0 : static int drm_gem_prime_handle_to_fd(struct drm_device *dev,
     425             :                                       struct drm_file *file_priv, uint32_t handle,
     426             :                                       uint32_t flags,
     427             :                                       int *prime_fd)
     428             : {
     429             :         struct drm_gem_object *obj;
     430           0 :         int ret = 0;
     431             :         struct dma_buf *dmabuf;
     432             : 
     433           0 :         mutex_lock(&file_priv->prime.lock);
     434           0 :         obj = drm_gem_object_lookup(file_priv, handle);
     435           0 :         if (!obj)  {
     436             :                 ret = -ENOENT;
     437             :                 goto out_unlock;
     438             :         }
     439             : 
     440           0 :         dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
     441           0 :         if (dmabuf) {
     442             :                 get_dma_buf(dmabuf);
     443             :                 goto out_have_handle;
     444             :         }
     445             : 
     446           0 :         mutex_lock(&dev->object_name_lock);
     447             :         /* re-export the original imported object */
     448           0 :         if (obj->import_attach) {
     449           0 :                 dmabuf = obj->import_attach->dmabuf;
     450           0 :                 get_dma_buf(dmabuf);
     451             :                 goto out_have_obj;
     452             :         }
     453             : 
     454           0 :         if (obj->dma_buf) {
     455           0 :                 get_dma_buf(obj->dma_buf);
     456           0 :                 dmabuf = obj->dma_buf;
     457           0 :                 goto out_have_obj;
     458             :         }
     459             : 
     460           0 :         dmabuf = export_and_register_object(dev, obj, flags);
     461           0 :         if (IS_ERR(dmabuf)) {
     462             :                 /* normally the created dma-buf takes ownership of the ref,
     463             :                  * but if that fails then drop the ref
     464             :                  */
     465           0 :                 ret = PTR_ERR(dmabuf);
     466           0 :                 mutex_unlock(&dev->object_name_lock);
     467           0 :                 goto out;
     468             :         }
     469             : 
     470             : out_have_obj:
     471             :         /*
     472             :          * If we've exported this buffer then cheat and add it to the import list
     473             :          * so we get the correct handle back. We must do this under the
     474             :          * protection of dev->object_name_lock to ensure that a racing gem close
      475             :          * ioctl doesn't fail to remove this buffer handle from the cache.
     476             :          */
     477           0 :         ret = drm_prime_add_buf_handle(&file_priv->prime,
     478             :                                        dmabuf, handle);
     479           0 :         mutex_unlock(&dev->object_name_lock);
     480           0 :         if (ret)
     481             :                 goto fail_put_dmabuf;
     482             : 
     483             : out_have_handle:
     484           0 :         ret = dma_buf_fd(dmabuf, flags);
     485             :         /*
     486             :          * We must _not_ remove the buffer from the handle cache since the newly
      487             :          * created dma-buf is already linked in the global obj->dma_buf pointer,
     488             :          * and that is invariant as long as a userspace gem handle exists.
     489             :          * Closing the handle will clean out the cache anyway, so we don't leak.
     490             :          */
     491           0 :         if (ret < 0) {
     492             :                 goto fail_put_dmabuf;
     493             :         } else {
     494           0 :                 *prime_fd = ret;
     495           0 :                 ret = 0;
     496             :         }
     497             : 
     498           0 :         goto out;
     499             : 
     500             : fail_put_dmabuf:
     501           0 :         dma_buf_put(dmabuf);
     502             : out:
     503             :         drm_gem_object_put(obj);
     504             : out_unlock:
     505           0 :         mutex_unlock(&file_priv->prime.lock);
     506             : 
     507           0 :         return ret;
     508             : }
     509             : 
     510           0 : int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
     511             :                                  struct drm_file *file_priv)
     512             : {
     513           0 :         struct drm_prime_handle *args = data;
     514             : 
     515             :         /* check flags are valid */
     516           0 :         if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
     517             :                 return -EINVAL;
     518             : 
     519           0 :         if (dev->driver->prime_handle_to_fd) {
     520           0 :                 return dev->driver->prime_handle_to_fd(dev, file_priv,
     521             :                                                        args->handle, args->flags,
     522           0 :                                                        &args->fd);
     523             :         }
     524           0 :         return drm_gem_prime_handle_to_fd(dev, file_priv, args->handle,
     525           0 :                                           args->flags, &args->fd);
     526             : }
     527             : 
     528             : /**
     529             :  * DOC: PRIME Helpers
     530             :  *
     531             :  * Drivers can implement &drm_gem_object_funcs.export and
     532             :  * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
     533             :  * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
     534             :  * implement dma-buf support in terms of some lower-level helpers, which are
     535             :  * again exported for drivers to use individually:
     536             :  *
     537             :  * Exporting buffers
     538             :  * ~~~~~~~~~~~~~~~~~
     539             :  *
     540             :  * Optional pinning of buffers is handled at dma-buf attach and detach time in
     541             :  * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
      542             :  * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
     543             :  * &drm_gem_object_funcs.get_sg_table. If &drm_gem_object_funcs.get_sg_table is
     544             :  * unimplemented, exports into another device are rejected.
     545             :  *
     546             :  * For kernel-internal access there's drm_gem_dmabuf_vmap() and
     547             :  * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
     548             :  * drm_gem_dmabuf_mmap().
     549             :  *
     550             :  * Note that these export helpers can only be used if the underlying backing
     551             :  * storage is fully coherent and either permanently pinned, or it is safe to pin
     552             :  * it indefinitely.
     553             :  *
     554             :  * FIXME: The underlying helper functions are named rather inconsistently.
     555             :  *
     556             :  * Importing buffers
     557             :  * ~~~~~~~~~~~~~~~~~
     558             :  *
     559             :  * Importing dma-bufs using drm_gem_prime_import() relies on
     560             :  * &drm_driver.gem_prime_import_sg_table.
     561             :  *
      562             :  * Note that, similarly to the export helpers, this permanently pins the
      563             :  * underlying backing storage, which is fine for scanout but not the best
      564             :  * option for sharing lots of buffers for rendering.
     565             :  */
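
To make the division of labour concrete, here is a sketch of how a hypothetical GEM driver "foo" could wire these helpers together. None of the foo_* symbols exist in the kernel; only the genuinely driver-specific hooks are declared, and the hooks left unset fall back to drm_gem_prime_export() and drm_gem_prime_import() as described above.

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>

#include <drm/drm_drv.h>
#include <drm/drm_gem.h>

/* Hypothetical driver-specific callbacks (declarations only in this sketch). */
static void foo_gem_free(struct drm_gem_object *obj);
static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj);
static struct drm_gem_object *
foo_gem_prime_import_sg_table(struct drm_device *dev,
                              struct dma_buf_attachment *attach,
                              struct sg_table *sgt);

/* Set as obj->funcs when the driver creates its GEM objects. */
static const struct drm_gem_object_funcs foo_gem_funcs = {
        .free = foo_gem_free,                 /* calls drm_prime_gem_destroy() for imports */
        .get_sg_table = foo_gem_get_sg_table, /* enables exporting to other devices */
        /* .export left NULL: drm_gem_prime_export() is used by default */
};

DEFINE_DRM_GEM_FOPS(foo_fops);

static const struct drm_driver foo_drm_driver = {
        .driver_features = DRIVER_GEM,
        /* .gem_prime_import left NULL: drm_gem_prime_import() is the default */
        .gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
        .fops = &foo_fops,
};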
     566             : 
     567             : /**
     568             :  * drm_gem_map_attach - dma_buf attach implementation for GEM
     569             :  * @dma_buf: buffer to attach device to
     570             :  * @attach: buffer attachment data
     571             :  *
     572             :  * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
     573             :  * used as the &dma_buf_ops.attach callback. Must be used together with
     574             :  * drm_gem_map_detach().
     575             :  *
     576             :  * Returns 0 on success, negative error code on failure.
     577             :  */
     578           0 : int drm_gem_map_attach(struct dma_buf *dma_buf,
     579             :                        struct dma_buf_attachment *attach)
     580             : {
     581           0 :         struct drm_gem_object *obj = dma_buf->priv;
     582             : 
     583           0 :         if (!obj->funcs->get_sg_table)
     584             :                 return -ENOSYS;
     585             : 
     586           0 :         return drm_gem_pin(obj);
     587             : }
     588             : EXPORT_SYMBOL(drm_gem_map_attach);
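
What the pin means in practice depends on the driver's memory manager. Below is a minimal sketch of a &drm_gem_object_funcs.pin/unpin pair for a hypothetical driver whose backing storage never moves, so only a counter is maintained; the object layout is an assumption of the sketch and locking is elided.

/* Hypothetical driver object; pin_count serialization is left to the caller. */
struct foo_pinnable_object {
        struct drm_gem_object base;
        unsigned int pin_count;
};

static int foo_gem_pin(struct drm_gem_object *obj)
{
        struct foo_pinnable_object *bo =
                container_of(obj, struct foo_pinnable_object, base);

        /* The backing storage must keep a stable DMA address while any
         * importer is attached; here it already does, so just account for it.
         */
        bo->pin_count++;
        return 0;
}

static void foo_gem_unpin(struct drm_gem_object *obj)
{
        struct foo_pinnable_object *bo =
                container_of(obj, struct foo_pinnable_object, base);

        bo->pin_count--;
}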
     589             : 
     590             : /**
     591             :  * drm_gem_map_detach - dma_buf detach implementation for GEM
     592             :  * @dma_buf: buffer to detach from
     593             :  * @attach: attachment to be detached
     594             :  *
      595             :  * Calls &drm_gem_object_funcs.unpin for device specific handling.  Cleans up
     596             :  * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
     597             :  * &dma_buf_ops.detach callback.
     598             :  */
     599           0 : void drm_gem_map_detach(struct dma_buf *dma_buf,
     600             :                         struct dma_buf_attachment *attach)
     601             : {
     602           0 :         struct drm_gem_object *obj = dma_buf->priv;
     603             : 
     604           0 :         drm_gem_unpin(obj);
     605           0 : }
     606             : EXPORT_SYMBOL(drm_gem_map_detach);
     607             : 
     608             : /**
     609             :  * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
     610             :  * @attach: attachment whose scatterlist is to be returned
     611             :  * @dir: direction of DMA transfer
     612             :  *
     613             :  * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
     614             :  * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
     615             :  * with drm_gem_unmap_dma_buf().
     616             :  *
      617             :  * Returns: the sg_table containing the scatterlist of the buffer, or an
      618             :  * ERR_PTR on error. May return ERR_PTR(-EINTR) if interrupted by a signal.
     619             :  */
     620           0 : struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
     621             :                                      enum dma_data_direction dir)
     622             : {
     623           0 :         struct drm_gem_object *obj = attach->dmabuf->priv;
     624             :         struct sg_table *sgt;
     625             :         int ret;
     626             : 
     627           0 :         if (WARN_ON(dir == DMA_NONE))
     628             :                 return ERR_PTR(-EINVAL);
     629             : 
     630           0 :         if (WARN_ON(!obj->funcs->get_sg_table))
     631             :                 return ERR_PTR(-ENOSYS);
     632             : 
     633           0 :         sgt = obj->funcs->get_sg_table(obj);
     634           0 :         if (IS_ERR(sgt))
     635             :                 return sgt;
     636             : 
     637           0 :         ret = dma_map_sgtable(attach->dev, sgt, dir,
     638             :                               DMA_ATTR_SKIP_CPU_SYNC);
     639           0 :         if (ret) {
     640           0 :                 sg_free_table(sgt);
     641           0 :                 kfree(sgt);
     642           0 :                 sgt = ERR_PTR(ret);
     643             :         }
     644             : 
     645             :         return sgt;
     646             : }
     647             : EXPORT_SYMBOL(drm_gem_map_dma_buf);
     648             : 
     649             : /**
     650             :  * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
     651             :  * @attach: attachment to unmap buffer from
     652             :  * @sgt: scatterlist info of the buffer to unmap
     653             :  * @dir: direction of DMA transfer
     654             :  *
     655             :  * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
     656             :  */
     657           0 : void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
     658             :                            struct sg_table *sgt,
     659             :                            enum dma_data_direction dir)
     660             : {
     661           0 :         if (!sgt)
     662             :                 return;
     663             : 
     664           0 :         dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
     665           0 :         sg_free_table(sgt);
     666           0 :         kfree(sgt);
     667             : }
     668             : EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
     669             : 
     670             : /**
     671             :  * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
     672             :  * @dma_buf: buffer to be mapped
     673             :  * @map: the virtual address of the buffer
     674             :  *
     675             :  * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
     676             :  * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
     677             :  * The kernel virtual address is returned in map.
     678             :  *
     679             :  * Returns 0 on success or a negative errno code otherwise.
     680             :  */
     681           0 : int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
     682             : {
     683           0 :         struct drm_gem_object *obj = dma_buf->priv;
     684             : 
     685           0 :         return drm_gem_vmap(obj, map);
     686             : }
     687             : EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
     688             : 
     689             : /**
     690             :  * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
     691             :  * @dma_buf: buffer to be unmapped
     692             :  * @map: the virtual address of the buffer
     693             :  *
     694             :  * Releases a kernel virtual mapping. This can be used as the
     695             :  * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
     696             :  */
     697           0 : void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
     698             : {
     699           0 :         struct drm_gem_object *obj = dma_buf->priv;
     700             : 
     701           0 :         drm_gem_vunmap(obj, map);
     702           0 : }
     703             : EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
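
A sketch of the matching &drm_gem_object_funcs.vmap/vunmap pair for a hypothetical driver that keeps a page array, built on vmap() and the iosys_map helpers. The page array field and its lifetime are assumptions of the sketch.

#include <linux/iosys-map.h>
#include <linux/vmalloc.h>

/* Hypothetical driver object backed by an array of pages. */
struct foo_vmappable_object {
        struct drm_gem_object base;
        struct page **pages;
};

static int foo_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
        struct foo_vmappable_object *bo =
                container_of(obj, struct foo_vmappable_object, base);
        void *vaddr;

        vaddr = vmap(bo->pages, obj->size >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        iosys_map_set_vaddr(map, vaddr);
        return 0;
}

static void foo_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
        vunmap(map->vaddr);
        iosys_map_clear(map);
}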
     704             : 
     705             : /**
     706             :  * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
     707             :  * @obj: GEM object
     708             :  * @vma: Virtual address range
     709             :  *
     710             :  * This function sets up a userspace mapping for PRIME exported buffers using
     711             :  * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
     712             :  * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
     713             :  * called to set up the mapping.
     714             :  */
     715           0 : int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
     716             : {
     717             :         struct drm_file *priv;
     718             :         struct file *fil;
     719             :         int ret;
     720             : 
     721             :         /* Add the fake offset */
     722           0 :         vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
     723             : 
     724           0 :         if (obj->funcs && obj->funcs->mmap) {
     725           0 :                 vma->vm_ops = obj->funcs->vm_ops;
     726             : 
     727           0 :                 drm_gem_object_get(obj);
     728           0 :                 ret = obj->funcs->mmap(obj, vma);
     729           0 :                 if (ret) {
     730             :                         drm_gem_object_put(obj);
     731             :                         return ret;
     732             :                 }
     733           0 :                 vma->vm_private_data = obj;
     734           0 :                 return 0;
     735             :         }
     736             : 
     737           0 :         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
     738           0 :         fil = kzalloc(sizeof(*fil), GFP_KERNEL);
     739           0 :         if (!priv || !fil) {
     740             :                 ret = -ENOMEM;
     741             :                 goto out;
     742             :         }
     743             : 
      744             :         /* Used by drm_gem_mmap() to look up the GEM object */
     745           0 :         priv->minor = obj->dev->primary;
     746           0 :         fil->private_data = priv;
     747             : 
     748           0 :         ret = drm_vma_node_allow(&obj->vma_node, priv);
     749           0 :         if (ret)
     750             :                 goto out;
     751             : 
     752           0 :         ret = obj->dev->driver->fops->mmap(fil, vma);
     753             : 
     754           0 :         drm_vma_node_revoke(&obj->vma_node, priv);
     755             : out:
     756           0 :         kfree(priv);
     757           0 :         kfree(fil);
     758             : 
     759           0 :         return ret;
     760             : }
     761             : EXPORT_SYMBOL(drm_gem_prime_mmap);
     762             : 
     763             : /**
     764             :  * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
     765             :  * @dma_buf: buffer to be mapped
     766             :  * @vma: virtual address range
     767             :  *
     768             :  * Provides memory mapping for the buffer. This can be used as the
     769             :  * &dma_buf_ops.mmap callback. It just forwards to drm_gem_prime_mmap().
     770             :  *
     771             :  * Returns 0 on success or a negative error code on failure.
     772             :  */
     773           0 : int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
     774             : {
     775           0 :         struct drm_gem_object *obj = dma_buf->priv;
     776             : 
     777           0 :         return drm_gem_prime_mmap(obj, vma);
     778             : }
     779             : EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
     780             : 
     781             : static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
     782             :         .cache_sgt_mapping = true,
     783             :         .attach = drm_gem_map_attach,
     784             :         .detach = drm_gem_map_detach,
     785             :         .map_dma_buf = drm_gem_map_dma_buf,
     786             :         .unmap_dma_buf = drm_gem_unmap_dma_buf,
     787             :         .release = drm_gem_dmabuf_release,
     788             :         .mmap = drm_gem_dmabuf_mmap,
     789             :         .vmap = drm_gem_dmabuf_vmap,
     790             :         .vunmap = drm_gem_dmabuf_vunmap,
     791             : };
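
As noted in the overview, the helpers in this table are individually exported so a driver can assemble its own &dma_buf_ops when it only needs to override or add a single hook. A hypothetical sketch that reuses all of them, adds a stub .begin_cpu_access for driver-specific CPU cache maintenance, and pairs the table with an export hook built on drm_gem_dmabuf_export():

static int foo_dmabuf_begin_cpu_access(struct dma_buf *dma_buf,
                                       enum dma_data_direction dir)
{
        /* Driver-specific CPU cache maintenance would go here. */
        return 0;
}

static const struct dma_buf_ops foo_dmabuf_ops = {
        .cache_sgt_mapping = true,
        .attach = drm_gem_map_attach,
        .detach = drm_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
        .unmap_dma_buf = drm_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
        .begin_cpu_access = foo_dmabuf_begin_cpu_access,
};

/* Hypothetical &drm_gem_object_funcs.export hook using the custom ops. */
static struct dma_buf *foo_gem_prime_export(struct drm_gem_object *obj, int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &foo_dmabuf_ops;
        exp_info.size = obj->size;
        exp_info.flags = flags;
        exp_info.priv = obj;
        exp_info.resv = obj->resv;

        return drm_gem_dmabuf_export(obj->dev, &exp_info);
}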
     792             : 
     793             : /**
     794             :  * drm_prime_pages_to_sg - converts a page array into an sg list
     795             :  * @dev: DRM device
     796             :  * @pages: pointer to the array of page pointers to convert
     797             :  * @nr_pages: length of the page vector
     798             :  *
      799             :  * This helper creates an sg table object from a set of pages. The driver is
      800             :  * responsible for mapping the pages into the importer's address space for use
      801             :  * with dma_buf itself.
     802             :  *
     803             :  * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
     804             :  */
     805           0 : struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
     806             :                                        struct page **pages, unsigned int nr_pages)
     807             : {
     808             :         struct sg_table *sg;
     809           0 :         size_t max_segment = 0;
     810             :         int err;
     811             : 
     812           0 :         sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
     813           0 :         if (!sg)
     814             :                 return ERR_PTR(-ENOMEM);
     815             : 
     816           0 :         if (dev)
     817           0 :                 max_segment = dma_max_mapping_size(dev->dev);
     818           0 :         if (max_segment == 0)
     819           0 :                 max_segment = UINT_MAX;
     820           0 :         err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
     821           0 :                                                 nr_pages << PAGE_SHIFT,
     822             :                                                 max_segment, GFP_KERNEL);
     823           0 :         if (err) {
     824           0 :                 kfree(sg);
     825           0 :                 sg = ERR_PTR(err);
     826             :         }
     827             :         return sg;
     828             : }
     829             : EXPORT_SYMBOL(drm_prime_pages_to_sg);
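
A sketch of a &drm_gem_object_funcs.get_sg_table implementation for a hypothetical driver whose backing storage is a plain page array; the object layout is an assumption of the sketch.

/* Hypothetical driver object backed by an array of pages. */
struct foo_paged_object {
        struct drm_gem_object base;
        struct page **pages;
};

static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj)
{
        struct foo_paged_object *bo =
                container_of(obj, struct foo_paged_object, base);

        /* One entry per page; contiguous runs are merged by the helper,
         * limited by the device's maximum DMA segment size.
         */
        return drm_prime_pages_to_sg(obj->dev, bo->pages,
                                     obj->size >> PAGE_SHIFT);
}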
     830             : 
     831             : /**
     832             :  * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
     833             :  * @sgt: sg_table describing the buffer to check
     834             :  *
     835             :  * This helper calculates the contiguous size in the DMA address space
     836             :  * of the buffer described by the provided sg_table.
     837             :  *
     838             :  * This is useful for implementing
      839             :  * &drm_driver.gem_prime_import_sg_table.
     840             :  */
     841           0 : unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
     842             : {
     843           0 :         dma_addr_t expected = sg_dma_address(sgt->sgl);
     844             :         struct scatterlist *sg;
     845           0 :         unsigned long size = 0;
     846             :         int i;
     847             : 
     848           0 :         for_each_sgtable_dma_sg(sgt, sg, i) {
     849           0 :                 unsigned int len = sg_dma_len(sg);
     850             : 
     851           0 :                 if (!len)
     852             :                         break;
     853           0 :                 if (sg_dma_address(sg) != expected)
     854             :                         break;
     855           0 :                 expected += len;
     856           0 :                 size += len;
     857             :         }
     858           0 :         return size;
     859             : }
     860             : EXPORT_SYMBOL(drm_prime_get_contiguous_size);
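
Drivers that can only address a single contiguous DMA range typically use this helper in their &drm_driver.gem_prime_import_sg_table hook to reject imports that did not map contiguously. A minimal sketch of that check, with the rest of the import (wrapping the sg_table in a driver GEM object) elided:

static int foo_import_check_contiguous(struct dma_buf_attachment *attach,
                                       struct sg_table *sgt,
                                       dma_addr_t *dma_addr)
{
        /* The whole buffer must have been mapped into one contiguous range. */
        if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
                return -EINVAL;

        *dma_addr = sg_dma_address(sgt->sgl);
        return 0;
}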
     861             : 
     862             : /**
     863             :  * drm_gem_prime_export - helper library implementation of the export callback
     864             :  * @obj: GEM object to export
     865             :  * @flags: flags like DRM_CLOEXEC and DRM_RDWR
     866             :  *
      867             :  * This is the implementation of the &drm_gem_object_funcs.export function
     868             :  * for GEM drivers using the PRIME helpers. It is used as the default for
     869             :  * drivers that do not set their own.
     870             :  */
     871           0 : struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
     872             :                                      int flags)
     873             : {
     874           0 :         struct drm_device *dev = obj->dev;
     875           0 :         struct dma_buf_export_info exp_info = {
     876             :                 .exp_name = KBUILD_MODNAME, /* white lie for debug */
     877           0 :                 .owner = dev->driver->fops->owner,
     878             :                 .ops = &drm_gem_prime_dmabuf_ops,
     879           0 :                 .size = obj->size,
     880             :                 .flags = flags,
     881             :                 .priv = obj,
     882           0 :                 .resv = obj->resv,
     883             :         };
     884             : 
     885           0 :         return drm_gem_dmabuf_export(dev, &exp_info);
     886             : }
     887             : EXPORT_SYMBOL(drm_gem_prime_export);
     888             : 
     889             : /**
     890             :  * drm_gem_prime_import_dev - core implementation of the import callback
     891             :  * @dev: drm_device to import into
     892             :  * @dma_buf: dma-buf object to import
     893             :  * @attach_dev: struct device to dma_buf attach
     894             :  *
     895             :  * This is the core of drm_gem_prime_import(). It's designed to be called by
      896             :  * drivers that want to use a different device structure than &drm_device.dev for
     897             :  * attaching via dma_buf. This function calls
     898             :  * &drm_driver.gem_prime_import_sg_table internally.
     899             :  *
     900             :  * Drivers must arrange to call drm_prime_gem_destroy() from their
     901             :  * &drm_gem_object_funcs.free hook when using this function.
     902             :  */
     903           0 : struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
     904             :                                             struct dma_buf *dma_buf,
     905             :                                             struct device *attach_dev)
     906             : {
     907             :         struct dma_buf_attachment *attach;
     908             :         struct sg_table *sgt;
     909             :         struct drm_gem_object *obj;
     910             :         int ret;
     911             : 
     912           0 :         if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
     913           0 :                 obj = dma_buf->priv;
     914           0 :                 if (obj->dev == dev) {
     915             :                         /*
     916             :                          * Importing dmabuf exported from our own gem increases
     917             :                          * refcount on gem itself instead of f_count of dmabuf.
     918             :                          */
     919           0 :                         drm_gem_object_get(obj);
     920           0 :                         return obj;
     921             :                 }
     922             :         }
     923             : 
     924           0 :         if (!dev->driver->gem_prime_import_sg_table)
     925             :                 return ERR_PTR(-EINVAL);
     926             : 
     927           0 :         attach = dma_buf_attach(dma_buf, attach_dev);
     928           0 :         if (IS_ERR(attach))
     929             :                 return ERR_CAST(attach);
     930             : 
     931           0 :         get_dma_buf(dma_buf);
     932             : 
     933           0 :         sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
     934           0 :         if (IS_ERR(sgt)) {
     935           0 :                 ret = PTR_ERR(sgt);
     936           0 :                 goto fail_detach;
     937             :         }
     938             : 
     939           0 :         obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
     940           0 :         if (IS_ERR(obj)) {
     941           0 :                 ret = PTR_ERR(obj);
     942             :                 goto fail_unmap;
     943             :         }
     944             : 
     945           0 :         obj->import_attach = attach;
     946           0 :         obj->resv = dma_buf->resv;
     947             : 
     948           0 :         return obj;
     949             : 
     950             : fail_unmap:
     951           0 :         dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
     952             : fail_detach:
     953           0 :         dma_buf_detach(dma_buf, attach);
     954           0 :         dma_buf_put(dma_buf);
     955             : 
     956           0 :         return ERR_PTR(ret);
     957             : }
     958             : EXPORT_SYMBOL(drm_gem_prime_import_dev);
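
A sketch of when drm_gem_prime_import_dev() is useful: a hypothetical driver whose &drm_device is a virtual aggregate while the DMA is performed by a component device, so the dma-buf attachment must be made with that device rather than &drm_device.dev. The device layout is an assumption of the sketch.

/* Hypothetical device structure; dma_dev is the component that does the DMA. */
struct foo_device {
        struct drm_device drm;
        struct device *dma_dev;
};

/* Set as &drm_driver.gem_prime_import. */
static struct drm_gem_object *foo_gem_prime_import(struct drm_device *dev,
                                                   struct dma_buf *dma_buf)
{
        struct foo_device *foo = container_of(dev, struct foo_device, drm);

        return drm_gem_prime_import_dev(dev, dma_buf, foo->dma_dev);
}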
     959             : 
     960             : /**
     961             :  * drm_gem_prime_import - helper library implementation of the import callback
     962             :  * @dev: drm_device to import into
     963             :  * @dma_buf: dma-buf object to import
     964             :  *
      965             :  * This is the implementation of the gem_prime_import function for GEM
     966             :  * drivers using the PRIME helpers. It is the default for drivers that do
     967             :  * not set their own &drm_driver.gem_prime_import.
     968             :  *
     969             :  * Drivers must arrange to call drm_prime_gem_destroy() from their
     970             :  * &drm_gem_object_funcs.free hook when using this function.
     971             :  */
     972           0 : struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
     973             :                                             struct dma_buf *dma_buf)
     974             : {
     975           0 :         return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
     976             : }
     977             : EXPORT_SYMBOL(drm_gem_prime_import);
     978             : 
     979             : /**
     980             :  * drm_prime_sg_to_page_array - convert an sg table into a page array
     981             :  * @sgt: scatter-gather table to convert
     982             :  * @pages: array of page pointers to store the pages in
     983             :  * @max_entries: size of the passed-in array
     984             :  *
     985             :  * Exports an sg table into an array of pages.
     986             :  *
      987             :  * This function is deprecated and its use is strongly discouraged.
     988             :  * The page array is only useful for page faults and those can corrupt fields
     989             :  * in the struct page if they are not handled by the exporting driver.
     990             :  */
     991           0 : int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
     992             :                                             struct page **pages,
     993             :                                             int max_entries)
     994             : {
     995             :         struct sg_page_iter page_iter;
     996           0 :         struct page **p = pages;
     997             : 
     998           0 :         for_each_sgtable_page(sgt, &page_iter, 0) {
     999           0 :                 if (WARN_ON(p - pages >= max_entries))
    1000             :                         return -1;
    1001           0 :                 *p++ = sg_page_iter_page(&page_iter);
    1002             :         }
    1003             :         return 0;
    1004             : }
    1005             : EXPORT_SYMBOL(drm_prime_sg_to_page_array);
    1006             : 
    1007             : /**
    1008             :  * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array
    1009             :  * @sgt: scatter-gather table to convert
    1010             :  * @addrs: array to store the dma bus address of each page
     1011             :  * @max_entries: size of the passed-in array
    1012             :  *
    1013             :  * Exports an sg table into an array of addresses.
    1014             :  *
    1015             :  * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
    1016             :  * implementation.
    1017             :  */
    1018           0 : int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
    1019             :                                    int max_entries)
    1020             : {
    1021             :         struct sg_dma_page_iter dma_iter;
    1022           0 :         dma_addr_t *a = addrs;
    1023             : 
    1024           0 :         for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
    1025           0 :                 if (WARN_ON(a - addrs >= max_entries))
    1026             :                         return -1;
    1027           0 :                 *a++ = sg_page_iter_dma_address(&dma_iter);
    1028             :         }
    1029             :         return 0;
    1030             : }
    1031             : EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
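
A sketch of how the address array is typically consumed from inside a &drm_driver.gem_prime_import_sg_table implementation: the pinned mapping is flattened into one DMA address per PAGE_SIZE chunk, ready to be programmed into device page tables. The caller-provided array is an assumption of the sketch.

static int foo_import_fill_dma_addrs(struct dma_buf_attachment *attach,
                                     struct sg_table *sgt,
                                     dma_addr_t *addrs, int max_entries)
{
        int npages = attach->dmabuf->size >> PAGE_SHIFT;

        /* Make sure the caller-provided array is large enough. */
        if (npages > max_entries)
                return -EINVAL;

        if (drm_prime_sg_to_dma_addr_array(sgt, addrs, max_entries))
                return -EINVAL;

        return 0;
}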
    1032             : 
    1033             : /**
    1034             :  * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
    1035             :  * @obj: GEM object which was created from a dma-buf
    1036             :  * @sg: the sg-table which was pinned at import time
    1037             :  *
     1038             :  * This is the cleanup function which GEM drivers need to call when they use
    1039             :  * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
    1040             :  */
    1041           0 : void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
    1042             : {
    1043             :         struct dma_buf_attachment *attach;
    1044             :         struct dma_buf *dma_buf;
    1045             : 
    1046           0 :         attach = obj->import_attach;
    1047           0 :         if (sg)
    1048           0 :                 dma_buf_unmap_attachment_unlocked(attach, sg, DMA_BIDIRECTIONAL);
    1049           0 :         dma_buf = attach->dmabuf;
    1050           0 :         dma_buf_detach(attach->dmabuf, attach);
    1051             :         /* remove the reference */
    1052           0 :         dma_buf_put(dma_buf);
    1053           0 : }
    1054             : EXPORT_SYMBOL(drm_prime_gem_destroy);
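
Finally, a sketch of the &drm_gem_object_funcs.free hook for a hypothetical driver that imports through drm_gem_prime_import(); caching the sg_table returned at import time in the driver object is an assumption of the sketch.

/* Hypothetical driver object; sgt is saved by the import_sg_table callback. */
struct foo_imported_object {
        struct drm_gem_object base;
        struct sg_table *sgt;
};

static void foo_gem_free(struct drm_gem_object *obj)
{
        struct foo_imported_object *bo =
                container_of(obj, struct foo_imported_object, base);

        /* Unpin and detach the dma-buf for imported objects. */
        if (obj->import_attach)
                drm_prime_gem_destroy(obj, bo->sgt);

        /* Release the common GEM state before freeing the driver structure. */
        drm_gem_object_release(obj);
        kfree(bo);
}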

Generated by: LCOV version 1.14