LCOV - code coverage report
Current view: top level - drivers/gpu/drm - drm_gpuva_mgr.c (source / functions) Hit Total Coverage
Test: coverage.info Lines: 0 327 0.0 %
Date: 2023-08-24 13:40:31 Functions: 0 38 0.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : /*
       3             :  * Copyright (c) 2022 Red Hat.
       4             :  *
       5             :  * Permission is hereby granted, free of charge, to any person obtaining a
       6             :  * copy of this software and associated documentation files (the "Software"),
       7             :  * to deal in the Software without restriction, including without limitation
       8             :  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
       9             :  * and/or sell copies of the Software, and to permit persons to whom the
      10             :  * Software is furnished to do so, subject to the following conditions:
      11             :  *
      12             :  * The above copyright notice and this permission notice shall be included in
      13             :  * all copies or substantial portions of the Software.
      14             :  *
      15             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      16             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      17             :  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
      18             :  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
      19             :  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
      20             :  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
      21             :  * OTHER DEALINGS IN THE SOFTWARE.
      22             :  *
      23             :  * Authors:
      24             :  *     Danilo Krummrich <dakr@redhat.com>
      25             :  *
      26             :  */
      27             : 
      28             : #include <drm/drm_gpuva_mgr.h>
      29             : 
      30             : #include <linux/interval_tree_generic.h>
      31             : #include <linux/mm.h>
      32             : 
      33             : /**
      34             :  * DOC: Overview
      35             :  *
      36             :  * The DRM GPU VA Manager, represented by struct drm_gpuva_manager keeps track
      37             :  * of a GPU's virtual address (VA) space and manages the corresponding virtual
      38             :  * mappings represented by &drm_gpuva objects. It also keeps track of the
      39             :  * mapping's backing &drm_gem_object buffers.
      40             :  *
      41             :  * &drm_gem_object buffers maintain a list of &drm_gpuva objects representing
      42             :  * all existent GPU VA mappings using this &drm_gem_object as backing buffer.
      43             :  *
      44             :  * GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also
      45             :  * keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'.
      46             :  *
      47             :  * The GPU VA manager internally uses a rb-tree to manage the
      48             :  * &drm_gpuva mappings within a GPU's virtual address space.
      49             :  *
      50             :  * The &drm_gpuva_manager contains a special &drm_gpuva representing the
      51             :  * portion of VA space reserved by the kernel. This node is initialized together
      52             :  * with the GPU VA manager instance and removed when the GPU VA manager is
      53             :  * destroyed.
      54             :  *
      55             :  * In a typical application drivers would embed struct drm_gpuva_manager and
      56             :  * struct drm_gpuva within their own driver specific structures, there won't be
      57             :  * any memory allocations of its own nor memory allocations of &drm_gpuva
      58             :  * entries.
      59             :  *
      60             :  * The data structures needed to store &drm_gpuvas within the &drm_gpuva_manager
      61             :  * are contained within struct drm_gpuva already. Hence, for inserting
      62             :  * &drm_gpuva entries from within dma-fence signalling critical sections it is
      63             :  * enough to pre-allocate the &drm_gpuva structures.
      64             :  */
      65             : 
      66             : /**
      67             :  * DOC: Split and Merge
      68             :  *
      69             :  * Besides its capability to manage and represent a GPU VA space, the
      70             :  * &drm_gpuva_manager also provides functions to let the &drm_gpuva_manager
      71             :  * calculate a sequence of operations to satisfy a given map or unmap request.
      72             :  *
      73             :  * Therefore the DRM GPU VA manager provides an algorithm implementing splitting
      74             :  * and merging of existent GPU VA mappings with the ones that are requested to
      75             :  * be mapped or unmapped. This feature is required by the Vulkan API to
      76             :  * implement Vulkan 'Sparse Memory Bindings' - drivers UAPIs often refer to this
      77             :  * as VM BIND.
      78             :  *
      79             :  * Drivers can call drm_gpuva_sm_map() to receive a sequence of callbacks
      80             :  * containing map, unmap and remap operations for a given newly requested
      81             :  * mapping. The sequence of callbacks represents the set of operations to
      82             :  * execute in order to integrate the new mapping cleanly into the current state
      83             :  * of the GPU VA space.
      84             :  *
      85             :  * Depending on how the new GPU VA mapping intersects with the existent mappings
      86             :  * of the GPU VA space the &drm_gpuva_fn_ops callbacks contain an arbitrary
      87             :  * amount of unmap operations, a maximum of two remap operations and a single
      88             :  * map operation. The caller might receive no callback at all if no operation is
      89             :  * required, e.g. if the requested mapping already exists in the exact same way.
      90             :  *
      91             :  * The single map operation represents the original map operation requested by
      92             :  * the caller.
      93             :  *
      94             :  * &drm_gpuva_op_unmap contains a 'keep' field, which indicates whether the
      95             :  * &drm_gpuva to unmap is physically contiguous with the original mapping
      96             :  * request. Optionally, if 'keep' is set, drivers may keep the actual page table
      97             :  * entries for this &drm_gpuva, adding the missing page table entries only and
      98             :  * update the &drm_gpuva_manager's view of things accordingly.
      99             :  *
     100             :  * Drivers may do the same optimization, namely delta page table updates, also
     101             :  * for remap operations. This is possible since &drm_gpuva_op_remap consists of
     102             :  * one unmap operation and one or two map operations, such that drivers can
     103             :  * derive the page table update delta accordingly.
     104             :  *
     105             :  * Note that there can't be more than two existent mappings to split up, one at
     106             :  * the beginning and one at the end of the new mapping, hence there is a
     107             :  * maximum of two remap operations.
     108             :  *
     109             :  * Analogous to drm_gpuva_sm_map() drm_gpuva_sm_unmap() uses &drm_gpuva_fn_ops
     110             :  * to call back into the driver in order to unmap a range of GPU VA space. The
     111             :  * logic behind this function is way simpler though: For all existent mappings
     112             :  * enclosed by the given range unmap operations are created. For mappings which
     113             :  * are only partically located within the given range, remap operations are
     114             :  * created such that those mappings are split up and re-mapped partically.
     115             :  *
     116             :  * As an alternative to drm_gpuva_sm_map() and drm_gpuva_sm_unmap(),
     117             :  * drm_gpuva_sm_map_ops_create() and drm_gpuva_sm_unmap_ops_create() can be used
     118             :  * to directly obtain an instance of struct drm_gpuva_ops containing a list of
     119             :  * &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list
     120             :  * contains the &drm_gpuva_ops analogous to the callbacks one would receive when
     121             :  * calling drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). While this way requires
     122             :  * more memory (to allocate the &drm_gpuva_ops), it provides drivers a way to
     123             :  * iterate the &drm_gpuva_op multiple times, e.g. once in a context where memory
     124             :  * allocations are possible (e.g. to allocate GPU page tables) and once in the
     125             :  * dma-fence signalling critical path.
     126             :  *
     127             :  * To update the &drm_gpuva_manager's view of the GPU VA space
     128             :  * drm_gpuva_insert() and drm_gpuva_remove() may be used. These functions can
     129             :  * safely be used from &drm_gpuva_fn_ops callbacks originating from
     130             :  * drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). However, it might be more
     131             :  * convenient to use the provided helper functions drm_gpuva_map(),
     132             :  * drm_gpuva_remap() and drm_gpuva_unmap() instead.
     133             :  *
     134             :  * The following diagram depicts the basic relationships of existent GPU VA
     135             :  * mappings, a newly requested mapping and the resulting mappings as implemented
     136             :  * by drm_gpuva_sm_map() - it doesn't cover any arbitrary combinations of these.
     137             :  *
     138             :  * 1) Requested mapping is identical. Replace it, but indicate the backing PTEs
     139             :  *    could be kept.
     140             :  *
     141             :  *    ::
     142             :  *
     143             :  *           0     a     1
     144             :  *      old: |-----------| (bo_offset=n)
     145             :  *
     146             :  *           0     a     1
     147             :  *      req: |-----------| (bo_offset=n)
     148             :  *
     149             :  *           0     a     1
     150             :  *      new: |-----------| (bo_offset=n)
     151             :  *
     152             :  *
     153             :  * 2) Requested mapping is identical, except for the BO offset, hence replace
     154             :  *    the mapping.
     155             :  *
     156             :  *    ::
     157             :  *
     158             :  *           0     a     1
     159             :  *      old: |-----------| (bo_offset=n)
     160             :  *
     161             :  *           0     a     1
     162             :  *      req: |-----------| (bo_offset=m)
     163             :  *
     164             :  *           0     a     1
     165             :  *      new: |-----------| (bo_offset=m)
     166             :  *
     167             :  *
     168             :  * 3) Requested mapping is identical, except for the backing BO, hence replace
     169             :  *    the mapping.
     170             :  *
     171             :  *    ::
     172             :  *
     173             :  *           0     a     1
     174             :  *      old: |-----------| (bo_offset=n)
     175             :  *
     176             :  *           0     b     1
     177             :  *      req: |-----------| (bo_offset=n)
     178             :  *
     179             :  *           0     b     1
     180             :  *      new: |-----------| (bo_offset=n)
     181             :  *
     182             :  *
     183             :  * 4) Existent mapping is a left aligned subset of the requested one, hence
     184             :  *    replace the existent one.
     185             :  *
     186             :  *    ::
     187             :  *
     188             :  *           0  a  1
     189             :  *      old: |-----|       (bo_offset=n)
     190             :  *
     191             :  *           0     a     2
     192             :  *      req: |-----------| (bo_offset=n)
     193             :  *
     194             :  *           0     a     2
     195             :  *      new: |-----------| (bo_offset=n)
     196             :  *
     197             :  *    .. note::
     198             :  *       We expect to see the same result for a request with a different BO
     199             :  *       and/or non-contiguous BO offset.
     200             :  *
     201             :  *
     202             :  * 5) Requested mapping's range is a left aligned subset of the existent one,
     203             :  *    but backed by a different BO. Hence, map the requested mapping and split
     204             :  *    the existent one adjusting its BO offset.
     205             :  *
     206             :  *    ::
     207             :  *
     208             :  *           0     a     2
     209             :  *      old: |-----------| (bo_offset=n)
     210             :  *
     211             :  *           0  b  1
     212             :  *      req: |-----|       (bo_offset=n)
     213             :  *
     214             :  *           0  b  1  a' 2
     215             :  *      new: |-----|-----| (b.bo_offset=n, a.bo_offset=n+1)
     216             :  *
     217             :  *    .. note::
     218             :  *       We expect to see the same result for a request with a different BO
     219             :  *       and/or non-contiguous BO offset.
     220             :  *
     221             :  *
     222             :  * 6) Existent mapping is a superset of the requested mapping. Split it up, but
     223             :  *    indicate that the backing PTEs could be kept.
     224             :  *
     225             :  *    ::
     226             :  *
     227             :  *           0     a     2
     228             :  *      old: |-----------| (bo_offset=n)
     229             :  *
     230             :  *           0  a  1
     231             :  *      req: |-----|       (bo_offset=n)
     232             :  *
     233             :  *           0  a  1  a' 2
     234             :  *      new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1)
     235             :  *
     236             :  *
     237             :  * 7) Requested mapping's range is a right aligned subset of the existent one,
     238             :  *    but backed by a different BO. Hence, map the requested mapping and split
     239             :  *    the existent one, without adjusting the BO offset.
     240             :  *
     241             :  *    ::
     242             :  *
     243             :  *           0     a     2
     244             :  *      old: |-----------| (bo_offset=n)
     245             :  *
     246             :  *                 1  b  2
     247             :  *      req:       |-----| (bo_offset=m)
     248             :  *
     249             :  *           0  a  1  b  2
     250             :  *      new: |-----|-----| (a.bo_offset=n,b.bo_offset=m)
     251             :  *
     252             :  *
     253             :  * 8) Existent mapping is a superset of the requested mapping. Split it up, but
     254             :  *    indicate that the backing PTEs could be kept.
     255             :  *
     256             :  *    ::
     257             :  *
     258             :  *            0     a     2
     259             :  *      old: |-----------| (bo_offset=n)
     260             :  *
     261             :  *                 1  a  2
     262             :  *      req:       |-----| (bo_offset=n+1)
     263             :  *
     264             :  *           0  a' 1  a  2
     265             :  *      new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1)
     266             :  *
     267             :  *
     268             :  * 9) Existent mapping is overlapped at the end by the requested mapping backed
     269             :  *    by a different BO. Hence, map the requested mapping and split up the
     270             :  *    existent one, without adjusting the BO offset.
     271             :  *
     272             :  *    ::
     273             :  *
     274             :  *           0     a     2
     275             :  *      old: |-----------|       (bo_offset=n)
     276             :  *
     277             :  *                 1     b     3
     278             :  *      req:       |-----------| (bo_offset=m)
     279             :  *
     280             :  *           0  a  1     b     3
     281             :  *      new: |-----|-----------| (a.bo_offset=n,b.bo_offset=m)
     282             :  *
     283             :  *
     284             :  * 10) Existent mapping is overlapped by the requested mapping, both having the
     285             :  *     same backing BO with a contiguous offset. Indicate the backing PTEs of
     286             :  *     the old mapping could be kept.
     287             :  *
     288             :  *     ::
     289             :  *
     290             :  *            0     a     2
     291             :  *       old: |-----------|       (bo_offset=n)
     292             :  *
     293             :  *                  1     a     3
     294             :  *       req:       |-----------| (bo_offset=n+1)
     295             :  *
     296             :  *            0  a' 1     a     3
     297             :  *       new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1)
     298             :  *
     299             :  *
     300             :  * 11) Requested mapping's range is a centered subset of the existent one
     301             :  *     having a different backing BO. Hence, map the requested mapping and split
     302             :  *     up the existent one in two mappings, adjusting the BO offset of the right
     303             :  *     one accordingly.
     304             :  *
     305             :  *     ::
     306             :  *
     307             :  *            0        a        3
     308             :  *       old: |-----------------| (bo_offset=n)
     309             :  *
     310             :  *                  1  b  2
     311             :  *       req:       |-----|       (bo_offset=m)
     312             :  *
     313             :  *            0  a  1  b  2  a' 3
     314             :  *       new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2)
     315             :  *
     316             :  *
     317             :  * 12) Requested mapping is a contiguous subset of the existent one. Split it
     318             :  *     up, but indicate that the backing PTEs could be kept.
     319             :  *
     320             :  *     ::
     321             :  *
     322             :  *            0        a        3
     323             :  *       old: |-----------------| (bo_offset=n)
     324             :  *
     325             :  *                  1  a  2
     326             :  *       req:       |-----|       (bo_offset=n+1)
     327             :  *
     328             :  *            0  a' 1  a  2 a'' 3
     329             :  *       old: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2)
     330             :  *
     331             :  *
     332             :  * 13) Existent mapping is a right aligned subset of the requested one, hence
     333             :  *     replace the existent one.
     334             :  *
     335             :  *     ::
     336             :  *
     337             :  *                  1  a  2
     338             :  *       old:       |-----| (bo_offset=n+1)
     339             :  *
     340             :  *            0     a     2
     341             :  *       req: |-----------| (bo_offset=n)
     342             :  *
     343             :  *            0     a     2
     344             :  *       new: |-----------| (bo_offset=n)
     345             :  *
     346             :  *     .. note::
     347             :  *        We expect to see the same result for a request with a different bo
     348             :  *        and/or non-contiguous bo_offset.
     349             :  *
     350             :  *
     351             :  * 14) Existent mapping is a centered subset of the requested one, hence
     352             :  *     replace the existent one.
     353             :  *
     354             :  *     ::
     355             :  *
     356             :  *                  1  a  2
     357             :  *       old:       |-----| (bo_offset=n+1)
     358             :  *
     359             :  *            0        a       3
     360             :  *       req: |----------------| (bo_offset=n)
     361             :  *
     362             :  *            0        a       3
     363             :  *       new: |----------------| (bo_offset=n)
     364             :  *
     365             :  *     .. note::
     366             :  *        We expect to see the same result for a request with a different bo
     367             :  *        and/or non-contiguous bo_offset.
     368             :  *
     369             :  *
     370             :  * 15) Existent mappings is overlapped at the beginning by the requested mapping
     371             :  *     backed by a different BO. Hence, map the requested mapping and split up
     372             :  *     the existent one, adjusting its BO offset accordingly.
     373             :  *
     374             :  *     ::
     375             :  *
     376             :  *                  1     a     3
     377             :  *       old:       |-----------| (bo_offset=n)
     378             :  *
     379             :  *            0     b     2
     380             :  *       req: |-----------|       (bo_offset=m)
     381             :  *
     382             :  *            0     b     2  a' 3
     383             :  *       new: |-----------|-----| (b.bo_offset=m,a.bo_offset=n+2)
     384             :  */
     385             : 
     386             : /**
     387             :  * DOC: Locking
     388             :  *
     389             :  * Generally, the GPU VA manager does not take care of locking itself, it is
     390             :  * the drivers responsibility to take care about locking. Drivers might want to
     391             :  * protect the following operations: inserting, removing and iterating
     392             :  * &drm_gpuva objects as well as generating all kinds of operations, such as
     393             :  * split / merge or prefetch.
     394             :  *
     395             :  * The GPU VA manager also does not take care of the locking of the backing
     396             :  * &drm_gem_object buffers GPU VA lists by itself; drivers are responsible to
     397             :  * enforce mutual exclusion using either the GEMs dma_resv lock or alternatively
     398             :  * a driver specific external lock. For the latter see also
     399             :  * drm_gem_gpuva_set_lock().
     400             :  *
     401             :  * However, the GPU VA manager contains lockdep checks to ensure callers of its
     402             :  * API hold the corresponding lock whenever the &drm_gem_objects GPU VA list is
     403             :  * accessed by functions such as drm_gpuva_link() or drm_gpuva_unlink().
     404             :  */
     405             : 
     406             : /**
     407             :  * DOC: Examples
     408             :  *
     409             :  * This section gives two examples on how to let the DRM GPUVA Manager generate
     410             :  * &drm_gpuva_op in order to satisfy a given map or unmap request and how to
     411             :  * make use of them.
     412             :  *
     413             :  * The below code is strictly limited to illustrate the generic usage pattern.
     414             :  * To maintain simplicitly, it doesn't make use of any abstractions for common
     415             :  * code, different (asyncronous) stages with fence signalling critical paths,
     416             :  * any other helpers or error handling in terms of freeing memory and dropping
     417             :  * previously taken locks.
     418             :  *
     419             :  * 1) Obtain a list of &drm_gpuva_op to create a new mapping::
     420             :  *
     421             :  *      // Allocates a new &drm_gpuva.
     422             :  *      struct drm_gpuva * driver_gpuva_alloc(void);
     423             :  *
     424             :  *      // Typically drivers would embedd the &drm_gpuva_manager and &drm_gpuva
     425             :  *      // structure in individual driver structures and lock the dma-resv with
     426             :  *      // drm_exec or similar helpers.
     427             :  *      int driver_mapping_create(struct drm_gpuva_manager *mgr,
     428             :  *                                u64 addr, u64 range,
     429             :  *                                struct drm_gem_object *obj, u64 offset)
     430             :  *      {
     431             :  *              struct drm_gpuva_ops *ops;
     432             :  *              struct drm_gpuva_op *op
     433             :  *
     434             :  *              driver_lock_va_space();
     435             :  *              ops = drm_gpuva_sm_map_ops_create(mgr, addr, range,
     436             :  *                                                obj, offset);
     437             :  *              if (IS_ERR(ops))
     438             :  *                      return PTR_ERR(ops);
     439             :  *
     440             :  *              drm_gpuva_for_each_op(op, ops) {
     441             :  *                      struct drm_gpuva *va;
     442             :  *
     443             :  *                      switch (op->op) {
     444             :  *                      case DRM_GPUVA_OP_MAP:
     445             :  *                              va = driver_gpuva_alloc();
     446             :  *                              if (!va)
     447             :  *                                      ; // unwind previous VA space updates,
     448             :  *                                        // free memory and unlock
     449             :  *
     450             :  *                              driver_vm_map();
     451             :  *                              drm_gpuva_map(mgr, va, &op->map);
     452             :  *                              drm_gpuva_link(va);
     453             :  *
     454             :  *                              break;
     455             :  *                      case DRM_GPUVA_OP_REMAP: {
     456             :  *                              struct drm_gpuva *prev = NULL, *next = NULL;
     457             :  *
     458             :  *                              va = op->remap.unmap->va;
     459             :  *
     460             :  *                              if (op->remap.prev) {
     461             :  *                                      prev = driver_gpuva_alloc();
     462             :  *                                      if (!prev)
     463             :  *                                              ; // unwind previous VA space
     464             :  *                                                // updates, free memory and
     465             :  *                                                // unlock
     466             :  *                              }
     467             :  *
     468             :  *                              if (op->remap.next) {
     469             :  *                                      next = driver_gpuva_alloc();
     470             :  *                                      if (!next)
     471             :  *                                              ; // unwind previous VA space
     472             :  *                                                // updates, free memory and
     473             :  *                                                // unlock
     474             :  *                              }
     475             :  *
     476             :  *                              driver_vm_remap();
     477             :  *                              drm_gpuva_remap(prev, next, &op->remap);
     478             :  *
     479             :  *                              drm_gpuva_unlink(va);
     480             :  *                              if (prev)
     481             :  *                                      drm_gpuva_link(prev);
     482             :  *                              if (next)
     483             :  *                                      drm_gpuva_link(next);
     484             :  *
     485             :  *                              break;
     486             :  *                      }
     487             :  *                      case DRM_GPUVA_OP_UNMAP:
     488             :  *                              va = op->unmap->va;
     489             :  *
     490             :  *                              driver_vm_unmap();
     491             :  *                              drm_gpuva_unlink(va);
     492             :  *                              drm_gpuva_unmap(&op->unmap);
     493             :  *
     494             :  *                              break;
     495             :  *                      default:
     496             :  *                              break;
     497             :  *                      }
     498             :  *              }
     499             :  *              driver_unlock_va_space();
     500             :  *
     501             :  *              return 0;
     502             :  *      }
     503             :  *
     504             :  * 2) Receive a callback for each &drm_gpuva_op to create a new mapping::
     505             :  *
     506             :  *      struct driver_context {
     507             :  *              struct drm_gpuva_manager *mgr;
     508             :  *              struct drm_gpuva *new_va;
     509             :  *              struct drm_gpuva *prev_va;
     510             :  *              struct drm_gpuva *next_va;
     511             :  *      };
     512             :  *
     513             :  *      // ops to pass to drm_gpuva_manager_init()
     514             :  *      static const struct drm_gpuva_fn_ops driver_gpuva_ops = {
     515             :  *              .sm_step_map = driver_gpuva_map,
     516             :  *              .sm_step_remap = driver_gpuva_remap,
     517             :  *              .sm_step_unmap = driver_gpuva_unmap,
     518             :  *      };
     519             :  *
     520             :  *      // Typically drivers would embedd the &drm_gpuva_manager and &drm_gpuva
     521             :  *      // structure in individual driver structures and lock the dma-resv with
     522             :  *      // drm_exec or similar helpers.
     523             :  *      int driver_mapping_create(struct drm_gpuva_manager *mgr,
     524             :  *                                u64 addr, u64 range,
     525             :  *                                struct drm_gem_object *obj, u64 offset)
     526             :  *      {
     527             :  *              struct driver_context ctx;
     528             :  *              struct drm_gpuva_ops *ops;
     529             :  *              struct drm_gpuva_op *op;
     530             :  *              int ret = 0;
     531             :  *
     532             :  *              ctx.mgr = mgr;
     533             :  *
     534             :  *              ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL);
     535             :  *              ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
     536             :  *              ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL);
     537             :  *              if (!ctx.new_va || !ctx.prev_va || !ctx.next_va) {
     538             :  *                      ret = -ENOMEM;
     539             :  *                      goto out;
     540             :  *              }
     541             :  *
     542             :  *              driver_lock_va_space();
     543             :  *              ret = drm_gpuva_sm_map(mgr, &ctx, addr, range, obj, offset);
     544             :  *              driver_unlock_va_space();
     545             :  *
     546             :  *      out:
     547             :  *              kfree(ctx.new_va);
     548             :  *              kfree(ctx.prev_va);
     549             :  *              kfree(ctx.next_va);
     550             :  *              return ret;
     551             :  *      }
     552             :  *
     553             :  *      int driver_gpuva_map(struct drm_gpuva_op *op, void *__ctx)
     554             :  *      {
     555             :  *              struct driver_context *ctx = __ctx;
     556             :  *
     557             :  *              drm_gpuva_map(ctx->mgr, ctx->new_va, &op->map);
     558             :  *
     559             :  *              drm_gpuva_link(ctx->new_va);
     560             :  *
     561             :  *              // prevent the new GPUVA from being freed in
     562             :  *              // driver_mapping_create()
     563             :  *              ctx->new_va = NULL;
     564             :  *
     565             :  *              return 0;
     566             :  *      }
     567             :  *
     568             :  *      int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx)
     569             :  *      {
     570             :  *              struct driver_context *ctx = __ctx;
     571             :  *
     572             :  *              drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap);
     573             :  *
     574             :  *              drm_gpuva_unlink(op->remap.unmap->va);
     575             :  *              kfree(op->remap.unmap->va);
     576             :  *
     577             :  *              if (op->remap.prev) {
     578             :  *                      drm_gpuva_link(ctx->prev_va);
     579             :  *                      ctx->prev_va = NULL;
     580             :  *              }
     581             :  *
     582             :  *              if (op->remap.next) {
     583             :  *                      drm_gpuva_link(ctx->next_va);
     584             :  *                      ctx->next_va = NULL;
     585             :  *              }
     586             :  *
     587             :  *              return 0;
     588             :  *      }
     589             :  *
     590             :  *      int driver_gpuva_unmap(struct drm_gpuva_op *op, void *__ctx)
     591             :  *      {
     592             :  *              drm_gpuva_unlink(op->unmap.va);
     593             :  *              drm_gpuva_unmap(&op->unmap);
     594             :  *              kfree(op->unmap.va);
     595             :  *
     596             :  *              return 0;
     597             :  *      }
     598             :  */
     599             : 
     600             : #define to_drm_gpuva(__node)    container_of((__node), struct drm_gpuva, rb.node)
     601             : 
     602             : #define GPUVA_START(node) ((node)->va.addr)
     603             : #define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
     604             : 
     605             : /* We do not actually use drm_gpuva_it_next(), tell the compiler to not complain
     606             :  * about this.
     607             :  */
     608           0 : INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last,
     609             :                      GPUVA_START, GPUVA_LAST, static __maybe_unused,
     610             :                      drm_gpuva_it)
     611             : 
     612             : static int __drm_gpuva_insert(struct drm_gpuva_manager *mgr,
     613             :                               struct drm_gpuva *va);
     614             : static void __drm_gpuva_remove(struct drm_gpuva *va);
     615             : 
     616             : static bool
     617           0 : drm_gpuva_check_overflow(u64 addr, u64 range)
     618             : {
     619             :         u64 end;
     620             : 
     621           0 :         return WARN(check_add_overflow(addr, range, &end),
     622             :                     "GPUVA address limited to %zu bytes.\n", sizeof(end));
     623             : }
     624             : 
     625             : static bool
     626             : drm_gpuva_in_mm_range(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
     627             : {
     628           0 :         u64 end = addr + range;
     629           0 :         u64 mm_start = mgr->mm_start;
     630           0 :         u64 mm_end = mm_start + mgr->mm_range;
     631             : 
     632           0 :         return addr >= mm_start && end <= mm_end;
     633             : }
     634             : 
     635             : static bool
     636             : drm_gpuva_in_kernel_node(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
     637             : {
     638           0 :         u64 end = addr + range;
     639           0 :         u64 kstart = mgr->kernel_alloc_node.va.addr;
     640           0 :         u64 krange = mgr->kernel_alloc_node.va.range;
     641           0 :         u64 kend = kstart + krange;
     642             : 
     643           0 :         return krange && addr < kend && kstart < end;
     644             : }
     645             : 
     646             : static bool
     647           0 : drm_gpuva_range_valid(struct drm_gpuva_manager *mgr,
     648             :                       u64 addr, u64 range)
     649             : {
     650           0 :         return !drm_gpuva_check_overflow(addr, range) &&
     651           0 :                drm_gpuva_in_mm_range(mgr, addr, range) &&
     652           0 :                !drm_gpuva_in_kernel_node(mgr, addr, range);
     653             : }
     654             : 
     655             : /**
     656             :  * drm_gpuva_manager_init() - initialize a &drm_gpuva_manager
     657             :  * @mgr: pointer to the &drm_gpuva_manager to initialize
     658             :  * @name: the name of the GPU VA space
     659             :  * @start_offset: the start offset of the GPU VA space
     660             :  * @range: the size of the GPU VA space
     661             :  * @reserve_offset: the start of the kernel reserved GPU VA area
     662             :  * @reserve_range: the size of the kernel reserved GPU VA area
     663             :  * @ops: &drm_gpuva_fn_ops called on &drm_gpuva_sm_map / &drm_gpuva_sm_unmap
     664             :  *
     665             :  * The &drm_gpuva_manager must be initialized with this function before use.
     666             :  *
     667             :  * Note that @mgr must be cleared to 0 before calling this function. The given
     668             :  * &name is expected to be managed by the surrounding driver structures.
     669             :  */
     670             : void
     671           0 : drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
     672             :                        const char *name,
     673             :                        u64 start_offset, u64 range,
     674             :                        u64 reserve_offset, u64 reserve_range,
     675             :                        const struct drm_gpuva_fn_ops *ops)
     676             : {
     677           0 :         mgr->rb.tree = RB_ROOT_CACHED;
     678           0 :         INIT_LIST_HEAD(&mgr->rb.list);
     679             : 
     680           0 :         drm_gpuva_check_overflow(start_offset, range);
     681           0 :         mgr->mm_start = start_offset;
     682           0 :         mgr->mm_range = range;
     683             : 
     684           0 :         mgr->name = name ? name : "unknown";
     685           0 :         mgr->ops = ops;
     686             : 
     687           0 :         memset(&mgr->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
     688             : 
     689           0 :         if (reserve_range) {
     690           0 :                 mgr->kernel_alloc_node.va.addr = reserve_offset;
     691           0 :                 mgr->kernel_alloc_node.va.range = reserve_range;
     692             : 
     693           0 :                 if (likely(!drm_gpuva_check_overflow(reserve_offset,
     694             :                                                      reserve_range)))
     695           0 :                         __drm_gpuva_insert(mgr, &mgr->kernel_alloc_node);
     696             :         }
     697           0 : }
     698             : EXPORT_SYMBOL_GPL(drm_gpuva_manager_init);
     699             : 
     700             : /**
     701             :  * drm_gpuva_manager_destroy() - cleanup a &drm_gpuva_manager
     702             :  * @mgr: pointer to the &drm_gpuva_manager to clean up
     703             :  *
     704             :  * Note that it is a bug to call this function on a manager that still
     705             :  * holds GPU VA mappings.
     706             :  */
     707             : void
     708           0 : drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr)
     709             : {
     710           0 :         mgr->name = NULL;
     711             : 
     712           0 :         if (mgr->kernel_alloc_node.va.range)
     713           0 :                 __drm_gpuva_remove(&mgr->kernel_alloc_node);
     714             : 
     715           0 :         WARN(!RB_EMPTY_ROOT(&mgr->rb.tree.rb_root),
     716             :              "GPUVA tree is not empty, potentially leaking memory.");
     717           0 : }
     718             : EXPORT_SYMBOL_GPL(drm_gpuva_manager_destroy);
     719             : 
     720             : static int
     721           0 : __drm_gpuva_insert(struct drm_gpuva_manager *mgr,
     722             :                    struct drm_gpuva *va)
     723             : {
     724             :         struct rb_node *node;
     725             :         struct list_head *head;
     726             : 
     727           0 :         if (drm_gpuva_it_iter_first(&mgr->rb.tree,
     728             :                                     GPUVA_START(va),
     729           0 :                                     GPUVA_LAST(va)))
     730             :                 return -EEXIST;
     731             : 
     732           0 :         va->mgr = mgr;
     733             : 
     734           0 :         drm_gpuva_it_insert(va, &mgr->rb.tree);
     735             : 
     736           0 :         node = rb_prev(&va->rb.node);
     737           0 :         if (node)
     738           0 :                 head = &(to_drm_gpuva(node))->rb.entry;
     739             :         else
     740           0 :                 head = &mgr->rb.list;
     741             : 
     742           0 :         list_add(&va->rb.entry, head);
     743             : 
     744           0 :         return 0;
     745             : }
     746             : 
     747             : /**
     748             :  * drm_gpuva_insert() - insert a &drm_gpuva
     749             :  * @mgr: the &drm_gpuva_manager to insert the &drm_gpuva in
     750             :  * @va: the &drm_gpuva to insert
     751             :  *
     752             :  * Insert a &drm_gpuva with a given address and range into a
     753             :  * &drm_gpuva_manager.
     754             :  *
     755             :  * It is safe to use this function using the safe versions of iterating the GPU
     756             :  * VA space, such as drm_gpuva_for_each_va_safe() and
     757             :  * drm_gpuva_for_each_va_range_safe().
     758             :  *
     759             :  * Returns: 0 on success, negative error code on failure.
     760             :  */
     761             : int
     762           0 : drm_gpuva_insert(struct drm_gpuva_manager *mgr,
     763             :                  struct drm_gpuva *va)
     764             : {
     765           0 :         u64 addr = va->va.addr;
     766           0 :         u64 range = va->va.range;
     767             : 
     768           0 :         if (unlikely(!drm_gpuva_range_valid(mgr, addr, range)))
     769             :                 return -EINVAL;
     770             : 
     771           0 :         return __drm_gpuva_insert(mgr, va);
     772             : }
     773             : EXPORT_SYMBOL_GPL(drm_gpuva_insert);
     774             : 
     775             : static void
     776             : __drm_gpuva_remove(struct drm_gpuva *va)
     777             : {
     778           0 :         drm_gpuva_it_remove(va, &va->mgr->rb.tree);
     779           0 :         list_del_init(&va->rb.entry);
     780             : }
     781             : 
     782             : /**
     783             :  * drm_gpuva_remove() - remove a &drm_gpuva
     784             :  * @va: the &drm_gpuva to remove
     785             :  *
     786             :  * This removes the given &va from the underlaying tree.
     787             :  *
     788             :  * It is safe to use this function using the safe versions of iterating the GPU
     789             :  * VA space, such as drm_gpuva_for_each_va_safe() and
     790             :  * drm_gpuva_for_each_va_range_safe().
     791             :  */
     792             : void
     793           0 : drm_gpuva_remove(struct drm_gpuva *va)
     794             : {
     795           0 :         struct drm_gpuva_manager *mgr = va->mgr;
     796             : 
     797           0 :         if (unlikely(va == &mgr->kernel_alloc_node)) {
     798           0 :                 WARN(1, "Can't destroy kernel reserved node.\n");
     799           0 :                 return;
     800             :         }
     801             : 
     802             :         __drm_gpuva_remove(va);
     803             : }
     804             : EXPORT_SYMBOL_GPL(drm_gpuva_remove);
     805             : 
     806             : /**
     807             :  * drm_gpuva_link() - link a &drm_gpuva
     808             :  * @va: the &drm_gpuva to link
     809             :  *
     810             :  * This adds the given &va to the GPU VA list of the &drm_gem_object it is
     811             :  * associated with.
     812             :  *
     813             :  * This function expects the caller to protect the GEM's GPUVA list against
     814             :  * concurrent access using the GEMs dma_resv lock.
     815             :  */
     816             : void
     817           0 : drm_gpuva_link(struct drm_gpuva *va)
     818             : {
     819           0 :         struct drm_gem_object *obj = va->gem.obj;
     820             : 
     821           0 :         if (unlikely(!obj))
     822             :                 return;
     823             : 
     824             :         drm_gem_gpuva_assert_lock_held(obj);
     825             : 
     826           0 :         list_add_tail(&va->gem.entry, &obj->gpuva.list);
     827             : }
     828             : EXPORT_SYMBOL_GPL(drm_gpuva_link);
     829             : 
     830             : /**
     831             :  * drm_gpuva_unlink() - unlink a &drm_gpuva
     832             :  * @va: the &drm_gpuva to unlink
     833             :  *
     834             :  * This removes the given &va from the GPU VA list of the &drm_gem_object it is
     835             :  * associated with.
     836             :  *
     837             :  * This function expects the caller to protect the GEM's GPUVA list against
     838             :  * concurrent access using the GEMs dma_resv lock.
     839             :  */
     840             : void
     841           0 : drm_gpuva_unlink(struct drm_gpuva *va)
     842             : {
     843           0 :         struct drm_gem_object *obj = va->gem.obj;
     844             : 
     845           0 :         if (unlikely(!obj))
     846             :                 return;
     847             : 
     848             :         drm_gem_gpuva_assert_lock_held(obj);
     849             : 
     850           0 :         list_del_init(&va->gem.entry);
     851             : }
     852             : EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
     853             : 
     854             : /**
     855             :  * drm_gpuva_find_first() - find the first &drm_gpuva in the given range
     856             :  * @mgr: the &drm_gpuva_manager to search in
     857             :  * @addr: the &drm_gpuvas address
     858             :  * @range: the &drm_gpuvas range
     859             :  *
     860             :  * Returns: the first &drm_gpuva within the given range
     861             :  */
     862             : struct drm_gpuva *
     863           0 : drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
     864             :                      u64 addr, u64 range)
     865             : {
     866           0 :         u64 last = addr + range - 1;
     867             : 
     868           0 :         return drm_gpuva_it_iter_first(&mgr->rb.tree, addr, last);
     869             : }
     870             : EXPORT_SYMBOL_GPL(drm_gpuva_find_first);
     871             : 
     872             : /**
     873             :  * drm_gpuva_find() - find a &drm_gpuva
     874             :  * @mgr: the &drm_gpuva_manager to search in
     875             :  * @addr: the &drm_gpuvas address
     876             :  * @range: the &drm_gpuvas range
     877             :  *
     878             :  * Returns: the &drm_gpuva at a given &addr and with a given &range
     879             :  */
     880             : struct drm_gpuva *
     881           0 : drm_gpuva_find(struct drm_gpuva_manager *mgr,
     882             :                u64 addr, u64 range)
     883             : {
     884             :         struct drm_gpuva *va;
     885             : 
     886           0 :         va = drm_gpuva_find_first(mgr, addr, range);
     887           0 :         if (!va)
     888             :                 goto out;
     889             : 
     890           0 :         if (va->va.addr != addr ||
     891           0 :             va->va.range != range)
     892             :                 goto out;
     893             : 
     894             :         return va;
     895             : 
     896             : out:
     897           0 :         return NULL;
     898             : }
     899             : EXPORT_SYMBOL_GPL(drm_gpuva_find);
     900             : 
     901             : /**
     902             :  * drm_gpuva_find_prev() - find the &drm_gpuva before the given address
     903             :  * @mgr: the &drm_gpuva_manager to search in
     904             :  * @start: the given GPU VA's start address
     905             :  *
     906             :  * Find the adjacent &drm_gpuva before the GPU VA with given &start address.
     907             :  *
     908             :  * Note that if there is any free space between the GPU VA mappings no mapping
     909             :  * is returned.
     910             :  *
     911             :  * Returns: a pointer to the found &drm_gpuva or NULL if none was found
     912             :  */
     913             : struct drm_gpuva *
     914           0 : drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start)
     915             : {
     916           0 :         if (!drm_gpuva_range_valid(mgr, start - 1, 1))
     917             :                 return NULL;
     918             : 
     919           0 :         return drm_gpuva_it_iter_first(&mgr->rb.tree, start - 1, start);
     920             : }
     921             : EXPORT_SYMBOL_GPL(drm_gpuva_find_prev);
     922             : 
     923             : /**
     924             :  * drm_gpuva_find_next() - find the &drm_gpuva after the given address
     925             :  * @mgr: the &drm_gpuva_manager to search in
     926             :  * @end: the given GPU VA's end address
     927             :  *
     928             :  * Find the adjacent &drm_gpuva after the GPU VA with given &end address.
     929             :  *
     930             :  * Note that if there is any free space between the GPU VA mappings no mapping
     931             :  * is returned.
     932             :  *
     933             :  * Returns: a pointer to the found &drm_gpuva or NULL if none was found
     934             :  */
     935             : struct drm_gpuva *
     936           0 : drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end)
     937             : {
     938           0 :         if (!drm_gpuva_range_valid(mgr, end, 1))
     939             :                 return NULL;
     940             : 
     941           0 :         return drm_gpuva_it_iter_first(&mgr->rb.tree, end, end + 1);
     942             : }
     943             : EXPORT_SYMBOL_GPL(drm_gpuva_find_next);
     944             : 
     945             : /**
     946             :  * drm_gpuva_interval_empty() - indicate whether a given interval of the VA space
     947             :  * is empty
     948             :  * @mgr: the &drm_gpuva_manager to check the range for
     949             :  * @addr: the start address of the range
     950             :  * @range: the range of the interval
     951             :  *
     952             :  * Returns: true if the interval is empty, false otherwise
     953             :  */
     954             : bool
     955           0 : drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
     956             : {
     957           0 :         return !drm_gpuva_find_first(mgr, addr, range);
     958             : }
     959             : EXPORT_SYMBOL_GPL(drm_gpuva_interval_empty);
     960             : 
     961             : /**
     962             :  * drm_gpuva_map() - helper to insert a &drm_gpuva according to a
     963             :  * &drm_gpuva_op_map
     964             :  * @mgr: the &drm_gpuva_manager
     965             :  * @va: the &drm_gpuva to insert
     966             :  * @op: the &drm_gpuva_op_map to initialize @va with
     967             :  *
     968             :  * Initializes the @va from the @op and inserts it into the given @mgr.
     969             :  */
     970             : void
     971           0 : drm_gpuva_map(struct drm_gpuva_manager *mgr,
     972             :               struct drm_gpuva *va,
     973             :               struct drm_gpuva_op_map *op)
     974             : {
     975           0 :         drm_gpuva_init_from_op(va, op);
     976           0 :         drm_gpuva_insert(mgr, va);
     977           0 : }
     978             : EXPORT_SYMBOL_GPL(drm_gpuva_map);
     979             : 
     980             : /**
     981             :  * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a
     982             :  * &drm_gpuva_op_remap
     983             :  * @prev: the &drm_gpuva to remap when keeping the start of a mapping
     984             :  * @next: the &drm_gpuva to remap when keeping the end of a mapping
     985             :  * @op: the &drm_gpuva_op_remap to initialize @prev and @next with
     986             :  *
     987             :  * Removes the currently mapped &drm_gpuva and remaps it using @prev and/or
     988             :  * @next.
     989             :  */
     990             : void
     991           0 : drm_gpuva_remap(struct drm_gpuva *prev,
     992             :                 struct drm_gpuva *next,
     993             :                 struct drm_gpuva_op_remap *op)
     994             : {
     995           0 :         struct drm_gpuva *curr = op->unmap->va;
     996           0 :         struct drm_gpuva_manager *mgr = curr->mgr;
     997             : 
     998           0 :         drm_gpuva_remove(curr);
     999             : 
    1000           0 :         if (op->prev) {
    1001           0 :                 drm_gpuva_init_from_op(prev, op->prev);
    1002           0 :                 drm_gpuva_insert(mgr, prev);
    1003             :         }
    1004             : 
    1005           0 :         if (op->next) {
    1006           0 :                 drm_gpuva_init_from_op(next, op->next);
    1007           0 :                 drm_gpuva_insert(mgr, next);
    1008             :         }
    1009           0 : }
    1010             : EXPORT_SYMBOL_GPL(drm_gpuva_remap);
    1011             : 
    1012             : /**
    1013             :  * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a
    1014             :  * &drm_gpuva_op_unmap
    1015             :  * @op: the &drm_gpuva_op_unmap specifying the &drm_gpuva to remove
    1016             :  *
    1017             :  * Removes the &drm_gpuva associated with the &drm_gpuva_op_unmap.
    1018             :  */
    1019             : void
    1020           0 : drm_gpuva_unmap(struct drm_gpuva_op_unmap *op)
    1021             : {
    1022           0 :         drm_gpuva_remove(op->va);
    1023           0 : }
    1024             : EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
    1025             : 
    1026             : static int
    1027             : op_map_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
    1028             :           u64 addr, u64 range,
    1029             :           struct drm_gem_object *obj, u64 offset)
    1030             : {
    1031           0 :         struct drm_gpuva_op op = {};
    1032             : 
    1033             :         op.op = DRM_GPUVA_OP_MAP;
    1034           0 :         op.map.va.addr = addr;
    1035           0 :         op.map.va.range = range;
    1036           0 :         op.map.gem.obj = obj;
    1037           0 :         op.map.gem.offset = offset;
    1038             : 
    1039           0 :         return fn->sm_step_map(&op, priv);
    1040             : }
    1041             : 
    1042             : static int
    1043           0 : op_remap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
    1044             :             struct drm_gpuva_op_map *prev,
    1045             :             struct drm_gpuva_op_map *next,
    1046             :             struct drm_gpuva_op_unmap *unmap)
    1047             : {
    1048           0 :         struct drm_gpuva_op op = {};
    1049             :         struct drm_gpuva_op_remap *r;
    1050             : 
    1051           0 :         op.op = DRM_GPUVA_OP_REMAP;
    1052           0 :         r = &op.remap;
    1053           0 :         r->prev = prev;
    1054           0 :         r->next = next;
    1055           0 :         r->unmap = unmap;
    1056             : 
    1057           0 :         return fn->sm_step_remap(&op, priv);
    1058             : }
    1059             : 
    1060             : static int
    1061           0 : op_unmap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
    1062             :             struct drm_gpuva *va, bool merge)
    1063             : {
    1064           0 :         struct drm_gpuva_op op = {};
    1065             : 
    1066           0 :         op.op = DRM_GPUVA_OP_UNMAP;
    1067           0 :         op.unmap.va = va;
    1068           0 :         op.unmap.keep = merge;
    1069             : 
    1070           0 :         return fn->sm_step_unmap(&op, priv);
    1071             : }
    1072             : 
    1073             : static int
    1074           0 : __drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
    1075             :                    const struct drm_gpuva_fn_ops *ops, void *priv,
    1076             :                    u64 req_addr, u64 req_range,
    1077             :                    struct drm_gem_object *req_obj, u64 req_offset)
    1078             : {
    1079           0 :         struct drm_gpuva *va, *next, *prev = NULL;
    1080           0 :         u64 req_end = req_addr + req_range;
    1081             :         int ret;
    1082             : 
    1083           0 :         if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
    1084             :                 return -EINVAL;
    1085             : 
    1086           0 :         drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
    1087           0 :                 struct drm_gem_object *obj = va->gem.obj;
    1088           0 :                 u64 offset = va->gem.offset;
    1089           0 :                 u64 addr = va->va.addr;
    1090           0 :                 u64 range = va->va.range;
    1091           0 :                 u64 end = addr + range;
    1092           0 :                 bool merge = !!va->gem.obj;
    1093             : 
    1094           0 :                 if (addr == req_addr) {
    1095           0 :                         merge &= obj == req_obj &&
    1096           0 :                                  offset == req_offset;
    1097             : 
    1098           0 :                         if (end == req_end) {
    1099           0 :                                 ret = op_unmap_cb(ops, priv, va, merge);
    1100           0 :                                 if (ret)
    1101             :                                         return ret;
    1102             :                                 break;
    1103             :                         }
    1104             : 
    1105           0 :                         if (end < req_end) {
    1106           0 :                                 ret = op_unmap_cb(ops, priv, va, merge);
    1107           0 :                                 if (ret)
    1108             :                                         return ret;
    1109             :                                 goto next;
    1110             :                         }
    1111             : 
    1112           0 :                         if (end > req_end) {
    1113           0 :                                 struct drm_gpuva_op_map n = {
    1114             :                                         .va.addr = req_end,
    1115           0 :                                         .va.range = range - req_range,
    1116             :                                         .gem.obj = obj,
    1117           0 :                                         .gem.offset = offset + req_range,
    1118             :                                 };
    1119           0 :                                 struct drm_gpuva_op_unmap u = {
    1120             :                                         .va = va,
    1121             :                                         .keep = merge,
    1122             :                                 };
    1123             : 
    1124           0 :                                 ret = op_remap_cb(ops, priv, NULL, &n, &u);
    1125           0 :                                 if (ret)
    1126           0 :                                         return ret;
    1127           0 :                                 break;
    1128             :                         }
    1129           0 :                 } else if (addr < req_addr) {
    1130           0 :                         u64 ls_range = req_addr - addr;
    1131           0 :                         struct drm_gpuva_op_map p = {
    1132             :                                 .va.addr = addr,
    1133             :                                 .va.range = ls_range,
    1134             :                                 .gem.obj = obj,
    1135             :                                 .gem.offset = offset,
    1136             :                         };
    1137           0 :                         struct drm_gpuva_op_unmap u = { .va = va };
    1138             : 
    1139           0 :                         merge &= obj == req_obj &&
    1140           0 :                                  offset + ls_range == req_offset;
    1141           0 :                         u.keep = merge;
    1142             : 
    1143           0 :                         if (end == req_end) {
    1144           0 :                                 ret = op_remap_cb(ops, priv, &p, NULL, &u);
    1145           0 :                                 if (ret)
    1146           0 :                                         return ret;
    1147           0 :                                 break;
    1148             :                         }
    1149             : 
    1150           0 :                         if (end < req_end) {
    1151           0 :                                 ret = op_remap_cb(ops, priv, &p, NULL, &u);
    1152           0 :                                 if (ret)
    1153             :                                         return ret;
    1154           0 :                                 goto next;
    1155             :                         }
    1156             : 
    1157           0 :                         if (end > req_end) {
    1158           0 :                                 struct drm_gpuva_op_map n = {
    1159             :                                         .va.addr = req_end,
    1160           0 :                                         .va.range = end - req_end,
    1161             :                                         .gem.obj = obj,
    1162           0 :                                         .gem.offset = offset + ls_range +
    1163             :                                                       req_range,
    1164             :                                 };
    1165             : 
    1166           0 :                                 ret = op_remap_cb(ops, priv, &p, &n, &u);
    1167           0 :                                 if (ret)
    1168           0 :                                         return ret;
    1169           0 :                                 break;
    1170             :                         }
    1171           0 :                 } else if (addr > req_addr) {
    1172           0 :                         merge &= obj == req_obj &&
    1173           0 :                                  offset == req_offset +
    1174           0 :                                            (addr - req_addr);
    1175             : 
    1176           0 :                         if (end == req_end) {
    1177           0 :                                 ret = op_unmap_cb(ops, priv, va, merge);
    1178           0 :                                 if (ret)
    1179             :                                         return ret;
    1180             :                                 break;
    1181             :                         }
    1182             : 
    1183           0 :                         if (end < req_end) {
    1184           0 :                                 ret = op_unmap_cb(ops, priv, va, merge);
    1185           0 :                                 if (ret)
    1186             :                                         return ret;
    1187             :                                 goto next;
    1188             :                         }
    1189             : 
    1190           0 :                         if (end > req_end) {
    1191           0 :                                 struct drm_gpuva_op_map n = {
    1192             :                                         .va.addr = req_end,
    1193           0 :                                         .va.range = end - req_end,
    1194             :                                         .gem.obj = obj,
    1195           0 :                                         .gem.offset = offset + req_end - addr,
    1196             :                                 };
    1197           0 :                                 struct drm_gpuva_op_unmap u = {
    1198             :                                         .va = va,
    1199             :                                         .keep = merge,
    1200             :                                 };
    1201             : 
    1202           0 :                                 ret = op_remap_cb(ops, priv, NULL, &n, &u);
    1203           0 :                                 if (ret)
    1204           0 :                                         return ret;
    1205           0 :                                 break;
    1206             :                         }
    1207             :                 }
    1208             : next:
    1209           0 :                 prev = va;
    1210             :         }
    1211             : 
    1212           0 :         return op_map_cb(ops, priv,
    1213             :                          req_addr, req_range,
    1214             :                          req_obj, req_offset);
    1215             : }
    1216             : 
    1217             : static int
    1218           0 : __drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr,
    1219             :                      const struct drm_gpuva_fn_ops *ops, void *priv,
    1220             :                      u64 req_addr, u64 req_range)
    1221             : {
    1222             :         struct drm_gpuva *va, *next;
    1223           0 :         u64 req_end = req_addr + req_range;
    1224             :         int ret;
    1225             : 
    1226           0 :         if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
    1227             :                 return -EINVAL;
    1228             : 
    1229           0 :         drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
    1230           0 :                 struct drm_gpuva_op_map prev = {}, next = {};
    1231           0 :                 bool prev_split = false, next_split = false;
    1232           0 :                 struct drm_gem_object *obj = va->gem.obj;
    1233           0 :                 u64 offset = va->gem.offset;
    1234           0 :                 u64 addr = va->va.addr;
    1235           0 :                 u64 range = va->va.range;
    1236           0 :                 u64 end = addr + range;
    1237             : 
    1238           0 :                 if (addr < req_addr) {
    1239           0 :                         prev.va.addr = addr;
    1240           0 :                         prev.va.range = req_addr - addr;
    1241           0 :                         prev.gem.obj = obj;
    1242           0 :                         prev.gem.offset = offset;
    1243             : 
    1244           0 :                         prev_split = true;
    1245             :                 }
    1246             : 
    1247           0 :                 if (end > req_end) {
    1248           0 :                         next.va.addr = req_end;
    1249           0 :                         next.va.range = end - req_end;
    1250           0 :                         next.gem.obj = obj;
    1251           0 :                         next.gem.offset = offset + (req_end - addr);
    1252             : 
    1253           0 :                         next_split = true;
    1254             :                 }
    1255             : 
    1256           0 :                 if (prev_split || next_split) {
    1257           0 :                         struct drm_gpuva_op_unmap unmap = { .va = va };
    1258             : 
    1259           0 :                         ret = op_remap_cb(ops, priv,
    1260             :                                           prev_split ? &prev : NULL,
    1261             :                                           next_split ? &next : NULL,
    1262             :                                           &unmap);
    1263           0 :                         if (ret)
    1264           0 :                                 return ret;
    1265             :                 } else {
    1266           0 :                         ret = op_unmap_cb(ops, priv, va, false);
    1267           0 :                         if (ret)
    1268             :                                 return ret;
    1269             :                 }
    1270             :         }
    1271             : 
    1272             :         return 0;
    1273             : }
    1274             : 
    1275             : /**
    1276             :  * drm_gpuva_sm_map() - creates the &drm_gpuva_op split/merge steps
    1277             :  * @mgr: the &drm_gpuva_manager representing the GPU VA space
    1278             :  * @req_addr: the start address of the new mapping
    1279             :  * @req_range: the range of the new mapping
    1280             :  * @req_obj: the &drm_gem_object to map
    1281             :  * @req_offset: the offset within the &drm_gem_object
    1282             :  * @priv: pointer to a driver private data structure
    1283             :  *
    1284             :  * This function iterates the given range of the GPU VA space. It utilizes the
    1285             :  * &drm_gpuva_fn_ops to call back into the driver providing the split and merge
    1286             :  * steps.
    1287             :  *
    1288             :  * Drivers may use these callbacks to update the GPU VA space right away within
    1289             :  * the callback. In case the driver decides to copy and store the operations for
    1290             :  * later processing neither this function nor &drm_gpuva_sm_unmap is allowed to
    1291             :  * be called before the &drm_gpuva_manager's view of the GPU VA space was
    1292             :  * updated with the previous set of operations. To update the
    1293             :  * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
    1294             :  * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
    1295             :  * used.
    1296             :  *
    1297             :  * A sequence of callbacks can contain map, unmap and remap operations, but
    1298             :  * the sequence of callbacks might also be empty if no operation is required,
    1299             :  * e.g. if the requested mapping already exists in the exact same way.
    1300             :  *
    1301             :  * There can be an arbitrary amount of unmap operations, a maximum of two remap
    1302             :  * operations and a single map operation. The latter one represents the original
    1303             :  * map operation requested by the caller.
    1304             :  *
    1305             :  * Returns: 0 on success or a negative error code
    1306             :  */
    1307             : int
    1308           0 : drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
    1309             :                  u64 req_addr, u64 req_range,
    1310             :                  struct drm_gem_object *req_obj, u64 req_offset)
    1311             : {
    1312           0 :         const struct drm_gpuva_fn_ops *ops = mgr->ops;
    1313             : 
    1314           0 :         if (unlikely(!(ops && ops->sm_step_map &&
    1315             :                        ops->sm_step_remap &&
    1316             :                        ops->sm_step_unmap)))
    1317             :                 return -EINVAL;
    1318             : 
    1319           0 :         return __drm_gpuva_sm_map(mgr, ops, priv,
    1320             :                                   req_addr, req_range,
    1321             :                                   req_obj, req_offset);
    1322             : }
    1323             : EXPORT_SYMBOL_GPL(drm_gpuva_sm_map);
    1324             : 
    1325             : /**
    1326             :  * drm_gpuva_sm_unmap() - creates the &drm_gpuva_ops to split on unmap
    1327             :  * @mgr: the &drm_gpuva_manager representing the GPU VA space
    1328             :  * @priv: pointer to a driver private data structure
    1329             :  * @req_addr: the start address of the range to unmap
    1330             :  * @req_range: the range of the mappings to unmap
    1331             :  *
    1332             :  * This function iterates the given range of the GPU VA space. It utilizes the
    1333             :  * &drm_gpuva_fn_ops to call back into the driver providing the operations to
    1334             :  * unmap and, if required, split existent mappings.
    1335             :  *
    1336             :  * Drivers may use these callbacks to update the GPU VA space right away within
    1337             :  * the callback. In case the driver decides to copy and store the operations for
    1338             :  * later processing neither this function nor &drm_gpuva_sm_map is allowed to be
    1339             :  * called before the &drm_gpuva_manager's view of the GPU VA space was updated
    1340             :  * with the previous set of operations. To update the &drm_gpuva_manager's view
    1341             :  * of the GPU VA space drm_gpuva_insert(), drm_gpuva_destroy_locked() and/or
    1342             :  * drm_gpuva_destroy_unlocked() should be used.
    1343             :  *
    1344             :  * A sequence of callbacks can contain unmap and remap operations, depending on
    1345             :  * whether there are actual overlapping mappings to split.
    1346             :  *
    1347             :  * There can be an arbitrary amount of unmap operations and a maximum of two
    1348             :  * remap operations.
    1349             :  *
    1350             :  * Returns: 0 on success or a negative error code
    1351             :  */
    1352             : int
    1353           0 : drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
    1354             :                    u64 req_addr, u64 req_range)
    1355             : {
    1356           0 :         const struct drm_gpuva_fn_ops *ops = mgr->ops;
    1357             : 
    1358           0 :         if (unlikely(!(ops && ops->sm_step_remap &&
    1359             :                        ops->sm_step_unmap)))
    1360             :                 return -EINVAL;
    1361             : 
    1362           0 :         return __drm_gpuva_sm_unmap(mgr, ops, priv,
    1363             :                                     req_addr, req_range);
    1364             : }
    1365             : EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap);
    1366             : 
    1367             : static struct drm_gpuva_op *
    1368           0 : gpuva_op_alloc(struct drm_gpuva_manager *mgr)
    1369             : {
    1370           0 :         const struct drm_gpuva_fn_ops *fn = mgr->ops;
    1371             :         struct drm_gpuva_op *op;
    1372             : 
    1373           0 :         if (fn && fn->op_alloc)
    1374           0 :                 op = fn->op_alloc();
    1375             :         else
    1376           0 :                 op = kzalloc(sizeof(*op), GFP_KERNEL);
    1377             : 
    1378           0 :         if (unlikely(!op))
    1379             :                 return NULL;
    1380             : 
    1381             :         return op;
    1382             : }
    1383             : 
    1384             : static void
    1385           0 : gpuva_op_free(struct drm_gpuva_manager *mgr,
    1386             :               struct drm_gpuva_op *op)
    1387             : {
    1388           0 :         const struct drm_gpuva_fn_ops *fn = mgr->ops;
    1389             : 
    1390           0 :         if (fn && fn->op_free)
    1391           0 :                 fn->op_free(op);
    1392             :         else
    1393           0 :                 kfree(op);
    1394           0 : }
    1395             : 
    1396             : static int
    1397           0 : drm_gpuva_sm_step(struct drm_gpuva_op *__op,
    1398             :                   void *priv)
    1399             : {
    1400             :         struct {
    1401             :                 struct drm_gpuva_manager *mgr;
    1402             :                 struct drm_gpuva_ops *ops;
    1403           0 :         } *args = priv;
    1404           0 :         struct drm_gpuva_manager *mgr = args->mgr;
    1405           0 :         struct drm_gpuva_ops *ops = args->ops;
    1406             :         struct drm_gpuva_op *op;
    1407             : 
    1408           0 :         op = gpuva_op_alloc(mgr);
    1409           0 :         if (unlikely(!op))
    1410             :                 goto err;
    1411             : 
    1412           0 :         memcpy(op, __op, sizeof(*op));
    1413             : 
    1414           0 :         if (op->op == DRM_GPUVA_OP_REMAP) {
    1415           0 :                 struct drm_gpuva_op_remap *__r = &__op->remap;
    1416           0 :                 struct drm_gpuva_op_remap *r = &op->remap;
    1417             : 
    1418           0 :                 r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap),
    1419             :                                    GFP_KERNEL);
    1420           0 :                 if (unlikely(!r->unmap))
    1421             :                         goto err_free_op;
    1422             : 
    1423           0 :                 if (__r->prev) {
    1424           0 :                         r->prev = kmemdup(__r->prev, sizeof(*r->prev),
    1425             :                                           GFP_KERNEL);
    1426           0 :                         if (unlikely(!r->prev))
    1427             :                                 goto err_free_unmap;
    1428             :                 }
    1429             : 
    1430           0 :                 if (__r->next) {
    1431           0 :                         r->next = kmemdup(__r->next, sizeof(*r->next),
    1432             :                                           GFP_KERNEL);
    1433           0 :                         if (unlikely(!r->next))
    1434             :                                 goto err_free_prev;
    1435             :                 }
    1436             :         }
    1437             : 
    1438           0 :         list_add_tail(&op->entry, &ops->list);
    1439             : 
    1440           0 :         return 0;
    1441             : 
    1442             : err_free_unmap:
    1443           0 :         kfree(op->remap.unmap);
    1444             : err_free_prev:
    1445           0 :         kfree(op->remap.prev);
    1446             : err_free_op:
    1447           0 :         gpuva_op_free(mgr, op);
    1448             : err:
    1449             :         return -ENOMEM;
    1450             : }
    1451             : 
    1452             : static const struct drm_gpuva_fn_ops gpuva_list_ops = {
    1453             :         .sm_step_map = drm_gpuva_sm_step,
    1454             :         .sm_step_remap = drm_gpuva_sm_step,
    1455             :         .sm_step_unmap = drm_gpuva_sm_step,
    1456             : };
    1457             : 
    1458             : /**
    1459             :  * drm_gpuva_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
    1460             :  * @mgr: the &drm_gpuva_manager representing the GPU VA space
    1461             :  * @req_addr: the start address of the new mapping
    1462             :  * @req_range: the range of the new mapping
    1463             :  * @req_obj: the &drm_gem_object to map
    1464             :  * @req_offset: the offset within the &drm_gem_object
    1465             :  *
    1466             :  * This function creates a list of operations to perform splitting and merging
    1467             :  * of existent mapping(s) with the newly requested one.
    1468             :  *
    1469             :  * The list can be iterated with &drm_gpuva_for_each_op and must be processed
    1470             :  * in the given order. It can contain map, unmap and remap operations, but it
    1471             :  * also can be empty if no operation is required, e.g. if the requested mapping
    1472             :  * already exists is the exact same way.
    1473             :  *
    1474             :  * There can be an arbitrary amount of unmap operations, a maximum of two remap
    1475             :  * operations and a single map operation. The latter one represents the original
    1476             :  * map operation requested by the caller.
    1477             :  *
    1478             :  * Note that before calling this function again with another mapping request it
    1479             :  * is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The
    1480             :  * previously obtained operations must be either processed or abandoned. To
    1481             :  * update the &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
    1482             :  * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
    1483             :  * used.
    1484             :  *
    1485             :  * After the caller finished processing the returned &drm_gpuva_ops, they must
    1486             :  * be freed with &drm_gpuva_ops_free.
    1487             :  *
    1488             :  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
    1489             :  */
    1490             : struct drm_gpuva_ops *
    1491           0 : drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
    1492             :                             u64 req_addr, u64 req_range,
    1493             :                             struct drm_gem_object *req_obj, u64 req_offset)
    1494             : {
    1495             :         struct drm_gpuva_ops *ops;
    1496             :         struct {
    1497             :                 struct drm_gpuva_manager *mgr;
    1498             :                 struct drm_gpuva_ops *ops;
    1499             :         } args;
    1500             :         int ret;
    1501             : 
    1502           0 :         ops = kzalloc(sizeof(*ops), GFP_KERNEL);
    1503           0 :         if (unlikely(!ops))
    1504             :                 return ERR_PTR(-ENOMEM);
    1505             : 
    1506           0 :         INIT_LIST_HEAD(&ops->list);
    1507             : 
    1508           0 :         args.mgr = mgr;
    1509           0 :         args.ops = ops;
    1510             : 
    1511           0 :         ret = __drm_gpuva_sm_map(mgr, &gpuva_list_ops, &args,
    1512             :                                  req_addr, req_range,
    1513             :                                  req_obj, req_offset);
    1514           0 :         if (ret)
    1515             :                 goto err_free_ops;
    1516             : 
    1517             :         return ops;
    1518             : 
    1519             : err_free_ops:
    1520           0 :         drm_gpuva_ops_free(mgr, ops);
    1521           0 :         return ERR_PTR(ret);
    1522             : }
    1523             : EXPORT_SYMBOL_GPL(drm_gpuva_sm_map_ops_create);
    1524             : 
    1525             : /**
    1526             :  * drm_gpuva_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
    1527             :  * unmap
    1528             :  * @mgr: the &drm_gpuva_manager representing the GPU VA space
    1529             :  * @req_addr: the start address of the range to unmap
    1530             :  * @req_range: the range of the mappings to unmap
    1531             :  *
    1532             :  * This function creates a list of operations to perform unmapping and, if
    1533             :  * required, splitting of the mappings overlapping the unmap range.
    1534             :  *
    1535             :  * The list can be iterated with &drm_gpuva_for_each_op and must be processed
    1536             :  * in the given order. It can contain unmap and remap operations, depending on
    1537             :  * whether there are actual overlapping mappings to split.
    1538             :  *
    1539             :  * There can be an arbitrary amount of unmap operations and a maximum of two
    1540             :  * remap operations.
    1541             :  *
    1542             :  * Note that before calling this function again with another range to unmap it
    1543             :  * is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The
    1544             :  * previously obtained operations must be processed or abandoned. To update the
    1545             :  * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
    1546             :  * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
    1547             :  * used.
    1548             :  *
    1549             :  * After the caller finished processing the returned &drm_gpuva_ops, they must
    1550             :  * be freed with &drm_gpuva_ops_free.
    1551             :  *
    1552             :  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
    1553             :  */
    1554             : struct drm_gpuva_ops *
    1555           0 : drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
    1556             :                               u64 req_addr, u64 req_range)
    1557             : {
    1558             :         struct drm_gpuva_ops *ops;
    1559             :         struct {
    1560             :                 struct drm_gpuva_manager *mgr;
    1561             :                 struct drm_gpuva_ops *ops;
    1562             :         } args;
    1563             :         int ret;
    1564             : 
    1565           0 :         ops = kzalloc(sizeof(*ops), GFP_KERNEL);
    1566           0 :         if (unlikely(!ops))
    1567             :                 return ERR_PTR(-ENOMEM);
    1568             : 
    1569           0 :         INIT_LIST_HEAD(&ops->list);
    1570             : 
    1571           0 :         args.mgr = mgr;
    1572           0 :         args.ops = ops;
    1573             : 
    1574           0 :         ret = __drm_gpuva_sm_unmap(mgr, &gpuva_list_ops, &args,
    1575             :                                    req_addr, req_range);
    1576           0 :         if (ret)
    1577             :                 goto err_free_ops;
    1578             : 
    1579             :         return ops;
    1580             : 
    1581             : err_free_ops:
    1582           0 :         drm_gpuva_ops_free(mgr, ops);
    1583           0 :         return ERR_PTR(ret);
    1584             : }
    1585             : EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap_ops_create);
    1586             : 
    1587             : /**
    1588             :  * drm_gpuva_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch
    1589             :  * @mgr: the &drm_gpuva_manager representing the GPU VA space
    1590             :  * @addr: the start address of the range to prefetch
    1591             :  * @range: the range of the mappings to prefetch
    1592             :  *
    1593             :  * This function creates a list of operations to perform prefetching.
    1594             :  *
    1595             :  * The list can be iterated with &drm_gpuva_for_each_op and must be processed
    1596             :  * in the given order. It can contain prefetch operations.
    1597             :  *
    1598             :  * There can be an arbitrary amount of prefetch operations.
    1599             :  *
    1600             :  * After the caller finished processing the returned &drm_gpuva_ops, they must
    1601             :  * be freed with &drm_gpuva_ops_free.
    1602             :  *
    1603             :  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
    1604             :  */
    1605             : struct drm_gpuva_ops *
    1606           0 : drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
    1607             :                               u64 addr, u64 range)
    1608             : {
    1609             :         struct drm_gpuva_ops *ops;
    1610             :         struct drm_gpuva_op *op;
    1611             :         struct drm_gpuva *va;
    1612           0 :         u64 end = addr + range;
    1613             :         int ret;
    1614             : 
    1615           0 :         ops = kzalloc(sizeof(*ops), GFP_KERNEL);
    1616           0 :         if (!ops)
    1617             :                 return ERR_PTR(-ENOMEM);
    1618             : 
    1619           0 :         INIT_LIST_HEAD(&ops->list);
    1620             : 
    1621           0 :         drm_gpuva_for_each_va_range(va, mgr, addr, end) {
    1622           0 :                 op = gpuva_op_alloc(mgr);
    1623           0 :                 if (!op) {
    1624           0 :                         ret = -ENOMEM;
    1625             :                         goto err_free_ops;
    1626             :                 }
    1627             : 
    1628           0 :                 op->op = DRM_GPUVA_OP_PREFETCH;
    1629           0 :                 op->prefetch.va = va;
    1630           0 :                 list_add_tail(&op->entry, &ops->list);
    1631             :         }
    1632             : 
    1633             :         return ops;
    1634             : 
    1635             : err_free_ops:
    1636           0 :         drm_gpuva_ops_free(mgr, ops);
    1637           0 :         return ERR_PTR(ret);
    1638             : }
    1639             : EXPORT_SYMBOL_GPL(drm_gpuva_prefetch_ops_create);
    1640             : 
    1641             : /**
    1642             :  * drm_gpuva_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
    1643             :  * @mgr: the &drm_gpuva_manager representing the GPU VA space
    1644             :  * @obj: the &drm_gem_object to unmap
    1645             :  *
    1646             :  * This function creates a list of operations to perform unmapping for every
    1647             :  * GPUVA attached to a GEM.
    1648             :  *
    1649             :  * The list can be iterated with &drm_gpuva_for_each_op and consists out of an
    1650             :  * arbitrary amount of unmap operations.
    1651             :  *
    1652             :  * After the caller finished processing the returned &drm_gpuva_ops, they must
    1653             :  * be freed with &drm_gpuva_ops_free.
    1654             :  *
    1655             :  * It is the callers responsibility to protect the GEMs GPUVA list against
    1656             :  * concurrent access using the GEMs dma_resv lock.
    1657             :  *
    1658             :  * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
    1659             :  */
    1660             : struct drm_gpuva_ops *
    1661           0 : drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
    1662             :                                struct drm_gem_object *obj)
    1663             : {
    1664             :         struct drm_gpuva_ops *ops;
    1665             :         struct drm_gpuva_op *op;
    1666             :         struct drm_gpuva *va;
    1667             :         int ret;
    1668             : 
    1669             :         drm_gem_gpuva_assert_lock_held(obj);
    1670             : 
    1671           0 :         ops = kzalloc(sizeof(*ops), GFP_KERNEL);
    1672           0 :         if (!ops)
    1673             :                 return ERR_PTR(-ENOMEM);
    1674             : 
    1675           0 :         INIT_LIST_HEAD(&ops->list);
    1676             : 
    1677           0 :         drm_gem_for_each_gpuva(va, obj) {
    1678           0 :                 op = gpuva_op_alloc(mgr);
    1679           0 :                 if (!op) {
    1680           0 :                         ret = -ENOMEM;
    1681             :                         goto err_free_ops;
    1682             :                 }
    1683             : 
    1684           0 :                 op->op = DRM_GPUVA_OP_UNMAP;
    1685           0 :                 op->unmap.va = va;
    1686           0 :                 list_add_tail(&op->entry, &ops->list);
    1687             :         }
    1688             : 
    1689             :         return ops;
    1690             : 
    1691             : err_free_ops:
    1692           0 :         drm_gpuva_ops_free(mgr, ops);
    1693           0 :         return ERR_PTR(ret);
    1694             : }
    1695             : EXPORT_SYMBOL_GPL(drm_gpuva_gem_unmap_ops_create);
    1696             : 
    1697             : /**
    1698             :  * drm_gpuva_ops_free() - free the given &drm_gpuva_ops
    1699             :  * @mgr: the &drm_gpuva_manager the ops were created for
    1700             :  * @ops: the &drm_gpuva_ops to free
    1701             :  *
    1702             :  * Frees the given &drm_gpuva_ops structure including all the ops associated
    1703             :  * with it.
    1704             :  */
    1705             : void
    1706           0 : drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
    1707             :                    struct drm_gpuva_ops *ops)
    1708             : {
    1709             :         struct drm_gpuva_op *op, *next;
    1710             : 
    1711           0 :         drm_gpuva_for_each_op_safe(op, next, ops) {
    1712           0 :                 list_del(&op->entry);
    1713             : 
    1714           0 :                 if (op->op == DRM_GPUVA_OP_REMAP) {
    1715           0 :                         kfree(op->remap.prev);
    1716           0 :                         kfree(op->remap.next);
    1717           0 :                         kfree(op->remap.unmap);
    1718             :                 }
    1719             : 
    1720           0 :                 gpuva_op_free(mgr, op);
    1721             :         }
    1722             : 
    1723           0 :         kfree(ops);
    1724           0 : }
    1725             : EXPORT_SYMBOL_GPL(drm_gpuva_ops_free);

Generated by: LCOV version 1.14