LCOV - code coverage report
Current view: top level - include/drm - drm_mm.h (source / functions)
Test: coverage.info
Date: 2023-08-24 13:40:31
                 Hit   Total   Coverage
Lines:             0      11      0.0 %
Functions:         0       0          -

          Line data    Source code
       1             : /**************************************************************************
       2             :  *
       3             :  * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
       4             :  * Copyright 2016 Intel Corporation
       5             :  * All Rights Reserved.
       6             :  *
       7             :  * Permission is hereby granted, free of charge, to any person obtaining a
       8             :  * copy of this software and associated documentation files (the
       9             :  * "Software"), to deal in the Software without restriction, including
      10             :  * without limitation the rights to use, copy, modify, merge, publish,
      11             :  * distribute, sub license, and/or sell copies of the Software, and to
      12             :  * permit persons to whom the Software is furnished to do so, subject to
      13             :  * the following conditions:
      14             :  *
      15             :  * The above copyright notice and this permission notice (including the
      16             :  * next paragraph) shall be included in all copies or substantial portions
      17             :  * of the Software.
      18             :  *
      19             :  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
      20             :  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
      21             :  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
      22             :  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
      23             :  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
      24             :  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
      25             :  * USE OR OTHER DEALINGS IN THE SOFTWARE.
      26             :  *
      27             :  *
      28             :  **************************************************************************/
      29             : /*
      30             :  * Authors:
      31             :  * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
      32             :  */
      33             : 
      34             : #ifndef _DRM_MM_H_
      35             : #define _DRM_MM_H_
      36             : 
      37             : /*
      38             :  * Generic range manager structs
      39             :  */
      40             : #include <linux/bug.h>
      41             : #include <linux/rbtree.h>
      42             : #include <linux/limits.h>
      43             : #include <linux/mm_types.h>
      44             : #include <linux/list.h>
      45             : #include <linux/spinlock.h>
      46             : #ifdef CONFIG_DRM_DEBUG_MM
      47             : #include <linux/stackdepot.h>
      48             : #endif
      49             : #include <linux/types.h>
      50             : 
      51             : #include <drm/drm_print.h>
      52             : 
      53             : #ifdef CONFIG_DRM_DEBUG_MM
      54             : #define DRM_MM_BUG_ON(expr) BUG_ON(expr)
      55             : #else
      56             : #define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
      57             : #endif
      58             : 
      59             : /**
      60             :  * enum drm_mm_insert_mode - control search and allocation behaviour
      61             :  *
       62             :  * The &struct drm_mm range manager supports finding a suitable hole using
       63             :  * a number of search trees. These trees are organised by size, by address and
      64             :  * in most recent eviction order. This allows the user to find either the
      65             :  * smallest hole to reuse, the lowest or highest address to reuse, or simply
      66             :  * reuse the most recent eviction that fits. When allocating the &drm_mm_node
       67             :  * from within the hole, the &drm_mm_insert_mode also dictates whether to
      68             :  * allocate the lowest matching address or the highest.
      69             :  */
      70             : enum drm_mm_insert_mode {
      71             :         /**
      72             :          * @DRM_MM_INSERT_BEST:
      73             :          *
      74             :          * Search for the smallest hole (within the search range) that fits
      75             :          * the desired node.
      76             :          *
      77             :          * Allocates the node from the bottom of the found hole.
      78             :          */
      79             :         DRM_MM_INSERT_BEST = 0,
      80             : 
      81             :         /**
      82             :          * @DRM_MM_INSERT_LOW:
      83             :          *
      84             :          * Search for the lowest hole (address closest to 0, within the search
      85             :          * range) that fits the desired node.
      86             :          *
      87             :          * Allocates the node from the bottom of the found hole.
      88             :          */
      89             :         DRM_MM_INSERT_LOW,
      90             : 
      91             :         /**
      92             :          * @DRM_MM_INSERT_HIGH:
      93             :          *
      94             :          * Search for the highest hole (address closest to U64_MAX, within the
      95             :          * search range) that fits the desired node.
      96             :          *
      97             :          * Allocates the node from the *top* of the found hole. The specified
      98             :          * alignment for the node is applied to the base of the node
      99             :          * (&drm_mm_node.start).
     100             :          */
     101             :         DRM_MM_INSERT_HIGH,
     102             : 
     103             :         /**
     104             :          * @DRM_MM_INSERT_EVICT:
     105             :          *
     106             :          * Search for the most recently evicted hole (within the search range)
     107             :          * that fits the desired node. This is appropriate for use immediately
     108             :          * after performing an eviction scan (see drm_mm_scan_init()) and
     109             :          * removing the selected nodes to form a hole.
     110             :          *
     111             :          * Allocates the node from the bottom of the found hole.
     112             :          */
     113             :         DRM_MM_INSERT_EVICT,
     114             : 
     115             :         /**
     116             :          * @DRM_MM_INSERT_ONCE:
     117             :          *
      118             :          * Only check the first hole for suitability and report -ENOSPC
     119             :          * immediately otherwise, rather than check every hole until a
     120             :          * suitable one is found. Can only be used in conjunction with another
     121             :          * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW.
     122             :          */
     123             :         DRM_MM_INSERT_ONCE = BIT(31),
     124             : 
     125             :         /**
     126             :          * @DRM_MM_INSERT_HIGHEST:
     127             :          *
     128             :          * Only check the highest hole (the hole with the largest address) and
     129             :          * insert the node at the top of the hole or report -ENOSPC if
     130             :          * unsuitable.
     131             :          *
     132             :          * Does not search all holes.
     133             :          */
     134             :         DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE,
     135             : 
     136             :         /**
     137             :          * @DRM_MM_INSERT_LOWEST:
     138             :          *
     139             :          * Only check the lowest hole (the hole with the smallest address) and
     140             :          * insert the node at the bottom of the hole or report -ENOSPC if
     141             :          * unsuitable.
     142             :          *
     143             :          * Does not search all holes.
     144             :          */
     145             :         DRM_MM_INSERT_LOWEST  = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE,
     146             : };
     147             : 
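/*
 * Illustrative sketch (not part of drm_mm.h): choosing an insert mode for a
 * later drm_mm_insert_node_in_range() call. The helper name and the two flags
 * are hypothetical; only the enum values come from this header.
 */
static inline enum drm_mm_insert_mode
example_pick_mode(bool pin_to_top, bool after_eviction_scan)
{
        if (after_eviction_scan)
                return DRM_MM_INSERT_EVICT;     /* reuse the hole just carved out */
        if (pin_to_top)
                return DRM_MM_INSERT_HIGHEST;   /* DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE */
        return DRM_MM_INSERT_BEST;              /* smallest hole that fits */
}
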
     148             : /**
     149             :  * struct drm_mm_node - allocated block in the DRM allocator
     150             :  *
     151             :  * This represents an allocated block in a &drm_mm allocator. Except for
     152             :  * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is
      153             :  * entirely opaque and should only be accessed through the provided functions.
      154             :  * Since allocation of these nodes is entirely handled by the driver, they can be
     155             :  * embedded.
     156             :  */
     157             : struct drm_mm_node {
     158             :         /** @color: Opaque driver-private tag. */
     159             :         unsigned long color;
     160             :         /** @start: Start address of the allocated block. */
     161             :         u64 start;
     162             :         /** @size: Size of the allocated block. */
     163             :         u64 size;
     164             :         /* private: */
     165             :         struct drm_mm *mm;
     166             :         struct list_head node_list;
     167             :         struct list_head hole_stack;
     168             :         struct rb_node rb;
     169             :         struct rb_node rb_hole_size;
     170             :         struct rb_node rb_hole_addr;
     171             :         u64 __subtree_last;
     172             :         u64 hole_size;
     173             :         u64 subtree_max_hole;
     174             :         unsigned long flags;
     175             : #define DRM_MM_NODE_ALLOCATED_BIT       0
     176             : #define DRM_MM_NODE_SCANNED_BIT         1
     177             : #ifdef CONFIG_DRM_DEBUG_MM
     178             :         depot_stack_handle_t stack;
     179             : #endif
     180             : };
     181             : 
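/*
 * Illustrative sketch (not part of drm_mm.h): since allocation of the nodes is
 * handled entirely by the driver, a &drm_mm_node is normally embedded in a
 * larger, driver-private object. "example_buffer" is a hypothetical structure;
 * the embedded node must be zeroed before it is first handed to drm_mm, e.g.
 * by allocating the containing object with kzalloc().
 */
struct example_buffer {
        struct drm_mm_node vram_node;   /* embedded, starts out zeroed */
        void *cpu_addr;
        size_t size;
};
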
     182             : /**
     183             :  * struct drm_mm - DRM allocator
     184             :  *
     185             :  * DRM range allocator with a few special functions and features geared towards
     186             :  * managing GPU memory. Except for the @color_adjust callback the structure is
     187             :  * entirely opaque and should only be accessed through the provided functions
     188             :  * and macros. This structure can be embedded into larger driver structures.
     189             :  */
     190             : struct drm_mm {
     191             :         /**
     192             :          * @color_adjust:
     193             :          *
     194             :          * Optional driver callback to further apply restrictions on a hole. The
     195             :          * node argument points at the node containing the hole from which the
     196             :          * block would be allocated (see drm_mm_hole_follows() and friends). The
     197             :          * other arguments are the size of the block to be allocated. The driver
     198             :          * can adjust the start and end as needed to e.g. insert guard pages.
     199             :          */
     200             :         void (*color_adjust)(const struct drm_mm_node *node,
     201             :                              unsigned long color,
     202             :                              u64 *start, u64 *end);
     203             : 
     204             :         /* private: */
     205             :         /* List of all memory nodes that immediately precede a free hole. */
     206             :         struct list_head hole_stack;
     207             :         /* head_node.node_list is the list of all memory nodes, ordered
     208             :          * according to the (increasing) start address of the memory node. */
     209             :         struct drm_mm_node head_node;
     210             :         /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
     211             :         struct rb_root_cached interval_tree;
     212             :         struct rb_root_cached holes_size;
     213             :         struct rb_root holes_addr;
     214             : 
     215             :         unsigned long scan_active;
     216             : };
     217             : 
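/*
 * Illustrative sketch (not part of drm_mm.h): one possible @color_adjust hook
 * in the spirit of the "guard pages" example above. It shrinks the hole away
 * from neighbouring nodes whose tag differs from the block being allocated.
 * EXAMPLE_GUARD_SIZE and the function name are hypothetical;
 * drm_mm_node_allocated() is declared further down in this header.
 */
#define EXAMPLE_GUARD_SIZE 4096ull

static void example_color_adjust(const struct drm_mm_node *node,
                                 unsigned long color, u64 *start, u64 *end)
{
        if (drm_mm_node_allocated(node) && node->color != color)
                *start += EXAMPLE_GUARD_SIZE;

        node = list_next_entry(node, node_list);
        if (drm_mm_node_allocated(node) && node->color != color)
                *end -= EXAMPLE_GUARD_SIZE;
}
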
     218             : /**
      219             :  * struct drm_mm_scan - DRM allocator eviction roster data
     220             :  *
      221             :  * This structure tracks data needed for the eviction roster set up using
     222             :  * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and
     223             :  * drm_mm_scan_remove_block(). The structure is entirely opaque and should only
     224             :  * be accessed through the provided functions and macros. It is meant to be
     225             :  * allocated temporarily by the driver on the stack.
     226             :  */
     227             : struct drm_mm_scan {
     228             :         /* private: */
     229             :         struct drm_mm *mm;
     230             : 
     231             :         u64 size;
     232             :         u64 alignment;
     233             :         u64 remainder_mask;
     234             : 
     235             :         u64 range_start;
     236             :         u64 range_end;
     237             : 
     238             :         u64 hit_start;
     239             :         u64 hit_end;
     240             : 
     241             :         unsigned long color;
     242             :         enum drm_mm_insert_mode mode;
     243             : };
     244             : 
     245             : /**
     246             :  * drm_mm_node_allocated - checks whether a node is allocated
     247             :  * @node: drm_mm_node to check
     248             :  *
     249             :  * Drivers are required to clear a node prior to using it with the
     250             :  * drm_mm range manager.
     251             :  *
     252             :  * Drivers should use this helper for proper encapsulation of drm_mm
     253             :  * internals.
     254             :  *
     255             :  * Returns:
     256             :  * True if the @node is allocated.
     257             :  */
     258             : static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
     259             : {
     260           0 :         return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
     261             : }
     262             : 
     263             : /**
     264             :  * drm_mm_initialized - checks whether an allocator is initialized
     265             :  * @mm: drm_mm to check
     266             :  *
     267             :  * Drivers should clear the struct drm_mm prior to initialisation if they
     268             :  * want to use this function.
     269             :  *
     270             :  * Drivers should use this helper for proper encapsulation of drm_mm
     271             :  * internals.
     272             :  *
     273             :  * Returns:
     274             :  * True if the @mm is initialized.
     275             :  */
     276             : static inline bool drm_mm_initialized(const struct drm_mm *mm)
     277             : {
     278           0 :         return READ_ONCE(mm->hole_stack.next);
     279             : }
     280             : 
     281             : /**
     282             :  * drm_mm_hole_follows - checks whether a hole follows this node
     283             :  * @node: drm_mm_node to check
     284             :  *
     285             :  * Holes are embedded into the drm_mm using the tail of a drm_mm_node.
     286             :  * If you wish to know whether a hole follows this particular node,
     287             :  * query this function. See also drm_mm_hole_node_start() and
     288             :  * drm_mm_hole_node_end().
     289             :  *
     290             :  * Returns:
     291             :  * True if a hole follows the @node.
     292             :  */
     293             : static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
     294             : {
     295           0 :         return node->hole_size;
     296             : }
     297             : 
     298             : static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
     299             : {
     300           0 :         return hole_node->start + hole_node->size;
     301             : }
     302             : 
     303             : /**
     304             :  * drm_mm_hole_node_start - computes the start of the hole following @node
     305             :  * @hole_node: drm_mm_node which implicitly tracks the following hole
     306             :  *
     307             :  * This is useful for driver-specific debug dumpers. Otherwise drivers should
     308             :  * not inspect holes themselves. Drivers must check first whether a hole indeed
      309             :  * follows by looking at drm_mm_hole_follows().
     310             :  *
     311             :  * Returns:
     312             :  * Start of the subsequent hole.
     313             :  */
     314             : static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
     315             : {
     316             :         DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node));
     317           0 :         return __drm_mm_hole_node_start(hole_node);
     318             : }
     319             : 
     320             : static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
     321             : {
     322           0 :         return list_next_entry(hole_node, node_list)->start;
     323             : }
     324             : 
     325             : /**
     326             :  * drm_mm_hole_node_end - computes the end of the hole following @node
     327             :  * @hole_node: drm_mm_node which implicitly tracks the following hole
     328             :  *
     329             :  * This is useful for driver-specific debug dumpers. Otherwise drivers should
     330             :  * not inspect holes themselves. Drivers must check first whether a hole indeed
     331             :  * follows by looking at drm_mm_hole_follows().
     332             :  *
     333             :  * Returns:
     334             :  * End of the subsequent hole.
     335             :  */
     336             : static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
     337             : {
     338           0 :         return __drm_mm_hole_node_end(hole_node);
     339             : }
     340             : 
     341             : /**
     342             :  * drm_mm_nodes - list of nodes under the drm_mm range manager
     343             :  * @mm: the struct drm_mm range manager
     344             :  *
      345             :  * As the drm_mm range manager hides its node_list deep within its
     346             :  * structure, extracting it looks painful and repetitive. This is
     347             :  * not expected to be used outside of the drm_mm_for_each_node()
     348             :  * macros and similar internal functions.
     349             :  *
     350             :  * Returns:
     351             :  * The node list, may be empty.
     352             :  */
     353             : #define drm_mm_nodes(mm) (&(mm)->head_node.node_list)
     354             : 
     355             : /**
     356             :  * drm_mm_for_each_node - iterator to walk over all allocated nodes
     357             :  * @entry: &struct drm_mm_node to assign to in each iteration step
     358             :  * @mm: &drm_mm allocator to walk
     359             :  *
     360             :  * This iterator walks over all nodes in the range allocator. It is implemented
      361             :  * with list_for_each(), so not safe against removal of elements.
     362             :  */
     363             : #define drm_mm_for_each_node(entry, mm) \
     364             :         list_for_each_entry(entry, drm_mm_nodes(mm), node_list)
     365             : 
     366             : /**
     367             :  * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
     368             :  * @entry: &struct drm_mm_node to assign to in each iteration step
     369             :  * @next: &struct drm_mm_node to store the next step
     370             :  * @mm: &drm_mm allocator to walk
     371             :  *
     372             :  * This iterator walks over all nodes in the range allocator. It is implemented
      373             :  * with list_for_each_safe(), so safe against removal of elements.
     374             :  */
     375             : #define drm_mm_for_each_node_safe(entry, next, mm) \
     376             :         list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
     377             : 
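/*
 * Illustrative sketch (not part of drm_mm.h): releasing every node before
 * tearing the allocator down; the _safe variant is required because nodes are
 * removed while walking. drm_mm_remove_node() and drm_mm_takedown() are
 * declared further down in this header.
 */
static void example_release_all(struct drm_mm *mm)
{
        struct drm_mm_node *node, *next;

        drm_mm_for_each_node_safe(node, next, mm)
                drm_mm_remove_node(node);

        drm_mm_takedown(mm);
}
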
     378             : /**
     379             :  * drm_mm_for_each_hole - iterator to walk over all holes
     380             :  * @pos: &drm_mm_node used internally to track progress
     381             :  * @mm: &drm_mm allocator to walk
     382             :  * @hole_start: ulong variable to assign the hole start to on each iteration
     383             :  * @hole_end: ulong variable to assign the hole end to on each iteration
     384             :  *
     385             :  * This iterator walks over all holes in the range allocator. It is implemented
      386             :  * with list_for_each(), so not safe against removal of elements. @pos is used
     387             :  * internally and will not reflect a real drm_mm_node for the very first hole.
     388             :  * Hence users of this iterator may not access it.
     389             :  *
     390             :  * Implementation Note:
     391             :  * We need to inline list_for_each_entry in order to be able to set hole_start
     392             :  * and hole_end on each iteration while keeping the macro sane.
     393             :  */
     394             : #define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
     395             :         for (pos = list_first_entry(&(mm)->hole_stack, \
     396             :                                     typeof(*pos), hole_stack); \
     397             :              &pos->hole_stack != &(mm)->hole_stack ? \
     398             :              hole_start = drm_mm_hole_node_start(pos), \
     399             :              hole_end = hole_start + pos->hole_size, \
     400             :              1 : 0; \
     401             :              pos = list_next_entry(pos, hole_stack))
     402             : 
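/*
 * Illustrative sketch (not part of drm_mm.h): a driver-specific debug dump of
 * all holes, which is the main legitimate reason to inspect holes at all. The
 * function name is hypothetical and pr_info() is used purely for illustration.
 */
static void example_dump_holes(struct drm_mm *mm)
{
        struct drm_mm_node *pos;
        u64 hole_start, hole_end;

        drm_mm_for_each_hole(pos, mm, hole_start, hole_end)
                pr_info("hole [%llx, %llx): %llu bytes\n",
                        hole_start, hole_end, hole_end - hole_start);
}
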
     403             : /*
     404             :  * Basic range manager support (drm_mm.c)
     405             :  */
     406             : int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
     407             : int drm_mm_insert_node_in_range(struct drm_mm *mm,
     408             :                                 struct drm_mm_node *node,
     409             :                                 u64 size,
     410             :                                 u64 alignment,
     411             :                                 unsigned long color,
     412             :                                 u64 start,
     413             :                                 u64 end,
     414             :                                 enum drm_mm_insert_mode mode);
     415             : 
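/*
 * Illustrative sketch (not part of drm_mm.h): the two ways to put a node into
 * the allocator. drm_mm_reserve_node() claims a fixed, caller-chosen range
 * (here a made-up firmware region), drm_mm_insert_node_in_range() searches for
 * a hole inside [start, end). Both nodes are assumed to be zero-initialised by
 * the caller; all addresses and sizes are hypothetical example values.
 */
static int example_setup(struct drm_mm *mm, struct drm_mm_node *stolen,
                         struct drm_mm_node *buf)
{
        int err;

        /* pin down a pre-existing region at a fixed address */
        stolen->start = 0;
        stolen->size = 8 * 1024 * 1024;
        err = drm_mm_reserve_node(mm, stolen);
        if (err)
                return err;

        /* search only the low 4 GiB for a page-aligned 64 KiB block */
        return drm_mm_insert_node_in_range(mm, buf, 64 * 1024, 4096, 0,
                                           0, 1ULL << 32, DRM_MM_INSERT_LOW);
}
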
     416             : /**
     417             :  * drm_mm_insert_node_generic - search for space and insert @node
     418             :  * @mm: drm_mm to allocate from
     419             :  * @node: preallocate node to insert
     420             :  * @size: size of the allocation
     421             :  * @alignment: alignment of the allocation
     422             :  * @color: opaque tag value to use for this node
     423             :  * @mode: fine-tune the allocation search and placement
     424             :  *
     425             :  * This is a simplified version of drm_mm_insert_node_in_range() with no
     426             :  * range restrictions applied.
     427             :  *
     428             :  * The preallocated node must be cleared to 0.
     429             :  *
     430             :  * Returns:
     431             :  * 0 on success, -ENOSPC if there's no suitable hole.
     432             :  */
     433             : static inline int
     434             : drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
     435             :                            u64 size, u64 alignment,
     436             :                            unsigned long color,
     437             :                            enum drm_mm_insert_mode mode)
     438             : {
     439           0 :         return drm_mm_insert_node_in_range(mm, node,
     440             :                                            size, alignment, color,
     441             :                                            0, U64_MAX, mode);
     442             : }
     443             : 
     444             : /**
     445             :  * drm_mm_insert_node - search for space and insert @node
     446             :  * @mm: drm_mm to allocate from
     447             :  * @node: preallocate node to insert
     448             :  * @size: size of the allocation
     449             :  *
     450             :  * This is a simplified version of drm_mm_insert_node_generic() with @color set
     451             :  * to 0.
     452             :  *
     453             :  * The preallocated node must be cleared to 0.
     454             :  *
     455             :  * Returns:
     456             :  * 0 on success, -ENOSPC if there's no suitable hole.
     457             :  */
     458             : static inline int drm_mm_insert_node(struct drm_mm *mm,
     459             :                                      struct drm_mm_node *node,
     460             :                                      u64 size)
     461             : {
     462           0 :         return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
     463             : }
     464             : 
     465             : void drm_mm_remove_node(struct drm_mm_node *node);
     466             : void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
     467             : void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
     468             : void drm_mm_takedown(struct drm_mm *mm);
     469             : 
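/*
 * Illustrative sketch (not part of drm_mm.h): the usual life cycle of an
 * allocator. The managed range and all names are hypothetical example values.
 */
static int example_lifecycle(void)
{
        struct drm_mm mm;
        struct drm_mm_node node = {};   /* nodes must be cleared before use */
        int err;

        drm_mm_init(&mm, 0, 256 * 1024 * 1024);         /* manage [0, 256 MiB) */

        err = drm_mm_insert_node(&mm, &node, 4096);
        if (!err) {
                /* ... use node.start / node.size ... */
                drm_mm_remove_node(&node);
        }

        drm_mm_takedown(&mm);   /* only valid once all nodes are removed again */
        return err;
}
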
     470             : /**
     471             :  * drm_mm_clean - checks whether an allocator is clean
     472             :  * @mm: drm_mm allocator to check
     473             :  *
     474             :  * Returns:
     475             :  * True if the allocator is completely free, false if there's still a node
     476             :  * allocated in it.
     477             :  */
     478             : static inline bool drm_mm_clean(const struct drm_mm *mm)
     479             : {
     480           0 :         return list_empty(drm_mm_nodes(mm));
     481             : }
     482             : 
     483             : struct drm_mm_node *
     484             : __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);
     485             : 
     486             : /**
     487             :  * drm_mm_for_each_node_in_range - iterator to walk over a range of
     488             :  * allocated nodes
     489             :  * @node__: drm_mm_node structure to assign to in each iteration step
     490             :  * @mm__: drm_mm allocator to walk
     491             :  * @start__: starting offset, the first node will overlap this
     492             :  * @end__: ending offset, the last node will start before this (but may overlap)
     493             :  *
     494             :  * This iterator walks over all nodes in the range allocator that lie
     495             :  * between @start and @end. It is implemented similarly to list_for_each(),
     496             :  * but using the internal interval tree to accelerate the search for the
     497             :  * starting node, and so not safe against removal of elements. It assumes
     498             :  * that @end is within (or is the upper limit of) the drm_mm allocator.
     499             :  * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk
     500             :  * over the special _unallocated_ &drm_mm.head_node, and may even continue
     501             :  * indefinitely.
     502             :  */
     503             : #define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)     \
     504             :         for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
     505             :              node__->start < (end__);                                     \
     506             :              node__ = list_next_entry(node__, node_list))
     507             : 
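/*
 * Illustrative sketch (not part of drm_mm.h): checking whether any allocated
 * node overlaps a given range, e.g. before handing that range to a
 * hypothetical hardware unit. The function name is made up; the range is
 * assumed to lie within the drm_mm, as required by the iterator above.
 */
static bool example_range_busy(struct drm_mm *mm, u64 start, u64 end)
{
        struct drm_mm_node *node;

        drm_mm_for_each_node_in_range(node, mm, start, end)
                return true;    /* at least one node overlaps [start, end) */

        return false;
}
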
     508             : void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
     509             :                                  struct drm_mm *mm,
     510             :                                  u64 size, u64 alignment, unsigned long color,
     511             :                                  u64 start, u64 end,
     512             :                                  enum drm_mm_insert_mode mode);
     513             : 
     514             : /**
     515             :  * drm_mm_scan_init - initialize lru scanning
     516             :  * @scan: scan state
     517             :  * @mm: drm_mm to scan
     518             :  * @size: size of the allocation
     519             :  * @alignment: alignment of the allocation
     520             :  * @color: opaque tag value to use for the allocation
     521             :  * @mode: fine-tune the allocation search and placement
     522             :  *
     523             :  * This is a simplified version of drm_mm_scan_init_with_range() with no range
     524             :  * restrictions applied.
     525             :  *
     526             :  * This simply sets up the scanning routines with the parameters for the desired
     527             :  * hole.
     528             :  *
     529             :  * Warning:
     530             :  * As long as the scan list is non-empty, no other operations than
     531             :  * adding/removing nodes to/from the scan list are allowed.
     532             :  */
     533             : static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
     534             :                                     struct drm_mm *mm,
     535             :                                     u64 size,
     536             :                                     u64 alignment,
     537             :                                     unsigned long color,
     538             :                                     enum drm_mm_insert_mode mode)
     539             : {
     540           0 :         drm_mm_scan_init_with_range(scan, mm,
     541             :                                     size, alignment, color,
     542             :                                     0, U64_MAX, mode);
     543             : }
     544             : 
     545             : bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
     546             :                            struct drm_mm_node *node);
     547             : bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
     548             :                               struct drm_mm_node *node);
     549             : struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan);
     550             : 
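/*
 * Illustrative sketch (not part of drm_mm.h): the eviction-scan pattern. The
 * example_vma structure, the LRU list and the "unbind" step are hypothetical
 * driver-side details; the drm_mm_scan_*() calls and the final insertion with
 * DRM_MM_INSERT_EVICT are the parts provided by this header.
 */
struct example_vma {
        struct drm_mm_node node;
        struct list_head lru_link;      /* on the driver's LRU */
        struct list_head scan_link;     /* temporary, while part of a scan */
};

static int example_evict_for(struct drm_mm *mm, struct list_head *lru,
                             struct drm_mm_node *node, u64 size)
{
        struct drm_mm_scan scan;
        struct example_vma *vma, *next;
        LIST_HEAD(scan_list);
        LIST_HEAD(evict_list);
        bool found = false;

        drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_EVICT);

        /* feed candidates in LRU order until a large enough hole would form */
        list_for_each_entry(vma, lru, lru_link) {
                list_add(&vma->scan_link, &scan_list);
                if (drm_mm_scan_add_block(&scan, &vma->node)) {
                        found = true;
                        break;
                }
        }

        /*
         * Every scanned block must be removed from the scan again; list_add()
         * above plus walking scan_list from its head keeps the required order.
         * Blocks for which remove_block() returns true are set aside as victims.
         */
        list_for_each_entry_safe(vma, next, &scan_list, scan_link) {
                if (drm_mm_scan_remove_block(&scan, &vma->node))
                        list_move(&vma->scan_link, &evict_list);
                else
                        list_del(&vma->scan_link);
        }

        if (!found)
                return -ENOSPC;

        /* unbind the victims (driver-specific) so that their nodes go away */
        list_for_each_entry_safe(vma, next, &evict_list, scan_link) {
                drm_mm_remove_node(&vma->node);
                list_del(&vma->scan_link);
        }

        /* the freed range can now be reused for the new node */
        return drm_mm_insert_node_generic(mm, node, size, 0, 0,
                                          DRM_MM_INSERT_EVICT);
}
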
     551             : void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p);
     552             : 
     553             : #endif

Generated by: LCOV version 1.14