/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them, but the upside of
 * drm_mm is that it's in the DRM core, which means it's easier to extend for
 * some of the crazier special purpose needs of GPUs.
 *
 * The main data struct is &drm_mm; allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any memory allocations of its
 * own, so if drivers choose not to embed nodes they still need to allocate
 * them themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff anyway it is not a real concern. Removing a node again
 * is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe; drivers need to protect
 * modifications with their own locking. The idea behind this is that for a full
 * memory manager additional data needs to be protected anyway, hence internal
 * locking would be fully redundant.
 */
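
/*
 * Illustrative usage sketch (not part of drm_mm itself): a driver typically
 * embeds a struct drm_mm_node in its own buffer object and lets drm_mm find
 * space for it. struct example_bo and example_bo_place() below are made-up
 * names for illustration only; the caller is assumed to hold whatever lock
 * the driver uses to protect the allocator.
 */
struct example_bo {
	struct drm_mm_node vram_node;
	struct list_head lru;		/* used by the eviction sketch below */
	struct list_head scan_link;	/* ditto */
	/* ... driver-private members ... */
};

static int __maybe_unused
example_bo_place(struct drm_mm *vram_mm, struct example_bo *bo, u64 size)
{
	int ret;

	/*
	 * drm_mm does no allocations of its own: the node is embedded in
	 * the buffer object and must be zeroed before its first insertion.
	 */
	memset(&bo->vram_node, 0, sizeof(bo->vram_node));

	ret = drm_mm_insert_node(vram_mm, &bo->vram_node, size);
	if (ret)
		return ret;	/* -ENOSPC: no hole is big enough */

	/* bo->vram_node.start now holds the assigned offset. */
	return 0;
}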

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under spinlock, so avoid sleeping */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		stack_depot_snprint(node->stack, buf, BUFSZ, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);
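
/*
 * Illustrative usage sketch: __drm_mm_interval_first() is the backend of the
 * drm_mm_for_each_node_in_range() iterator from <drm/drm_mm.h>. A driver can
 * use that iterator to visit every allocation overlapping an address range.
 * example_walk_range() is a made-up name; the walk must not remove nodes
 * while iterating.
 */
static void __maybe_unused
example_walk_range(struct drm_mm *mm, u64 start, u64 end)
{
	struct drm_mm_node *node;

	drm_mm_for_each_node_in_range(node, mm, start, end) {
		/* inspect node->start / node->size here */
	}
}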

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (drm_mm_node_allocated(hole_node)) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
}

RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
			 struct drm_mm_node, rb_hole_addr,
			 u64, subtree_max_hole, HOLE_SIZE)

static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_node, *rb_parent = NULL;
	u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
	struct drm_mm_node *parent;

	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
		if (parent->subtree_max_hole < subtree_max_hole)
			parent->subtree_max_hole = subtree_max_hole;
		if (start < HOLE_ADDR(parent))
			link = &parent->rb_hole_addr.rb_left;
		else
			link = &parent->rb_hole_addr.rb_right;
	}

	rb_link_node(&node->rb_hole_addr, rb_parent, link);
	rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
}

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	node->subtree_max_hole = node->hole_size;
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
	insert_hole_addr(&mm->holes_addr, node);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
			   &augment_callbacks);
	node->hole_size = 0;
	node->subtree_max_hole = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
}

static bool usable_hole_addr(struct rb_node *rb, u64 size)
{
	return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
}

static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
{
	struct rb_node *rb = mm->holes_addr.rb_node;
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		if (!usable_hole_addr(rb, size))
			break;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole_addr(mm, start, size);

	case DRM_MM_INSERT_HIGH:
		return find_hole_addr(mm, end, size);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

/**
 * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
 * @name: name of function to declare
 * @first: first rb member to traverse (either rb_left or rb_right).
 * @last: last rb member to traverse (either rb_right or rb_left).
 *
 * This macro declares a function to return the next hole of the addr rb tree.
 * While traversing the tree we take the searched size into account and only
 * visit branches with potentially big enough holes.
 */
#define DECLARE_NEXT_HOLE_ADDR(name, first, last)			\
static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size)	\
{									\
	struct rb_node *parent, *node;					\
									\
	/* Check @entry before computing &entry->rb_hole_addr. */	\
	if (!entry)							\
		return NULL;						\
									\
	node = &entry->rb_hole_addr;					\
	if (RB_EMPTY_NODE(node))					\
		return NULL;						\
									\
	if (usable_hole_addr(node->first, size)) {			\
		node = node->first;					\
		while (usable_hole_addr(node->last, size))		\
			node = node->last;				\
		return rb_hole_addr_to_node(node);			\
	}								\
									\
	while ((parent = rb_parent(node)) && node == parent->first)	\
		node = parent;						\
									\
	return rb_hole_addr_to_node(parent);				\
}

DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  u64 size,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return next_hole_low_addr(node, size);

	case DRM_MM_INSERT_HIGH:
		return next_hole_high_addr(node, size);

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set-up before the range allocator can be
 * set-up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;
	u64 end;

	end = node->start + node->size;
	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole_addr(mm, node->start, 0);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
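
/*
 * Illustrative usage sketch: reserving the range a firmware-initialized
 * framebuffer already occupies, before any other allocations are made.
 * example_reserve_fw_fb() and its parameters are made-up names; only
 * start, size and color may be set, all other node fields must be zero.
 */
static int __maybe_unused
example_reserve_fw_fb(struct drm_mm *mm, struct drm_mm_node *node,
		      u64 fb_base, u64 fb_size)
{
	memset(node, 0, sizeof(*node));
	node->start = fb_base;
	node->size = fb_size;

	/* Fails with -ENOSPC if the range is already in use. */
	return drm_mm_reserve_node(mm, node);
}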

static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
	return rb ? rb_to_hole_size(rb) : 0;
}

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;
	bool once;

	DRM_MM_BUG_ON(range_start > range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	once = mode & DRM_MM_INSERT_ONCE;
	mode &= ~DRM_MM_INSERT_ONCE;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, size, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
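
/*
 * Illustrative usage sketch: a page-aligned allocation restricted to the
 * low 256MiB of the managed range, placed top-down to keep low addresses
 * free for clients that strictly require them. The constants are made up
 * for illustration.
 */
static int __maybe_unused
example_alloc_high(struct drm_mm *mm, struct drm_mm_node *node, u64 size)
{
	return drm_mm_insert_node_in_range(mm, node, size,
					   PAGE_SIZE,		/* alignment */
					   0,			/* color */
					   0, 256ull << 20,	/* range */
					   DRM_MM_INSERT_HIGH);
}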

static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence can't
 * move allocations by reassigning pointers. It's a combination of remove and
 * insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(old));

	*new = *old;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
	list_replace(&old->node_list, &new->node_list);
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node_cached(&old->rb_hole_size,
				       &new->rb_hole_size,
				       &mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient to simply select objects from the tail of an LRU until there's a
 * suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So, like the free stack which needs to be walked before
 * a scan operation even begins, this is linear in the number of objects. It
 * doesn't seem to hurt too badly. A sketch of the full protocol follows below.
 */
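
/*
 * Illustrative sketch of the protocol described above (not code from any
 * real driver): evict just enough objects from an LRU list to make room
 * for @size bytes. example_bo_evict() stands in for the driver-specific
 * unbind step; the caller is assumed to hold the single lock protecting
 * both the drm_mm and the LRU list.
 */
static void __maybe_unused example_bo_evict(struct example_bo *bo)
{
	/* driver-specific: shoot down mappings, mark @bo as unbound, ... */
}

static int __maybe_unused
example_evict_for_size(struct drm_mm *mm, struct list_head *lru, u64 size)
{
	struct drm_mm_scan scan;
	struct example_bo *bo, *next;
	struct drm_mm_node *node;
	LIST_HEAD(scan_list);
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_BEST);

	/* Build the roster by walking the LRU until a hole appears. */
	list_for_each_entry(bo, lru, lru) {
		/* list_add() so that scan_list is in reverse scan order. */
		list_add(&bo->scan_link, &scan_list);
		if (drm_mm_scan_add_block(&scan, &bo->vram_node)) {
			found = true;
			break;
		}
	}

	/*
	 * Restore the allocator state by removing the blocks in exactly
	 * the reverse order they were added; keep the blocks the scan
	 * selected for eviction on a separate list. No other drm_mm
	 * operation is allowed until scan mode has been left.
	 */
	list_for_each_entry_safe(bo, next, &scan_list, scan_link) {
		if (drm_mm_scan_remove_block(&scan, &bo->vram_node))
			list_move(&bo->scan_link, &evict_list);
		else
			list_del(&bo->scan_link);
	}

	if (!found)
		return -ENOSPC;

	/* Actually evict the selected blocks, freeing the target hole. */
	list_for_each_entry_safe(bo, next, &evict_list, scan_link) {
		list_del(&bo->scan_link);
		example_bo_evict(bo);
		drm_mm_remove_node(&bo->vram_node);
	}

	/* With mm->color_adjust, overlapping neighbours may need to go too. */
	while ((node = drm_mm_scan_color_evict(&scan))) {
		bo = container_of(node, struct example_bo, vram_node);
		example_bo_evict(bo);
		drm_mm_remove_node(node);
	}

	return 0;
}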

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() with
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
 * the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first element
	 * in the hole_stack list, but due to side-effects in the driver it
	 * may not be.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after we found the hole previously */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;

	/*
	 * Clever trick to avoid a special case in the free hole tracking:
	 * the head node is sized such that start + size wraps around to
	 * @start again, so the "hole" following the head node covers
	 * exactly the managed range [start, start + size).
	 */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.flags = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;

#ifdef CONFIG_DRM_DEBUG_MM
	stack_depot_init();
#endif
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);
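
/*
 * Illustrative usage sketch: pairing drm_mm_init() with drm_mm_takedown()
 * for a driver-managed address range. The function names are made up; a
 * real driver would embed the drm_mm in its device structure.
 */
static void __maybe_unused
example_range_manager_init(struct drm_mm *mm, u64 start, u64 size)
{
	/* @mm must be zeroed before drm_mm_init(). */
	memset(mm, 0, sizeof(*mm));
	drm_mm_init(mm, start, size);
}

static void __maybe_unused example_range_manager_fini(struct drm_mm *mm)
{
	/* All nodes must have been removed, or takedown will complain. */
	drm_mm_takedown(mm);
}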

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
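
/*
 * Illustrative usage sketch: hooking drm_mm_print() up to a debugfs file.
 * example_mm_show() is a made-up seq_file show callback and assumes the
 * drm_mm was stashed in m->private; drm_seq_file_printer() comes from
 * <drm/drm_print.h>.
 */
static int __maybe_unused example_mm_show(struct seq_file *m, void *data)
{
	struct drm_mm *mm = m->private;
	struct drm_printer p = drm_seq_file_printer(m);

	drm_mm_print(mm, &p);

	return 0;
}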