Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * Test cases for the drm_mm range manager
4 : *
5 : * Copyright (c) 2022 Arthur Grillo <arthur.grillo@usp.br>
6 : */
7 :
8 : #include <kunit/test.h>
9 :
10 : #include <linux/prime_numbers.h>
11 : #include <linux/slab.h>
12 : #include <linux/random.h>
13 : #include <linux/vmalloc.h>
14 : #include <linux/ktime.h>
15 :
16 : #include <drm/drm_mm.h>
17 :
18 : #include "../lib/drm_random.h"
19 :
20 : static unsigned int random_seed;
21 : static unsigned int max_iterations = 8192;
22 : static unsigned int max_prime = 128;
23 :
24 : enum {
25 : BEST,
26 : BOTTOMUP,
27 : TOPDOWN,
28 : EVICT,
29 : };
30 :
31 : static const struct insert_mode {
32 : const char *name;
33 : enum drm_mm_insert_mode mode;
34 : } insert_modes[] = {
35 : [BEST] = { "best", DRM_MM_INSERT_BEST },
36 : [BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
37 : [TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
38 : [EVICT] = { "evict", DRM_MM_INSERT_EVICT },
39 : {}
40 : }, evict_modes[] = {
41 : { "bottom-up", DRM_MM_INSERT_LOW },
42 : { "top-down", DRM_MM_INSERT_HIGH },
43 : {}
44 : };
45 :
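: /* assert_no_holes(): pass only if the hole iterator finds nothing and
:  * no allocated node reports a trailing hole.
:  */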
46 0 : static bool assert_no_holes(struct kunit *test, const struct drm_mm *mm)
47 : {
48 : struct drm_mm_node *hole;
49 : u64 hole_start, __always_unused hole_end;
50 : unsigned long count;
51 :
52 0 : count = 0;
53 0 : drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
54 0 : count++;
55 0 : if (count) {
56 0 : KUNIT_FAIL(test,
57 : "Expected to find no holes (after reserve), found %lu instead\n", count);
58 0 : return false;
59 : }
60 :
61 0 : drm_mm_for_each_node(hole, mm) {
62 0 : if (drm_mm_hole_follows(hole)) {
63 0 : KUNIT_FAIL(test, "Hole follows node, expected none!\n");
64 0 : return false;
65 : }
66 : }
67 :
68 : return true;
69 : }
70 :
71 0 : static bool assert_one_hole(struct kunit *test, const struct drm_mm *mm, u64 start, u64 end)
72 : {
73 : struct drm_mm_node *hole;
74 : u64 hole_start, hole_end;
75 : unsigned long count;
76 0 : bool ok = true;
77 :
78 0 : if (end <= start)
79 : return true;
80 :
81 0 : count = 0;
82 0 : drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
83 0 : if (start != hole_start || end != hole_end) {
84 0 : if (ok)
85 0 : KUNIT_FAIL(test,
86 : "empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
87 : hole_start, hole_end, start, end);
88 : ok = false;
89 : }
90 0 : count++;
91 : }
92 0 : if (count != 1) {
93 0 : KUNIT_FAIL(test, "Expected to find one hole, found %lu instead\n", count);
94 0 : ok = false;
95 : }
96 :
97 : return ok;
98 : }
99 :
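: /* assert_continuous(): with equally sized nodes the mm must be densely packed:
:  * nodes appear in address order at multiples of @size, no hole follows any of
:  * them, and a range lookup at each offset returns exactly that node.
:  */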
100 0 : static bool assert_continuous(struct kunit *test, const struct drm_mm *mm, u64 size)
101 : {
102 : struct drm_mm_node *node, *check, *found;
103 : unsigned long n;
104 : u64 addr;
105 :
106 0 : if (!assert_no_holes(test, mm))
107 : return false;
108 :
109 0 : n = 0;
110 0 : addr = 0;
111 0 : drm_mm_for_each_node(node, mm) {
112 0 : if (node->start != addr) {
113 0 : KUNIT_FAIL(test, "node[%ld] list out of order, expected %llx found %llx\n",
114 : n, addr, node->start);
115 0 : return false;
116 : }
117 :
118 0 : if (node->size != size) {
119 0 : KUNIT_FAIL(test, "node[%ld].size incorrect, expected %llx, found %llx\n",
120 : n, size, node->size);
121 0 : return false;
122 : }
123 :
124 0 : if (drm_mm_hole_follows(node)) {
125 0 : KUNIT_FAIL(test, "node[%ld] is followed by a hole!\n", n);
126 0 : return false;
127 : }
128 :
129 0 : found = NULL;
130 0 : drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
131 0 : if (node != check) {
132 0 : KUNIT_FAIL(test,
133 : "lookup return wrong node, expected start %llx, found %llx\n",
134 : node->start, check->start);
135 0 : return false;
136 : }
137 0 : found = check;
138 : }
139 0 : if (!found) {
140 0 : KUNIT_FAIL(test, "lookup failed for node %llx + %llx\n", addr, size);
141 0 : return false;
142 : }
143 :
144 0 : addr += size;
145 0 : n++;
146 : }
147 :
148 : return true;
149 : }
150 :
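: /* misalignment(): node->start modulo the requested alignment; an alignment of 0
:  * imposes no constraint and always reports 0.
:  */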
151 : static u64 misalignment(struct drm_mm_node *node, u64 alignment)
152 : {
153 : u64 rem;
154 :
155 0 : if (!alignment)
156 : return 0;
157 :
158 0 : div64_u64_rem(node->start, alignment, &rem);
159 : return rem;
160 : }
161 :
162 0 : static bool assert_node(struct kunit *test, struct drm_mm_node *node, struct drm_mm *mm,
163 : u64 size, u64 alignment, unsigned long color)
164 : {
165 0 : bool ok = true;
166 :
167 0 : if (!drm_mm_node_allocated(node) || node->mm != mm) {
168 0 : KUNIT_FAIL(test, "node not allocated\n");
169 0 : ok = false;
170 : }
171 :
172 0 : if (node->size != size) {
173 0 : KUNIT_FAIL(test, "node has wrong size, found %llu, expected %llu\n",
174 : node->size, size);
175 0 : ok = false;
176 : }
177 :
178 0 : if (misalignment(node, alignment)) {
179 0 : KUNIT_FAIL(test,
180 : "node is misaligned, start %llx rem %llu, expected alignment %llu\n",
181 : node->start, misalignment(node, alignment), alignment);
182 0 : ok = false;
183 : }
184 :
185 0 : if (node->color != color) {
186 0 : KUNIT_FAIL(test, "node has wrong color, found %lu, expected %lu\n",
187 : node->color, color);
188 0 : ok = false;
189 : }
190 :
191 0 : return ok;
192 : }
193 :
194 0 : static void drm_test_mm_init(struct kunit *test)
195 : {
196 0 : const unsigned int size = 4096;
197 : struct drm_mm mm;
198 : struct drm_mm_node tmp;
199 :
200 : /* Start with some simple checks on initialising the struct drm_mm */
201 0 : memset(&mm, 0, sizeof(mm));
202 0 : KUNIT_ASSERT_FALSE_MSG(test, drm_mm_initialized(&mm),
203 : "zeroed mm claims to be initialized\n");
204 :
205 0 : memset(&mm, 0xff, sizeof(mm));
206 0 : drm_mm_init(&mm, 0, size);
207 0 : if (!drm_mm_initialized(&mm)) {
208 0 : KUNIT_FAIL(test, "mm claims not to be initialized\n");
209 0 : goto out;
210 : }
211 :
212 0 : if (!drm_mm_clean(&mm)) {
213 0 : KUNIT_FAIL(test, "mm not empty on creation\n");
214 0 : goto out;
215 : }
216 :
217 : /* After creation, it should all be one massive hole */
218 0 : if (!assert_one_hole(test, &mm, 0, size)) {
219 0 : KUNIT_FAIL(test, "");
220 0 : goto out;
221 : }
222 :
223 0 : memset(&tmp, 0, sizeof(tmp));
224 : tmp.start = 0;
225 0 : tmp.size = size;
226 0 : if (drm_mm_reserve_node(&mm, &tmp)) {
227 0 : KUNIT_FAIL(test, "failed to reserve whole drm_mm\n");
228 0 : goto out;
229 : }
230 :
231 : /* After filling the range entirely, there should be no holes */
232 0 : if (!assert_no_holes(test, &mm)) {
233 0 : KUNIT_FAIL(test, "");
234 0 : goto out;
235 : }
236 :
237 : /* And then after emptying it again, the massive hole should be back */
238 0 : drm_mm_remove_node(&tmp);
239 0 : if (!assert_one_hole(test, &mm, 0, size)) {
240 0 : KUNIT_FAIL(test, "");
241 0 : goto out;
242 : }
243 :
244 : out:
245 0 : drm_mm_takedown(&mm);
246 0 : }
247 :
248 0 : static void drm_test_mm_debug(struct kunit *test)
249 : {
250 : struct drm_mm mm;
251 : struct drm_mm_node nodes[2];
252 :
253 : /* Create a small drm_mm with a couple of nodes and a few holes, and
254 : * check that the debug iterator doesn't explode over a trivial drm_mm.
255 : */
256 :
257 0 : drm_mm_init(&mm, 0, 4096);
258 :
259 0 : memset(nodes, 0, sizeof(nodes));
260 0 : nodes[0].start = 512;
261 0 : nodes[0].size = 1024;
262 0 : KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[0]),
263 : "failed to reserve node[0] {start=%lld, size=%lld)\n",
264 : nodes[0].start, nodes[0].size);
265 :
266 0 : nodes[1].size = 1024;
267 0 : nodes[1].start = 4096 - 512 - nodes[1].size;
268 0 : KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[1]),
269 : "failed to reserve node[0] {start=%lld, size=%lld)\n",
270 : nodes[0].start, nodes[0].size);
271 0 : }
272 :
273 : static struct drm_mm_node *set_node(struct drm_mm_node *node,
274 : u64 start, u64 size)
275 : {
276 0 : node->start = start;
277 0 : node->size = size;
278 : return node;
279 : }
280 :
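: /* expect_reserve_fail(): an impossible reservation must fail with -ENOSPC;
:  * success or any other errno is reported as a test failure.
:  */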
281 0 : static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node)
282 : {
283 : int err;
284 :
285 0 : err = drm_mm_reserve_node(mm, node);
286 0 : if (likely(err == -ENOSPC))
287 : return true;
288 :
289 0 : if (!err) {
290 0 : KUNIT_FAIL(test, "impossible reserve succeeded, node %llu + %llu\n",
291 : node->start, node->size);
292 0 : drm_mm_remove_node(node);
293 : } else {
294 0 : KUNIT_FAIL(test,
295 : "impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
296 : err, -ENOSPC, node->start, node->size);
297 : }
298 : return false;
299 : }
300 :
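: /* check_reserve_boundaries(): every boundaries[] entry is zero-sized, lies
:  * outside [0, count * size) or wraps around, so each reservation must be rejected.
:  */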
301 0 : static bool noinline_for_stack check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
302 : unsigned int count,
303 : u64 size)
304 : {
305 : const struct boundary {
306 : u64 start, size;
307 : const char *name;
308 0 : } boundaries[] = {
309 : #define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
310 : B(0, 0),
311 0 : B(-size, 0),
312 : B(size, 0),
313 0 : B(size * count, 0),
314 : B(-size, size),
315 : B(-size, -size),
316 0 : B(-size, 2 * size),
317 : B(0, -size),
318 : B(size, -size),
319 : B(count * size, size),
320 : B(count * size, -size),
321 : B(count * size, count * size),
322 0 : B(count * size, -count * size),
323 0 : B(count * size, -(count + 1) * size),
324 0 : B((count + 1) * size, size),
325 : B((count + 1) * size, -size),
326 0 : B((count + 1) * size, -2 * size),
327 : #undef B
328 : };
329 0 : struct drm_mm_node tmp = {};
330 : int n;
331 :
332 0 : for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
333 0 : if (!expect_reserve_fail(test, mm, set_node(&tmp, boundaries[n].start,
334 : boundaries[n].size))) {
335 0 : KUNIT_FAIL(test, "boundary[%d:%s] failed, count=%u, size=%lld\n",
336 : n, boundaries[n].name, count, size);
337 0 : return false;
338 : }
339 : }
340 :
341 : return true;
342 : }
343 :
344 0 : static int __drm_test_mm_reserve(struct kunit *test, unsigned int count, u64 size)
345 : {
346 0 : DRM_RND_STATE(prng, random_seed);
347 : struct drm_mm mm;
348 : struct drm_mm_node tmp, *nodes, *node, *next;
349 0 : unsigned int *order, n, m, o = 0;
350 : int ret, err;
351 :
352 : /* For exercising drm_mm_reserve_node(), we want to check that
353 : * reservations outside of the drm_mm range are rejected, as are
354 : * those overlapping or otherwise colliding with already occupied ranges. Afterwards,
355 : * the tree and nodes should be intact.
356 : */
357 :
358 : DRM_MM_BUG_ON(!count);
359 : DRM_MM_BUG_ON(!size);
360 :
361 0 : ret = -ENOMEM;
362 0 : order = drm_random_order(count, &prng);
363 0 : if (!order)
364 : goto err;
365 :
366 0 : nodes = vzalloc(array_size(count, sizeof(*nodes)));
367 0 : KUNIT_ASSERT_TRUE(test, nodes);
368 :
369 0 : ret = -EINVAL;
370 0 : drm_mm_init(&mm, 0, count * size);
371 :
372 0 : if (!check_reserve_boundaries(test, &mm, count, size))
373 : goto out;
374 :
375 0 : for (n = 0; n < count; n++) {
376 0 : nodes[n].start = order[n] * size;
377 0 : nodes[n].size = size;
378 :
379 0 : err = drm_mm_reserve_node(&mm, &nodes[n]);
380 0 : if (err) {
381 0 : KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
382 : n, nodes[n].start);
383 0 : ret = err;
384 0 : goto out;
385 : }
386 :
387 0 : if (!drm_mm_node_allocated(&nodes[n])) {
388 0 : KUNIT_FAIL(test, "reserved node not allocated! step %d, start %llu\n",
389 : n, nodes[n].start);
390 0 : goto out;
391 : }
392 :
393 0 : if (!expect_reserve_fail(test, &mm, &nodes[n]))
394 : goto out;
395 : }
396 :
397 : /* After random insertion the nodes should be in order */
398 0 : if (!assert_continuous(test, &mm, size))
399 : goto out;
400 :
401 : /* Repeated use should then fail */
402 0 : drm_random_reorder(order, count, &prng);
403 0 : for (n = 0; n < count; n++) {
404 0 : if (!expect_reserve_fail(test, &mm, set_node(&tmp, order[n] * size, 1)))
405 : goto out;
406 :
407 : /* Remove and reinsert should work */
408 0 : drm_mm_remove_node(&nodes[order[n]]);
409 0 : err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
410 0 : if (err) {
411 0 : KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
412 : n, nodes[n].start);
413 0 : ret = err;
414 0 : goto out;
415 : }
416 : }
417 :
418 0 : if (!assert_continuous(test, &mm, size))
419 : goto out;
420 :
421 : /* Overlapping use should then fail */
422 0 : for (n = 0; n < count; n++) {
423 0 : if (!expect_reserve_fail(test, &mm, set_node(&tmp, 0, size * count)))
424 : goto out;
425 : }
426 0 : for (n = 0; n < count; n++) {
427 0 : if (!expect_reserve_fail(test, &mm, set_node(&tmp, size * n, size * (count - n))))
428 : goto out;
429 : }
430 :
431 : /* Remove several, reinsert, check full */
432 0 : for_each_prime_number(n, min(max_prime, count)) {
433 0 : for (m = 0; m < n; m++) {
434 0 : node = &nodes[order[(o + m) % count]];
435 0 : drm_mm_remove_node(node);
436 : }
437 :
438 0 : for (m = 0; m < n; m++) {
439 0 : node = &nodes[order[(o + m) % count]];
440 0 : err = drm_mm_reserve_node(&mm, node);
441 0 : if (err) {
442 0 : KUNIT_FAIL(test, "reserve failed, step %d/%d, start %llu\n",
443 : m, n, node->start);
444 0 : ret = err;
445 0 : goto out;
446 : }
447 : }
448 :
449 0 : o += n;
450 :
451 0 : if (!assert_continuous(test, &mm, size))
452 : goto out;
453 : }
454 :
455 : ret = 0;
456 : out:
457 0 : drm_mm_for_each_node_safe(node, next, &mm)
458 0 : drm_mm_remove_node(node);
459 0 : drm_mm_takedown(&mm);
460 0 : vfree(nodes);
461 0 : kfree(order);
462 : err:
463 0 : return ret;
464 : }
465 :
466 0 : static void drm_test_mm_reserve(struct kunit *test)
467 : {
468 0 : const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
469 : int n;
470 :
471 0 : for_each_prime_number_from(n, 1, 54) {
472 0 : u64 size = BIT_ULL(n);
473 :
474 0 : KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size - 1));
475 0 : KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size));
476 0 : KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size + 1));
477 :
478 0 : cond_resched();
479 : }
480 0 : }
481 :
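: /* expect_insert(): wrap drm_mm_insert_node_generic() and verify the resulting
:  * node's size, alignment and color via assert_node().
:  */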
482 0 : static bool expect_insert(struct kunit *test, struct drm_mm *mm,
483 : struct drm_mm_node *node, u64 size, u64 alignment, unsigned long color,
484 : const struct insert_mode *mode)
485 : {
486 : int err;
487 :
488 0 : err = drm_mm_insert_node_generic(mm, node,
489 : size, alignment, color,
490 : mode->mode);
491 0 : if (err) {
492 0 : KUNIT_FAIL(test,
493 : "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
494 : size, alignment, color, mode->name, err);
495 : return false;
496 : }
497 :
498 0 : if (!assert_node(test, node, mm, size, alignment, color)) {
499 0 : drm_mm_remove_node(node);
500 : return false;
501 : }
502 :
503 : return true;
504 : }
505 :
506 0 : static bool expect_insert_fail(struct kunit *test, struct drm_mm *mm, u64 size)
507 : {
508 0 : struct drm_mm_node tmp = {};
509 : int err;
510 :
511 0 : err = drm_mm_insert_node(mm, &tmp, size);
512 0 : if (likely(err == -ENOSPC))
513 : return true;
514 :
515 0 : if (!err) {
516 0 : KUNIT_FAIL(test, "impossible insert succeeded, node %llu + %llu\n",
517 : tmp.start, tmp.size);
518 0 : drm_mm_remove_node(&tmp);
519 : } else {
520 0 : KUNIT_FAIL(test,
521 : "impossible insert failed with wrong error %d [expected %d], size %llu\n",
522 : err, -ENOSPC, size);
523 : }
524 : return false;
525 : }
526 :
527 0 : static int __drm_test_mm_insert(struct kunit *test, unsigned int count, u64 size, bool replace)
528 : {
529 0 : DRM_RND_STATE(prng, random_seed);
530 : const struct insert_mode *mode;
531 : struct drm_mm mm;
532 : struct drm_mm_node *nodes, *node, *next;
533 0 : unsigned int *order, n, m, o = 0;
534 : int ret;
535 :
536 : /* Fill a range with lots of nodes, check it doesn't fail too early */
537 :
538 : DRM_MM_BUG_ON(!count);
539 : DRM_MM_BUG_ON(!size);
540 :
541 0 : ret = -ENOMEM;
542 0 : nodes = vmalloc(array_size(count, sizeof(*nodes)));
543 0 : KUNIT_ASSERT_TRUE(test, nodes);
544 :
545 0 : order = drm_random_order(count, &prng);
546 0 : if (!order)
547 : goto err_nodes;
548 :
549 0 : ret = -EINVAL;
550 0 : drm_mm_init(&mm, 0, count * size);
551 :
552 0 : for (mode = insert_modes; mode->name; mode++) {
553 0 : for (n = 0; n < count; n++) {
554 : struct drm_mm_node tmp;
555 :
556 0 : node = replace ? &tmp : &nodes[n];
557 0 : memset(node, 0, sizeof(*node));
558 0 : if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
559 0 : KUNIT_FAIL(test, "%s insert failed, size %llu step %d\n",
560 : mode->name, size, n);
561 0 : goto out;
562 : }
563 :
564 0 : if (replace) {
565 0 : drm_mm_replace_node(&tmp, &nodes[n]);
566 0 : if (drm_mm_node_allocated(&tmp)) {
567 0 : KUNIT_FAIL(test,
568 : "replaced old-node still allocated! step %d\n",
569 : n);
570 0 : goto out;
571 : }
572 :
573 0 : if (!assert_node(test, &nodes[n], &mm, size, 0, n)) {
574 0 : KUNIT_FAIL(test,
575 : "replaced node did not inherit parameters, size %llu step %d\n",
576 : size, n);
577 0 : goto out;
578 : }
579 :
580 0 : if (tmp.start != nodes[n].start) {
581 0 : KUNIT_FAIL(test,
582 : "replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
583 : tmp.start, size, nodes[n].start, nodes[n].size);
584 0 : goto out;
585 : }
586 : }
587 : }
588 :
589 : /* After random insertion the nodes should be in order */
590 0 : if (!assert_continuous(test, &mm, size))
591 : goto out;
592 :
593 : /* Repeated use should then fail */
594 0 : if (!expect_insert_fail(test, &mm, size))
595 : goto out;
596 :
597 : /* Remove one and reinsert, as the only hole it should refill itself */
598 0 : for (n = 0; n < count; n++) {
599 0 : u64 addr = nodes[n].start;
600 :
601 0 : drm_mm_remove_node(&nodes[n]);
602 0 : if (!expect_insert(test, &mm, &nodes[n], size, 0, n, mode)) {
603 0 : KUNIT_FAIL(test, "%s reinsert failed, size %llu step %d\n",
604 : mode->name, size, n);
605 0 : goto out;
606 : }
607 :
608 0 : if (nodes[n].start != addr) {
609 0 : KUNIT_FAIL(test,
610 : "%s reinsert node moved, step %d, expected %llx, found %llx\n",
611 : mode->name, n, addr, nodes[n].start);
612 0 : goto out;
613 : }
614 :
615 0 : if (!assert_continuous(test, &mm, size))
616 : goto out;
617 : }
618 :
619 : /* Remove several, reinsert, check full */
620 0 : for_each_prime_number(n, min(max_prime, count)) {
621 0 : for (m = 0; m < n; m++) {
622 0 : node = &nodes[order[(o + m) % count]];
623 0 : drm_mm_remove_node(node);
624 : }
625 :
626 0 : for (m = 0; m < n; m++) {
627 0 : node = &nodes[order[(o + m) % count]];
628 0 : if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
629 0 : KUNIT_FAIL(test,
630 : "%s multiple reinsert failed, size %llu step %d\n",
631 : mode->name, size, n);
632 0 : goto out;
633 : }
634 : }
635 :
636 0 : o += n;
637 :
638 0 : if (!assert_continuous(test, &mm, size))
639 : goto out;
640 :
641 0 : if (!expect_insert_fail(test, &mm, size))
642 : goto out;
643 : }
644 :
645 0 : drm_mm_for_each_node_safe(node, next, &mm)
646 0 : drm_mm_remove_node(node);
647 : DRM_MM_BUG_ON(!drm_mm_clean(&mm));
648 :
649 0 : cond_resched();
650 : }
651 :
652 : ret = 0;
653 : out:
654 0 : drm_mm_for_each_node_safe(node, next, &mm)
655 0 : drm_mm_remove_node(node);
656 0 : drm_mm_takedown(&mm);
657 0 : kfree(order);
658 : err_nodes:
659 0 : vfree(nodes);
660 0 : return ret;
661 : }
662 :
663 0 : static void drm_test_mm_insert(struct kunit *test)
664 : {
665 0 : const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
666 : unsigned int n;
667 :
668 0 : for_each_prime_number_from(n, 1, 54) {
669 0 : u64 size = BIT_ULL(n);
670 :
671 0 : KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, false));
672 0 : KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, false));
673 0 : KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, false));
674 :
675 0 : cond_resched();
676 : }
677 0 : }
678 :
679 0 : static void drm_test_mm_replace(struct kunit *test)
680 : {
681 0 : const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
682 : unsigned int n;
683 :
684 : /* Reuse __drm_test_mm_insert to exercise replacement by inserting a dummy node,
685 : * then replacing it with the intended node. We want to check that
686 : * the tree is intact and all the information we need is carried
687 : * across to the target node.
688 : */
689 :
690 0 : for_each_prime_number_from(n, 1, 54) {
691 0 : u64 size = BIT_ULL(n);
692 :
693 0 : KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, true));
694 0 : KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, true));
695 0 : KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, true));
696 :
697 0 : cond_resched();
698 : }
699 0 : }
700 :
701 0 : static bool expect_insert_in_range(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node,
702 : u64 size, u64 alignment, unsigned long color,
703 : u64 range_start, u64 range_end, const struct insert_mode *mode)
704 : {
705 : int err;
706 :
707 0 : err = drm_mm_insert_node_in_range(mm, node,
708 : size, alignment, color,
709 : range_start, range_end,
710 : mode->mode);
711 0 : if (err) {
712 0 : KUNIT_FAIL(test,
713 : "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
714 : size, alignment, color, mode->name,
715 : range_start, range_end, err);
716 : return false;
717 : }
718 :
719 0 : if (!assert_node(test, node, mm, size, alignment, color)) {
720 0 : drm_mm_remove_node(node);
721 : return false;
722 : }
723 :
724 : return true;
725 : }
726 :
727 0 : static bool expect_insert_in_range_fail(struct kunit *test, struct drm_mm *mm,
728 : u64 size, u64 range_start, u64 range_end)
729 : {
730 0 : struct drm_mm_node tmp = {};
731 : int err;
732 :
733 0 : err = drm_mm_insert_node_in_range(mm, &tmp, size, 0, 0, range_start, range_end,
734 : 0);
735 0 : if (likely(err == -ENOSPC))
736 : return true;
737 :
738 0 : if (!err) {
739 0 : KUNIT_FAIL(test,
740 : "impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
741 : tmp.start, tmp.size, range_start, range_end);
742 0 : drm_mm_remove_node(&tmp);
743 : } else {
744 0 : KUNIT_FAIL(test,
745 : "impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
746 : err, -ENOSPC, size, range_start, range_end);
747 : }
748 :
749 : return false;
750 : }
751 :
752 0 : static bool assert_contiguous_in_range(struct kunit *test, struct drm_mm *mm,
753 : u64 size, u64 start, u64 end)
754 : {
755 : struct drm_mm_node *node;
756 : unsigned int n;
757 :
758 0 : if (!expect_insert_in_range_fail(test, mm, size, start, end))
759 : return false;
760 :
761 0 : n = div64_u64(start + size - 1, size);
762 0 : drm_mm_for_each_node(node, mm) {
763 0 : if (node->start < start || node->start + node->size > end) {
764 0 : KUNIT_FAIL(test,
765 : "node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
766 : n, node->start, node->start + node->size, start, end);
767 0 : return false;
768 : }
769 :
770 0 : if (node->start != n * size) {
771 0 : KUNIT_FAIL(test, "node %d out of order, expected start %llx, found %llx\n",
772 : n, n * size, node->start);
773 0 : return false;
774 : }
775 :
776 0 : if (node->size != size) {
777 0 : KUNIT_FAIL(test, "node %d has wrong size, expected size %llx, found %llx\n",
778 : n, size, node->size);
779 0 : return false;
780 : }
781 :
782 0 : if (drm_mm_hole_follows(node) && drm_mm_hole_node_end(node) < end) {
783 0 : KUNIT_FAIL(test, "node %d is followed by a hole!\n", n);
784 0 : return false;
785 : }
786 :
787 0 : n++;
788 : }
789 :
790 0 : if (start > 0) {
791 0 : node = __drm_mm_interval_first(mm, 0, start - 1);
792 0 : if (drm_mm_node_allocated(node)) {
793 0 : KUNIT_FAIL(test, "node before start: node=%llx+%llu, start=%llx\n",
794 : node->start, node->size, start);
795 0 : return false;
796 : }
797 : }
798 :
799 0 : if (end < U64_MAX) {
800 0 : node = __drm_mm_interval_first(mm, end, U64_MAX);
801 0 : if (drm_mm_node_allocated(node)) {
802 0 : KUNIT_FAIL(test, "node after end: node=%llx+%llu, end=%llx\n",
803 : node->start, node->size, end);
804 0 : return false;
805 : }
806 : }
807 :
808 : return true;
809 : }
810 :
811 0 : static int __drm_test_mm_insert_range(struct kunit *test, unsigned int count, u64 size,
812 : u64 start, u64 end)
813 : {
814 : const struct insert_mode *mode;
815 : struct drm_mm mm;
816 : struct drm_mm_node *nodes, *node, *next;
817 : unsigned int n, start_n, end_n;
818 : int ret;
819 :
820 : DRM_MM_BUG_ON(!count);
821 : DRM_MM_BUG_ON(!size);
822 : DRM_MM_BUG_ON(end <= start);
823 :
824 : /* Very similar to __drm_test_mm_insert(), but now instead of populating the
825 : * full range of the drm_mm, we try to fill a small portion of it.
826 : */
827 :
828 0 : ret = -ENOMEM;
829 0 : nodes = vzalloc(array_size(count, sizeof(*nodes)));
830 0 : KUNIT_ASSERT_TRUE(test, nodes);
831 :
832 0 : ret = -EINVAL;
833 0 : drm_mm_init(&mm, 0, count * size);
834 :
835 0 : start_n = div64_u64(start + size - 1, size);
836 0 : end_n = div64_u64(end - size, size);
837 :
838 0 : for (mode = insert_modes; mode->name; mode++) {
839 0 : for (n = start_n; n <= end_n; n++) {
840 0 : if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
841 : start, end, mode)) {
842 0 : KUNIT_FAIL(test,
843 : "%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
844 : mode->name, size, n, start_n, end_n, start, end);
845 0 : goto out;
846 : }
847 : }
848 :
849 0 : if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
850 0 : KUNIT_FAIL(test,
851 : "%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
852 : mode->name, start, end, size);
853 0 : goto out;
854 : }
855 :
856 : /* Remove one and reinsert, it should refill itself */
857 0 : for (n = start_n; n <= end_n; n++) {
858 0 : u64 addr = nodes[n].start;
859 :
860 0 : drm_mm_remove_node(&nodes[n]);
861 0 : if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
862 : start, end, mode)) {
863 0 : KUNIT_FAIL(test, "%s reinsert failed, step %d\n", mode->name, n);
864 0 : goto out;
865 : }
866 :
867 0 : if (nodes[n].start != addr) {
868 0 : KUNIT_FAIL(test,
869 : "%s reinsert node moved, step %d, expected %llx, found %llx\n",
870 : mode->name, n, addr, nodes[n].start);
871 0 : goto out;
872 : }
873 : }
874 :
875 0 : if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
876 0 : KUNIT_FAIL(test,
877 : "%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
878 : mode->name, start, end, size);
879 0 : goto out;
880 : }
881 :
882 0 : drm_mm_for_each_node_safe(node, next, &mm)
883 0 : drm_mm_remove_node(node);
884 : DRM_MM_BUG_ON(!drm_mm_clean(&mm));
885 :
886 0 : cond_resched();
887 : }
888 :
889 : ret = 0;
890 : out:
891 0 : drm_mm_for_each_node_safe(node, next, &mm)
892 0 : drm_mm_remove_node(node);
893 0 : drm_mm_takedown(&mm);
894 0 : vfree(nodes);
895 0 : return ret;
896 : }
897 :
898 0 : static int insert_outside_range(struct kunit *test)
899 : {
900 : struct drm_mm mm;
901 0 : const unsigned int start = 1024;
902 0 : const unsigned int end = 2048;
903 0 : const unsigned int size = end - start;
904 :
905 0 : drm_mm_init(&mm, start, size);
906 :
907 0 : if (!expect_insert_in_range_fail(test, &mm, 1, 0, start))
908 : return -EINVAL;
909 :
910 0 : if (!expect_insert_in_range_fail(test, &mm, size,
911 : start - size / 2, start + (size + 1) / 2))
912 : return -EINVAL;
913 :
914 0 : if (!expect_insert_in_range_fail(test, &mm, size,
915 : end - (size + 1) / 2, end + size / 2))
916 : return -EINVAL;
917 :
918 0 : if (!expect_insert_in_range_fail(test, &mm, 1, end, end + size))
919 : return -EINVAL;
920 :
921 0 : drm_mm_takedown(&mm);
922 0 : return 0;
923 : }
924 :
925 0 : static void drm_test_mm_insert_range(struct kunit *test)
926 : {
927 0 : const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
928 : unsigned int n;
929 :
930 : /* Check that requests outside the bounds of drm_mm are rejected. */
931 0 : KUNIT_ASSERT_FALSE(test, insert_outside_range(test));
932 :
933 0 : for_each_prime_number_from(n, 1, 50) {
934 0 : const u64 size = BIT_ULL(n);
935 0 : const u64 max = count * size;
936 :
937 0 : KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max));
938 0 : KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 1, max));
939 0 : KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max - 1));
940 0 : KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max / 2));
941 0 : KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
942 : max / 2, max / 2));
943 0 : KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
944 : max / 4 + 1, 3 * max / 4 - 1));
945 :
946 0 : cond_resched();
947 : }
948 0 : }
949 :
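: /* prepare_frag(): insert num_insert nodes of 4096 units each, then free every
:  * other one, leaving a long run of small holes for the timing passes below.
:  */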
950 0 : static int prepare_frag(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *nodes,
951 : unsigned int num_insert, const struct insert_mode *mode)
952 : {
953 0 : unsigned int size = 4096;
954 : unsigned int i;
955 :
956 0 : for (i = 0; i < num_insert; i++) {
957 0 : if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode)) {
958 0 : KUNIT_FAIL(test, "%s insert failed\n", mode->name);
959 0 : return -EINVAL;
960 : }
961 : }
962 :
963 : /* introduce fragmentation by freeing every other node */
964 0 : for (i = 0; i < num_insert; i++) {
965 0 : if (i % 2 == 0)
966 0 : drm_mm_remove_node(&nodes[i]);
967 : }
968 :
969 : return 0;
970 : }
971 :
972 0 : static u64 get_insert_time(struct kunit *test, struct drm_mm *mm,
973 : unsigned int num_insert, struct drm_mm_node *nodes,
974 : const struct insert_mode *mode)
975 : {
976 0 : unsigned int size = 8192;
977 : ktime_t start;
978 : unsigned int i;
979 :
980 0 : start = ktime_get();
981 0 : for (i = 0; i < num_insert; i++) {
982 0 : if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode)) {
983 0 : KUNIT_FAIL(test, "%s insert failed\n", mode->name);
984 0 : return 0;
985 : }
986 : }
987 :
988 0 : return ktime_to_ns(ktime_sub(ktime_get(), start));
989 : }
990 :
991 0 : static void drm_test_mm_frag(struct kunit *test)
992 : {
993 : struct drm_mm mm;
994 : const struct insert_mode *mode;
995 : struct drm_mm_node *nodes, *node, *next;
996 0 : unsigned int insert_size = 10000;
997 0 : unsigned int scale_factor = 4;
998 :
999 : /* We need 4 * insert_size nodes to hold intermediate allocated
1000 : * drm_mm nodes.
1001 : * once for prepare_frag()
1002 : * once for the first get_insert_time() pass
1003 : * twice for the second get_insert_time() pass
1004 : */
1005 0 : nodes = vzalloc(array_size(insert_size * 4, sizeof(*nodes)));
1006 0 : KUNIT_ASSERT_TRUE(test, nodes);
1007 :
1008 : /* For BOTTOMUP and TOPDOWN, we first fragment the
1009 : * address space using prepare_frag() and then verify that going from
1010 : * 10k to 20k insertions scales no worse than quadratically (within scale_factor)
1011 : */
1012 0 : drm_mm_init(&mm, 1, U64_MAX - 2);
1013 0 : for (mode = insert_modes; mode->name; mode++) {
1014 : u64 insert_time1, insert_time2;
1015 :
1016 0 : if (mode->mode != DRM_MM_INSERT_LOW &&
1017 : mode->mode != DRM_MM_INSERT_HIGH)
1018 0 : continue;
1019 :
1020 0 : if (prepare_frag(test, &mm, nodes, insert_size, mode))
1021 : goto err;
1022 :
1023 0 : insert_time1 = get_insert_time(test, &mm, insert_size,
1024 : nodes + insert_size, mode);
1025 0 : if (insert_time1 == 0)
1026 : goto err;
1027 :
1028 0 : insert_time2 = get_insert_time(test, &mm, (insert_size * 2),
1029 : nodes + insert_size * 2, mode);
1030 0 : if (insert_time2 == 0)
1031 : goto err;
1032 :
1033 0 : kunit_info(test, "%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n",
1034 : mode->name, insert_size, insert_size * 2, insert_time1, insert_time2);
1035 :
1036 0 : if (insert_time2 > (scale_factor * insert_time1)) {
1037 0 : KUNIT_FAIL(test, "%s fragmented insert took %llu nsecs more\n",
1038 : mode->name, insert_time2 - (scale_factor * insert_time1));
1039 0 : goto err;
1040 : }
1041 :
1042 0 : drm_mm_for_each_node_safe(node, next, &mm)
1043 0 : drm_mm_remove_node(node);
1044 : }
1045 :
1046 : err:
1047 0 : drm_mm_for_each_node_safe(node, next, &mm)
1048 0 : drm_mm_remove_node(node);
1049 0 : drm_mm_takedown(&mm);
1050 0 : vfree(nodes);
1051 0 : }
1052 :
1053 0 : static void drm_test_mm_align(struct kunit *test)
1054 : {
1055 : const struct insert_mode *mode;
1056 0 : const unsigned int max_count = min(8192u, max_prime);
1057 : struct drm_mm mm;
1058 : struct drm_mm_node *nodes, *node, *next;
1059 : unsigned int prime;
1060 :
1061 : /* For each of the possible insertion modes, we pick a few
1062 : * arbitrary alignments and check that the inserted node
1063 : * meets our requirements.
1064 : */
1065 :
1066 0 : nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
1067 0 : KUNIT_ASSERT_TRUE(test, nodes);
1068 :
1069 0 : drm_mm_init(&mm, 1, U64_MAX - 2);
1070 :
1071 0 : for (mode = insert_modes; mode->name; mode++) {
1072 : unsigned int i = 0;
1073 :
1074 0 : for_each_prime_number_from(prime, 1, max_count) {
1075 0 : u64 size = next_prime_number(prime);
1076 :
1077 0 : if (!expect_insert(test, &mm, &nodes[i], size, prime, i, mode)) {
1078 0 : KUNIT_FAIL(test, "%s insert failed with alignment=%d",
1079 : mode->name, prime);
1080 0 : goto out;
1081 : }
1082 :
1083 0 : i++;
1084 : }
1085 :
1086 0 : drm_mm_for_each_node_safe(node, next, &mm)
1087 0 : drm_mm_remove_node(node);
1088 : DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1089 :
1090 0 : cond_resched();
1091 : }
1092 :
1093 : out:
1094 0 : drm_mm_for_each_node_safe(node, next, &mm)
1095 0 : drm_mm_remove_node(node);
1096 0 : drm_mm_takedown(&mm);
1097 0 : vfree(nodes);
1098 0 : }
1099 :
1100 0 : static void drm_test_mm_align_pot(struct kunit *test, int max)
1101 : {
1102 : struct drm_mm mm;
1103 : struct drm_mm_node *node, *next;
1104 : int bit;
1105 :
1106 : /* Check that we can align to the full u64 address space */
1107 :
1108 0 : drm_mm_init(&mm, 1, U64_MAX - 2);
1109 :
1110 0 : for (bit = max - 1; bit; bit--) {
1111 : u64 align, size;
1112 :
1113 0 : node = kzalloc(sizeof(*node), GFP_KERNEL);
1114 0 : if (!node) {
1115 0 : KUNIT_FAIL(test, "failed to allocate node");
1116 0 : goto out;
1117 : }
1118 :
1119 0 : align = BIT_ULL(bit);
1120 0 : size = BIT_ULL(bit - 1) + 1;
1121 0 : if (!expect_insert(test, &mm, node, size, align, bit, &insert_modes[0])) {
1122 0 : KUNIT_FAIL(test, "insert failed with alignment=%llx [%d]", align, bit);
1123 0 : goto out;
1124 : }
1125 :
1126 0 : cond_resched();
1127 : }
1128 :
1129 : out:
1130 0 : drm_mm_for_each_node_safe(node, next, &mm) {
1131 0 : drm_mm_remove_node(node);
1132 0 : kfree(node);
1133 : }
1134 0 : drm_mm_takedown(&mm);
1135 0 : }
1136 :
1137 0 : static void drm_test_mm_align32(struct kunit *test)
1138 : {
1139 0 : drm_test_mm_align_pot(test, 32);
1140 0 : }
1141 :
1142 0 : static void drm_test_mm_align64(struct kunit *test)
1143 : {
1144 0 : drm_test_mm_align_pot(test, 64);
1145 0 : }
1146 :
1147 0 : static void show_scan(struct kunit *test, const struct drm_mm_scan *scan)
1148 : {
1149 0 : kunit_info(test, "scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
1150 : scan->hit_start, scan->hit_end, scan->size, scan->alignment, scan->color);
1151 0 : }
1152 :
1153 0 : static void show_holes(struct kunit *test, const struct drm_mm *mm, int count)
1154 : {
1155 : u64 hole_start, hole_end;
1156 : struct drm_mm_node *hole;
1157 :
1158 0 : drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
1159 0 : struct drm_mm_node *next = list_next_entry(hole, node_list);
1160 0 : const char *node1 = NULL, *node2 = NULL;
1161 :
1162 0 : if (drm_mm_node_allocated(hole))
1163 0 : node1 = kasprintf(GFP_KERNEL, "[%llx + %lld, color=%ld], ",
1164 : hole->start, hole->size, hole->color);
1165 :
1166 0 : if (drm_mm_node_allocated(next))
1167 0 : node2 = kasprintf(GFP_KERNEL, ", [%llx + %lld, color=%ld]",
1168 : next->start, next->size, next->color);
1169 :
1170 0 : kunit_info(test, "%sHole [%llx - %llx, size %lld]%s\n", node1,
1171 : hole_start, hole_end, hole_end - hole_start, node2);
1172 :
1173 0 : kfree(node2);
1174 0 : kfree(node1);
1175 :
1176 0 : if (!--count)
1177 : break;
1178 : }
1179 0 : }
1180 :
1181 : struct evict_node {
1182 : struct drm_mm_node node;
1183 : struct list_head link;
1184 : };
1185 :
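: /* evict_nodes(): feed nodes to the scanner until it reports a fit, keep the
:  * blocks it marks for eviction on @evict_list and remove them from the mm,
:  * including any colour-adjacent neighbours when @use_color is set.
:  */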
1186 0 : static bool evict_nodes(struct kunit *test, struct drm_mm_scan *scan,
1187 : struct evict_node *nodes, unsigned int *order, unsigned int count,
1188 : bool use_color, struct list_head *evict_list)
1189 : {
1190 : struct evict_node *e, *en;
1191 : unsigned int i;
1192 :
1193 0 : for (i = 0; i < count; i++) {
1194 0 : e = &nodes[order ? order[i] : i];
1195 0 : list_add(&e->link, evict_list);
1196 0 : if (drm_mm_scan_add_block(scan, &e->node))
1197 : break;
1198 : }
1199 0 : list_for_each_entry_safe(e, en, evict_list, link) {
1200 0 : if (!drm_mm_scan_remove_block(scan, &e->node))
1201 0 : list_del(&e->link);
1202 : }
1203 0 : if (list_empty(evict_list)) {
1204 0 : KUNIT_FAIL(test,
1205 : "Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
1206 : scan->size, count, scan->alignment, scan->color);
1207 0 : return false;
1208 : }
1209 :
1210 0 : list_for_each_entry(e, evict_list, link)
1211 0 : drm_mm_remove_node(&e->node);
1212 :
1213 0 : if (use_color) {
1214 : struct drm_mm_node *node;
1215 :
1216 0 : while ((node = drm_mm_scan_color_evict(scan))) {
1217 0 : e = container_of(node, typeof(*e), node);
1218 0 : drm_mm_remove_node(&e->node);
1219 0 : list_add(&e->link, evict_list);
1220 : }
1221 : } else {
1222 0 : if (drm_mm_scan_color_evict(scan)) {
1223 0 : KUNIT_FAIL(test,
1224 : "drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
1225 0 : return false;
1226 : }
1227 : }
1228 :
1229 : return true;
1230 : }
1231 :
1232 0 : static bool evict_nothing(struct kunit *test, struct drm_mm *mm,
1233 : unsigned int total_size, struct evict_node *nodes)
1234 : {
1235 : struct drm_mm_scan scan;
1236 0 : LIST_HEAD(evict_list);
1237 : struct evict_node *e;
1238 : struct drm_mm_node *node;
1239 : unsigned int n;
1240 :
1241 0 : drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
1242 0 : for (n = 0; n < total_size; n++) {
1243 0 : e = &nodes[n];
1244 0 : list_add(&e->link, &evict_list);
1245 0 : drm_mm_scan_add_block(&scan, &e->node);
1246 : }
1247 0 : list_for_each_entry(e, &evict_list, link)
1248 0 : drm_mm_scan_remove_block(&scan, &e->node);
1249 :
1250 0 : for (n = 0; n < total_size; n++) {
1251 0 : e = &nodes[n];
1252 :
1253 0 : if (!drm_mm_node_allocated(&e->node)) {
1254 0 : KUNIT_FAIL(test, "node[%d] no longer allocated!\n", n);
1255 0 : return false;
1256 : }
1257 :
1258 0 : e->link.next = NULL;
1259 : }
1260 :
1261 0 : drm_mm_for_each_node(node, mm) {
1262 0 : e = container_of(node, typeof(*e), node);
1263 0 : e->link.next = &e->link;
1264 : }
1265 :
1266 0 : for (n = 0; n < total_size; n++) {
1267 0 : e = &nodes[n];
1268 :
1269 0 : if (!e->link.next) {
1270 0 : KUNIT_FAIL(test, "node[%d] no longer connected!\n", n);
1271 0 : return false;
1272 : }
1273 : }
1274 :
1275 0 : return assert_continuous(test, mm, nodes[0].node.size);
1276 : }
1277 :
1278 0 : static bool evict_everything(struct kunit *test, struct drm_mm *mm,
1279 : unsigned int total_size, struct evict_node *nodes)
1280 : {
1281 : struct drm_mm_scan scan;
1282 0 : LIST_HEAD(evict_list);
1283 : struct evict_node *e;
1284 : unsigned int n;
1285 : int err;
1286 :
1287 0 : drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
1288 0 : for (n = 0; n < total_size; n++) {
1289 0 : e = &nodes[n];
1290 0 : list_add(&e->link, &evict_list);
1291 0 : if (drm_mm_scan_add_block(&scan, &e->node))
1292 : break;
1293 : }
1294 :
1295 0 : err = 0;
1296 0 : list_for_each_entry(e, &evict_list, link) {
1297 0 : if (!drm_mm_scan_remove_block(&scan, &e->node)) {
1298 0 : if (!err) {
1299 0 : KUNIT_FAIL(test, "Node %lld not marked for eviction!\n",
1300 : e->node.start);
1301 0 : err = -EINVAL;
1302 : }
1303 : }
1304 : }
1305 0 : if (err)
1306 : return false;
1307 :
1308 0 : list_for_each_entry(e, &evict_list, link)
1309 0 : drm_mm_remove_node(&e->node);
1310 :
1311 0 : if (!assert_one_hole(test, mm, 0, total_size))
1312 : return false;
1313 :
1314 0 : list_for_each_entry(e, &evict_list, link) {
1315 0 : err = drm_mm_reserve_node(mm, &e->node);
1316 0 : if (err) {
1317 0 : KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
1318 : e->node.start);
1319 0 : return false;
1320 : }
1321 : }
1322 :
1323 0 : return assert_continuous(test, mm, nodes[0].node.size);
1324 : }
1325 :
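: /* evict_something(): evict a set of nodes within [range_start, range_end), insert
:  * a new node into the freed hole with DRM_MM_INSERT_EVICT, check it exactly fills
:  * the hole inside the range, then put the evicted nodes back and re-verify the mm.
:  */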
1326 0 : static int evict_something(struct kunit *test, struct drm_mm *mm,
1327 : u64 range_start, u64 range_end, struct evict_node *nodes,
1328 : unsigned int *order, unsigned int count, unsigned int size,
1329 : unsigned int alignment, const struct insert_mode *mode)
1330 : {
1331 : struct drm_mm_scan scan;
1332 0 : LIST_HEAD(evict_list);
1333 : struct evict_node *e;
1334 : struct drm_mm_node tmp;
1335 : int err;
1336 :
1337 0 : drm_mm_scan_init_with_range(&scan, mm, size, alignment, 0, range_start,
1338 : range_end, mode->mode);
1339 0 : if (!evict_nodes(test, &scan, nodes, order, count, false, &evict_list))
1340 : return -EINVAL;
1341 :
1342 0 : memset(&tmp, 0, sizeof(tmp));
1343 0 : err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
1344 : DRM_MM_INSERT_EVICT);
1345 0 : if (err) {
1346 0 : KUNIT_FAIL(test, "Failed to insert into eviction hole: size=%d, align=%d\n",
1347 : size, alignment);
1348 0 : show_scan(test, &scan);
1349 0 : show_holes(test, mm, 3);
1350 : return err;
1351 : }
1352 :
1353 0 : if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
1354 0 : KUNIT_FAIL(test,
1355 : "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
1356 : tmp.start, tmp.size, range_start, range_end);
1357 0 : err = -EINVAL;
1358 : }
1359 :
1360 0 : if (!assert_node(test, &tmp, mm, size, alignment, 0) ||
1361 0 : drm_mm_hole_follows(&tmp)) {
1362 0 : KUNIT_FAIL(test,
1363 : "Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
1364 : tmp.size, size, alignment, misalignment(&tmp, alignment),
1365 : tmp.start, drm_mm_hole_follows(&tmp));
1366 0 : err = -EINVAL;
1367 : }
1368 :
1369 0 : drm_mm_remove_node(&tmp);
1370 0 : if (err)
1371 : return err;
1372 :
1373 0 : list_for_each_entry(e, &evict_list, link) {
1374 0 : err = drm_mm_reserve_node(mm, &e->node);
1375 0 : if (err) {
1376 0 : KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
1377 : e->node.start);
1378 : return err;
1379 : }
1380 : }
1381 :
1382 0 : if (!assert_continuous(test, mm, nodes[0].node.size)) {
1383 0 : KUNIT_FAIL(test, "range is no longer continuous\n");
1384 : return -EINVAL;
1385 : }
1386 :
1387 : return 0;
1388 : }
1389 :
1390 0 : static void drm_test_mm_evict(struct kunit *test)
1391 : {
1392 0 : DRM_RND_STATE(prng, random_seed);
1393 0 : const unsigned int size = 8192;
1394 : const struct insert_mode *mode;
1395 : struct drm_mm mm;
1396 : struct evict_node *nodes;
1397 : struct drm_mm_node *node, *next;
1398 : unsigned int *order, n;
1399 :
1400 : /* Here we populate a full drm_mm and then try and insert a new node
1401 : * by evicting other nodes in a random order. The drm_mm_scan should
1402 : * pick the first matching hole it finds from the random list. We
1403 : * repeat that for different allocation strategies, alignments and
1404 : * sizes to try and stress the hole finder.
1405 : */
1406 :
1407 0 : nodes = vzalloc(array_size(size, sizeof(*nodes)));
1408 0 : KUNIT_ASSERT_TRUE(test, nodes);
1409 :
1410 0 : order = drm_random_order(size, &prng);
1411 0 : if (!order)
1412 : goto err_nodes;
1413 :
1414 0 : drm_mm_init(&mm, 0, size);
1415 0 : for (n = 0; n < size; n++) {
1416 0 : if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
1417 0 : KUNIT_FAIL(test, "insert failed, step %d\n", n);
1418 0 : goto out;
1419 : }
1420 : }
1421 :
1422 : /* First check that using the scanner doesn't break the mm */
1423 0 : if (!evict_nothing(test, &mm, size, nodes)) {
1424 0 : KUNIT_FAIL(test, "evict_nothing() failed\n");
1425 0 : goto out;
1426 : }
1427 0 : if (!evict_everything(test, &mm, size, nodes)) {
1428 0 : KUNIT_FAIL(test, "evict_everything() failed\n");
1429 0 : goto out;
1430 : }
1431 :
1432 0 : for (mode = evict_modes; mode->name; mode++) {
1433 0 : for (n = 1; n <= size; n <<= 1) {
1434 0 : drm_random_reorder(order, size, &prng);
1435 0 : if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size, n, 1,
1436 : mode)) {
1437 0 : KUNIT_FAIL(test, "%s evict_something(size=%u) failed\n",
1438 : mode->name, n);
1439 0 : goto out;
1440 : }
1441 : }
1442 :
1443 0 : for (n = 1; n < size; n <<= 1) {
1444 0 : drm_random_reorder(order, size, &prng);
1445 0 : if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
1446 : size / 2, n, mode)) {
1447 0 : KUNIT_FAIL(test,
1448 : "%s evict_something(size=%u, alignment=%u) failed\n",
1449 : mode->name, size / 2, n);
1450 0 : goto out;
1451 : }
1452 : }
1453 :
1454 0 : for_each_prime_number_from(n, 1, min(size, max_prime)) {
1455 0 : unsigned int nsize = (size - n + 1) / 2;
1456 :
1457 : DRM_MM_BUG_ON(!nsize);
1458 :
1459 0 : drm_random_reorder(order, size, &prng);
1460 0 : if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
1461 : nsize, n, mode)) {
1462 0 : KUNIT_FAIL(test,
1463 : "%s evict_something(size=%u, alignment=%u) failed\n",
1464 : mode->name, nsize, n);
1465 0 : goto out;
1466 : }
1467 : }
1468 :
1469 0 : cond_resched();
1470 : }
1471 :
1472 : out:
1473 0 : drm_mm_for_each_node_safe(node, next, &mm)
1474 0 : drm_mm_remove_node(node);
1475 0 : drm_mm_takedown(&mm);
1476 0 : kfree(order);
1477 : err_nodes:
1478 0 : vfree(nodes);
1479 0 : }
1480 :
1481 0 : static void drm_test_mm_evict_range(struct kunit *test)
1482 : {
1483 0 : DRM_RND_STATE(prng, random_seed);
1484 0 : const unsigned int size = 8192;
1485 0 : const unsigned int range_size = size / 2;
1486 0 : const unsigned int range_start = size / 4;
1487 0 : const unsigned int range_end = range_start + range_size;
1488 : const struct insert_mode *mode;
1489 : struct drm_mm mm;
1490 : struct evict_node *nodes;
1491 : struct drm_mm_node *node, *next;
1492 : unsigned int *order, n;
1493 :
1494 : /* Like drm_test_mm_evict() but now we are limiting the search to a
1495 : * small portion of the full drm_mm.
1496 : */
1497 :
1498 0 : nodes = vzalloc(array_size(size, sizeof(*nodes)));
1499 0 : KUNIT_ASSERT_TRUE(test, nodes);
1500 :
1501 0 : order = drm_random_order(size, &prng);
1502 0 : if (!order)
1503 : goto err_nodes;
1504 :
1505 0 : drm_mm_init(&mm, 0, size);
1506 0 : for (n = 0; n < size; n++) {
1507 0 : if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
1508 0 : KUNIT_FAIL(test, "insert failed, step %d\n", n);
1509 0 : goto out;
1510 : }
1511 : }
1512 :
1513 0 : for (mode = evict_modes; mode->name; mode++) {
1514 0 : for (n = 1; n <= range_size; n <<= 1) {
1515 0 : drm_random_reorder(order, size, &prng);
1516 0 : if (evict_something(test, &mm, range_start, range_end, nodes,
1517 : order, size, n, 1, mode)) {
1518 0 : KUNIT_FAIL(test,
1519 : "%s evict_something(size=%u) failed with range [%u, %u]\n",
1520 : mode->name, n, range_start, range_end);
1521 0 : goto out;
1522 : }
1523 : }
1524 :
1525 0 : for (n = 1; n <= range_size; n <<= 1) {
1526 0 : drm_random_reorder(order, size, &prng);
1527 0 : if (evict_something(test, &mm, range_start, range_end, nodes,
1528 : order, size, range_size / 2, n, mode)) {
1529 0 : KUNIT_FAIL(test,
1530 : "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1531 : mode->name, range_size / 2, n, range_start, range_end);
1532 0 : goto out;
1533 : }
1534 : }
1535 :
1536 0 : for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
1537 0 : unsigned int nsize = (range_size - n + 1) / 2;
1538 :
1539 : DRM_MM_BUG_ON(!nsize);
1540 :
1541 0 : drm_random_reorder(order, size, &prng);
1542 0 : if (evict_something(test, &mm, range_start, range_end, nodes,
1543 : order, size, nsize, n, mode)) {
1544 0 : KUNIT_FAIL(test,
1545 : "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1546 : mode->name, nsize, n, range_start, range_end);
1547 0 : goto out;
1548 : }
1549 : }
1550 :
1551 0 : cond_resched();
1552 : }
1553 :
1554 : out:
1555 0 : drm_mm_for_each_node_safe(node, next, &mm)
1556 0 : drm_mm_remove_node(node);
1557 0 : drm_mm_takedown(&mm);
1558 0 : kfree(order);
1559 : err_nodes:
1560 0 : vfree(nodes);
1561 0 : }
1562 :
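: /* node_index(): recover a node's slot number from its offset; valid here because
:  * every node in a given test pass has the same size.
:  */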
1563 : static unsigned int node_index(const struct drm_mm_node *node)
1564 : {
1565 0 : return div64_u64(node->start, node->size);
1566 : }
1567 :
1568 0 : static void drm_test_mm_topdown(struct kunit *test)
1569 : {
1570 0 : const struct insert_mode *topdown = &insert_modes[TOPDOWN];
1571 :
1572 0 : DRM_RND_STATE(prng, random_seed);
1573 0 : const unsigned int count = 8192;
1574 : unsigned int size;
1575 : unsigned long *bitmap;
1576 : struct drm_mm mm;
1577 : struct drm_mm_node *nodes, *node, *next;
1578 0 : unsigned int *order, n, m, o = 0;
1579 :
1580 : /* When allocating top-down, we expect to be returned a node
1581 : * from a suitable hole at the top of the drm_mm. We check that
1582 : * the returned node does match the highest available slot.
1583 : */
1584 :
1585 0 : nodes = vzalloc(array_size(count, sizeof(*nodes)));
1586 0 : KUNIT_ASSERT_TRUE(test, nodes);
1587 :
1588 0 : bitmap = bitmap_zalloc(count, GFP_KERNEL);
1589 0 : if (!bitmap)
1590 : goto err_nodes;
1591 :
1592 0 : order = drm_random_order(count, &prng);
1593 0 : if (!order)
1594 : goto err_bitmap;
1595 :
1596 0 : for (size = 1; size <= 64; size <<= 1) {
1597 0 : drm_mm_init(&mm, 0, size * count);
1598 0 : for (n = 0; n < count; n++) {
1599 0 : if (!expect_insert(test, &mm, &nodes[n], size, 0, n, topdown)) {
1600 0 : KUNIT_FAIL(test, "insert failed, size %u step %d\n", size, n);
1601 0 : goto out;
1602 : }
1603 :
1604 0 : if (drm_mm_hole_follows(&nodes[n])) {
1605 0 : KUNIT_FAIL(test,
1606 : "hole after topdown insert %d, start=%llx\n, size=%u",
1607 : n, nodes[n].start, size);
1608 0 : goto out;
1609 : }
1610 :
1611 0 : if (!assert_one_hole(test, &mm, 0, size * (count - n - 1)))
1612 : goto out;
1613 : }
1614 :
1615 0 : if (!assert_continuous(test, &mm, size))
1616 : goto out;
1617 :
1618 0 : drm_random_reorder(order, count, &prng);
1619 0 : for_each_prime_number_from(n, 1, min(count, max_prime)) {
1620 0 : for (m = 0; m < n; m++) {
1621 0 : node = &nodes[order[(o + m) % count]];
1622 0 : drm_mm_remove_node(node);
1623 0 : __set_bit(node_index(node), bitmap);
1624 : }
1625 :
1626 0 : for (m = 0; m < n; m++) {
1627 : unsigned int last;
1628 :
1629 0 : node = &nodes[order[(o + m) % count]];
1630 0 : if (!expect_insert(test, &mm, node, size, 0, 0, topdown)) {
1631 0 : KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
1632 0 : goto out;
1633 : }
1634 :
1635 0 : if (drm_mm_hole_follows(node)) {
1636 0 : KUNIT_FAIL(test,
1637 : "hole after topdown insert %d/%d, start=%llx\n",
1638 : m, n, node->start);
1639 0 : goto out;
1640 : }
1641 :
1642 0 : last = find_last_bit(bitmap, count);
1643 0 : if (node_index(node) != last) {
1644 0 : KUNIT_FAIL(test,
1645 : "node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
1646 : m, n, size, last, node_index(node));
1647 0 : goto out;
1648 : }
1649 :
1650 0 : __clear_bit(last, bitmap);
1651 : }
1652 :
1653 : DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1654 :
1655 0 : o += n;
1656 : }
1657 :
1658 0 : drm_mm_for_each_node_safe(node, next, &mm)
1659 0 : drm_mm_remove_node(node);
1660 : DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1661 0 : cond_resched();
1662 : }
1663 :
1664 : out:
1665 0 : drm_mm_for_each_node_safe(node, next, &mm)
1666 0 : drm_mm_remove_node(node);
1667 0 : drm_mm_takedown(&mm);
1668 0 : kfree(order);
1669 : err_bitmap:
1670 0 : bitmap_free(bitmap);
1671 : err_nodes:
1672 0 : vfree(nodes);
1673 0 : }
1674 :
1675 0 : static void drm_test_mm_bottomup(struct kunit *test)
1676 : {
1677 0 : const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
1678 :
1679 0 : DRM_RND_STATE(prng, random_seed);
1680 0 : const unsigned int count = 8192;
1681 : unsigned int size;
1682 : unsigned long *bitmap;
1683 : struct drm_mm mm;
1684 : struct drm_mm_node *nodes, *node, *next;
1685 0 : unsigned int *order, n, m, o = 0;
1686 :
1687 : /* Like drm_test_mm_topdown, but instead of searching for the last hole,
1688 : * we search for the first.
1689 : */
1690 :
1691 0 : nodes = vzalloc(array_size(count, sizeof(*nodes)));
1692 0 : KUNIT_ASSERT_TRUE(test, nodes);
1693 :
1694 0 : bitmap = bitmap_zalloc(count, GFP_KERNEL);
1695 0 : if (!bitmap)
1696 : goto err_nodes;
1697 :
1698 0 : order = drm_random_order(count, &prng);
1699 0 : if (!order)
1700 : goto err_bitmap;
1701 :
1702 0 : for (size = 1; size <= 64; size <<= 1) {
1703 0 : drm_mm_init(&mm, 0, size * count);
1704 0 : for (n = 0; n < count; n++) {
1705 0 : if (!expect_insert(test, &mm, &nodes[n], size, 0, n, bottomup)) {
1706 0 : KUNIT_FAIL(test,
1707 : "bottomup insert failed, size %u step %d\n", size, n);
1708 0 : goto out;
1709 : }
1710 :
1711 0 : if (!assert_one_hole(test, &mm, size * (n + 1), size * count))
1712 : goto out;
1713 : }
1714 :
1715 0 : if (!assert_continuous(test, &mm, size))
1716 : goto out;
1717 :
1718 0 : drm_random_reorder(order, count, &prng);
1719 0 : for_each_prime_number_from(n, 1, min(count, max_prime)) {
1720 0 : for (m = 0; m < n; m++) {
1721 0 : node = &nodes[order[(o + m) % count]];
1722 0 : drm_mm_remove_node(node);
1723 0 : __set_bit(node_index(node), bitmap);
1724 : }
1725 :
1726 0 : for (m = 0; m < n; m++) {
1727 : unsigned int first;
1728 :
1729 0 : node = &nodes[order[(o + m) % count]];
1730 0 : if (!expect_insert(test, &mm, node, size, 0, 0, bottomup)) {
1731 0 : KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
1732 0 : goto out;
1733 : }
1734 :
1735 0 : first = find_first_bit(bitmap, count);
1736 0 : if (node_index(node) != first) {
1737 0 : KUNIT_FAIL(test,
1738 : "node %d/%d not inserted into bottom hole, expected %d, found %d\n",
1739 : m, n, first, node_index(node));
1740 0 : goto out;
1741 : }
1742 0 : __clear_bit(first, bitmap);
1743 : }
1744 :
1745 : DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1746 :
1747 0 : o += n;
1748 : }
1749 :
1750 0 : drm_mm_for_each_node_safe(node, next, &mm)
1751 0 : drm_mm_remove_node(node);
1752 : DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1753 0 : cond_resched();
1754 : }
1755 :
1756 : out:
1757 0 : drm_mm_for_each_node_safe(node, next, &mm)
1758 0 : drm_mm_remove_node(node);
1759 0 : drm_mm_takedown(&mm);
1760 0 : kfree(order);
1761 : err_bitmap:
1762 0 : bitmap_free(bitmap);
1763 : err_nodes:
1764 0 : vfree(nodes);
1765 0 : }
1766 :
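: /* drm_test_mm_once(): reserve units 1 and 5 of a 7-unit mm so that only the
:  * middle hole can take a two-unit node, then check insertion with @mode succeeds.
:  */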
1767 0 : static void drm_test_mm_once(struct kunit *test, unsigned int mode)
1768 : {
1769 : struct drm_mm mm;
1770 : struct drm_mm_node rsvd_lo, rsvd_hi, node;
1771 :
1772 0 : drm_mm_init(&mm, 0, 7);
1773 :
1774 0 : memset(&rsvd_lo, 0, sizeof(rsvd_lo));
1775 0 : rsvd_lo.start = 1;
1776 0 : rsvd_lo.size = 1;
1777 0 : if (drm_mm_reserve_node(&mm, &rsvd_lo)) {
1778 0 : KUNIT_FAIL(test, "Could not reserve low node\n");
1779 0 : goto err;
1780 : }
1781 :
1782 0 : memset(&rsvd_hi, 0, sizeof(rsvd_hi));
1783 0 : rsvd_hi.start = 5;
1784 0 : rsvd_hi.size = 1;
1785 0 : if (drm_mm_reserve_node(&mm, &rsvd_hi)) {
1786 0 : KUNIT_FAIL(test, "Could not reserve high node\n");
1787 0 : goto err_lo;
1788 : }
1789 :
1790 0 : if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
1791 0 : KUNIT_FAIL(test, "Expected a hole after lo and high nodes!\n");
1792 0 : goto err_hi;
1793 : }
1794 :
1795 0 : memset(&node, 0, sizeof(node));
1796 0 : if (drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode)) {
1797 0 : KUNIT_FAIL(test, "Could not insert the node into the available hole!\n");
1798 0 : goto err_hi;
1799 : }
1800 :
1801 0 : drm_mm_remove_node(&node);
1802 : err_hi:
1803 0 : drm_mm_remove_node(&rsvd_hi);
1804 : err_lo:
1805 0 : drm_mm_remove_node(&rsvd_lo);
1806 : err:
1807 0 : drm_mm_takedown(&mm);
1808 0 : }
1809 :
1810 0 : static void drm_test_mm_lowest(struct kunit *test)
1811 : {
1812 0 : drm_test_mm_once(test, DRM_MM_INSERT_LOW);
1813 0 : }
1814 :
1815 0 : static void drm_test_mm_highest(struct kunit *test)
1816 : {
1817 0 : drm_test_mm_once(test, DRM_MM_INSERT_HIGH);
1818 0 : }
1819 :
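: /* separate_adjacent_colors(): color_adjust callback that shrinks a hole by one
:  * unit at either end bordering a node of a different color, forcing a one-unit
:  * gap between differently colored neighbours.
:  */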
1820 0 : static void separate_adjacent_colors(const struct drm_mm_node *node,
1821 : unsigned long color, u64 *start, u64 *end)
1822 : {
1823 0 : if (drm_mm_node_allocated(node) && node->color != color)
1824 0 : ++*start;
1825 :
1826 0 : node = list_next_entry(node, node_list);
1827 0 : if (drm_mm_node_allocated(node) && node->color != color)
1828 0 : --*end;
1829 0 : }
1830 :
1831 0 : static bool colors_abutt(struct kunit *test, const struct drm_mm_node *node)
1832 : {
1833 0 : if (!drm_mm_hole_follows(node) &&
1834 0 : drm_mm_node_allocated(list_next_entry(node, node_list))) {
1835 0 : KUNIT_FAIL(test, "colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
1836 : node->color, node->start, node->size,
1837 : list_next_entry(node, node_list)->color,
1838 : list_next_entry(node, node_list)->start,
1839 : list_next_entry(node, node_list)->size);
1840 0 : return true;
1841 : }
1842 :
1843 : return false;
1844 : }
1845 :
1846 0 : static void drm_test_mm_color(struct kunit *test)
1847 : {
1848 0 : const unsigned int count = min(4096u, max_iterations);
1849 : const struct insert_mode *mode;
1850 : struct drm_mm mm;
1851 : struct drm_mm_node *node, *nn;
1852 : unsigned int n;
1853 :
1854 : /* Color adjustment complicates everything. First we just check
1855 : * that when we insert a node we apply any color_adjust callback.
1856 : * The callback we use should ensure that there is a gap between
1857 : * any two nodes, and so after each insertion we check that those
1858 : * holes are inserted and that they are preserved.
1859 : */
1860 :
1861 0 : drm_mm_init(&mm, 0, U64_MAX);
1862 :
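 : 	/*
 : 	 * First pass: no color_adjust callback is installed yet, so the
 : 	 * color is simply stored with the node (color == size here) and
 : 	 * adjacent allocations are allowed to touch.
 : 	 */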
1863 0 : for (n = 1; n <= count; n++) {
1864 0 : node = kzalloc(sizeof(*node), GFP_KERNEL);
1865 0 : if (!node)
1866 : goto out;
1867 :
1868 0 : if (!expect_insert(test, &mm, node, n, 0, n, &insert_modes[0])) {
1869 0 : KUNIT_FAIL(test, "insert failed, step %d\n", n);
1870 0 : kfree(node);
1871 0 : goto out;
1872 : }
1873 : }
1874 :
1875 0 : drm_mm_for_each_node_safe(node, nn, &mm) {
1876 0 : if (node->color != node->size) {
1877 0 : KUNIT_FAIL(test, "invalid color stored: expected %lld, found %ld\n",
1878 : node->size, node->color);
1879 :
1880 0 : goto out;
1881 : }
1882 :
1883 0 : drm_mm_remove_node(node);
1884 0 : kfree(node);
1885 : }
1886 :
1887 : /* Now, let's start experimenting with applying a color callback */
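 : 	/*
 : 	 * With separate_adjacent_colors installed, two touching nodes of
 : 	 * different colors must be rejected, while the same pair separated
 : 	 * by a one-unit hole is fine; the reserve calls below exercise both
 : 	 * cases before checking each insertion mode.
 : 	 */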
1888 0 : mm.color_adjust = separate_adjacent_colors;
1889 0 : for (mode = insert_modes; mode->name; mode++) {
1890 : u64 last;
1891 :
1892 0 : node = kzalloc(sizeof(*node), GFP_KERNEL);
1893 0 : if (!node)
1894 : goto out;
1895 :
1896 0 : node->size = 1 + 2 * count;
1897 0 : node->color = node->size;
1898 :
1899 0 : if (drm_mm_reserve_node(&mm, node)) {
1900 0 : KUNIT_FAIL(test, "initial reserve failed!\n");
1901 0 : goto out;
1902 : }
1903 :
1904 0 : last = node->start + node->size;
1905 :
1906 0 : for (n = 1; n <= count; n++) {
1907 : int rem;
1908 :
1909 0 : node = kzalloc(sizeof(*node), GFP_KERNEL);
1910 0 : if (!node)
1911 : goto out;
1912 :
1913 0 : node->start = last;
1914 0 : node->size = n + count;
1915 0 : node->color = node->size;
1916 :
1917 0 : if (drm_mm_reserve_node(&mm, node) != -ENOSPC) {
1918 0 : KUNIT_FAIL(test, "reserve %d did not report color overlap!\n", n);
1919 0 : goto out;
1920 : }
1921 :
1922 0 : node->start += n + 1;
1923 0 : rem = misalignment(node, n + count);
1924 0 : node->start += n + count - rem;
1925 :
1926 0 : if (drm_mm_reserve_node(&mm, node)) {
1927 0 : KUNIT_FAIL(test, "reserve %d failed\n", n);
1928 0 : goto out;
1929 : }
1930 :
1931 0 : last = node->start + node->size;
1932 : }
1933 :
1934 0 : for (n = 1; n <= count; n++) {
1935 0 : node = kzalloc(sizeof(*node), GFP_KERNEL);
1936 0 : if (!node)
1937 : goto out;
1938 :
1939 0 : if (!expect_insert(test, &mm, node, n, n, n, mode)) {
1940 0 : KUNIT_FAIL(test, "%s insert failed, step %d\n", mode->name, n);
1941 0 : kfree(node);
1942 0 : goto out;
1943 : }
1944 : }
1945 :
1946 0 : drm_mm_for_each_node_safe(node, nn, &mm) {
1947 : u64 rem;
1948 :
1949 0 : if (node->color != node->size) {
1950 0 : KUNIT_FAIL(test,
1951 : "%s invalid color stored: expected %lld, found %ld\n",
1952 : mode->name, node->size, node->color);
1953 :
1954 0 : goto out;
1955 : }
1956 :
1957 0 : if (colors_abutt(test, node))
1958 : goto out;
1959 :
1960 0 : div64_u64_rem(node->start, node->size, &rem);
1961 0 : if (rem) {
1962 0 : KUNIT_FAIL(test,
1963 : "%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
1964 : mode->name, node->start, node->size, rem);
1965 0 : goto out;
1966 : }
1967 :
1968 0 : drm_mm_remove_node(node);
1969 0 : kfree(node);
1970 : }
1971 :
1972 0 : cond_resched();
1973 : }
1974 :
1975 : out:
1976 0 : drm_mm_for_each_node_safe(node, nn, &mm) {
1977 0 : drm_mm_remove_node(node);
1978 0 : kfree(node);
1979 : }
1980 0 : drm_mm_takedown(&mm);
1981 0 : }
1982 :
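 : /*
 :  * Scan [range_start, range_end) for space for a node of the given
 :  * size/alignment/color, evict the chosen victims, insert a temporary
 :  * node into the freed hole, check its placement and the colored gaps,
 :  * then remove it and reinsert the evicted nodes.
 :  */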
1983 0 : static int evict_color(struct kunit *test, struct drm_mm *mm, u64 range_start,
1984 : u64 range_end, struct evict_node *nodes, unsigned int *order,
1985 : unsigned int count, unsigned int size, unsigned int alignment,
1986 : unsigned long color, const struct insert_mode *mode)
1987 : {
1988 : struct drm_mm_scan scan;
1989 0 : LIST_HEAD(evict_list);
1990 : struct evict_node *e;
1991 : struct drm_mm_node tmp;
1992 : int err;
1993 :
1994 0 : drm_mm_scan_init_with_range(&scan, mm, size, alignment, color, range_start,
1995 : range_end, mode->mode);
1996 0 : if (!evict_nodes(test, &scan, nodes, order, count, true, &evict_list))
1997 : return -EINVAL;
1998 :
1999 0 : memset(&tmp, 0, sizeof(tmp));
2000 0 : err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
2001 : DRM_MM_INSERT_EVICT);
2002 0 : if (err) {
2003 0 : KUNIT_FAIL(test,
2004 : "Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
2005 : size, alignment, color, err);
2006 0 : show_scan(test, &scan);
2007 0 : show_holes(test, mm, 3);
2008 : return err;
2009 : }
2010 :
2011 0 : if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
2012 0 : KUNIT_FAIL(test,
2013 : "Inserted [address=%llu + %llu] did not fit into the requested range [%llu, %llu]\n",
2014 : tmp.start, tmp.size, range_start, range_end);
2015 0 : err = -EINVAL;
2016 : }
2017 :
2018 0 : if (colors_abutt(test, &tmp))
2019 0 : err = -EINVAL;
2020 :
2021 0 : if (!assert_node(test, &tmp, mm, size, alignment, color)) {
2022 0 : KUNIT_FAIL(test,
2023 : "Inserted node did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
2024 : tmp.size, size, alignment, misalignment(&tmp, alignment), tmp.start);
2025 0 : err = -EINVAL;
2026 : }
2027 :
2028 0 : drm_mm_remove_node(&tmp);
2029 0 : if (err)
2030 : return err;
2031 :
2032 0 : list_for_each_entry(e, &evict_list, link) {
2033 0 : err = drm_mm_reserve_node(mm, &e->node);
2034 0 : if (err) {
2035 0 : KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
2036 : e->node.start);
2037 : return err;
2038 : }
2039 : }
2040 :
2041 0 : cond_resched();
2042 : return 0;
2043 : }
2044 :
2045 0 : static void drm_test_mm_color_evict(struct kunit *test)
2046 : {
2047 0 : DRM_RND_STATE(prng, random_seed);
2048 0 : const unsigned int total_size = min(8192u, max_iterations);
2049 : const struct insert_mode *mode;
2050 0 : unsigned long color = 0;
2051 : struct drm_mm mm;
2052 : struct evict_node *nodes;
2053 : struct drm_mm_node *node, *next;
2054 : unsigned int *order, n;
2055 :
2056 : /* Check that the drm_mm_scan also honours color adjustment when
2057 : * choosing its victims to create a hole. Our color_adjust does not
2058 : * allow two nodes to be placed together without an intervening hole,
2059 : * which enlarges the set of victims that must be evicted.
2060 : */
2061 :
2062 0 : nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
2063 0 : KUNIT_ASSERT_TRUE(test, nodes);
2064 :
2065 0 : order = drm_random_order(total_size, &prng);
2066 0 : if (!order)
2067 : goto err_nodes;
2068 :
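 : 	/*
 : 	 * The mm spans 2 * total_size - 1 units: room for total_size
 : 	 * single-unit nodes plus the one-unit gaps that
 : 	 * separate_adjacent_colors enforces between differently colored
 : 	 * neighbours.
 : 	 */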
2069 0 : drm_mm_init(&mm, 0, 2 * total_size - 1);
2070 0 : mm.color_adjust = separate_adjacent_colors;
2071 0 : for (n = 0; n < total_size; n++) {
2072 0 : if (!expect_insert(test, &mm, &nodes[n].node,
2073 : 1, 0, color++,
2074 : &insert_modes[0])) {
2075 0 : KUNIT_FAIL(test, "insert failed, step %d\n", n);
2076 0 : goto out;
2077 : }
2078 : }
2079 :
2080 0 : for (mode = evict_modes; mode->name; mode++) {
2081 0 : for (n = 1; n <= total_size; n <<= 1) {
2082 0 : drm_random_reorder(order, total_size, &prng);
2083 0 : if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
2084 : n, 1, color++, mode)) {
2085 0 : KUNIT_FAIL(test, "%s evict_color(size=%u) failed\n", mode->name, n);
2086 0 : goto out;
2087 : }
2088 : }
2089 :
2090 0 : for (n = 1; n < total_size; n <<= 1) {
2091 0 : drm_random_reorder(order, total_size, &prng);
2092 0 : if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
2093 : total_size / 2, n, color++, mode)) {
2094 0 : KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
2095 : mode->name, total_size / 2, n);
2096 0 : goto out;
2097 : }
2098 : }
2099 :
2100 0 : for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
2101 0 : unsigned int nsize = (total_size - n + 1) / 2;
2102 :
2103 : DRM_MM_BUG_ON(!nsize);
2104 :
2105 0 : drm_random_reorder(order, total_size, &prng);
2106 0 : if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
2107 : nsize, n, color++, mode)) {
2108 0 : KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
2109 : mode->name, nsize, n);
2110 0 : goto out;
2111 : }
2112 : }
2113 :
2114 0 : cond_resched();
2115 : }
2116 :
2117 : out:
2118 0 : drm_mm_for_each_node_safe(node, next, &mm)
2119 0 : drm_mm_remove_node(node);
2120 0 : drm_mm_takedown(&mm);
2121 0 : kfree(order);
2122 : err_nodes:
2123 0 : vfree(nodes);
2124 0 : }
2125 :
2126 0 : static void drm_test_mm_color_evict_range(struct kunit *test)
2127 : {
2128 0 : DRM_RND_STATE(prng, random_seed);
2129 0 : const unsigned int total_size = 8192;
2130 0 : const unsigned int range_size = total_size / 2;
2131 0 : const unsigned int range_start = total_size / 4;
2132 0 : const unsigned int range_end = range_start + range_size;
2133 : const struct insert_mode *mode;
2134 0 : unsigned long color = 0;
2135 : struct drm_mm mm;
2136 : struct evict_node *nodes;
2137 : struct drm_mm_node *node, *next;
2138 : unsigned int *order, n;
2139 :
2140 : /* Like drm_test_mm_color_evict(), but limited to a small portion of the full
2141 : * drm_mm range.
2142 : */
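 : 	/*
 : 	 * evict_color() also verifies that the node inserted into the
 : 	 * eviction hole lands entirely within [range_start, range_end).
 : 	 */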
2143 :
2144 0 : nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
2145 0 : KUNIT_ASSERT_TRUE(test, nodes);
2146 :
2147 0 : order = drm_random_order(total_size, &prng);
2148 0 : if (!order)
2149 : goto err_nodes;
2150 :
2151 0 : drm_mm_init(&mm, 0, 2 * total_size - 1);
2152 0 : mm.color_adjust = separate_adjacent_colors;
2153 0 : for (n = 0; n < total_size; n++) {
2154 0 : if (!expect_insert(test, &mm, &nodes[n].node,
2155 : 1, 0, color++,
2156 : &insert_modes[0])) {
2157 0 : KUNIT_FAIL(test, "insert failed, step %d\n", n);
2158 0 : goto out;
2159 : }
2160 : }
2161 :
2162 0 : for (mode = evict_modes; mode->name; mode++) {
2163 0 : for (n = 1; n <= range_size; n <<= 1) {
2164 0 : drm_random_reorder(order, range_size, &prng);
2165 0 : if (evict_color(test, &mm, range_start, range_end, nodes, order,
2166 : total_size, n, 1, color++, mode)) {
2167 0 : KUNIT_FAIL(test,
2168 : "%s evict_color(size=%u) failed for range [%x, %x]\n",
2169 : mode->name, n, range_start, range_end);
2170 0 : goto out;
2171 : }
2172 : }
2173 :
2174 0 : for (n = 1; n < range_size; n <<= 1) {
2175 0 : drm_random_reorder(order, total_size, &prng);
2176 0 : if (evict_color(test, &mm, range_start, range_end, nodes, order,
2177 : total_size, range_size / 2, n, color++, mode)) {
2178 0 : KUNIT_FAIL(test,
2179 : "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2180 : mode->name, range_size / 2, n, range_start, range_end);
2181 0 : goto out;
2182 : }
2183 : }
2184 :
2185 0 : for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2186 0 : unsigned int nsize = (range_size - n + 1) / 2;
2187 :
2188 : DRM_MM_BUG_ON(!nsize);
2189 :
2190 0 : drm_random_reorder(order, total_size, &prng);
2191 0 : if (evict_color(test, &mm, range_start, range_end, nodes, order,
2192 : total_size, nsize, n, color++, mode)) {
2193 0 : KUNIT_FAIL(test,
2194 : "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2195 : mode->name, nsize, n, range_start, range_end);
2196 0 : goto out;
2197 : }
2198 : }
2199 :
2200 0 : cond_resched();
2201 : }
2202 :
2203 : out:
2204 0 : drm_mm_for_each_node_safe(node, next, &mm)
2205 0 : drm_mm_remove_node(node);
2206 0 : drm_mm_takedown(&mm);
2207 0 : kfree(order);
2208 : err_nodes:
2209 0 : vfree(nodes);
2210 0 : }
2211 :
2212 0 : static int drm_mm_suite_init(struct kunit_suite *suite)
2213 : {
2214 0 : while (!random_seed)
2215 0 : random_seed = get_random_u32();
2216 :
2217 0 : kunit_info(suite,
2218 : "Testing DRM range manager, with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2219 : random_seed, max_iterations, max_prime);
2220 :
2221 0 : return 0;
2222 : }
2223 :
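 : /*
 :  * All three knobs are read-only module parameters; a zero random_seed is
 :  * replaced with a fresh get_random_u32() by drm_mm_suite_init().
 :  */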
2224 : module_param(random_seed, uint, 0400);
2225 : module_param(max_iterations, uint, 0400);
2226 : module_param(max_prime, uint, 0400);
2227 :
2228 : static struct kunit_case drm_mm_tests[] = {
2229 : KUNIT_CASE(drm_test_mm_init),
2230 : KUNIT_CASE(drm_test_mm_debug),
2231 : KUNIT_CASE(drm_test_mm_reserve),
2232 : KUNIT_CASE(drm_test_mm_insert),
2233 : KUNIT_CASE(drm_test_mm_replace),
2234 : KUNIT_CASE(drm_test_mm_insert_range),
2235 : KUNIT_CASE(drm_test_mm_frag),
2236 : KUNIT_CASE(drm_test_mm_align),
2237 : KUNIT_CASE(drm_test_mm_align32),
2238 : KUNIT_CASE(drm_test_mm_align64),
2239 : KUNIT_CASE(drm_test_mm_evict),
2240 : KUNIT_CASE(drm_test_mm_evict_range),
2241 : KUNIT_CASE(drm_test_mm_topdown),
2242 : KUNIT_CASE(drm_test_mm_bottomup),
2243 : KUNIT_CASE(drm_test_mm_lowest),
2244 : KUNIT_CASE(drm_test_mm_highest),
2245 : KUNIT_CASE(drm_test_mm_color),
2246 : KUNIT_CASE(drm_test_mm_color_evict),
2247 : KUNIT_CASE(drm_test_mm_color_evict_range),
2248 : {}
2249 : };
2250 :
2251 : static struct kunit_suite drm_mm_test_suite = {
2252 : .name = "drm_mm",
2253 : .suite_init = drm_mm_suite_init,
2254 : .test_cases = drm_mm_tests,
2255 : };
2256 :
2257 : kunit_test_suite(drm_mm_test_suite);
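 :
 : /*
 :  * Usage sketch (assumes a .kunitconfig selecting the DRM KUnit tests is
 :  * available in the tree; adjust the path to match your setup):
 :  *
 :  *   ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/tests
 :  */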
2258 :
2259 : MODULE_AUTHOR("Intel Corporation");
2260 : MODULE_LICENSE("GPL");
|