Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0+
2 : /*
3 : * Maple Tree implementation
4 : * Copyright (c) 2018-2022 Oracle Corporation
5 : * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
6 : * Matthew Wilcox <willy@infradead.org>
7 : */
8 :
9 : /*
10 : * DOC: Interesting implementation details of the Maple Tree
11 : *
12 : * Each node type has a number of slots for entries and a number of slots for
13 : * pivots. In the case of dense nodes, the pivots are implied by the position
14 : * and are simply the slot index + the minimum of the node.
15 : *
16 : * In regular B-Tree terms, pivots are called keys. The term pivot is used to
17 : * indicate that the tree is specifying ranges. Pivots may appear in the
18 : * subtree with an entry attached to the value, whereas keys are unique to a
19 : * specific position of a B-tree. Pivot values are inclusive of the slot with
20 : * the same index.
21 : *
22 : *
23 : * The following illustrates the layout of a range64 node's slots and pivots.
24 : *
25 : *
26 : * Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
27 : *          ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
28 : *          │   │   │   │     │    │    │    │    └─ Implied maximum
29 : *          │   │   │   │     │    │    │    └─ Pivot 14
30 : *          │   │   │   │     │    │    └─ Pivot 13
31 : *          │   │   │   │     │    └─ Pivot 12
32 : *          │   │   │   │     └─ Pivot 11
33 : *          │   │   │   └─ Pivot 2
34 : *          │   │   └─ Pivot 1
35 : *          │   └─ Pivot 0
36 : *          └─ Implied minimum
37 : *
38 : * Slot contents:
39 : * Internal (non-leaf) nodes contain pointers to other nodes.
40 : * Leaf nodes contain entries.
41 : *
42 : * The location of interest is often referred to as an offset. All offsets have
43 : * a slot, but the last offset has an implied pivot from the node above (or
44 : * ULONG_MAX for the root node).
45 : *
46 : * Ranges complicate certain write activities. When modifying any of
47 : * the B-tree variants, it is known that one entry will either be added or
48 : * deleted. When modifying the Maple Tree, one store operation may overwrite
49 : * the entire data set, or one half of the tree, or the middle half of the tree.
50 : *
51 : */
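
To make the slot/pivot relationship described above concrete, here is a minimal user-space sketch of a hypothetical leaf spanning [0, 100] with three entries; the values, names and helper are illustrative only and are not part of this file.

#include <stdio.h>

/*
 * Illustrative sketch only: a leaf spanning [min, max] with entries in
 * slots 0..2.  pivot[i] is the inclusive upper bound of slot i; the lower
 * bound of slot 0 is the implied minimum and the upper bound of the last
 * occupied slot is the implied maximum.
 */
int main(void)
{
	unsigned long min = 0, max = 100;
	unsigned long pivot[] = { 10, 20 };		/* pivots 0 and 1 */
	const char *slot[] = { "A", "B", "C" };		/* entries in slots 0..2 */
	unsigned long lo, hi;
	int i;

	for (i = 0; i < 3; i++) {
		lo = i ? pivot[i - 1] + 1 : min;	/* implied minimum for slot 0 */
		hi = (i < 2) ? pivot[i] : max;		/* implied maximum for the last slot */
		printf("slot %d (%s) covers [%lu, %lu]\n", i, slot[i], lo, hi);
	}
	return 0;
}

Running this prints [0, 10], [11, 20] and [21, 100], matching the rule that a pivot is inclusive of the slot with the same index.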
52 :
53 :
54 : #include <linux/maple_tree.h>
55 : #include <linux/xarray.h>
56 : #include <linux/types.h>
57 : #include <linux/export.h>
58 : #include <linux/slab.h>
59 : #include <linux/limits.h>
60 : #include <asm/barrier.h>
61 :
62 : #define CREATE_TRACE_POINTS
63 : #include <trace/events/maple_tree.h>
64 :
65 : #define MA_ROOT_PARENT 1
66 :
67 : /*
68 : * Maple state flags
69 : * * MA_STATE_BULK - Bulk insert mode
70 : * * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert
71 : * * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation
72 : */
73 : #define MA_STATE_BULK 1
74 : #define MA_STATE_REBALANCE 2
75 : #define MA_STATE_PREALLOC 4
76 :
77 : #define ma_parent_ptr(x) ((struct maple_pnode *)(x))
78 : #define ma_mnode_ptr(x) ((struct maple_node *)(x))
79 : #define ma_enode_ptr(x) ((struct maple_enode *)(x))
80 : static struct kmem_cache *maple_node_cache;
81 :
82 : #ifdef CONFIG_DEBUG_MAPLE_TREE
83 : static const unsigned long mt_max[] = {
84 : [maple_dense] = MAPLE_NODE_SLOTS,
85 : [maple_leaf_64] = ULONG_MAX,
86 : [maple_range_64] = ULONG_MAX,
87 : [maple_arange_64] = ULONG_MAX,
88 : };
89 : #define mt_node_max(x) mt_max[mte_node_type(x)]
90 : #endif
91 :
92 : static const unsigned char mt_slots[] = {
93 : [maple_dense] = MAPLE_NODE_SLOTS,
94 : [maple_leaf_64] = MAPLE_RANGE64_SLOTS,
95 : [maple_range_64] = MAPLE_RANGE64_SLOTS,
96 : [maple_arange_64] = MAPLE_ARANGE64_SLOTS,
97 : };
98 : #define mt_slot_count(x) mt_slots[mte_node_type(x)]
99 :
100 : static const unsigned char mt_pivots[] = {
101 : [maple_dense] = 0,
102 : [maple_leaf_64] = MAPLE_RANGE64_SLOTS - 1,
103 : [maple_range_64] = MAPLE_RANGE64_SLOTS - 1,
104 : [maple_arange_64] = MAPLE_ARANGE64_SLOTS - 1,
105 : };
106 : #define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
107 :
108 : static const unsigned char mt_min_slots[] = {
109 : [maple_dense] = MAPLE_NODE_SLOTS / 2,
110 : [maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
111 : [maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
112 : [maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1,
113 : };
114 : #define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
115 :
116 : #define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2)
117 : #define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1)
118 :
119 : struct maple_big_node {
120 : struct maple_pnode *parent;
121 : unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
122 : union {
123 : struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
124 : struct {
125 : unsigned long padding[MAPLE_BIG_NODE_GAPS];
126 : unsigned long gap[MAPLE_BIG_NODE_GAPS];
127 : };
128 : };
129 : unsigned char b_end;
130 : enum maple_type type;
131 : };
132 :
133 : /*
134 : * The maple_subtree_state is used to build a tree to replace a segment of an
135 : * existing tree in a more atomic way. Any walkers of the older tree will hit a
136 : * dead node and restart on updates.
137 : */
138 : struct maple_subtree_state {
139 : struct ma_state *orig_l; /* Original left side of subtree */
140 : struct ma_state *orig_r; /* Original right side of subtree */
141 : struct ma_state *l; /* New left side of subtree */
142 : struct ma_state *m; /* New middle of subtree (rare) */
143 : struct ma_state *r; /* New right side of subtree */
144 : struct ma_topiary *free; /* nodes to be freed */
145 : struct ma_topiary *destroy; /* Nodes to be destroyed (walked and freed) */
146 : struct maple_big_node *bn;
147 : };
148 :
149 : #ifdef CONFIG_KASAN_STACK
150 : /* Prevent mas_wr_bnode() from exceeding the stack frame limit */
151 : #define noinline_for_kasan noinline_for_stack
152 : #else
153 : #define noinline_for_kasan inline
154 : #endif
155 :
156 : /* Functions */
157 : static inline struct maple_node *mt_alloc_one(gfp_t gfp)
158 : {
159 0 : return kmem_cache_alloc(maple_node_cache, gfp);
160 : }
161 :
162 : static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
163 : {
164 0 : return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
165 : }
166 :
167 : static inline void mt_free_bulk(size_t size, void __rcu **nodes)
168 : {
169 0 : kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
170 : }
171 :
172 0 : static void mt_free_rcu(struct rcu_head *head)
173 : {
174 0 : struct maple_node *node = container_of(head, struct maple_node, rcu);
175 :
176 0 : kmem_cache_free(maple_node_cache, node);
177 0 : }
178 :
179 : /*
180 : * ma_free_rcu() - Use rcu callback to free a maple node
181 : * @node: The node to free
182 : *
183 : * The maple tree uses the parent pointer to indicate this node is no longer in
184 : * use and will be freed.
185 : */
186 0 : static void ma_free_rcu(struct maple_node *node)
187 : {
188 0 : WARN_ON(node->parent != ma_parent_ptr(node));
189 0 : call_rcu(&node->rcu, mt_free_rcu);
190 0 : }
191 :
192 0 : static void mas_set_height(struct ma_state *mas)
193 : {
194 0 : unsigned int new_flags = mas->tree->ma_flags;
195 :
196 0 : new_flags &= ~MT_FLAGS_HEIGHT_MASK;
197 0 : BUG_ON(mas->depth > MAPLE_HEIGHT_MAX);
198 0 : new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
199 0 : mas->tree->ma_flags = new_flags;
200 0 : }
201 :
202 : static unsigned int mas_mt_height(struct ma_state *mas)
203 : {
204 0 : return mt_height(mas->tree);
205 : }
206 :
207 : static inline enum maple_type mte_node_type(const struct maple_enode *entry)
208 : {
209 0 : return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
210 : MAPLE_NODE_TYPE_MASK;
211 : }
212 :
213 : static inline bool ma_is_dense(const enum maple_type type)
214 : {
215 0 : return type < maple_leaf_64;
216 : }
217 :
218 : static inline bool ma_is_leaf(const enum maple_type type)
219 : {
220 0 : return type < maple_range_64;
221 : }
222 :
223 : static inline bool mte_is_leaf(const struct maple_enode *entry)
224 : {
225 0 : return ma_is_leaf(mte_node_type(entry));
226 : }
227 :
228 : /*
229 : * We also reserve values with the bottom two bits set to '10' which are
230 : * below 4096
231 : */
232 : static inline bool mt_is_reserved(const void *entry)
233 : {
234 0 : return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
235 0 : xa_is_internal(entry);
236 : }
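
For reference, an internal entry in xarray terms is a value whose bottom two bits are 0b10, and mt_is_reserved() above additionally requires it to sit below MAPLE_RESERVED_RANGE (4096). A stand-alone model of that test, using plain integers rather than the kernel helpers:

#include <assert.h>

/*
 * Illustrative model only; the kernel uses xa_is_internal() and
 * MAPLE_RESERVED_RANGE rather than these toy helpers.
 */
static int toy_is_internal(unsigned long v) { return (v & 3UL) == 2UL; }
static int toy_is_reserved(unsigned long v) { return v < 4096 && toy_is_internal(v); }

int main(void)
{
	assert(toy_is_reserved(0x0AUL));	/* low value, bits end in 10 */
	assert(!toy_is_reserved(0x08UL));	/* low value, but bits end in 00 */
	assert(!toy_is_reserved(0x100AUL));	/* bits end in 10, but >= 4096 */
	return 0;
}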
237 :
238 : static inline void mas_set_err(struct ma_state *mas, long err)
239 : {
240 0 : mas->node = MA_ERROR(err);
241 : }
242 :
243 : static inline bool mas_is_ptr(struct ma_state *mas)
244 : {
245 0 : return mas->node == MAS_ROOT;
246 : }
247 :
248 : static inline bool mas_is_start(struct ma_state *mas)
249 : {
250 0 : return mas->node == MAS_START;
251 : }
252 :
253 0 : bool mas_is_err(struct ma_state *mas)
254 : {
255 0 : return xa_is_err(mas->node);
256 : }
257 :
258 : static inline bool mas_searchable(struct ma_state *mas)
259 : {
260 0 : if (mas_is_none(mas))
261 : return false;
262 :
263 0 : if (mas_is_ptr(mas))
264 : return false;
265 :
266 : return true;
267 : }
268 :
269 : static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
270 : {
271 0 : return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
272 : }
273 :
274 : /*
275 : * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
276 : * @entry: The maple encoded node
277 : *
278 : * Return: a maple topiary pointer
279 : */
280 : static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
281 : {
282 0 : return (struct maple_topiary *)
283 0 : ((unsigned long)entry & ~MAPLE_NODE_MASK);
284 : }
285 :
286 : /*
287 : * mas_mn() - Get the maple state node.
288 : * @mas: The maple state
289 : *
290 : * Return: the maple node (not encoded - bare pointer).
291 : */
292 : static inline struct maple_node *mas_mn(const struct ma_state *mas)
293 : {
294 0 : return mte_to_node(mas->node);
295 : }
296 :
297 : /*
298 : * mte_set_node_dead() - Set a maple encoded node as dead.
299 : * @mn: The maple encoded node.
300 : */
301 : static inline void mte_set_node_dead(struct maple_enode *mn)
302 : {
303 0 : mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
304 0 : smp_wmb(); /* Needed for RCU */
305 : }
306 :
307 : /* Bit 1 indicates the root is a node */
308 : #define MAPLE_ROOT_NODE 0x02
309 : /* maple_type stored bit 3-6 */
310 : #define MAPLE_ENODE_TYPE_SHIFT 0x03
311 : /* Bit 2 means a NULL somewhere below */
312 : #define MAPLE_ENODE_NULL 0x04
313 :
314 : static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
315 : enum maple_type type)
316 : {
317 0 : return (void *)((unsigned long)node |
318 0 : (type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
319 : }
320 :
321 : static inline void *mte_mk_root(const struct maple_enode *node)
322 : {
323 0 : return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
324 : }
325 :
326 : static inline void *mte_safe_root(const struct maple_enode *node)
327 : {
328 0 : return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
329 : }
330 :
331 : static inline void *mte_set_full(const struct maple_enode *node)
332 : {
333 : return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
334 : }
335 :
336 : static inline void *mte_clear_full(const struct maple_enode *node)
337 : {
338 : return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
339 : }
340 :
341 : static inline bool mte_has_null(const struct maple_enode *node)
342 : {
343 : return (unsigned long)node & MAPLE_ENODE_NULL;
344 : }
345 :
346 : static inline bool ma_is_root(struct maple_node *node)
347 : {
348 0 : return ((unsigned long)node->parent & MA_ROOT_PARENT);
349 : }
350 :
351 : static inline bool mte_is_root(const struct maple_enode *node)
352 : {
353 0 : return ma_is_root(mte_to_node(node));
354 : }
355 :
356 : static inline bool mas_is_root_limits(const struct ma_state *mas)
357 : {
358 0 : return !mas->min && mas->max == ULONG_MAX;
359 : }
360 :
361 : static inline bool mt_is_alloc(struct maple_tree *mt)
362 : {
363 0 : return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
364 : }
365 :
366 : /*
367 : * The Parent Pointer
368 : * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
369 : * When storing 32 or 64 bit values, the offset can fit into 5 bits. The 16
370 : * bit values need an extra bit to store the offset. This extra bit comes from
371 : * a reuse of the last bit in the node type. This is possible by using bit 1 to
372 : * indicate if bit 2 is part of the type or the slot.
373 : *
374 : * Note types:
375 : * 0b??1 = Root
376 : * 0b?00 = 16 bit nodes
377 : * 0b010 = 32 bit nodes
378 : * 0b110 = 64 bit nodes
379 : *
380 : * Slot size and alignment
381 : * 0b??1 : Root
382 : * 0b?00 : 16 bit values, type in 0-1, slot in 2-7
383 : * 0b010 : 32 bit values, type in 0-2, slot in 3-7
384 : * 0b110 : 64 bit values, type in 0-2, slot in 3-7
385 : */
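
The comment above describes how a slot number and node type are packed into the low byte of the 256B-aligned parent pointer. A small stand-alone sketch of that encode/decode arithmetic for a 64-bit range parent follows; the address and slot are made up, and only the bit layout mirrors the masks defined above.

#include <assert.h>

#define TOY_NODE_MASK	0xFFUL	/* low byte holds metadata (256B-aligned nodes) */
#define TOY_SLOT_SHIFT	3	/* corresponds to MAPLE_PARENT_SLOT_SHIFT */
#define TOY_SLOT_MASK	0xF8UL	/* corresponds to MAPLE_PARENT_SLOT_MASK */
#define TOY_RANGE64	0x06UL	/* corresponds to MAPLE_PARENT_RANGE64 */

int main(void)
{
	unsigned long parent = 0x1000;	/* hypothetical 256B-aligned parent node */
	unsigned long slot = 5;
	unsigned long enc, dec_slot, dec_parent;

	/* Encode: clear the metadata byte, then pack the slot and type bits. */
	enc = (parent & ~TOY_NODE_MASK) | (slot << TOY_SLOT_SHIFT) | TOY_RANGE64;

	/* Decode: mask the slot bits back out, and the parent address. */
	dec_slot = (enc & TOY_SLOT_MASK) >> TOY_SLOT_SHIFT;
	dec_parent = enc & ~TOY_NODE_MASK;

	assert(dec_slot == slot && dec_parent == parent);
	return 0;
}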
386 :
387 : #define MAPLE_PARENT_ROOT 0x01
388 :
389 : #define MAPLE_PARENT_SLOT_SHIFT 0x03
390 : #define MAPLE_PARENT_SLOT_MASK 0xF8
391 :
392 : #define MAPLE_PARENT_16B_SLOT_SHIFT 0x02
393 : #define MAPLE_PARENT_16B_SLOT_MASK 0xFC
394 :
395 : #define MAPLE_PARENT_RANGE64 0x06
396 : #define MAPLE_PARENT_RANGE32 0x04
397 : #define MAPLE_PARENT_NOT_RANGE16 0x02
398 :
399 : /*
400 : * mte_parent_shift() - Get the parent shift for the slot storage.
401 : * @parent: The parent pointer cast as an unsigned long
402 : * Return: The shift into that pointer to the star to of the slot
403 : */
404 : static inline unsigned long mte_parent_shift(unsigned long parent)
405 : {
406 : /* Note bit 1 == 0 means 16B */
407 0 : if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
408 : return MAPLE_PARENT_SLOT_SHIFT;
409 :
410 : return MAPLE_PARENT_16B_SLOT_SHIFT;
411 : }
412 :
413 : /*
414 : * mte_parent_slot_mask() - Get the slot mask for the parent.
415 : * @parent: The parent pointer cast as an unsigned long.
416 : * Return: The slot mask for that parent.
417 : */
418 : static inline unsigned long mte_parent_slot_mask(unsigned long parent)
419 : {
420 : /* Note bit 1 == 0 means 16B */
421 0 : if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
422 : return MAPLE_PARENT_SLOT_MASK;
423 :
424 : return MAPLE_PARENT_16B_SLOT_MASK;
425 : }
426 :
427 : /*
428 : * mte_parent_enum() - Return the maple_type of the parent from the stored
429 : * parent type.
430 : * @p_enode: The parent pointer cast as a maple_enode
431 : * @mt: The maple tree
432 : * Return: The node->parent maple_type
433 : */
434 : static inline
435 : enum maple_type mte_parent_enum(struct maple_enode *p_enode,
436 : struct maple_tree *mt)
437 : {
438 : unsigned long p_type;
439 :
440 0 : p_type = (unsigned long)p_enode;
441 0 : if (p_type & MAPLE_PARENT_ROOT)
442 : return 0; /* Validated in the caller. */
443 :
444 0 : p_type &= MAPLE_NODE_MASK;
445 0 : p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type));
446 :
447 0 : switch (p_type) {
448 : case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
449 0 : if (mt_is_alloc(mt))
450 : return maple_arange_64;
451 : return maple_range_64;
452 : }
453 :
454 : return 0;
455 : }
456 :
457 : static inline
458 0 : enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
459 : {
460 0 : return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree);
461 : }
462 :
463 : /*
464 : * mte_set_parent() - Set the parent node and encode the slot
465 : * @enode: The encoded maple node.
466 : * @parent: The encoded maple node that is the parent of @enode.
467 : * @slot: The slot that @enode resides in @parent.
468 : *
469 : * Slot number is encoded in the enode->parent bit 3-7 or 2-7, depending on the
470 : * parent type.
471 : */
472 : static inline
473 0 : void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent,
474 : unsigned char slot)
475 : {
476 0 : unsigned long val = (unsigned long)parent;
477 : unsigned long shift;
478 : unsigned long type;
479 0 : enum maple_type p_type = mte_node_type(parent);
480 :
481 0 : BUG_ON(p_type == maple_dense);
482 0 : BUG_ON(p_type == maple_leaf_64);
483 :
484 0 : switch (p_type) {
485 : case maple_range_64:
486 : case maple_arange_64:
487 : shift = MAPLE_PARENT_SLOT_SHIFT;
488 : type = MAPLE_PARENT_RANGE64;
489 : break;
490 : default:
491 : case maple_dense:
492 : case maple_leaf_64:
493 0 : shift = type = 0;
494 0 : break;
495 : }
496 :
497 0 : val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
498 0 : val |= (slot << shift) | type;
499 0 : mte_to_node(enode)->parent = ma_parent_ptr(val);
500 0 : }
501 :
502 : /*
503 : * mte_parent_slot() - get the parent slot of @enode.
504 : * @enode: The encoded maple node.
505 : *
506 : * Return: The slot in the parent node where @enode resides.
507 : */
508 : static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
509 : {
510 0 : unsigned long val = (unsigned long)mte_to_node(enode)->parent;
511 :
512 0 : if (val & MA_ROOT_PARENT)
513 : return 0;
514 :
515 : /*
516 : * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
517 : * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
518 : */
519 0 : return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
520 : }
521 :
522 : /*
523 : * mte_parent() - Get the parent of @node.
524 : * @node: The encoded maple node.
525 : *
526 : * Return: The parent maple node.
527 : */
528 : static inline struct maple_node *mte_parent(const struct maple_enode *enode)
529 : {
530 0 : return (void *)((unsigned long)
531 0 : (mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
532 : }
533 :
534 : /*
535 : * ma_dead_node() - check if the @node is dead.
536 : * @node: The maple node
537 : *
538 : * Return: true if dead, false otherwise.
539 : */
540 : static inline bool ma_dead_node(const struct maple_node *node)
541 : {
542 : struct maple_node *parent;
543 :
544 : /* Do not reorder reads from the node prior to the parent check */
545 0 : smp_rmb();
546 0 : parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
547 0 : return (parent == node);
548 : }
549 :
550 : /*
551 : * mte_dead_node() - check if the @enode is dead.
552 : * @enode: The encoded maple node
553 : *
554 : * Return: true if dead, false otherwise.
555 : */
556 : static inline bool mte_dead_node(const struct maple_enode *enode)
557 : {
558 : struct maple_node *parent, *node;
559 :
560 0 : node = mte_to_node(enode);
561 : /* Do not reorder reads from the node prior to the parent check */
562 0 : smp_rmb();
563 0 : parent = mte_parent(enode);
564 0 : return (parent == node);
565 : }
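
The "dead node" convention used by mte_set_node_dead() and the two checks above is that a node's parent pointer refers back to the node itself. A toy, stand-alone model of that idea (not the kernel structures, which also carry metadata in the pointer's low bits):

#include <assert.h>
#include <stddef.h>

/* Minimal model: a node is dead when its parent pointer refers to itself. */
struct toy_node {
	struct toy_node *parent;
};

static void toy_set_dead(struct toy_node *n)	  { n->parent = n; }
static int toy_is_dead(const struct toy_node *n) { return n->parent == n; }

int main(void)
{
	struct toy_node parent = { .parent = NULL };
	struct toy_node child = { .parent = &parent };

	assert(!toy_is_dead(&child));
	toy_set_dead(&child);
	assert(toy_is_dead(&child));
	return 0;
}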
566 :
567 : /*
568 : * mas_allocated() - Get the number of nodes allocated in a maple state.
569 : * @mas: The maple state
570 : *
571 : * The ma_state alloc member is overloaded to hold a pointer to the first
572 : * allocated node or to the number of requested nodes to allocate. If bit 0 is
573 : * set, then the alloc contains the number of requested nodes. If there is an
574 : * allocated node, then the total allocated nodes is in that node.
575 : *
576 : * Return: The total number of nodes allocated
577 : */
578 : static inline unsigned long mas_allocated(const struct ma_state *mas)
579 : {
580 0 : if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
581 : return 0;
582 :
583 0 : return mas->alloc->total;
584 : }
585 :
586 : /*
587 : * mas_set_alloc_req() - Set the requested number of allocations.
588 : * @mas: the maple state
589 : * @count: the number of allocations.
590 : *
591 : * The requested number of allocations is either in the first allocated node,
592 : * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
593 : * no allocated node. Set the request either in the node or do the necessary
594 : * encoding to store in @mas->alloc directly.
595 : */
596 : static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
597 : {
598 0 : if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
599 0 : if (!count)
600 0 : mas->alloc = NULL;
601 : else
602 0 : mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
603 : return;
604 : }
605 :
606 0 : mas->alloc->request_count = count;
607 : }
608 :
609 : /*
610 : * mas_alloc_req() - get the requested number of allocations.
611 : * @mas: The maple state
612 : *
613 : * The alloc count is either stored directly in @mas, or in
614 : * @mas->alloc->request_count if there is at least one node allocated. Decode
615 : * the request count if it's stored directly in @mas->alloc.
616 : *
617 : * Return: The allocation request count.
618 : */
619 : static inline unsigned int mas_alloc_req(const struct ma_state *mas)
620 : {
621 0 : if ((unsigned long)mas->alloc & 0x1)
622 0 : return (unsigned long)(mas->alloc) >> 1;
623 0 : else if (mas->alloc)
624 0 : return mas->alloc->request_count;
625 : return 0;
626 : }
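
The three helpers above overload mas->alloc to be either a tagged request count (bit 0 set) or a pointer to the first allocated node. The following stand-alone sketch mirrors just the tagged-count half of that encoding with plain integers; it is illustrative only, not the kernel code:

#include <assert.h>

/*
 * Model of the mas->alloc overload: an odd value encodes a pending request
 * count in the upper bits; an even non-zero value would be a node pointer
 * (pointers are at least 2-byte aligned, so bit 0 is free for the tag).
 */
static unsigned long toy_encode_request(unsigned long count)
{
	return (count << 1) | 1UL;
}

static unsigned long toy_decode_request(unsigned long alloc)
{
	return (alloc & 1UL) ? alloc >> 1 : 0;
}

int main(void)
{
	unsigned long alloc = toy_encode_request(7);

	assert(alloc & 1UL);			/* tagged, so not a valid pointer */
	assert(toy_decode_request(alloc) == 7);
	return 0;
}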
627 :
628 : /*
629 : * ma_pivots() - Get a pointer to the maple node pivots.
630 : * @node - the maple node
631 : * @type - the node type
632 : *
633 : * In the event of a dead node, this array may be %NULL
634 : *
635 : * Return: A pointer to the maple node pivots
636 : */
637 : static inline unsigned long *ma_pivots(struct maple_node *node,
638 : enum maple_type type)
639 : {
640 0 : switch (type) {
641 : case maple_arange_64:
642 0 : return node->ma64.pivot;
643 : case maple_range_64:
644 : case maple_leaf_64:
645 0 : return node->mr64.pivot;
646 : case maple_dense:
647 : return NULL;
648 : }
649 : return NULL;
650 : }
651 :
652 : /*
653 : * ma_gaps() - Get a pointer to the maple node gaps.
654 : * @node - the maple node
655 : * @type - the node type
656 : *
657 : * Return: A pointer to the maple node gaps
658 : */
659 : static inline unsigned long *ma_gaps(struct maple_node *node,
660 : enum maple_type type)
661 : {
662 0 : switch (type) {
663 : case maple_arange_64:
664 0 : return node->ma64.gap;
665 : case maple_range_64:
666 : case maple_leaf_64:
667 : case maple_dense:
668 : return NULL;
669 : }
670 : return NULL;
671 : }
672 :
673 : /*
674 : * mte_pivot() - Get the pivot at @piv of the maple encoded node.
675 : * @mn: The maple encoded node.
676 : * @piv: The pivot.
677 : *
678 : * Return: the pivot at @piv of @mn.
679 : */
680 0 : static inline unsigned long mte_pivot(const struct maple_enode *mn,
681 : unsigned char piv)
682 : {
683 0 : struct maple_node *node = mte_to_node(mn);
684 0 : enum maple_type type = mte_node_type(mn);
685 :
686 0 : if (piv >= mt_pivots[type]) {
687 0 : WARN_ON(1);
688 0 : return 0;
689 : }
690 0 : switch (type) {
691 : case maple_arange_64:
692 0 : return node->ma64.pivot[piv];
693 : case maple_range_64:
694 : case maple_leaf_64:
695 0 : return node->mr64.pivot[piv];
696 : case maple_dense:
697 : return 0;
698 : }
699 : return 0;
700 : }
701 :
702 : /*
703 : * mas_safe_pivot() - get the pivot at @piv or mas->max.
704 : * @mas: The maple state
705 : * @pivots: The pointer to the maple node pivots
706 : * @piv: The pivot to fetch
707 : * @type: The maple node type
708 : *
709 : * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
710 : * otherwise.
711 : */
712 : static inline unsigned long
713 : mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
714 : unsigned char piv, enum maple_type type)
715 : {
716 0 : if (piv >= mt_pivots[type])
717 0 : return mas->max;
718 :
719 0 : return pivots[piv];
720 : }
721 :
722 : /*
723 : * mas_safe_min() - Return the minimum for a given offset.
724 : * @mas: The maple state
725 : * @pivots: The pointer to the maple node pivots
726 : * @offset: The offset into the pivot array
727 : *
728 : * Return: The minimum range value that is contained in @offset.
729 : */
730 : static inline unsigned long
731 : mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
732 : {
733 0 : if (likely(offset))
734 0 : return pivots[offset - 1] + 1;
735 :
736 0 : return mas->min;
737 : }
738 :
739 : /*
740 : * mas_logical_pivot() - Get the logical pivot of a given offset.
741 : * @mas: The maple state
742 : * @pivots: The pointer to the maple node pivots
743 : * @offset: The offset into the pivot array
744 : * @type: The maple node type
745 : *
746 : * When there is no value at a pivot (beyond the end of the data), then the
747 : * pivot is actually @mas->max.
748 : *
749 : * Return: the logical pivot of a given @offset.
750 : */
751 : static inline unsigned long
752 : mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
753 : unsigned char offset, enum maple_type type)
754 : {
755 0 : unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);
756 :
757 0 : if (likely(lpiv))
758 : return lpiv;
759 :
760 0 : if (likely(offset))
761 : return mas->max;
762 :
763 : return lpiv;
764 : }
765 :
766 : /*
767 : * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
768 : * @mn: The encoded maple node
769 : * @piv: The pivot offset
770 : * @val: The value of the pivot
771 : */
772 0 : static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
773 : unsigned long val)
774 : {
775 0 : struct maple_node *node = mte_to_node(mn);
776 0 : enum maple_type type = mte_node_type(mn);
777 :
778 0 : BUG_ON(piv >= mt_pivots[type]);
779 0 : switch (type) {
780 : default:
781 : case maple_range_64:
782 : case maple_leaf_64:
783 0 : node->mr64.pivot[piv] = val;
784 0 : break;
785 : case maple_arange_64:
786 0 : node->ma64.pivot[piv] = val;
787 0 : break;
788 : case maple_dense:
789 : break;
790 : }
791 :
792 0 : }
793 :
794 : /*
795 : * ma_slots() - Get a pointer to the maple node slots.
796 : * @mn: The maple node
797 : * @mt: The maple node type
798 : *
799 : * Return: A pointer to the maple node slots
800 : */
801 : static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
802 : {
803 0 : switch (mt) {
804 : default:
805 : case maple_arange_64:
806 0 : return mn->ma64.slot;
807 : case maple_range_64:
808 : case maple_leaf_64:
809 0 : return mn->mr64.slot;
810 : case maple_dense:
811 0 : return mn->slot;
812 : }
813 : }
814 :
815 : static inline bool mt_locked(const struct maple_tree *mt)
816 : {
817 : return mt_external_lock(mt) ? mt_lock_is_held(mt) :
818 : lockdep_is_held(&mt->ma_lock);
819 : }
820 :
821 : static inline void *mt_slot(const struct maple_tree *mt,
822 : void __rcu **slots, unsigned char offset)
823 : {
824 0 : return rcu_dereference_check(slots[offset], mt_locked(mt));
825 : }
826 :
827 : static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
828 : unsigned char offset)
829 : {
830 0 : return rcu_dereference_protected(slots[offset], mt_locked(mt));
831 : }
832 : /*
833 : * mas_slot_locked() - Get the slot value when holding the maple tree lock.
834 : * @mas: The maple state
835 : * @slots: The pointer to the slots
836 : * @offset: The offset into the slots array to fetch
837 : *
838 : * Return: The entry stored in @slots at the @offset.
839 : */
840 : static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
841 : unsigned char offset)
842 : {
843 0 : return mt_slot_locked(mas->tree, slots, offset);
844 : }
845 :
846 : /*
847 : * mas_slot() - Get the slot value when not holding the maple tree lock.
848 : * @mas: The maple state
849 : * @slots: The pointer to the slots
850 : * @offset: The offset into the slots array to fetch
851 : *
852 : * Return: The entry stored in @slots at the @offset
853 : */
854 : static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
855 : unsigned char offset)
856 : {
857 0 : return mt_slot(mas->tree, slots, offset);
858 : }
859 :
860 : /*
861 : * mas_root() - Get the maple tree root.
862 : * @mas: The maple state.
863 : *
864 : * Return: The pointer to the root of the tree
865 : */
866 : static inline void *mas_root(struct ma_state *mas)
867 : {
868 0 : return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
869 : }
870 :
871 : static inline void *mt_root_locked(struct maple_tree *mt)
872 : {
873 : return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
874 : }
875 :
876 : /*
877 : * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
878 : * @mas: The maple state.
879 : *
880 : * Return: The pointer to the root of the tree
881 : */
882 : static inline void *mas_root_locked(struct ma_state *mas)
883 : {
884 0 : return mt_root_locked(mas->tree);
885 : }
886 :
887 : static inline struct maple_metadata *ma_meta(struct maple_node *mn,
888 : enum maple_type mt)
889 : {
890 0 : switch (mt) {
891 : case maple_arange_64:
892 0 : return &mn->ma64.meta;
893 : default:
894 0 : return &mn->mr64.meta;
895 : }
896 : }
897 :
898 : /*
899 : * ma_set_meta() - Set the metadata information of a node.
900 : * @mn: The maple node
901 : * @mt: The maple node type
902 : * @offset: The offset of the highest sub-gap in this node.
903 : * @end: The end of the data in this node.
904 : */
905 : static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
906 : unsigned char offset, unsigned char end)
907 : {
908 0 : struct maple_metadata *meta = ma_meta(mn, mt);
909 :
910 0 : meta->gap = offset;
911 0 : meta->end = end;
912 : }
913 :
914 : /*
915 : * mt_clear_meta() - clear the metadata information of a node, if it exists
916 : * @mt: The maple tree
917 : * @mn: The maple node
918 : * @type: The maple node type
921 : */
922 0 : static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
923 : enum maple_type type)
924 : {
925 : struct maple_metadata *meta;
926 : unsigned long *pivots;
927 : void __rcu **slots;
928 : void *next;
929 :
930 0 : switch (type) {
931 : case maple_range_64:
932 0 : pivots = mn->mr64.pivot;
933 0 : if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
934 0 : slots = mn->mr64.slot;
935 0 : next = mt_slot_locked(mt, slots,
936 : MAPLE_RANGE64_SLOTS - 1);
937 0 : if (unlikely((mte_to_node(next) &&
938 : mte_node_type(next))))
939 : return; /* no metadata, could be node */
940 : }
941 : fallthrough;
942 : case maple_arange_64:
943 0 : meta = ma_meta(mn, type);
944 : break;
945 : default:
946 : return;
947 : }
948 :
949 0 : meta->gap = 0;
950 0 : meta->end = 0;
951 : }
952 :
953 : /*
954 : * ma_meta_end() - Get the data end of a node from the metadata
955 : * @mn: The maple node
956 : * @mt: The maple node type
957 : */
958 : static inline unsigned char ma_meta_end(struct maple_node *mn,
959 : enum maple_type mt)
960 : {
961 0 : struct maple_metadata *meta = ma_meta(mn, mt);
962 :
963 0 : return meta->end;
964 : }
965 :
966 : /*
967 : * ma_meta_gap() - Get the largest gap location of a node from the metadata
968 : * @mn: The maple node
969 : * @mt: The maple node type
970 : */
971 0 : static inline unsigned char ma_meta_gap(struct maple_node *mn,
972 : enum maple_type mt)
973 : {
974 0 : BUG_ON(mt != maple_arange_64);
975 :
976 0 : return mn->ma64.meta.gap;
977 : }
978 :
979 : /*
980 : * ma_set_meta_gap() - Set the largest gap location in a node's metadata
981 : * @mn: The maple node
982 : * @mt: The maple node type
983 : * @offset: The location of the largest gap.
984 : */
985 : static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
986 : unsigned char offset)
987 : {
988 :
989 0 : struct maple_metadata *meta = ma_meta(mn, mt);
990 :
991 0 : meta->gap = offset;
992 : }
993 :
994 : /*
995 : * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
996 : * @mat - the ma_topiary, a linked list of dead nodes.
997 : * @dead_enode - the node to be marked as dead and added to the tail of the list
998 : *
999 : * Add the @dead_enode to the linked list in @mat.
1000 : */
1001 : static inline void mat_add(struct ma_topiary *mat,
1002 : struct maple_enode *dead_enode)
1003 : {
1004 0 : mte_set_node_dead(dead_enode);
1005 0 : mte_to_mat(dead_enode)->next = NULL;
1006 0 : if (!mat->tail) {
1007 0 : mat->tail = mat->head = dead_enode;
1008 : return;
1009 : }
1010 :
1011 0 : mte_to_mat(mat->tail)->next = dead_enode;
1012 0 : mat->tail = dead_enode;
1013 : }
1014 :
1015 : static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
1016 : static inline void mas_free(struct ma_state *mas, struct maple_enode *used);
1017 :
1018 : /*
1019 : * mas_mat_free() - Free all nodes in a dead list.
1020 : * @mas - the maple state
1021 : * @mat - the ma_topiary linked list of dead nodes to free.
1022 : *
1023 : * Free walk a dead list.
1024 : */
1025 : static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
1026 : {
1027 : struct maple_enode *next;
1028 :
1029 0 : while (mat->head) {
1030 0 : next = mte_to_mat(mat->head)->next;
1031 0 : mas_free(mas, mat->head);
1032 0 : mat->head = next;
1033 : }
1034 : }
1035 :
1036 : /*
1037 : * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
1038 : * @mas - the maple state
1039 : * @mat - the ma_topiary linked list of dead nodes to free.
1040 : *
1041 : * Destroy walk a dead list.
1042 : */
1043 : static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
1044 : {
1045 : struct maple_enode *next;
1046 :
1047 0 : while (mat->head) {
1048 0 : next = mte_to_mat(mat->head)->next;
1049 0 : mte_destroy_walk(mat->head, mat->mtree);
1050 0 : mat->head = next;
1051 : }
1052 : }
1053 : /*
1054 : * mas_descend() - Descend into the slot stored in the ma_state.
1055 : * @mas - the maple state.
1056 : *
1057 : * Note: Not RCU safe, only use in write side or debug code.
1058 : */
1059 0 : static inline void mas_descend(struct ma_state *mas)
1060 : {
1061 : enum maple_type type;
1062 : unsigned long *pivots;
1063 : struct maple_node *node;
1064 : void __rcu **slots;
1065 :
1066 0 : node = mas_mn(mas);
1067 0 : type = mte_node_type(mas->node);
1068 0 : pivots = ma_pivots(node, type);
1069 0 : slots = ma_slots(node, type);
1070 :
1071 0 : if (mas->offset)
1072 0 : mas->min = pivots[mas->offset - 1] + 1;
1073 0 : mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
1074 0 : mas->node = mas_slot(mas, slots, mas->offset);
1075 0 : }
1076 :
1077 : /*
1078 : * mte_set_gap() - Set a maple node gap.
1079 : * @mn: The encoded maple node
1080 : * @gap: The offset of the gap to set
1081 : * @val: The gap value
1082 : */
1083 : static inline void mte_set_gap(const struct maple_enode *mn,
1084 : unsigned char gap, unsigned long val)
1085 : {
1086 0 : switch (mte_node_type(mn)) {
1087 : default:
1088 : break;
1089 : case maple_arange_64:
1090 0 : mte_to_node(mn)->ma64.gap[gap] = val;
1091 : break;
1092 : }
1093 : }
1094 :
1095 : /*
1096 : * mas_ascend() - Walk up a level of the tree.
1097 : * @mas: The maple state
1098 : *
1099 : * Sets the @mas->max and @mas->min to the correct values when walking up. This
1100 : * may cause several levels of walking up to find the correct min and max.
1101 : * May find a dead node which will cause a premature return.
1102 : * Return: 1 on dead node, 0 otherwise
1103 : */
1104 0 : static int mas_ascend(struct ma_state *mas)
1105 : {
1106 : struct maple_enode *p_enode; /* parent enode. */
1107 : struct maple_enode *a_enode; /* ancestor enode. */
1108 : struct maple_node *a_node; /* ancestor node. */
1109 : struct maple_node *p_node; /* parent node. */
1110 : unsigned char a_slot;
1111 : enum maple_type a_type;
1112 : unsigned long min, max;
1113 : unsigned long *pivots;
1114 : unsigned char offset;
1115 0 : bool set_max = false, set_min = false;
1116 :
1117 0 : a_node = mas_mn(mas);
1118 0 : if (ma_is_root(a_node)) {
1119 0 : mas->offset = 0;
1120 0 : return 0;
1121 : }
1122 :
1123 0 : p_node = mte_parent(mas->node);
1124 0 : if (unlikely(a_node == p_node))
1125 : return 1;
1126 0 : a_type = mas_parent_enum(mas, mas->node);
1127 0 : offset = mte_parent_slot(mas->node);
1128 0 : a_enode = mt_mk_node(p_node, a_type);
1129 :
1130 : /* Check to make sure all parent information is still accurate */
1131 0 : if (p_node != mte_parent(mas->node))
1132 : return 1;
1133 :
1134 0 : mas->node = a_enode;
1135 0 : mas->offset = offset;
1136 :
1137 0 : if (mte_is_root(a_enode)) {
1138 0 : mas->max = ULONG_MAX;
1139 0 : mas->min = 0;
1140 0 : return 0;
1141 : }
1142 :
1143 : min = 0;
1144 : max = ULONG_MAX;
1145 : do {
1146 0 : p_enode = a_enode;
1147 0 : a_type = mas_parent_enum(mas, p_enode);
1148 0 : a_node = mte_parent(p_enode);
1149 0 : a_slot = mte_parent_slot(p_enode);
1150 0 : a_enode = mt_mk_node(a_node, a_type);
1151 0 : pivots = ma_pivots(a_node, a_type);
1152 :
1153 0 : if (unlikely(ma_dead_node(a_node)))
1154 : return 1;
1155 :
1156 0 : if (!set_min && a_slot) {
1157 0 : set_min = true;
1158 0 : min = pivots[a_slot - 1] + 1;
1159 : }
1160 :
1161 0 : if (!set_max && a_slot < mt_pivots[a_type]) {
1162 0 : set_max = true;
1163 0 : max = pivots[a_slot];
1164 : }
1165 :
1166 0 : if (unlikely(ma_dead_node(a_node)))
1167 : return 1;
1168 :
1169 0 : if (unlikely(ma_is_root(a_node)))
1170 : break;
1171 :
1172 0 : } while (!set_min || !set_max);
1173 :
1174 0 : mas->max = max;
1175 0 : mas->min = min;
1176 0 : return 0;
1177 : }
1178 :
1179 : /*
1180 : * mas_pop_node() - Get a previously allocated maple node from the maple state.
1181 : * @mas: The maple state
1182 : *
1183 : * Return: A pointer to a maple node.
1184 : */
1185 0 : static inline struct maple_node *mas_pop_node(struct ma_state *mas)
1186 : {
1187 0 : struct maple_alloc *ret, *node = mas->alloc;
1188 0 : unsigned long total = mas_allocated(mas);
1189 0 : unsigned int req = mas_alloc_req(mas);
1190 :
1191 : /* nothing or a request pending. */
1192 0 : if (WARN_ON(!total))
1193 : return NULL;
1194 :
1195 0 : if (total == 1) {
1196 : /* single allocation in this ma_state */
1197 0 : mas->alloc = NULL;
1198 0 : ret = node;
1199 0 : goto single_node;
1200 : }
1201 :
1202 0 : if (node->node_count == 1) {
1203 : /* Single allocation in this node. */
1204 0 : mas->alloc = node->slot[0];
1205 0 : mas->alloc->total = node->total - 1;
1206 0 : ret = node;
1207 0 : goto new_head;
1208 : }
1209 0 : node->total--;
1210 0 : ret = node->slot[--node->node_count];
1211 0 : node->slot[node->node_count] = NULL;
1212 :
1213 : single_node:
1214 : new_head:
1215 0 : if (req) {
1216 0 : req++;
1217 0 : mas_set_alloc_req(mas, req);
1218 : }
1219 :
1220 0 : memset(ret, 0, sizeof(*ret));
1221 0 : return (struct maple_node *)ret;
1222 : }
1223 :
1224 : /*
1225 : * mas_push_node() - Push a node back on the maple state allocation.
1226 : * @mas: The maple state
1227 : * @used: The used maple node
1228 : *
1229 : * Stores the maple node back into @mas->alloc for reuse. Updates allocated and
1230 : * requested node count as necessary.
1231 : */
1232 0 : static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
1233 : {
1234 0 : struct maple_alloc *reuse = (struct maple_alloc *)used;
1235 0 : struct maple_alloc *head = mas->alloc;
1236 : unsigned long count;
1237 0 : unsigned int requested = mas_alloc_req(mas);
1238 :
1239 0 : count = mas_allocated(mas);
1240 :
1241 0 : reuse->request_count = 0;
1242 0 : reuse->node_count = 0;
1243 0 : if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
1244 0 : head->slot[head->node_count++] = reuse;
1245 0 : head->total++;
1246 0 : goto done;
1247 : }
1248 :
1249 0 : reuse->total = 1;
1250 0 : if ((head) && !((unsigned long)head & 0x1)) {
1251 0 : reuse->slot[0] = head;
1252 0 : reuse->node_count = 1;
1253 0 : reuse->total += head->total;
1254 : }
1255 :
1256 0 : mas->alloc = reuse;
1257 : done:
1258 0 : if (requested > 1)
1259 0 : mas_set_alloc_req(mas, requested - 1);
1260 0 : }
1261 :
1262 : /*
1263 : * mas_alloc_nodes() - Allocate nodes into a maple state
1264 : * @mas: The maple state
1265 : * @gfp: The GFP Flags
1266 : */
1267 0 : static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
1268 : {
1269 : struct maple_alloc *node;
1270 0 : unsigned long allocated = mas_allocated(mas);
1271 0 : unsigned int requested = mas_alloc_req(mas);
1272 : unsigned int count;
1273 0 : void **slots = NULL;
1274 0 : unsigned int max_req = 0;
1275 :
1276 0 : if (!requested)
1277 : return;
1278 :
1279 0 : mas_set_alloc_req(mas, 0);
1280 0 : if (mas->mas_flags & MA_STATE_PREALLOC) {
1281 0 : if (allocated)
1282 : return;
1283 0 : WARN_ON(!allocated);
1284 : }
1285 :
1286 0 : if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
1287 0 : node = (struct maple_alloc *)mt_alloc_one(gfp);
1288 0 : if (!node)
1289 : goto nomem_one;
1290 :
1291 0 : if (allocated) {
1292 0 : node->slot[0] = mas->alloc;
1293 0 : node->node_count = 1;
1294 : } else {
1295 0 : node->node_count = 0;
1296 : }
1297 :
1298 0 : mas->alloc = node;
1299 0 : node->total = ++allocated;
1300 0 : requested--;
1301 : }
1302 :
1303 0 : node = mas->alloc;
1304 0 : node->request_count = 0;
1305 0 : while (requested) {
1306 0 : max_req = MAPLE_ALLOC_SLOTS - node->node_count;
1307 0 : slots = (void **)&node->slot[node->node_count];
1308 0 : max_req = min(requested, max_req);
1309 0 : count = mt_alloc_bulk(gfp, max_req, slots);
1310 0 : if (!count)
1311 : goto nomem_bulk;
1312 :
1313 0 : if (node->node_count == 0) {
1314 0 : node->slot[0]->node_count = 0;
1315 0 : node->slot[0]->request_count = 0;
1316 : }
1317 :
1318 0 : node->node_count += count;
1319 0 : allocated += count;
1320 0 : node = node->slot[0];
1321 0 : requested -= count;
1322 : }
1323 0 : mas->alloc->total = allocated;
1324 0 : return;
1325 :
1326 : nomem_bulk:
1327 : /* Clean up potential freed allocations on bulk failure */
1328 0 : memset(slots, 0, max_req * sizeof(unsigned long));
1329 : nomem_one:
1330 0 : mas_set_alloc_req(mas, requested);
1331 0 : if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
1332 0 : mas->alloc->total = allocated;
1333 0 : mas_set_err(mas, -ENOMEM);
1334 : }
1335 :
1336 : /*
1337 : * mas_free() - Free an encoded maple node
1338 : * @mas: The maple state
1339 : * @used: The encoded maple node to free.
1340 : *
1341 : * Uses rcu free if necessary, pushes @used back on the maple state allocations
1342 : * otherwise.
1343 : */
1344 0 : static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
1345 : {
1346 0 : struct maple_node *tmp = mte_to_node(used);
1347 :
1348 0 : if (mt_in_rcu(mas->tree))
1349 0 : ma_free_rcu(tmp);
1350 : else
1351 0 : mas_push_node(mas, tmp);
1352 0 : }
1353 :
1354 : /*
1355 : * mas_node_count_gfp() - Check if enough nodes are allocated and request more
1356 : * if there are not enough nodes.
1357 : * @mas: The maple state
1358 : * @count: The number of nodes needed
1359 : * @gfp: the gfp flags
1360 : */
1361 0 : static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
1362 : {
1363 0 : unsigned long allocated = mas_allocated(mas);
1364 :
1365 0 : if (allocated < count) {
1366 0 : mas_set_alloc_req(mas, count - allocated);
1367 0 : mas_alloc_nodes(mas, gfp);
1368 : }
1369 0 : }
1370 :
1371 : /*
1372 : * mas_node_count() - Check if enough nodes are allocated and request more if
1373 : * there are not enough nodes.
1374 : * @mas: The maple state
1375 : * @count: The number of nodes needed
1376 : *
1377 : * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
1378 : */
1379 : static void mas_node_count(struct ma_state *mas, int count)
1380 : {
1381 0 : return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
1382 : }
1383 :
1384 : /*
1385 : * mas_start() - Sets up maple state for operations.
1386 : * @mas: The maple state.
1387 : *
1388 : * If mas->node == MAS_START, then set the min, max and depth to
1389 : * defaults.
1390 : *
1391 : * Return:
1392 : * - If mas->node is an error or not MAS_START, return NULL.
1393 : * - If it's an empty tree: NULL & mas->node == MAS_NONE
1394 : * - If it's a single entry: The entry & mas->node == MAS_ROOT
1395 : * - If it's a tree: NULL & mas->node == safe root node.
1396 : */
1397 0 : static inline struct maple_enode *mas_start(struct ma_state *mas)
1398 : {
1399 0 : if (likely(mas_is_start(mas))) {
1400 : struct maple_enode *root;
1401 :
1402 0 : mas->min = 0;
1403 0 : mas->max = ULONG_MAX;
1404 0 : mas->depth = 0;
1405 :
1406 : retry:
1407 0 : root = mas_root(mas);
1408 : /* Tree with nodes */
1409 0 : if (likely(xa_is_node(root))) {
1410 0 : mas->depth = 1;
1411 0 : mas->node = mte_safe_root(root);
1412 0 : mas->offset = 0;
1413 0 : if (mte_dead_node(mas->node))
1414 : goto retry;
1415 :
1416 : return NULL;
1417 : }
1418 :
1419 : /* empty tree */
1420 0 : if (unlikely(!root)) {
1421 0 : mas->node = MAS_NONE;
1422 0 : mas->offset = MAPLE_NODE_SLOTS;
1423 0 : return NULL;
1424 : }
1425 :
1426 : /* Single entry tree */
1427 0 : mas->node = MAS_ROOT;
1428 0 : mas->offset = MAPLE_NODE_SLOTS;
1429 :
1430 : /* Single entry tree. */
1431 0 : if (mas->index > 0)
1432 : return NULL;
1433 :
1434 0 : return root;
1435 : }
1436 :
1437 : return NULL;
1438 : }
1439 :
1440 : /*
1441 : * ma_data_end() - Find the end of the data in a node.
1442 : * @node: The maple node
1443 : * @type: The maple node type
1444 : * @pivots: The array of pivots in the node
1445 : * @max: The maximum value in the node
1446 : *
1447 : * Uses metadata to find the end of the data when possible.
1448 : * Return: The zero indexed last slot with data (may be null).
1449 : */
1450 0 : static inline unsigned char ma_data_end(struct maple_node *node,
1451 : enum maple_type type,
1452 : unsigned long *pivots,
1453 : unsigned long max)
1454 : {
1455 : unsigned char offset;
1456 :
1457 0 : if (!pivots)
1458 : return 0;
1459 :
1460 0 : if (type == maple_arange_64)
1461 0 : return ma_meta_end(node, type);
1462 :
1463 0 : offset = mt_pivots[type] - 1;
1464 0 : if (likely(!pivots[offset]))
1465 0 : return ma_meta_end(node, type);
1466 :
1467 0 : if (likely(pivots[offset] == max))
1468 : return offset;
1469 :
1470 0 : return mt_pivots[type];
1471 : }
1472 :
1473 : /*
1474 : * mas_data_end() - Find the end of the data (slot).
1475 : * @mas: the maple state
1476 : *
1477 : * This method is optimized to check the metadata of a node if the node type
1478 : * supports data end metadata.
1479 : *
1480 : * Return: The zero indexed last slot with data (may be null).
1481 : */
1482 0 : static inline unsigned char mas_data_end(struct ma_state *mas)
1483 : {
1484 : enum maple_type type;
1485 : struct maple_node *node;
1486 : unsigned char offset;
1487 : unsigned long *pivots;
1488 :
1489 0 : type = mte_node_type(mas->node);
1490 0 : node = mas_mn(mas);
1491 0 : if (type == maple_arange_64)
1492 0 : return ma_meta_end(node, type);
1493 :
1494 0 : pivots = ma_pivots(node, type);
1495 0 : if (unlikely(ma_dead_node(node)))
1496 : return 0;
1497 :
1498 0 : offset = mt_pivots[type] - 1;
1499 0 : if (likely(!pivots[offset]))
1500 0 : return ma_meta_end(node, type);
1501 :
1502 0 : if (likely(pivots[offset] == mas->max))
1503 : return offset;
1504 :
1505 : return mt_pivots[type];
1506 : }
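
mas_data_end() and ma_data_end() above infer the data end from the last pivot: a zero last pivot means the node is not full and the metadata holds the answer, a last pivot equal to the node maximum means the data ends exactly at that offset, and any other value means every slot is in use. A toy, stand-alone version of that decision with hypothetical pivot values:

#include <assert.h>

#define TOY_PIVOTS 15	/* pivots in a range64-style node (one fewer than slots) */

/*
 * Toy version of the "last pivot" test used above; meta_end stands in for
 * the metadata-stored end.  Illustrative only.
 */
static unsigned char toy_data_end(const unsigned long *pivots,
				  unsigned long node_max,
				  unsigned char meta_end)
{
	unsigned char last = TOY_PIVOTS - 1;

	if (!pivots[last])
		return meta_end;		/* not full: metadata is valid */
	if (pivots[last] == node_max)
		return last;			/* data ends exactly on the last pivot */
	return TOY_PIVOTS;			/* every slot is in use */
}

int main(void)
{
	unsigned long pivots[TOY_PIVOTS] = { 10, 20 };	/* remaining pivots are zero */

	/* Three entries, last pivot unused: metadata gives the answer. */
	assert(toy_data_end(pivots, 100, 2) == 2);

	/* Last pivot equals the node maximum: data ends at that offset. */
	pivots[TOY_PIVOTS - 1] = 100;
	assert(toy_data_end(pivots, 100, 0) == TOY_PIVOTS - 1);
	return 0;
}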
1507 :
1508 : /*
1509 : * mas_leaf_max_gap() - Returns the largest gap in a leaf node
1510 : * @mas - the maple state
1511 : *
1512 : * Return: The maximum gap in the leaf.
1513 : */
1514 0 : static unsigned long mas_leaf_max_gap(struct ma_state *mas)
1515 : {
1516 : enum maple_type mt;
1517 : unsigned long pstart, gap, max_gap;
1518 : struct maple_node *mn;
1519 : unsigned long *pivots;
1520 : void __rcu **slots;
1521 : unsigned char i;
1522 : unsigned char max_piv;
1523 :
1524 0 : mt = mte_node_type(mas->node);
1525 0 : mn = mas_mn(mas);
1526 0 : slots = ma_slots(mn, mt);
1527 0 : max_gap = 0;
1528 0 : if (unlikely(ma_is_dense(mt))) {
1529 : gap = 0;
1530 0 : for (i = 0; i < mt_slots[mt]; i++) {
1531 0 : if (slots[i]) {
1532 0 : if (gap > max_gap)
1533 0 : max_gap = gap;
1534 : gap = 0;
1535 : } else {
1536 0 : gap++;
1537 : }
1538 : }
1539 0 : if (gap > max_gap)
1540 0 : max_gap = gap;
1541 : return max_gap;
1542 : }
1543 :
1544 : /*
1545 : * Checking the first implied pivot optimizes the loop below, and slot 1 may
1546 : * be skipped if there is a gap in slot 0.
1547 : */
1548 0 : pivots = ma_pivots(mn, mt);
1549 0 : if (likely(!slots[0])) {
1550 0 : max_gap = pivots[0] - mas->min + 1;
1551 0 : i = 2;
1552 : } else {
1553 : i = 1;
1554 : }
1555 :
1556 : /* reduce max_piv as the special case is checked before the loop */
1557 0 : max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
1558 : /*
1559 : * Check the end implied pivot, which can only be a gap on the
1560 : * right-most node.
1561 : */
1562 0 : if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
1563 0 : gap = ULONG_MAX - pivots[max_piv];
1564 0 : if (gap > max_gap)
1565 0 : max_gap = gap;
1566 : }
1567 :
1568 0 : for (; i <= max_piv; i++) {
1569 : /* data == no gap. */
1570 0 : if (likely(slots[i]))
1571 0 : continue;
1572 :
1573 0 : pstart = pivots[i - 1];
1574 0 : gap = pivots[i] - pstart;
1575 0 : if (gap > max_gap)
1576 0 : max_gap = gap;
1577 :
1578 : /* There cannot be two gaps in a row. */
1579 0 : i++;
1580 : }
1581 : return max_gap;
1582 : }
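
A worked example of the leaf gap computation above: with hypothetical pivots {10, 20, 100} and an empty slot 1, the gap is pivots[1] - pivots[0] = 10, matching the pivots[i] - pivots[i - 1] step in mas_leaf_max_gap(). A stand-alone sketch with illustrative values only:

#include <assert.h>

int main(void)
{
	unsigned long pivots[] = { 10, 20, 100 };
	const void *slots[] = { "A", 0, "B" };	/* the NULL slot 1 is the gap */
	unsigned long gap = 0;

	if (!slots[1])
		gap = pivots[1] - pivots[0];	/* covers the range 11..20 */
	assert(gap == 10);
	return 0;
}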
1583 :
1584 : /*
1585 : * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
1586 : * @node: The maple node
1587 : * @gaps: The pointer to the gaps
1588 : * @mt: The maple node type
1589 : * @off: Pointer to store the offset location of the gap.
1590 : *
1591 : * Uses the metadata data end to scan backwards across set gaps.
1592 : *
1593 : * Return: The maximum gap value
1594 : */
1595 : static inline unsigned long
1596 : ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
1597 : unsigned char *off)
1598 : {
1599 : unsigned char offset, i;
1600 0 : unsigned long max_gap = 0;
1601 :
1602 0 : i = offset = ma_meta_end(node, mt);
1603 : do {
1604 0 : if (gaps[i] > max_gap) {
1605 0 : max_gap = gaps[i];
1606 0 : offset = i;
1607 : }
1608 0 : } while (i--);
1609 :
1610 0 : *off = offset;
1611 : return max_gap;
1612 : }
1613 :
1614 : /*
1615 : * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
1616 : * @mas: The maple state.
1617 : *
1618 : * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
1619 : *
1620 : * Return: The gap value.
1621 : */
1622 0 : static inline unsigned long mas_max_gap(struct ma_state *mas)
1623 : {
1624 : unsigned long *gaps;
1625 : unsigned char offset;
1626 : enum maple_type mt;
1627 : struct maple_node *node;
1628 :
1629 0 : mt = mte_node_type(mas->node);
1630 0 : if (ma_is_leaf(mt))
1631 0 : return mas_leaf_max_gap(mas);
1632 :
1633 0 : node = mas_mn(mas);
1634 0 : offset = ma_meta_gap(node, mt);
1635 0 : if (offset == MAPLE_ARANGE64_META_MAX)
1636 : return 0;
1637 :
1638 0 : gaps = ma_gaps(node, mt);
1639 0 : return gaps[offset];
1640 : }
1641 :
1642 : /*
1643 : * mas_parent_gap() - Set the parent gap and any gaps above, as needed
1644 : * @mas: The maple state
1645 : * @offset: The gap offset in the parent to set
1646 : * @new: The new gap value.
1647 : *
1648 : * Set the parent gap then continue to set the gap upwards, using the metadata
1649 : * of the parent to see if it is necessary to check the node above.
1650 : */
1651 0 : static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
1652 : unsigned long new)
1653 : {
1654 0 : unsigned long meta_gap = 0;
1655 : struct maple_node *pnode;
1656 : struct maple_enode *penode;
1657 : unsigned long *pgaps;
1658 : unsigned char meta_offset;
1659 : enum maple_type pmt;
1660 :
1661 0 : pnode = mte_parent(mas->node);
1662 0 : pmt = mas_parent_enum(mas, mas->node);
1663 0 : penode = mt_mk_node(pnode, pmt);
1664 : pgaps = ma_gaps(pnode, pmt);
1665 :
1666 : ascend:
1667 0 : meta_offset = ma_meta_gap(pnode, pmt);
1668 0 : if (meta_offset == MAPLE_ARANGE64_META_MAX)
1669 : meta_gap = 0;
1670 : else
1671 0 : meta_gap = pgaps[meta_offset];
1672 :
1673 0 : pgaps[offset] = new;
1674 :
1675 0 : if (meta_gap == new)
1676 : return;
1677 :
1678 0 : if (offset != meta_offset) {
1679 0 : if (meta_gap > new)
1680 : return;
1681 :
1682 0 : ma_set_meta_gap(pnode, pmt, offset);
1683 0 : } else if (new < meta_gap) {
1684 0 : meta_offset = 15;
1685 0 : new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
1686 0 : ma_set_meta_gap(pnode, pmt, meta_offset);
1687 : }
1688 :
1689 0 : if (ma_is_root(pnode))
1690 : return;
1691 :
1692 : /* Go to the parent node. */
1693 0 : pnode = mte_parent(penode);
1694 0 : pmt = mas_parent_enum(mas, penode);
1695 0 : pgaps = ma_gaps(pnode, pmt);
1696 0 : offset = mte_parent_slot(penode);
1697 0 : penode = mt_mk_node(pnode, pmt);
1698 0 : goto ascend;
1699 : }
1700 :
1701 : /*
1702 : * mas_update_gap() - Update a node's gaps and propagate up if necessary.
1703 : * @mas - the maple state.
1704 : */
1705 0 : static inline void mas_update_gap(struct ma_state *mas)
1706 : {
1707 : unsigned char pslot;
1708 : unsigned long p_gap;
1709 : unsigned long max_gap;
1710 :
1711 0 : if (!mt_is_alloc(mas->tree))
1712 : return;
1713 :
1714 0 : if (mte_is_root(mas->node))
1715 : return;
1716 :
1717 0 : max_gap = mas_max_gap(mas);
1718 :
1719 0 : pslot = mte_parent_slot(mas->node);
1720 0 : p_gap = ma_gaps(mte_parent(mas->node),
1721 0 : mas_parent_enum(mas, mas->node))[pslot];
1722 :
1723 0 : if (p_gap != max_gap)
1724 0 : mas_parent_gap(mas, pslot, max_gap);
1725 : }
1726 :
1727 : /*
1728 : * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
1729 : * @parent with the slot encoded.
1730 : * @mas - the maple state (for the tree)
1731 : * @parent - the maple encoded node containing the children.
1732 : */
1733 0 : static inline void mas_adopt_children(struct ma_state *mas,
1734 : struct maple_enode *parent)
1735 : {
1736 0 : enum maple_type type = mte_node_type(parent);
1737 0 : struct maple_node *node = mas_mn(mas);
1738 0 : void __rcu **slots = ma_slots(node, type);
1739 0 : unsigned long *pivots = ma_pivots(node, type);
1740 : struct maple_enode *child;
1741 : unsigned char offset;
1742 :
1743 0 : offset = ma_data_end(node, type, pivots, mas->max);
1744 : do {
1745 0 : child = mas_slot_locked(mas, slots, offset);
1746 0 : mte_set_parent(child, parent, offset);
1747 0 : } while (offset--);
1748 0 : }
1749 :
1750 : /*
1751 : * mas_replace() - Replace a maple node in the tree with mas->node. Uses the
1752 : * parent encoding to locate the maple node in the tree.
1753 : * @mas - the ma_state to use for operations.
1754 : * @advanced - boolean to adopt the child nodes and free the old node (false) or
1755 : * leave the node (true) and handle the adoption and free elsewhere.
1756 : */
1757 0 : static inline void mas_replace(struct ma_state *mas, bool advanced)
1758 : __must_hold(mas->tree->lock)
1759 : {
1760 0 : struct maple_node *mn = mas_mn(mas);
1761 : struct maple_enode *old_enode;
1762 0 : unsigned char offset = 0;
1763 0 : void __rcu **slots = NULL;
1764 :
1765 0 : if (ma_is_root(mn)) {
1766 0 : old_enode = mas_root_locked(mas);
1767 : } else {
1768 0 : offset = mte_parent_slot(mas->node);
1769 0 : slots = ma_slots(mte_parent(mas->node),
1770 : mas_parent_enum(mas, mas->node));
1771 0 : old_enode = mas_slot_locked(mas, slots, offset);
1772 : }
1773 :
1774 0 : if (!advanced && !mte_is_leaf(mas->node))
1775 0 : mas_adopt_children(mas, mas->node);
1776 :
1777 0 : if (mte_is_root(mas->node)) {
1778 0 : mn->parent = ma_parent_ptr(
1779 : ((unsigned long)mas->tree | MA_ROOT_PARENT));
1780 0 : rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
1781 0 : mas_set_height(mas);
1782 : } else {
1783 0 : rcu_assign_pointer(slots[offset], mas->node);
1784 : }
1785 :
1786 0 : if (!advanced) {
1787 0 : mte_set_node_dead(old_enode);
1788 0 : mas_free(mas, old_enode);
1789 : }
1790 0 : }
1791 :
1792 : /*
1793 : * mas_new_child() - Find the new child of a node.
1794 : * @mas: the maple state
1795 : * @child: the maple state to store the child.
1796 : */
1797 0 : static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
1798 : __must_hold(mas->tree->lock)
1799 : {
1800 : enum maple_type mt;
1801 : unsigned char offset;
1802 : unsigned char end;
1803 : unsigned long *pivots;
1804 : struct maple_enode *entry;
1805 : struct maple_node *node;
1806 : void __rcu **slots;
1807 :
1808 0 : mt = mte_node_type(mas->node);
1809 0 : node = mas_mn(mas);
1810 0 : slots = ma_slots(node, mt);
1811 0 : pivots = ma_pivots(node, mt);
1812 0 : end = ma_data_end(node, mt, pivots, mas->max);
1813 0 : for (offset = mas->offset; offset <= end; offset++) {
1814 0 : entry = mas_slot_locked(mas, slots, offset);
1815 0 : if (mte_parent(entry) == node) {
1816 0 : *child = *mas;
1817 0 : mas->offset = offset + 1;
1818 0 : child->offset = offset;
1819 0 : mas_descend(child);
1820 0 : child->offset = 0;
1821 0 : return true;
1822 : }
1823 : }
1824 : return false;
1825 : }
1826 :
1827 : /*
1828 : * mab_shift_right() - Shift the data in mab right. Note, does not clean out the
1829 : * old data or set b_node->b_end.
1830 : * @b_node: the maple_big_node
1831 : * @shift: the shift count
1832 : */
1833 0 : static inline void mab_shift_right(struct maple_big_node *b_node,
1834 : unsigned char shift)
1835 : {
1836 0 : unsigned long size = b_node->b_end * sizeof(unsigned long);
1837 :
1838 0 : memmove(b_node->pivot + shift, b_node->pivot, size);
1839 0 : memmove(b_node->slot + shift, b_node->slot, size);
1840 0 : if (b_node->type == maple_arange_64)
1841 0 : memmove(b_node->gap + shift, b_node->gap, size);
1842 0 : }
1843 :
1844 : /*
1845 : * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data (b_node->b_end entries)
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
1850 : *
1851 : * Return: true if a middle node is required.
1852 : */
1853 : static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
1854 : unsigned char slot_count)
1855 : {
1856 0 : unsigned char size = b_node->b_end;
1857 :
1858 0 : if (size >= 2 * slot_count)
1859 : return true;
1860 :
1861 0 : if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
1862 : return true;
1863 :
1864 : return false;
1865 : }
1866 :
1867 : /*
1868 : * mab_no_null_split() - ensure the split doesn't fall on a NULL
1869 : * @b_node: the maple_big_node with the data
1870 : * @split: the suggested split location
1871 : * @slot_count: the number of slots in the node being considered.
1872 : *
1873 : * Return: the split location.
1874 : */
1875 : static inline int mab_no_null_split(struct maple_big_node *b_node,
1876 : unsigned char split, unsigned char slot_count)
1877 : {
1878 0 : if (!b_node->slot[split]) {
1879 : /*
1880 : * If the split is less than the max slot && the right side will
1881 : * still be sufficient, then increment the split on NULL.
1882 : */
1883 0 : if ((split < slot_count - 1) &&
1884 0 : (b_node->b_end - split) > (mt_min_slots[b_node->type]))
1885 0 : split++;
1886 : else
1887 0 : split--;
1888 : }
1889 0 : return split;
1890 : }
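
/*
 * Illustration of mab_no_null_split() with hypothetical numbers: given 16-slot
 * nodes and a 20-entry big node, a suggested split at offset 9 that lands on a
 * NULL slot is moved right to offset 10 while the right portion stays above
 * the minimum occupancy, and moved left to offset 8 otherwise, so that the
 * left node does not end on the NULL entry.
 */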
1891 :
1892 : /*
1893 : * mab_calc_split() - Calculate the split location and if there needs to be two
1894 : * splits.
 * @mas: The maple state
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required. 0 otherwise.
 * @min: The minimum index of the data in @bn
1897 : *
1898 : * Return: The first split location. The middle split is set in @mid_split.
1899 : */
1900 0 : static inline int mab_calc_split(struct ma_state *mas,
1901 : struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
1902 : {
1903 0 : unsigned char b_end = bn->b_end;
1904 0 : int split = b_end / 2; /* Assume equal split. */
1905 0 : unsigned char slot_min, slot_count = mt_slots[bn->type];
1906 :
1907 : /*
1908 : * To support gap tracking, all NULL entries are kept together and a node cannot
1909 : * end on a NULL entry, with the exception of the left-most leaf. The
1910 : * limitation means that the split of a node must be checked for this condition
1911 : * and be able to put more data in one direction or the other.
1912 : */
1913 0 : if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
1914 0 : *mid_split = 0;
1915 0 : split = b_end - mt_min_slots[bn->type];
1916 :
1917 0 : if (!ma_is_leaf(bn->type))
1918 : return split;
1919 :
1920 0 : mas->mas_flags |= MA_STATE_REBALANCE;
1921 0 : if (!bn->slot[split])
1922 0 : split--;
1923 : return split;
1924 : }
1925 :
1926 : /*
1927 : * Although extremely rare, it is possible to enter what is known as the 3-way
1928 : * split scenario. The 3-way split comes about by means of a store of a range
1929 : * that overwrites the end and beginning of two full nodes. The result is a set
1930 : * of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can
1931 : * also be located in different parent nodes which are also full. This can
1932 : * carry upwards all the way to the root in the worst case.
1933 : */
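/*
 * Illustration (hypothetical numbers, assuming 16-slot range64 nodes): two
 * adjacent full leaves hold 32 entries between them. A store that overwrites
 * only part of the last range in the left leaf and part of the first range in
 * the right leaf replaces those 2 entries with 3 (the truncated left range,
 * the new entry, and the truncated right range), giving 33 entries. Two nodes
 * hold at most 32, so a third (middle) node is needed.
 */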
1934 0 : if (unlikely(mab_middle_node(bn, split, slot_count))) {
1935 0 : split = b_end / 3;
1936 0 : *mid_split = split * 2;
1937 : } else {
1938 0 : slot_min = mt_min_slots[bn->type];
1939 :
1940 0 : *mid_split = 0;
1941 : /*
1942 : * Avoid having a range less than the slot count unless it
1943 : * causes one node to be deficient.
 * NOTE: mt_min_slots is 1 based, b_end and split are zero based.
1945 : */
1946 0 : while (((bn->pivot[split] - min) < slot_count - 1) &&
1947 0 : (split < slot_count - 1) && (b_end - split > slot_min))
1948 0 : split++;
1949 : }
1950 :
1951 : /* Avoid ending a node on a NULL entry */
1952 0 : split = mab_no_null_split(bn, split, slot_count);
1953 :
1954 0 : if (unlikely(*mid_split))
1955 0 : *mid_split = mab_no_null_split(bn, *mid_split, slot_count);
1956 :
1957 : return split;
1958 : }
1959 :
1960 : /*
1961 : * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
1962 : * and set @b_node->b_end to the next free slot.
1963 : * @mas: The maple state
1964 : * @mas_start: The starting slot to copy
1965 : * @mas_end: The end slot to copy (inclusively)
1966 : * @b_node: The maple_big_node to place the data
1967 : * @mab_start: The starting location in maple_big_node to store the data.
1968 : */
1969 0 : static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
1970 : unsigned char mas_end, struct maple_big_node *b_node,
1971 : unsigned char mab_start)
1972 : {
1973 : enum maple_type mt;
1974 : struct maple_node *node;
1975 : void __rcu **slots;
1976 : unsigned long *pivots, *gaps;
1977 0 : int i = mas_start, j = mab_start;
1978 : unsigned char piv_end;
1979 :
1980 0 : node = mas_mn(mas);
1981 0 : mt = mte_node_type(mas->node);
1982 0 : pivots = ma_pivots(node, mt);
1983 0 : if (!i) {
1984 0 : b_node->pivot[j] = pivots[i++];
1985 0 : if (unlikely(i > mas_end))
1986 : goto complete;
1987 0 : j++;
1988 : }
1989 :
1990 0 : piv_end = min(mas_end, mt_pivots[mt]);
1991 0 : for (; i < piv_end; i++, j++) {
1992 0 : b_node->pivot[j] = pivots[i];
1993 0 : if (unlikely(!b_node->pivot[j]))
1994 : break;
1995 :
1996 0 : if (unlikely(mas->max == b_node->pivot[j]))
1997 : goto complete;
1998 : }
1999 :
2000 0 : if (likely(i <= mas_end))
2001 0 : b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
2002 :
2003 : complete:
2004 0 : b_node->b_end = ++j;
2005 0 : j -= mab_start;
2006 0 : slots = ma_slots(node, mt);
2007 0 : memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
2008 0 : if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
2009 0 : gaps = ma_gaps(node, mt);
2010 0 : memcpy(b_node->gap + mab_start, gaps + mas_start,
2011 : sizeof(unsigned long) * j);
2012 : }
2013 0 : }
2014 :
2015 : /*
2016 : * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
2017 : * @mas: The maple state
2018 : * @node: The maple node
2019 : * @pivots: pointer to the maple node pivots
2020 : * @mt: The maple type
2021 : * @end: The assumed end
2022 : *
2023 : * Note, end may be incremented within this function but not modified at the
2024 : * source. This is fine since the metadata is the last thing to be stored in a
2025 : * node during a write.
2026 : */
2027 0 : static inline void mas_leaf_set_meta(struct ma_state *mas,
2028 : struct maple_node *node, unsigned long *pivots,
2029 : enum maple_type mt, unsigned char end)
2030 : {
2031 : /* There is no room for metadata already */
2032 0 : if (mt_pivots[mt] <= end)
2033 : return;
2034 :
2035 0 : if (pivots[end] && pivots[end] < mas->max)
2036 0 : end++;
2037 :
2038 0 : if (end < mt_slots[mt] - 1)
2039 0 : ma_set_meta(node, mt, 0, end);
2040 : }
2041 :
2042 : /*
2043 : * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
2044 : * @b_node: the maple_big_node that has the data
2045 : * @mab_start: the start location in @b_node.
2046 : * @mab_end: The end location in @b_node (inclusively)
 * @mas: The maple state with the maple encoded node.
 * @new_max: Update @mas->max to the last pivot copied when true.
2048 : */
2049 0 : static inline void mab_mas_cp(struct maple_big_node *b_node,
2050 : unsigned char mab_start, unsigned char mab_end,
2051 : struct ma_state *mas, bool new_max)
2052 : {
2053 0 : int i, j = 0;
2054 0 : enum maple_type mt = mte_node_type(mas->node);
2055 0 : struct maple_node *node = mte_to_node(mas->node);
2056 0 : void __rcu **slots = ma_slots(node, mt);
2057 0 : unsigned long *pivots = ma_pivots(node, mt);
2058 0 : unsigned long *gaps = NULL;
2059 : unsigned char end;
2060 :
2061 0 : if (mab_end - mab_start > mt_pivots[mt])
2062 0 : mab_end--;
2063 :
2064 0 : if (!pivots[mt_pivots[mt] - 1])
2065 0 : slots[mt_pivots[mt]] = NULL;
2066 :
2067 : i = mab_start;
2068 : do {
2069 0 : pivots[j++] = b_node->pivot[i++];
2070 0 : } while (i <= mab_end && likely(b_node->pivot[i]));
2071 :
2072 0 : memcpy(slots, b_node->slot + mab_start,
2073 0 : sizeof(void *) * (i - mab_start));
2074 :
2075 0 : if (new_max)
2076 0 : mas->max = b_node->pivot[i - 1];
2077 :
2078 0 : end = j - 1;
2079 0 : if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
2080 0 : unsigned long max_gap = 0;
2081 0 : unsigned char offset = 15;
2082 :
2083 0 : gaps = ma_gaps(node, mt);
2084 : do {
2085 0 : gaps[--j] = b_node->gap[--i];
2086 0 : if (gaps[j] > max_gap) {
2087 0 : offset = j;
2088 0 : max_gap = gaps[j];
2089 : }
2090 0 : } while (j);
2091 :
2092 0 : ma_set_meta(node, mt, offset, end);
2093 : } else {
2094 0 : mas_leaf_set_meta(mas, node, pivots, mt, end);
2095 : }
2096 0 : }
2097 :
2098 : /*
2099 : * mas_descend_adopt() - Descend through a sub-tree and adopt children.
2100 : * @mas: the maple state with the maple encoded node of the sub-tree.
2101 : *
 * Descend through a sub-tree and adopt children that do not have the correct
 * parents set. Only the children whose parent pointers are already correct
 * are new nodes; follow those, since they are the entries that lead to other
 * nodes with incorrectly set parents.
2106 : */
2107 0 : static inline void mas_descend_adopt(struct ma_state *mas)
2108 : {
2109 : struct ma_state list[3], next[3];
2110 : int i, n;
2111 :
2112 : /*
 * At each level there may be up to 3 correct parent pointers which indicate
2114 : * the new nodes which need to be walked to find any new nodes at a lower level.
2115 : */
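/*
 * Illustration: a spanning store rebuilds at most a left, a middle and a
 * right node per level (see mas_spanning_rebalance()), so at most three
 * entries at each level already point at their new parent; following only
 * those entries reaches every child whose parent pointer still refers to an
 * old node.
 */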
2116 :
2117 0 : for (i = 0; i < 3; i++) {
2118 0 : list[i] = *mas;
2119 0 : list[i].offset = 0;
2120 0 : next[i].offset = 0;
2121 : }
2122 0 : next[0] = *mas;
2123 :
2124 0 : while (!mte_is_leaf(list[0].node)) {
2125 : n = 0;
2126 0 : for (i = 0; i < 3; i++) {
2127 0 : if (mas_is_none(&list[i]))
2128 0 : continue;
2129 :
2130 0 : if (i && list[i-1].node == list[i].node)
2131 0 : continue;
2132 :
2133 0 : while ((n < 3) && (mas_new_child(&list[i], &next[n])))
2134 0 : n++;
2135 :
2136 0 : mas_adopt_children(&list[i], list[i].node);
2137 : }
2138 :
2139 0 : while (n < 3)
2140 0 : next[n++].node = MAS_NONE;
2141 :
2142 : /* descend by setting the list to the children */
2143 0 : for (i = 0; i < 3; i++)
2144 0 : list[i] = next[i];
2145 : }
2146 0 : }
2147 :
2148 : /*
2149 : * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
2150 : * @mas: The maple state
2151 : * @end: The maple node end
2152 : * @mt: The maple node type
2153 : */
2154 : static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
2155 : enum maple_type mt)
2156 : {
2157 0 : if (!(mas->mas_flags & MA_STATE_BULK))
2158 : return;
2159 :
2160 0 : if (mte_is_root(mas->node))
2161 : return;
2162 :
2163 0 : if (end > mt_min_slots[mt]) {
2164 0 : mas->mas_flags &= ~MA_STATE_REBALANCE;
2165 : return;
2166 : }
2167 : }
2168 :
2169 : /*
2170 : * mas_store_b_node() - Store an @entry into the b_node while also copying the
2171 : * data from a maple encoded node.
2172 : * @wr_mas: the maple write state
2173 : * @b_node: the maple_big_node to fill with data
2174 : * @offset_end: the offset to end copying
2175 : *
2176 : * Return: The actual end of the data stored in @b_node
2177 : */
2178 0 : static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
2179 : struct maple_big_node *b_node, unsigned char offset_end)
2180 : {
2181 : unsigned char slot;
2182 : unsigned char b_end;
2183 : /* Possible underflow of piv will wrap back to 0 before use. */
2184 : unsigned long piv;
2185 0 : struct ma_state *mas = wr_mas->mas;
2186 :
2187 0 : b_node->type = wr_mas->type;
2188 0 : b_end = 0;
2189 0 : slot = mas->offset;
2190 0 : if (slot) {
2191 : /* Copy start data up to insert. */
2192 0 : mas_mab_cp(mas, 0, slot - 1, b_node, 0);
2193 0 : b_end = b_node->b_end;
2194 0 : piv = b_node->pivot[b_end - 1];
2195 : } else
2196 0 : piv = mas->min - 1;
2197 :
2198 0 : if (piv + 1 < mas->index) {
2199 : /* Handle range starting after old range */
2200 0 : b_node->slot[b_end] = wr_mas->content;
2201 0 : if (!wr_mas->content)
2202 0 : b_node->gap[b_end] = mas->index - 1 - piv;
2203 0 : b_node->pivot[b_end++] = mas->index - 1;
2204 : }
2205 :
2206 : /* Store the new entry. */
2207 0 : mas->offset = b_end;
2208 0 : b_node->slot[b_end] = wr_mas->entry;
2209 0 : b_node->pivot[b_end] = mas->last;
2210 :
2211 : /* Appended. */
2212 0 : if (mas->last >= mas->max)
2213 : goto b_end;
2214 :
2215 : /* Handle new range ending before old range ends */
2216 0 : piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
2217 0 : if (piv > mas->last) {
2218 0 : if (piv == ULONG_MAX)
2219 0 : mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
2220 :
2221 0 : if (offset_end != slot)
2222 0 : wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
2223 : offset_end);
2224 :
2225 0 : b_node->slot[++b_end] = wr_mas->content;
2226 0 : if (!wr_mas->content)
2227 0 : b_node->gap[b_end] = piv - mas->last + 1;
2228 0 : b_node->pivot[b_end] = piv;
2229 : }
2230 :
2231 0 : slot = offset_end + 1;
2232 0 : if (slot > wr_mas->node_end)
2233 : goto b_end;
2234 :
2235 : /* Copy end data to the end of the node. */
2236 0 : mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
2237 0 : b_node->b_end--;
2238 0 : return;
2239 :
2240 : b_end:
2241 0 : b_node->b_end = b_end;
2242 : }
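
/*
 * Illustration of mas_store_b_node() with hypothetical ranges: a leaf holds
 * [0,9]=>A, [10,19]=>B, [20,29]=>C and the write state targets index 15 with
 * mas->last = 24. The big node is built as [0,9]=>A, the left remainder
 * [10,14]=>B, the new entry [15,24], then the right remainder [25,29]=>C,
 * followed by any untouched slots after @offset_end.
 */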
2243 :
2244 : /*
2245 : * mas_prev_sibling() - Find the previous node with the same parent.
2246 : * @mas: the maple state
2247 : *
2248 : * Return: True if there is a previous sibling, false otherwise.
2249 : */
2250 0 : static inline bool mas_prev_sibling(struct ma_state *mas)
2251 : {
2252 0 : unsigned int p_slot = mte_parent_slot(mas->node);
2253 :
2254 0 : if (mte_is_root(mas->node))
2255 : return false;
2256 :
2257 0 : if (!p_slot)
2258 : return false;
2259 :
2260 0 : mas_ascend(mas);
2261 0 : mas->offset = p_slot - 1;
2262 0 : mas_descend(mas);
2263 0 : return true;
2264 : }
2265 :
2266 : /*
2267 : * mas_next_sibling() - Find the next node with the same parent.
2268 : * @mas: the maple state
2269 : *
2270 : * Return: true if there is a next sibling, false otherwise.
2271 : */
2272 0 : static inline bool mas_next_sibling(struct ma_state *mas)
2273 : {
2274 0 : MA_STATE(parent, mas->tree, mas->index, mas->last);
2275 :
2276 0 : if (mte_is_root(mas->node))
2277 : return false;
2278 :
2279 0 : parent = *mas;
2280 0 : mas_ascend(&parent);
2281 0 : parent.offset = mte_parent_slot(mas->node) + 1;
2282 0 : if (parent.offset > mas_data_end(&parent))
2283 : return false;
2284 :
2285 0 : *mas = parent;
2286 0 : mas_descend(mas);
2287 0 : return true;
2288 : }
2289 :
2290 : /*
 * mte_node_or_none() - Return the encoded node or MAS_NONE.
2292 : * @enode: The encoded maple node.
2293 : *
2294 : * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
2295 : *
2296 : * Return: @enode or MAS_NONE
2297 : */
2298 : static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
2299 : {
2300 0 : if (enode)
2301 : return enode;
2302 :
2303 : return ma_enode_ptr(MAS_NONE);
2304 : }
2305 :
2306 : /*
2307 : * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
2308 : * @wr_mas: The maple write state
2309 : *
2310 : * Uses mas_slot_locked() and does not need to worry about dead nodes.
2311 : */
2312 0 : static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
2313 : {
2314 0 : struct ma_state *mas = wr_mas->mas;
2315 : unsigned char count, offset;
2316 :
2317 0 : if (unlikely(ma_is_dense(wr_mas->type))) {
2318 0 : wr_mas->r_max = wr_mas->r_min = mas->index;
2319 0 : mas->offset = mas->index = mas->min;
2320 0 : return;
2321 : }
2322 :
2323 0 : wr_mas->node = mas_mn(wr_mas->mas);
2324 0 : wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
2325 0 : count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
2326 : wr_mas->pivots, mas->max);
2327 0 : offset = mas->offset;
2328 :
2329 0 : while (offset < count && mas->index > wr_mas->pivots[offset])
2330 0 : offset++;
2331 :
2332 0 : wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
2333 0 : wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
2334 0 : wr_mas->offset_end = mas->offset = offset;
2335 : }
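
/*
 * Illustration of mas_wr_node_walk() with hypothetical pivots: given leaf
 * pivots {9, 19, 29, ...} and mas->index = 15, the scan stops at offset 1
 * because 15 <= 19, leaving r_min = 10 and r_max = 19 as the bounds of the
 * existing range that the write will modify.
 */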
2336 :
2337 : /*
2338 : * mas_topiary_range() - Add a range of slots to the topiary.
2339 : * @mas: The maple state
2340 : * @destroy: The topiary to add the slots (usually destroy)
2341 : * @start: The starting slot inclusively
2342 : * @end: The end slot inclusively
2343 : */
2344 0 : static inline void mas_topiary_range(struct ma_state *mas,
2345 : struct ma_topiary *destroy, unsigned char start, unsigned char end)
2346 : {
2347 : void __rcu **slots;
2348 : unsigned char offset;
2349 :
2350 0 : MT_BUG_ON(mas->tree, mte_is_leaf(mas->node));
2351 0 : slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
2352 0 : for (offset = start; offset <= end; offset++) {
2353 0 : struct maple_enode *enode = mas_slot_locked(mas, slots, offset);
2354 :
2355 0 : if (mte_dead_node(enode))
2356 0 : continue;
2357 :
2358 0 : mat_add(destroy, enode);
2359 : }
2360 0 : }
2361 :
2362 : /*
2363 : * mast_topiary() - Add the portions of the tree to the removal list; either to
2364 : * be freed or discarded (destroy walk).
2365 : * @mast: The maple_subtree_state.
2366 : */
2367 0 : static inline void mast_topiary(struct maple_subtree_state *mast)
2368 : {
2369 0 : MA_WR_STATE(wr_mas, mast->orig_l, NULL);
2370 : unsigned char r_start, r_end;
2371 : unsigned char l_start, l_end;
2372 : void __rcu **l_slots, **r_slots;
2373 :
2374 0 : wr_mas.type = mte_node_type(mast->orig_l->node);
2375 0 : mast->orig_l->index = mast->orig_l->last;
2376 0 : mas_wr_node_walk(&wr_mas);
2377 0 : l_start = mast->orig_l->offset + 1;
2378 0 : l_end = mas_data_end(mast->orig_l);
2379 0 : r_start = 0;
2380 0 : r_end = mast->orig_r->offset;
2381 :
2382 0 : if (r_end)
2383 0 : r_end--;
2384 :
2385 0 : l_slots = ma_slots(mas_mn(mast->orig_l),
2386 0 : mte_node_type(mast->orig_l->node));
2387 :
2388 0 : r_slots = ma_slots(mas_mn(mast->orig_r),
2389 0 : mte_node_type(mast->orig_r->node));
2390 :
2391 0 : if ((l_start < l_end) &&
2392 0 : mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
2393 0 : l_start++;
2394 : }
2395 :
2396 0 : if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
2397 0 : if (r_end)
2398 0 : r_end--;
2399 : }
2400 :
2401 0 : if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
2402 0 : return;
2403 :
2404 : /* At the node where left and right sides meet, add the parts between */
2405 0 : if (mast->orig_l->node == mast->orig_r->node) {
2406 0 : return mas_topiary_range(mast->orig_l, mast->destroy,
2407 : l_start, r_end);
2408 : }
2409 :
2410 : /* mast->orig_r is different and consumed. */
2411 0 : if (mte_is_leaf(mast->orig_r->node))
2412 : return;
2413 :
2414 0 : if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
2415 0 : l_end--;
2416 :
2417 :
2418 0 : if (l_start <= l_end)
2419 0 : mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);
2420 :
2421 0 : if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
2422 0 : r_start++;
2423 :
2424 0 : if (r_start <= r_end)
2425 0 : mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
2426 : }
2427 :
2428 : /*
 * mast_rebalance_next() - Rebalance against the next node (to the right) by
 * copying its data into @mast->bn.
 * @mast: The maple subtree state
2432 : */
2433 0 : static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2434 : {
2435 0 : unsigned char b_end = mast->bn->b_end;
2436 :
2437 0 : mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2438 : mast->bn, b_end);
2439 0 : mast->orig_r->last = mast->orig_r->max;
2440 0 : }
2441 :
2442 : /*
 * mast_rebalance_prev() - Rebalance against the previous node (to the left) by
 * shifting the existing data right and copying its data into @mast->bn.
 * @mast: The maple subtree state
2446 : */
2447 0 : static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2448 : {
2449 0 : unsigned char end = mas_data_end(mast->orig_l) + 1;
2450 0 : unsigned char b_end = mast->bn->b_end;
2451 :
2452 0 : mab_shift_right(mast->bn, end);
2453 0 : mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2454 0 : mast->l->min = mast->orig_l->min;
2455 0 : mast->orig_l->index = mast->orig_l->min;
2456 0 : mast->bn->b_end = end + b_end;
2457 0 : mast->l->offset += end;
2458 0 : }
2459 :
2460 : /*
 * mast_spanning_rebalance() - Rebalance nodes with the nearest neighbour,
 * favouring the node to the right. Check the nodes to the right then the left
 * at each level upwards until the root is reached. Free and destroy as needed.
 * Data is copied into @mast->bn.
2465 : * @mast: The maple_subtree_state.
2466 : */
2467 : static inline
2468 0 : bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2469 : {
2470 0 : struct ma_state r_tmp = *mast->orig_r;
2471 0 : struct ma_state l_tmp = *mast->orig_l;
2472 0 : struct maple_enode *ancestor = NULL;
2473 : unsigned char start, end;
2474 0 : unsigned char depth = 0;
2475 :
2476 0 : r_tmp = *mast->orig_r;
2477 0 : l_tmp = *mast->orig_l;
2478 : do {
2479 0 : mas_ascend(mast->orig_r);
2480 0 : mas_ascend(mast->orig_l);
2481 0 : depth++;
2482 0 : if (!ancestor &&
2483 0 : (mast->orig_r->node == mast->orig_l->node)) {
2484 0 : ancestor = mast->orig_r->node;
2485 0 : end = mast->orig_r->offset - 1;
2486 0 : start = mast->orig_l->offset + 1;
2487 : }
2488 :
2489 0 : if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2490 0 : if (!ancestor) {
2491 0 : ancestor = mast->orig_r->node;
2492 0 : start = 0;
2493 : }
2494 :
2495 0 : mast->orig_r->offset++;
2496 : do {
2497 0 : mas_descend(mast->orig_r);
2498 0 : mast->orig_r->offset = 0;
2499 0 : depth--;
2500 0 : } while (depth);
2501 :
2502 0 : mast_rebalance_next(mast);
2503 : do {
2504 0 : unsigned char l_off = 0;
2505 0 : struct maple_enode *child = r_tmp.node;
2506 :
2507 0 : mas_ascend(&r_tmp);
2508 0 : if (ancestor == r_tmp.node)
2509 0 : l_off = start;
2510 :
2511 0 : if (r_tmp.offset)
2512 0 : r_tmp.offset--;
2513 :
2514 0 : if (l_off < r_tmp.offset)
2515 0 : mas_topiary_range(&r_tmp, mast->destroy,
2516 : l_off, r_tmp.offset);
2517 :
2518 0 : if (l_tmp.node != child)
2519 0 : mat_add(mast->free, child);
2520 :
2521 0 : } while (r_tmp.node != ancestor);
2522 :
2523 0 : *mast->orig_l = l_tmp;
2524 0 : return true;
2525 :
2526 0 : } else if (mast->orig_l->offset != 0) {
2527 0 : if (!ancestor) {
2528 0 : ancestor = mast->orig_l->node;
2529 0 : end = mas_data_end(mast->orig_l);
2530 : }
2531 :
2532 0 : mast->orig_l->offset--;
2533 : do {
2534 0 : mas_descend(mast->orig_l);
2535 0 : mast->orig_l->offset =
2536 0 : mas_data_end(mast->orig_l);
2537 0 : depth--;
2538 0 : } while (depth);
2539 :
2540 0 : mast_rebalance_prev(mast);
2541 : do {
2542 : unsigned char r_off;
2543 0 : struct maple_enode *child = l_tmp.node;
2544 :
2545 0 : mas_ascend(&l_tmp);
2546 0 : if (ancestor == l_tmp.node)
2547 : r_off = end;
2548 : else
2549 0 : r_off = mas_data_end(&l_tmp);
2550 :
2551 0 : if (l_tmp.offset < r_off)
2552 0 : l_tmp.offset++;
2553 :
2554 0 : if (l_tmp.offset < r_off)
2555 0 : mas_topiary_range(&l_tmp, mast->destroy,
2556 : l_tmp.offset, r_off);
2557 :
2558 0 : if (r_tmp.node != child)
2559 0 : mat_add(mast->free, child);
2560 :
2561 0 : } while (l_tmp.node != ancestor);
2562 :
2563 0 : *mast->orig_r = r_tmp;
2564 0 : return true;
2565 : }
2566 0 : } while (!mte_is_root(mast->orig_r->node));
2567 :
2568 0 : *mast->orig_r = r_tmp;
2569 0 : *mast->orig_l = l_tmp;
2570 0 : return false;
2571 : }
2572 :
2573 : /*
2574 : * mast_ascend_free() - Add current original maple state nodes to the free list
2575 : * and ascend.
2576 : * @mast: the maple subtree state.
2577 : *
2578 : * Ascend the original left and right sides and add the previous nodes to the
2579 : * free list. Set the slots to point to the correct location in the new nodes.
2580 : */
2581 : static inline void
2582 0 : mast_ascend_free(struct maple_subtree_state *mast)
2583 : {
2584 0 : MA_WR_STATE(wr_mas, mast->orig_r, NULL);
2585 0 : struct maple_enode *left = mast->orig_l->node;
2586 0 : struct maple_enode *right = mast->orig_r->node;
2587 :
2588 0 : mas_ascend(mast->orig_l);
2589 0 : mas_ascend(mast->orig_r);
2590 0 : mat_add(mast->free, left);
2591 :
2592 0 : if (left != right)
2593 0 : mat_add(mast->free, right);
2594 :
2595 0 : mast->orig_r->offset = 0;
2596 0 : mast->orig_r->index = mast->r->max;
2597 : /* last should be larger than or equal to index */
2598 0 : if (mast->orig_r->last < mast->orig_r->index)
2599 0 : mast->orig_r->last = mast->orig_r->index;
2600 : /*
2601 : * The node may not contain the value so set slot to ensure all
 * of the node's contents are freed or destroyed.
2603 : */
2604 0 : wr_mas.type = mte_node_type(mast->orig_r->node);
2605 0 : mas_wr_node_walk(&wr_mas);
2606 : /* Set up the left side of things */
2607 0 : mast->orig_l->offset = 0;
2608 0 : mast->orig_l->index = mast->l->min;
2609 0 : wr_mas.mas = mast->orig_l;
2610 0 : wr_mas.type = mte_node_type(mast->orig_l->node);
2611 0 : mas_wr_node_walk(&wr_mas);
2612 :
2613 0 : mast->bn->type = wr_mas.type;
2614 0 : }
2615 :
2616 : /*
2617 : * mas_new_ma_node() - Create and return a new maple node. Helper function.
2618 : * @mas: the maple state with the allocations.
2619 : * @b_node: the maple_big_node with the type encoding.
2620 : *
2621 : * Use the node type from the maple_big_node to allocate a new node from the
2622 : * ma_state. This function exists mainly for code readability.
2623 : *
2624 : * Return: A new maple encoded node
2625 : */
2626 : static inline struct maple_enode
2627 : *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2628 : {
2629 0 : return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2630 : }
2631 :
2632 : /*
2633 : * mas_mab_to_node() - Set up right and middle nodes
2634 : *
2635 : * @mas: the maple state that contains the allocations.
2636 : * @b_node: the node which contains the data.
2637 : * @left: The pointer which will have the left node
2638 : * @right: The pointer which may have the right node
2639 : * @middle: the pointer which may have the middle node (rare)
 * @mid_split: the split location for the middle node
 * @min: the minimum index of the data in @b_node
2641 : *
2642 : * Return: the split of left.
2643 : */
2644 0 : static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2645 : struct maple_big_node *b_node, struct maple_enode **left,
2646 : struct maple_enode **right, struct maple_enode **middle,
2647 : unsigned char *mid_split, unsigned long min)
2648 : {
2649 0 : unsigned char split = 0;
2650 0 : unsigned char slot_count = mt_slots[b_node->type];
2651 :
2652 0 : *left = mas_new_ma_node(mas, b_node);
2653 0 : *right = NULL;
2654 0 : *middle = NULL;
2655 0 : *mid_split = 0;
2656 :
2657 0 : if (b_node->b_end < slot_count) {
2658 : split = b_node->b_end;
2659 : } else {
2660 0 : split = mab_calc_split(mas, b_node, mid_split, min);
2661 0 : *right = mas_new_ma_node(mas, b_node);
2662 : }
2663 :
2664 0 : if (*mid_split)
2665 0 : *middle = mas_new_ma_node(mas, b_node);
2666 :
2667 0 : return split;
2668 :
2669 : }
2670 :
2671 : /*
2672 : * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2673 : * pointer.
 * @b_node: the big node to add the entry
 * @mas: the maple state to get the pivot (mas->max)
 * @entry: the entry to add, if NULL nothing happens.
2677 : */
2678 0 : static inline void mab_set_b_end(struct maple_big_node *b_node,
2679 : struct ma_state *mas,
2680 : void *entry)
2681 : {
2682 0 : if (!entry)
2683 : return;
2684 :
2685 0 : b_node->slot[b_node->b_end] = entry;
2686 0 : if (mt_is_alloc(mas->tree))
2687 0 : b_node->gap[b_node->b_end] = mas_max_gap(mas);
2688 0 : b_node->pivot[b_node->b_end++] = mas->max;
2689 : }
2690 :
2691 : /*
2692 : * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
2693 : * of @mas->node to either @left or @right, depending on @slot and @split
2694 : *
 * @mas: the maple state with the node that needs a parent
 * @left: possible parent 1
 * @right: possible parent 2
 * @slot: the slot the mas->node was placed in
 * @split: the split location between @left and @right
2700 : */
2701 0 : static inline void mas_set_split_parent(struct ma_state *mas,
2702 : struct maple_enode *left,
2703 : struct maple_enode *right,
2704 : unsigned char *slot, unsigned char split)
2705 : {
2706 0 : if (mas_is_none(mas))
2707 : return;
2708 :
2709 0 : if ((*slot) <= split)
2710 0 : mte_set_parent(mas->node, left, *slot);
2711 0 : else if (right)
2712 0 : mte_set_parent(mas->node, right, (*slot) - split - 1);
2713 :
2714 0 : (*slot)++;
2715 : }
2716 :
2717 : /*
2718 : * mte_mid_split_check() - Check if the next node passes the mid-split
 * @l: Pointer to the left encoded maple node.
 * @r: Pointer to the right encoded maple node.
 * @right: The encoded maple node to switch to once the mid-split is passed.
 * @slot: The offset being checked.
 * @split: Pointer to the split location.
 * @mid_split: The middle split.
2725 : */
2726 : static inline void mte_mid_split_check(struct maple_enode **l,
2727 : struct maple_enode **r,
2728 : struct maple_enode *right,
2729 : unsigned char slot,
2730 : unsigned char *split,
2731 : unsigned char mid_split)
2732 : {
2733 0 : if (*r == right)
2734 : return;
2735 :
2736 0 : if (slot < mid_split)
2737 : return;
2738 :
2739 0 : *l = *r;
2740 0 : *r = right;
2741 0 : *split = mid_split;
2742 : }
2743 :
2744 : /*
 * mast_set_split_parents() - Helper function to set three nodes' parents. Slot
 * is taken from @mast->l.
 * @mast: the maple subtree state
 * @left: the left node
 * @middle: the middle node (may be NULL)
 * @right: the right node
 * @split: the split location
 * @mid_split: the middle split location, if any.
2751 : */
2752 0 : static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2753 : struct maple_enode *left,
2754 : struct maple_enode *middle,
2755 : struct maple_enode *right,
2756 : unsigned char split,
2757 : unsigned char mid_split)
2758 : {
2759 : unsigned char slot;
2760 0 : struct maple_enode *l = left;
2761 0 : struct maple_enode *r = right;
2762 :
2763 0 : if (mas_is_none(mast->l))
2764 0 : return;
2765 :
2766 0 : if (middle)
2767 0 : r = middle;
2768 :
2769 0 : slot = mast->l->offset;
2770 :
2771 0 : mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2772 0 : mas_set_split_parent(mast->l, l, r, &slot, split);
2773 :
2774 0 : mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2775 0 : mas_set_split_parent(mast->m, l, r, &slot, split);
2776 :
2777 0 : mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2778 0 : mas_set_split_parent(mast->r, l, r, &slot, split);
2779 : }
2780 :
2781 : /*
2782 : * mas_wmb_replace() - Write memory barrier and replace
2783 : * @mas: The maple state
2784 : * @free: the maple topiary list of nodes to free
2785 : * @destroy: The maple topiary list of nodes to destroy (walk and free)
2786 : *
2787 : * Updates gap as necessary.
2788 : */
2789 0 : static inline void mas_wmb_replace(struct ma_state *mas,
2790 : struct ma_topiary *free,
2791 : struct ma_topiary *destroy)
2792 : {
2793 : /* All nodes must see old data as dead prior to replacing that data */
2794 0 : smp_wmb(); /* Needed for RCU */
2795 :
2796 : /* Insert the new data in the tree */
2797 0 : mas_replace(mas, true);
2798 :
2799 0 : if (!mte_is_leaf(mas->node))
2800 0 : mas_descend_adopt(mas);
2801 :
2802 0 : mas_mat_free(mas, free);
2803 :
2804 0 : if (destroy)
2805 : mas_mat_destroy(mas, destroy);
2806 :
2807 0 : if (mte_is_leaf(mas->node))
2808 : return;
2809 :
2810 0 : mas_update_gap(mas);
2811 : }
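
/*
 * Ordering sketch: the smp_wmb() above pairs with the RCU read side so that
 * any prior writes marking old nodes as dead are visible before
 * rcu_assign_pointer() publishes the replacement. A lockless reader (for
 * example mtree_load(), which takes its own RCU read lock) that still lands
 * on a dead node detects it and retries the walk instead of returning stale
 * data.
 */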
2812 :
2813 : /*
2814 : * mast_new_root() - Set a new tree root during subtree creation
2815 : * @mast: The maple subtree state
2816 : * @mas: The maple state
2817 : */
2818 0 : static inline void mast_new_root(struct maple_subtree_state *mast,
2819 : struct ma_state *mas)
2820 : {
2821 0 : mas_mn(mast->l)->parent =
2822 0 : ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
2823 0 : if (!mte_dead_node(mast->orig_l->node) &&
2824 0 : !mte_is_root(mast->orig_l->node)) {
2825 : do {
2826 0 : mast_ascend_free(mast);
2827 0 : mast_topiary(mast);
2828 0 : } while (!mte_is_root(mast->orig_l->node));
2829 : }
2830 0 : if ((mast->orig_l->node != mas->node) &&
2831 0 : (mast->l->depth > mas_mt_height(mas))) {
2832 0 : mat_add(mast->free, mas->node);
2833 : }
2834 0 : }
2835 :
2836 : /*
2837 : * mast_cp_to_nodes() - Copy data out to nodes.
2838 : * @mast: The maple subtree state
2839 : * @left: The left encoded maple node
2840 : * @middle: The middle encoded maple node
2841 : * @right: The right encoded maple node
2842 : * @split: The location to split between left and (middle ? middle : right)
2843 : * @mid_split: The location to split between middle and right.
2844 : */
2845 0 : static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2846 : struct maple_enode *left, struct maple_enode *middle,
2847 : struct maple_enode *right, unsigned char split, unsigned char mid_split)
2848 : {
2849 0 : bool new_lmax = true;
2850 :
2851 0 : mast->l->node = mte_node_or_none(left);
2852 0 : mast->m->node = mte_node_or_none(middle);
2853 0 : mast->r->node = mte_node_or_none(right);
2854 :
2855 0 : mast->l->min = mast->orig_l->min;
2856 0 : if (split == mast->bn->b_end) {
2857 0 : mast->l->max = mast->orig_r->max;
2858 0 : new_lmax = false;
2859 : }
2860 :
2861 0 : mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2862 :
2863 0 : if (middle) {
2864 0 : mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2865 0 : mast->m->min = mast->bn->pivot[split] + 1;
2866 0 : split = mid_split;
2867 : }
2868 :
2869 0 : mast->r->max = mast->orig_r->max;
2870 0 : if (right) {
2871 0 : mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2872 0 : mast->r->min = mast->bn->pivot[split] + 1;
2873 : }
2874 0 : }
2875 :
2876 : /*
 * mast_combine_cp_left() - Copy in the original left side of the tree into the
2878 : * combined data set in the maple subtree state big node.
2879 : * @mast: The maple subtree state
2880 : */
2881 : static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2882 : {
2883 0 : unsigned char l_slot = mast->orig_l->offset;
2884 :
2885 0 : if (!l_slot)
2886 : return;
2887 :
2888 0 : mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2889 : }
2890 :
2891 : /*
 * mast_combine_cp_right() - Copy in the original right side of the tree into the
2893 : * combined data set in the maple subtree state big node.
2894 : * @mast: The maple subtree state
2895 : */
2896 0 : static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2897 : {
2898 0 : if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2899 : return;
2900 :
2901 0 : mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2902 0 : mt_slot_count(mast->orig_r->node), mast->bn,
2903 : mast->bn->b_end);
2904 0 : mast->orig_r->last = mast->orig_r->max;
2905 : }
2906 :
2907 : /*
 * mast_sufficient() - Check if the maple subtree state has enough data in the big
2909 : * node to create at least one sufficient node
2910 : * @mast: the maple subtree state
2911 : */
2912 : static inline bool mast_sufficient(struct maple_subtree_state *mast)
2913 : {
2914 0 : if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2915 : return true;
2916 :
2917 : return false;
2918 : }
2919 :
2920 : /*
 * mast_overflow() - Check if there is too much data in the subtree state for a
2922 : * single node.
2923 : * @mast: The maple subtree state
2924 : */
2925 : static inline bool mast_overflow(struct maple_subtree_state *mast)
2926 : {
2927 0 : if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2928 : return true;
2929 :
2930 : return false;
2931 : }
2932 :
2933 0 : static inline void *mtree_range_walk(struct ma_state *mas)
2934 : {
2935 : unsigned long *pivots;
2936 : unsigned char offset;
2937 : struct maple_node *node;
2938 : struct maple_enode *next, *last;
2939 : enum maple_type type;
2940 : void __rcu **slots;
2941 : unsigned char end;
2942 : unsigned long max, min;
2943 : unsigned long prev_max, prev_min;
2944 :
2945 0 : next = mas->node;
2946 0 : min = mas->min;
2947 0 : max = mas->max;
2948 : do {
2949 0 : offset = 0;
2950 0 : last = next;
2951 0 : node = mte_to_node(next);
2952 0 : type = mte_node_type(next);
2953 0 : pivots = ma_pivots(node, type);
2954 0 : end = ma_data_end(node, type, pivots, max);
2955 0 : if (unlikely(ma_dead_node(node)))
2956 : goto dead_node;
2957 :
2958 0 : if (pivots[offset] >= mas->index) {
2959 : prev_max = max;
2960 : prev_min = min;
2961 : max = pivots[offset];
2962 : goto next;
2963 : }
2964 :
2965 : do {
2966 0 : offset++;
2967 0 : } while ((offset < end) && (pivots[offset] < mas->index));
2968 :
2969 0 : prev_min = min;
2970 0 : min = pivots[offset - 1] + 1;
2971 0 : prev_max = max;
2972 0 : if (likely(offset < end && pivots[offset]))
2973 0 : max = pivots[offset];
2974 :
2975 : next:
2976 0 : slots = ma_slots(node, type);
2977 0 : next = mt_slot(mas->tree, slots, offset);
2978 0 : if (unlikely(ma_dead_node(node)))
2979 : goto dead_node;
2980 0 : } while (!ma_is_leaf(type));
2981 :
2982 0 : mas->offset = offset;
2983 0 : mas->index = min;
2984 0 : mas->last = max;
2985 0 : mas->min = prev_min;
2986 0 : mas->max = prev_max;
2987 0 : mas->node = last;
2988 0 : return (void *)next;
2989 :
2990 : dead_node:
2991 0 : mas_reset(mas);
2992 0 : return NULL;
2993 : }
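
/*
 * Illustration of mtree_range_walk() with a hypothetical layout: searching
 * for index 150 in a two-level tree whose root pivots are {99, 199, ULONG_MAX}
 * descends through slot 1 with min = 100 and max = 199. At the leaf,
 * mas->index and mas->last are narrowed to the bounds of the range containing
 * 150, while mas->min and mas->max keep the limits of the leaf itself.
 */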
2994 :
2995 : /*
2996 : * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
2997 : * @mas: The starting maple state
2998 : * @mast: The maple_subtree_state, keeps track of 4 maple states.
2999 : * @count: The estimated count of iterations needed.
3000 : *
3001 : * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
3002 : * is hit. First @b_node is split into two entries which are inserted into the
3003 : * next iteration of the loop. @b_node is returned populated with the final
3004 : * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
3005 : * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
 * to account for what has been copied into the new sub-tree. The update of
3007 : * orig_l_mas->last is used in mas_consume to find the slots that will need to
3008 : * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
3009 : * the new sub-tree in case the sub-tree becomes the full tree.
3010 : *
3011 : * Return: the number of elements in b_node during the last loop.
3012 : */
3013 0 : static int mas_spanning_rebalance(struct ma_state *mas,
3014 : struct maple_subtree_state *mast, unsigned char count)
3015 : {
3016 : unsigned char split, mid_split;
3017 0 : unsigned char slot = 0;
3018 0 : struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
3019 :
3020 0 : MA_STATE(l_mas, mas->tree, mas->index, mas->index);
3021 0 : MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3022 0 : MA_STATE(m_mas, mas->tree, mas->index, mas->index);
3023 0 : MA_TOPIARY(free, mas->tree);
3024 0 : MA_TOPIARY(destroy, mas->tree);
3025 :
3026 : /*
3027 : * The tree needs to be rebalanced and leaves need to be kept at the same level.
 * Rebalancing is done by use of the ``struct ma_topiary``.
3029 : */
3030 0 : mast->l = &l_mas;
3031 0 : mast->m = &m_mas;
3032 0 : mast->r = &r_mas;
3033 0 : mast->free = &free;
3034 0 : mast->destroy = &destroy;
3035 0 : l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
3036 :
/* If this is not the root and the data is insufficient, rebalance first. */
3038 0 : if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
3039 0 : unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
3040 0 : mast_spanning_rebalance(mast);
3041 :
3042 0 : mast->orig_l->depth = 0;
3043 :
3044 : /*
 * Each level of the tree is examined and balanced, pushing data to the left or
 * right, or rebalancing against the left or right nodes, to avoid rippling up
 * the tree and to limit the amount of churn. Once a new sub-section of the
 * tree is created, there may be a mix of new and old nodes. The old nodes
 * will have incorrect parent pointers and will currently be in two trees: the
 * original tree and the partially new tree. To remedy the parent pointers in
 * the old tree, the new data is swapped into the active tree and a walk down
 * the tree is performed to update the parent pointers.
 * See mas_descend_adopt() for more information.
3054 : */
3055 0 : while (count--) {
3056 0 : mast->bn->b_end--;
3057 0 : mast->bn->type = mte_node_type(mast->orig_l->node);
3058 0 : split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
3059 0 : &mid_split, mast->orig_l->min);
3060 0 : mast_set_split_parents(mast, left, middle, right, split,
3061 : mid_split);
3062 0 : mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
3063 :
3064 : /*
3065 : * Copy data from next level in the tree to mast->bn from next
3066 : * iteration
3067 : */
3068 0 : memset(mast->bn, 0, sizeof(struct maple_big_node));
3069 0 : mast->bn->type = mte_node_type(left);
3070 0 : mast->orig_l->depth++;
3071 :
3072 : /* Root already stored in l->node. */
3073 0 : if (mas_is_root_limits(mast->l))
3074 : goto new_root;
3075 :
3076 0 : mast_ascend_free(mast);
3077 0 : mast_combine_cp_left(mast);
3078 0 : l_mas.offset = mast->bn->b_end;
3079 0 : mab_set_b_end(mast->bn, &l_mas, left);
3080 0 : mab_set_b_end(mast->bn, &m_mas, middle);
3081 0 : mab_set_b_end(mast->bn, &r_mas, right);
3082 :
3083 : /* Copy anything necessary out of the right node. */
3084 0 : mast_combine_cp_right(mast);
3085 0 : mast_topiary(mast);
3086 0 : mast->orig_l->last = mast->orig_l->max;
3087 :
3088 0 : if (mast_sufficient(mast))
3089 0 : continue;
3090 :
3091 0 : if (mast_overflow(mast))
3092 0 : continue;
3093 :
3094 : /* May be a new root stored in mast->bn */
3095 0 : if (mas_is_root_limits(mast->orig_l))
3096 : break;
3097 :
3098 0 : mast_spanning_rebalance(mast);
3099 :
3100 : /* rebalancing from other nodes may require another loop. */
3101 0 : if (!count)
3102 0 : count++;
3103 : }
3104 :
3105 0 : l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
3106 0 : mte_node_type(mast->orig_l->node));
3107 0 : mast->orig_l->depth++;
3108 0 : mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
3109 0 : mte_set_parent(left, l_mas.node, slot);
3110 0 : if (middle)
3111 0 : mte_set_parent(middle, l_mas.node, ++slot);
3112 :
3113 0 : if (right)
3114 0 : mte_set_parent(right, l_mas.node, ++slot);
3115 :
3116 0 : if (mas_is_root_limits(mast->l)) {
3117 : new_root:
3118 0 : mast_new_root(mast, mas);
3119 : } else {
3120 0 : mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
3121 : }
3122 :
3123 0 : if (!mte_dead_node(mast->orig_l->node))
3124 0 : mat_add(&free, mast->orig_l->node);
3125 :
3126 0 : mas->depth = mast->orig_l->depth;
3127 0 : *mast->orig_l = l_mas;
3128 0 : mte_set_node_dead(mas->node);
3129 :
3130 : /* Set up mas for insertion. */
3131 0 : mast->orig_l->depth = mas->depth;
3132 0 : mast->orig_l->alloc = mas->alloc;
3133 0 : *mas = *mast->orig_l;
3134 0 : mas_wmb_replace(mas, &free, &destroy);
3135 0 : mtree_range_walk(mas);
3136 0 : return mast->bn->b_end;
3137 : }
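
/*
 * Usage sketch (hypothetical values): a single store spanning several
 * existing ranges is what drives this path once the tree has grown beyond a
 * single node, e.g.
 *
 *	DEFINE_MTREE(tree);
 *
 *	mtree_store_range(&tree, 0, 9, ptr_a, GFP_KERNEL);
 *	mtree_store_range(&tree, 10, 19, ptr_b, GFP_KERNEL);
 *	...
 *	mtree_store_range(&tree, 5, 95, ptr_new, GFP_KERNEL);
 *
 * The final call overwrites the tail of the first range, all of the middle
 * ranges, and the head of the last one, so sub-trees on both sides of the
 * span must be rebuilt and rebalanced rather than a single node rewritten.
 */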
3138 :
3139 : /*
3140 : * mas_rebalance() - Rebalance a given node.
3141 : * @mas: The maple state
3142 : * @b_node: The big maple node.
3143 : *
3144 : * Rebalance two nodes into a single node or two new nodes that are sufficient.
3145 : * Continue upwards until tree is sufficient.
3146 : *
3147 : * Return: the number of elements in b_node during the last loop.
3148 : */
3149 0 : static inline int mas_rebalance(struct ma_state *mas,
3150 : struct maple_big_node *b_node)
3151 : {
3152 0 : char empty_count = mas_mt_height(mas);
3153 : struct maple_subtree_state mast;
3154 0 : unsigned char shift, b_end = ++b_node->b_end;
3155 :
3156 0 : MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3157 0 : MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3158 :
3159 0 : trace_ma_op(__func__, mas);
3160 :
3161 : /*
3162 : * Rebalancing occurs if a node is insufficient. Data is rebalanced
3163 : * against the node to the right if it exists, otherwise the node to the
3164 : * left of this node is rebalanced against this node. If rebalancing
3165 : * causes just one node to be produced instead of two, then the parent
3166 : * is also examined and rebalanced if it is insufficient. Every level
3167 : * tries to combine the data in the same way. If one node contains the
3168 : * entire range of the tree, then that node is used as a new root node.
3169 : */
3170 0 : mas_node_count(mas, 1 + empty_count * 3);
3171 0 : if (mas_is_err(mas))
3172 : return 0;
3173 :
3174 0 : mast.orig_l = &l_mas;
3175 0 : mast.orig_r = &r_mas;
3176 0 : mast.bn = b_node;
3177 0 : mast.bn->type = mte_node_type(mas->node);
3178 :
3179 0 : l_mas = r_mas = *mas;
3180 :
3181 0 : if (mas_next_sibling(&r_mas)) {
3182 0 : mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
3183 0 : r_mas.last = r_mas.index = r_mas.max;
3184 : } else {
3185 0 : mas_prev_sibling(&l_mas);
3186 0 : shift = mas_data_end(&l_mas) + 1;
3187 0 : mab_shift_right(b_node, shift);
3188 0 : mas->offset += shift;
3189 0 : mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
3190 0 : b_node->b_end = shift + b_end;
3191 0 : l_mas.index = l_mas.last = l_mas.min;
3192 : }
3193 :
3194 0 : return mas_spanning_rebalance(mas, &mast, empty_count);
3195 : }
3196 :
3197 : /*
3198 : * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
3199 : * state.
3200 : * @mas: The maple state
3201 : * @end: The end of the left-most node.
3202 : *
3203 : * During a mass-insert event (such as forking), it may be necessary to
3204 : * rebalance the left-most node when it is not sufficient.
3205 : */
3206 0 : static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3207 : {
3208 0 : enum maple_type mt = mte_node_type(mas->node);
3209 : struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3210 : struct maple_enode *eparent;
3211 0 : unsigned char offset, tmp, split = mt_slots[mt] / 2;
3212 : void __rcu **l_slots, **slots;
3213 : unsigned long *l_pivs, *pivs, gap;
3214 0 : bool in_rcu = mt_in_rcu(mas->tree);
3215 :
3216 : MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3217 :
3218 0 : l_mas = *mas;
3219 0 : mas_prev_sibling(&l_mas);
3220 :
3221 : /* set up node. */
3222 0 : if (in_rcu) {
3223 : /* Allocate for both left and right as well as parent. */
3224 0 : mas_node_count(mas, 3);
3225 0 : if (mas_is_err(mas))
3226 0 : return;
3227 :
3228 0 : newnode = mas_pop_node(mas);
3229 : } else {
3230 : newnode = &reuse;
3231 : }
3232 :
3233 0 : node = mas_mn(mas);
3234 0 : newnode->parent = node->parent;
3235 0 : slots = ma_slots(newnode, mt);
3236 0 : pivs = ma_pivots(newnode, mt);
3237 0 : left = mas_mn(&l_mas);
3238 0 : l_slots = ma_slots(left, mt);
3239 0 : l_pivs = ma_pivots(left, mt);
3240 0 : if (!l_slots[split])
3241 0 : split++;
3242 0 : tmp = mas_data_end(&l_mas) - split;
3243 :
3244 0 : memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3245 0 : memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3246 0 : pivs[tmp] = l_mas.max;
3247 0 : memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3248 0 : memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3249 :
3250 0 : l_mas.max = l_pivs[split];
3251 0 : mas->min = l_mas.max + 1;
3252 0 : eparent = mt_mk_node(mte_parent(l_mas.node),
3253 : mas_parent_enum(&l_mas, l_mas.node));
3254 0 : tmp += end;
3255 0 : if (!in_rcu) {
3256 0 : unsigned char max_p = mt_pivots[mt];
3257 0 : unsigned char max_s = mt_slots[mt];
3258 :
3259 0 : if (tmp < max_p)
3260 0 : memset(pivs + tmp, 0,
3261 0 : sizeof(unsigned long) * (max_p - tmp));
3262 :
3263 0 : if (tmp < mt_slots[mt])
3264 0 : memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3265 :
3266 0 : memcpy(node, newnode, sizeof(struct maple_node));
3267 0 : ma_set_meta(node, mt, 0, tmp - 1);
3268 0 : mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
3269 : l_pivs[split]);
3270 :
3271 : /* Remove data from l_pivs. */
3272 0 : tmp = split + 1;
3273 0 : memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3274 0 : memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3275 0 : ma_set_meta(left, mt, 0, split);
3276 :
3277 : goto done;
3278 : }
3279 :
3280 : /* RCU requires replacing both l_mas, mas, and parent. */
3281 0 : mas->node = mt_mk_node(newnode, mt);
3282 0 : ma_set_meta(newnode, mt, 0, tmp);
3283 :
3284 0 : new_left = mas_pop_node(mas);
3285 0 : new_left->parent = left->parent;
3286 0 : mt = mte_node_type(l_mas.node);
3287 0 : slots = ma_slots(new_left, mt);
3288 0 : pivs = ma_pivots(new_left, mt);
3289 0 : memcpy(slots, l_slots, sizeof(void *) * split);
3290 0 : memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3291 0 : ma_set_meta(new_left, mt, 0, split);
3292 0 : l_mas.node = mt_mk_node(new_left, mt);
3293 :
3294 : /* replace parent. */
3295 0 : offset = mte_parent_slot(mas->node);
3296 0 : mt = mas_parent_enum(&l_mas, l_mas.node);
3297 0 : parent = mas_pop_node(mas);
3298 0 : slots = ma_slots(parent, mt);
3299 0 : pivs = ma_pivots(parent, mt);
3300 0 : memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node));
3301 0 : rcu_assign_pointer(slots[offset], mas->node);
3302 0 : rcu_assign_pointer(slots[offset - 1], l_mas.node);
3303 0 : pivs[offset - 1] = l_mas.max;
3304 0 : eparent = mt_mk_node(parent, mt);
3305 : done:
3306 0 : gap = mas_leaf_max_gap(mas);
3307 0 : mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3308 0 : gap = mas_leaf_max_gap(&l_mas);
3309 0 : mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3310 0 : mas_ascend(mas);
3311 :
3312 0 : if (in_rcu)
3313 0 : mas_replace(mas, false);
3314 :
3315 0 : mas_update_gap(mas);
3316 : }
3317 :
3318 : /*
3319 : * mas_split_final_node() - Split the final node in a subtree operation.
3320 : * @mast: the maple subtree state
3321 : * @mas: The maple state
3322 : * @height: The height of the tree in case it's a new root.
3323 : */
3324 0 : static inline bool mas_split_final_node(struct maple_subtree_state *mast,
3325 : struct ma_state *mas, int height)
3326 : {
3327 : struct maple_enode *ancestor;
3328 :
3329 0 : if (mte_is_root(mas->node)) {
3330 0 : if (mt_is_alloc(mas->tree))
3331 0 : mast->bn->type = maple_arange_64;
3332 : else
3333 0 : mast->bn->type = maple_range_64;
3334 0 : mas->depth = height;
3335 : }
3336 : /*
3337 : * Only a single node is used here, could be root.
3338 : * The Big_node data should just fit in a single node.
3339 : */
3340 0 : ancestor = mas_new_ma_node(mas, mast->bn);
3341 0 : mte_set_parent(mast->l->node, ancestor, mast->l->offset);
3342 0 : mte_set_parent(mast->r->node, ancestor, mast->r->offset);
3343 0 : mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3344 :
3345 0 : mast->l->node = ancestor;
3346 0 : mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3347 0 : mas->offset = mast->bn->b_end - 1;
3348 0 : return true;
3349 : }
3350 :
3351 : /*
3352 : * mast_fill_bnode() - Copy data into the big node in the subtree state
3353 : * @mast: The maple subtree state
3354 : * @mas: the maple state
3355 : * @skip: The number of entries to skip for new nodes insertion.
3356 : */
3357 0 : static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3358 : struct ma_state *mas,
3359 : unsigned char skip)
3360 : {
3361 0 : bool cp = true;
3362 0 : struct maple_enode *old = mas->node;
3363 : unsigned char split;
3364 :
3365 0 : memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3366 0 : memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
3367 0 : memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3368 0 : mast->bn->b_end = 0;
3369 :
3370 0 : if (mte_is_root(mas->node)) {
3371 : cp = false;
3372 : } else {
3373 0 : mas_ascend(mas);
3374 0 : mat_add(mast->free, old);
3375 0 : mas->offset = mte_parent_slot(mas->node);
3376 : }
3377 :
3378 0 : if (cp && mast->l->offset)
3379 0 : mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3380 :
3381 0 : split = mast->bn->b_end;
3382 0 : mab_set_b_end(mast->bn, mast->l, mast->l->node);
3383 0 : mast->r->offset = mast->bn->b_end;
3384 0 : mab_set_b_end(mast->bn, mast->r, mast->r->node);
3385 0 : if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3386 0 : cp = false;
3387 :
3388 0 : if (cp)
3389 0 : mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3390 : mast->bn, mast->bn->b_end);
3391 :
3392 0 : mast->bn->b_end--;
3393 0 : mast->bn->type = mte_node_type(mas->node);
3394 0 : }
3395 :
3396 : /*
3397 : * mast_split_data() - Split the data in the subtree state big node into regular
3398 : * nodes.
3399 : * @mast: The maple subtree state
3400 : * @mas: The maple state
3401 : * @split: The location to split the big node
3402 : */
3403 0 : static inline void mast_split_data(struct maple_subtree_state *mast,
3404 : struct ma_state *mas, unsigned char split)
3405 : {
3406 : unsigned char p_slot;
3407 :
3408 0 : mab_mas_cp(mast->bn, 0, split, mast->l, true);
3409 0 : mte_set_pivot(mast->r->node, 0, mast->r->max);
3410 0 : mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3411 0 : mast->l->offset = mte_parent_slot(mas->node);
3412 0 : mast->l->max = mast->bn->pivot[split];
3413 0 : mast->r->min = mast->l->max + 1;
3414 0 : if (mte_is_leaf(mas->node))
3415 0 : return;
3416 :
3417 0 : p_slot = mast->orig_l->offset;
3418 0 : mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3419 : &p_slot, split);
3420 0 : mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3421 : &p_slot, split);
3422 : }
3423 :
3424 : /*
3425 : * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3426 : * data to the right or left node if there is room.
3427 : * @mas: The maple state
3428 : * @height: The current height of the maple state
3429 : * @mast: The maple subtree state
3430 : * @left: Push left or not.
3431 : *
3432 : * Keeping the height of the tree low means faster lookups.
3433 : *
3434 : * Return: True if pushed, false otherwise.
3435 : */
3436 0 : static inline bool mas_push_data(struct ma_state *mas, int height,
3437 : struct maple_subtree_state *mast, bool left)
3438 : {
3439 0 : unsigned char slot_total = mast->bn->b_end;
3440 : unsigned char end, space, split;
3441 :
3442 : MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3443 0 : tmp_mas = *mas;
3444 0 : tmp_mas.depth = mast->l->depth;
3445 :
3446 0 : if (left && !mas_prev_sibling(&tmp_mas))
3447 : return false;
3448 0 : else if (!left && !mas_next_sibling(&tmp_mas))
3449 : return false;
3450 :
3451 0 : end = mas_data_end(&tmp_mas);
3452 0 : slot_total += end;
3453 0 : space = 2 * mt_slot_count(mas->node) - 2;
3454 : /* -2 instead of -1 to ensure there isn't a triple split */
3455 0 : if (ma_is_leaf(mast->bn->type))
3456 0 : space--;
3457 :
3458 0 : if (mas->max == ULONG_MAX)
3459 0 : space--;
3460 :
3461 0 : if (slot_total >= space)
3462 : return false;
3463 :
3464 : /* Get the data; Fill mast->bn */
3465 0 : mast->bn->b_end++;
3466 0 : if (left) {
3467 0 : mab_shift_right(mast->bn, end + 1);
3468 0 : mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3469 0 : mast->bn->b_end = slot_total + 1;
3470 : } else {
3471 0 : mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3472 : }
3473 :
3474 : /* Configure mast for splitting of mast->bn */
3475 0 : split = mt_slots[mast->bn->type] - 2;
3476 0 : if (left) {
3477 : /* Switch mas to prev node */
3478 0 : mat_add(mast->free, mas->node);
3479 0 : *mas = tmp_mas;
3480 : /* Start using mast->l for the left side. */
3481 0 : tmp_mas.node = mast->l->node;
3482 0 : *mast->l = tmp_mas;
3483 : } else {
3484 0 : mat_add(mast->free, tmp_mas.node);
3485 0 : tmp_mas.node = mast->r->node;
3486 0 : *mast->r = tmp_mas;
3487 0 : split = slot_total - split;
3488 : }
3489 0 : split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3490 : /* Update parent slot for split calculation. */
3491 0 : if (left)
3492 0 : mast->orig_l->offset += end + 1;
3493 :
3494 0 : mast_split_data(mast, mas, split);
3495 0 : mast_fill_bnode(mast, mas, 2);
3496 0 : mas_split_final_node(mast, mas, height + 1);
3497 0 : return true;
3498 : }
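
/*
 * Illustration (hypothetical counts): when a leaf overflows but its left or
 * right sibling is lightly used, the sibling's entries plus the overflowing
 * data still fit in two nodes with slack to spare. The combined data is
 * gathered in the big node and re-split across the two existing siblings, so
 * only their shared parent needs updating and the split stops rippling up the
 * tree.
 */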
3499 :
3500 : /*
3501 : * mas_split() - Split data that is too big for one node into two.
3502 : * @mas: The maple state
3503 : * @b_node: The maple big node
3504 : * Return: 1 on success, 0 on failure.
3505 : */
3506 0 : static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
3507 : {
3508 : struct maple_subtree_state mast;
3509 0 : int height = 0;
3510 0 : unsigned char mid_split, split = 0;
3511 :
3512 : /*
3513 : * Splitting is handled differently from any other B-tree; the Maple
3514 : * Tree splits upwards. Splitting up means that the split operation
3515 : * occurs when the walk of the tree hits the leaves and not on the way
3516 : * down. The reason for splitting up is that it is impossible to know
3517 : * how much space will be needed until the leaf is (or leaves are)
3518 : * reached. Since overwriting data is allowed and a range could
3519 : * overwrite more than one range or result in changing one entry into 3
3520 : * entries, it is impossible to know if a split is required until the
3521 : * data is examined.
3522 : *
3523 : * Splitting is a balancing act between keeping allocations to a minimum
3524 : * and avoiding a 'jitter' event where a tree is expanded to make room
3525 : * for an entry followed by a contraction when the entry is removed. To
3526 : * accomplish the balance, there are empty slots remaining in both left
3527 : * and right nodes after a split.
3528 : */
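: /*
: * Illustrative sketch (not part of the original source; mt, ptr_a and
: * ptr_b are placeholders): a single store into the middle of an existing
: * range turns one entry into three, which is why the space requirement is
: * only known once the leaf is reached:
: *
: *	DEFINE_MTREE(mt);
: *	mtree_store_range(&mt, 0, 100, ptr_a, GFP_KERNEL);
: *	mtree_store_range(&mt, 40, 60, ptr_b, GFP_KERNEL);
: *
: * The leaf now holds [0, 39] = ptr_a, [40, 60] = ptr_b, [61, 100] = ptr_a.
: */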
3529 0 : MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3530 0 : MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3531 0 : MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3532 0 : MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3533 0 : MA_TOPIARY(mat, mas->tree);
3534 :
3535 0 : trace_ma_op(__func__, mas);
3536 0 : mas->depth = mas_mt_height(mas);
3537 : /* Allocation failures will happen early. */
3538 0 : mas_node_count(mas, 1 + mas->depth * 2);
3539 0 : if (mas_is_err(mas))
3540 : return 0;
3541 :
3542 0 : mast.l = &l_mas;
3543 0 : mast.r = &r_mas;
3544 0 : mast.orig_l = &prev_l_mas;
3545 0 : mast.orig_r = &prev_r_mas;
3546 0 : mast.free = &mat;
3547 0 : mast.bn = b_node;
3548 :
3549 0 : while (height++ <= mas->depth) {
3550 0 : if (mt_slots[b_node->type] > b_node->b_end) {
3551 0 : mas_split_final_node(&mast, mas, height);
3552 0 : break;
3553 : }
3554 :
3555 0 : l_mas = r_mas = *mas;
3556 0 : l_mas.node = mas_new_ma_node(mas, b_node);
3557 0 : r_mas.node = mas_new_ma_node(mas, b_node);
3558 : /*
3559 : * Another way that 'jitter' is avoided is to terminate a split up early if the
3560 : * or "pushing right" and is similar to the B* tree, except that the left or
3561 : * right node can rarely be reused in place due to RCU. The ripple upwards,
3562 : * however, is still halted, which is a significant saving.
3563 : * is a significant savings.
3564 : */
3565 : /* Try to push left. */
3566 0 : if (mas_push_data(mas, height, &mast, true))
3567 : break;
3568 :
3569 : /* Try to push right. */
3570 0 : if (mas_push_data(mas, height, &mast, false))
3571 : break;
3572 :
3573 0 : split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3574 0 : mast_split_data(&mast, mas, split);
3575 : /*
3576 : * mab_mas_cp() in the call above overwrites r->max;
3577 : * reset it to the correct value here.
3578 : */
3579 0 : mast.r->max = mas->max;
3580 0 : mast_fill_bnode(&mast, mas, 1);
3581 0 : prev_l_mas = *mast.l;
3582 0 : prev_r_mas = *mast.r;
3583 : }
3584 :
3585 : /* Set the original node as dead */
3586 0 : mat_add(mast.free, mas->node);
3587 0 : mas->node = l_mas.node;
3588 0 : mas_wmb_replace(mas, mast.free, NULL);
3589 0 : mtree_range_walk(mas);
3590 0 : return 1;
3591 : }
3592 :
3593 : /*
3594 : * mas_reuse_node() - Reuse the node to store the data.
3595 : * @wr_mas: The maple write state
3596 : * @bn: The maple big node
3597 : * @end: The end of the data.
3598 : *
3599 : * Will always return false in RCU mode.
3600 : *
3601 : * Return: True if node was reused, false otherwise.
3602 : */
3603 0 : static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3604 : struct maple_big_node *bn, unsigned char end)
3605 : {
3606 : /* Need to be rcu safe. */
3607 0 : if (mt_in_rcu(wr_mas->mas->tree))
3608 : return false;
3609 :
3610 0 : if (end > bn->b_end) {
3611 0 : int clear = mt_slots[wr_mas->type] - bn->b_end;
3612 :
3613 0 : memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3614 0 : memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
3615 : }
3616 0 : mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3617 0 : return true;
3618 : }
3619 :
3620 : /*
3621 : * mas_commit_b_node() - Commit the big node into the tree.
3622 : * @wr_mas: The maple write state
3623 : * @b_node: The maple big node
3624 : * @end: The end of the data.
3625 : */
3626 0 : static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
3627 : struct maple_big_node *b_node, unsigned char end)
3628 : {
3629 : struct maple_node *node;
3630 0 : unsigned char b_end = b_node->b_end;
3631 0 : enum maple_type b_type = b_node->type;
3632 :
3633 0 : if ((b_end < mt_min_slots[b_type]) &&
3634 0 : (!mte_is_root(wr_mas->mas->node)) &&
3635 0 : (mas_mt_height(wr_mas->mas) > 1))
3636 0 : return mas_rebalance(wr_mas->mas, b_node);
3637 :
3638 0 : if (b_end >= mt_slots[b_type])
3639 0 : return mas_split(wr_mas->mas, b_node);
3640 :
3641 0 : if (mas_reuse_node(wr_mas, b_node, end))
3642 : goto reuse_node;
3643 :
3644 0 : mas_node_count(wr_mas->mas, 1);
3645 0 : if (mas_is_err(wr_mas->mas))
3646 : return 0;
3647 :
3648 0 : node = mas_pop_node(wr_mas->mas);
3649 0 : node->parent = mas_mn(wr_mas->mas)->parent;
3650 0 : wr_mas->mas->node = mt_mk_node(node, b_type);
3651 0 : mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3652 0 : mas_replace(wr_mas->mas, false);
3653 : reuse_node:
3654 0 : mas_update_gap(wr_mas->mas);
3655 0 : return 1;
3656 : }
3657 :
3658 : /*
3659 : * mas_root_expand() - Expand a root to a node
3660 : * @mas: The maple state
3661 : * @entry: The entry to store into the tree
3662 : */
3663 0 : static inline int mas_root_expand(struct ma_state *mas, void *entry)
3664 : {
3665 0 : void *contents = mas_root_locked(mas);
3666 0 : enum maple_type type = maple_leaf_64;
3667 : struct maple_node *node;
3668 : void __rcu **slots;
3669 : unsigned long *pivots;
3670 0 : int slot = 0;
3671 :
3672 0 : mas_node_count(mas, 1);
3673 0 : if (unlikely(mas_is_err(mas)))
3674 : return 0;
3675 :
3676 0 : node = mas_pop_node(mas);
3677 0 : pivots = ma_pivots(node, type);
3678 0 : slots = ma_slots(node, type);
3679 0 : node->parent = ma_parent_ptr(
3680 : ((unsigned long)mas->tree | MA_ROOT_PARENT));
3681 0 : mas->node = mt_mk_node(node, type);
3682 :
3683 0 : if (mas->index) {
3684 0 : if (contents) {
3685 0 : rcu_assign_pointer(slots[slot], contents);
3686 0 : if (likely(mas->index > 1))
3687 0 : slot++;
3688 : }
3689 0 : pivots[slot++] = mas->index - 1;
3690 : }
3691 :
3692 0 : rcu_assign_pointer(slots[slot], entry);
3693 0 : mas->offset = slot;
3694 0 : pivots[slot] = mas->last;
3695 0 : if (mas->last != ULONG_MAX)
3696 0 : slot++;
3697 0 : mas->depth = 1;
3698 0 : mas_set_height(mas);
3699 0 : ma_set_meta(node, maple_leaf_64, 0, slot);
3700 : /* swap the new root into the tree */
3701 0 : rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3702 0 : return slot;
3703 : }
3704 :
3705 0 : static inline void mas_store_root(struct ma_state *mas, void *entry)
3706 : {
3707 0 : if (likely((mas->last != 0) || (mas->index != 0)))
3708 0 : mas_root_expand(mas, entry);
3709 0 : else if (((unsigned long) (entry) & 3) == 2)
3710 0 : mas_root_expand(mas, entry);
3711 : else {
3712 0 : rcu_assign_pointer(mas->tree->ma_root, entry);
3713 0 : mas->node = MAS_START;
3714 : }
3715 0 : }
3716 :
3717 : /*
3718 : * mas_is_span_wr() - Check if the write needs to be treated as a write that
3719 : * spans the node.
3720 : * @wr_mas: The maple write state
3721 : *
3722 : * Spanning writes are writes that start in one node and end in another, or
3723 : * writes of a %NULL that will cause the node to end with a %NULL.
3727 : *
3728 : * Return: True if this is a spanning write, false otherwise.
3729 : */
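: /*
: * Worked example (illustrative; the values are placeholders): if the
: * current leaf covers [0, 100] (mas->max == 100) and the store runs to
: * last == 150, the write starts in this node and ends in the next, so it
: * is a spanning write. A %NULL stored up to exactly 100 is also treated
: * as spanning, because the node would then end with a %NULL and a
: * rebalance is required.
: */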
3730 0 : static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3731 : {
3732 : unsigned long max;
3733 0 : unsigned long last = wr_mas->mas->last;
3734 0 : unsigned long piv = wr_mas->r_max;
3735 0 : enum maple_type type = wr_mas->type;
3736 0 : void *entry = wr_mas->entry;
3737 :
3738 : /* Contained in this pivot */
3739 0 : if (piv > last)
3740 : return false;
3741 :
3742 0 : max = wr_mas->mas->max;
3743 0 : if (unlikely(ma_is_leaf(type))) {
3744 : /* Fits in the node, but may span slots. */
3745 0 : if (last < max)
3746 : return false;
3747 :
3748 : /* Writes to the end of the node but not null. */
3749 0 : if ((last == max) && entry)
3750 : return false;
3751 :
3752 : /*
3753 : * Writing ULONG_MAX is not a spanning write regardless of the
3754 : * value being written as long as the range fits in the node.
3755 : */
3756 0 : if ((last == ULONG_MAX) && (last == max))
3757 : return false;
3758 0 : } else if (piv == last) {
3759 0 : if (entry)
3760 : return false;
3761 :
3762 : /* Detect spanning store wr walk */
3763 0 : if (last == ULONG_MAX)
3764 : return false;
3765 : }
3766 :
3767 0 : trace_ma_write(__func__, wr_mas->mas, piv, entry);
3768 :
3769 0 : return true;
3770 : }
3771 :
3772 0 : static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3773 : {
3774 0 : wr_mas->type = mte_node_type(wr_mas->mas->node);
3775 0 : mas_wr_node_walk(wr_mas);
3776 0 : wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3777 0 : }
3778 :
3779 : static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3780 : {
3781 0 : wr_mas->mas->max = wr_mas->r_max;
3782 0 : wr_mas->mas->min = wr_mas->r_min;
3783 0 : wr_mas->mas->node = wr_mas->content;
3784 0 : wr_mas->mas->offset = 0;
3785 0 : wr_mas->mas->depth++;
3786 : }
3787 : /*
3788 : * mas_wr_walk() - Walk the tree for a write.
3789 : * @wr_mas: The maple write state
3790 : *
3791 : * Uses mas_slot_locked() and does not need to worry about dead nodes.
3792 : *
3793 : * Return: True if it's contained in a node, false on spanning write.
3794 : */
3795 0 : static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3796 : {
3797 0 : struct ma_state *mas = wr_mas->mas;
3798 :
3799 : while (true) {
3800 0 : mas_wr_walk_descend(wr_mas);
3801 0 : if (unlikely(mas_is_span_wr(wr_mas)))
3802 : return false;
3803 :
3804 0 : wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3805 0 : mas->offset);
3806 0 : if (ma_is_leaf(wr_mas->type))
3807 : return true;
3808 :
3809 : mas_wr_walk_traverse(wr_mas);
3810 : }
3811 :
3812 : return true;
3813 : }
3814 :
3815 0 : static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3816 : {
3817 0 : struct ma_state *mas = wr_mas->mas;
3818 :
3819 : while (true) {
3820 0 : mas_wr_walk_descend(wr_mas);
3821 0 : wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3822 0 : mas->offset);
3823 0 : if (ma_is_leaf(wr_mas->type))
3824 : return true;
3825 : mas_wr_walk_traverse(wr_mas);
3826 :
3827 : }
3828 : return true;
3829 : }
3830 : /*
3831 : * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3832 : * @l_wr_mas: The left maple write state
3833 : * @r_wr_mas: The right maple write state
3834 : */
3835 0 : static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3836 : struct ma_wr_state *r_wr_mas)
3837 : {
3838 0 : struct ma_state *r_mas = r_wr_mas->mas;
3839 0 : struct ma_state *l_mas = l_wr_mas->mas;
3840 : unsigned char l_slot;
3841 :
3842 0 : l_slot = l_mas->offset;
3843 0 : if (!l_wr_mas->content)
3844 0 : l_mas->index = l_wr_mas->r_min;
3845 :
3846 0 : if ((l_mas->index == l_wr_mas->r_min) &&
3847 0 : (l_slot &&
3848 0 : !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3849 0 : if (l_slot > 1)
3850 0 : l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3851 : else
3852 0 : l_mas->index = l_mas->min;
3853 :
3854 0 : l_mas->offset = l_slot - 1;
3855 : }
3856 :
3857 0 : if (!r_wr_mas->content) {
3858 0 : if (r_mas->last < r_wr_mas->r_max)
3859 0 : r_mas->last = r_wr_mas->r_max;
3860 0 : r_mas->offset++;
3861 0 : } else if ((r_mas->last == r_wr_mas->r_max) &&
3862 0 : (r_mas->last < r_mas->max) &&
3863 0 : !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3864 0 : r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3865 0 : r_wr_mas->type, r_mas->offset + 1);
3866 0 : r_mas->offset++;
3867 : }
3868 0 : }
3869 :
3870 0 : static inline void *mas_state_walk(struct ma_state *mas)
3871 : {
3872 : void *entry;
3873 :
3874 0 : entry = mas_start(mas);
3875 0 : if (mas_is_none(mas))
3876 : return NULL;
3877 :
3878 0 : if (mas_is_ptr(mas))
3879 : return entry;
3880 :
3881 0 : return mtree_range_walk(mas);
3882 : }
3883 :
3884 : /*
3885 : * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
3886 : * to date.
3887 : *
3888 : * @mas: The maple state.
3889 : *
3890 : * Note: Leaves mas in an undesirable state.
3891 : * Return: The entry for @mas->index or %NULL on dead node.
3892 : */
3893 0 : static inline void *mtree_lookup_walk(struct ma_state *mas)
3894 : {
3895 : unsigned long *pivots;
3896 : unsigned char offset;
3897 : struct maple_node *node;
3898 : struct maple_enode *next;
3899 : enum maple_type type;
3900 : void __rcu **slots;
3901 : unsigned char end;
3902 : unsigned long max;
3903 :
3904 0 : next = mas->node;
3905 0 : max = ULONG_MAX;
3906 : do {
3907 0 : offset = 0;
3908 0 : node = mte_to_node(next);
3909 0 : type = mte_node_type(next);
3910 0 : pivots = ma_pivots(node, type);
3911 0 : end = ma_data_end(node, type, pivots, max);
3912 0 : if (unlikely(ma_dead_node(node)))
3913 : goto dead_node;
3914 : do {
3915 0 : if (pivots[offset] >= mas->index) {
3916 : max = pivots[offset];
3917 : break;
3918 : }
3919 0 : } while (++offset < end);
3920 :
3921 0 : slots = ma_slots(node, type);
3922 0 : next = mt_slot(mas->tree, slots, offset);
3923 0 : if (unlikely(ma_dead_node(node)))
3924 : goto dead_node;
3925 0 : } while (!ma_is_leaf(type));
3926 :
3927 : return (void *)next;
3928 :
3929 : dead_node:
3930 0 : mas_reset(mas);
3931 0 : return NULL;
3932 : }
3933 :
3934 : /*
3935 : * mas_new_root() - Create a new root node that only contains the entry passed
3936 : * in.
3937 : * @mas: The maple state
3938 : * @entry: The entry to store.
3939 : *
3940 : * Only valid when mas->index == 0 and mas->last == ULONG_MAX.
3941 : *
3942 : * Return: 0 on error, 1 on success.
3943 : */
3944 0 : static inline int mas_new_root(struct ma_state *mas, void *entry)
3945 : {
3946 0 : struct maple_enode *root = mas_root_locked(mas);
3947 0 : enum maple_type type = maple_leaf_64;
3948 : struct maple_node *node;
3949 : void __rcu **slots;
3950 : unsigned long *pivots;
3951 :
3952 0 : if (!entry && !mas->index && mas->last == ULONG_MAX) {
3953 0 : mas->depth = 0;
3954 0 : mas_set_height(mas);
3955 0 : rcu_assign_pointer(mas->tree->ma_root, entry);
3956 0 : mas->node = MAS_START;
3957 0 : goto done;
3958 : }
3959 :
3960 0 : mas_node_count(mas, 1);
3961 0 : if (mas_is_err(mas))
3962 : return 0;
3963 :
3964 0 : node = mas_pop_node(mas);
3965 0 : pivots = ma_pivots(node, type);
3966 0 : slots = ma_slots(node, type);
3967 0 : node->parent = ma_parent_ptr(
3968 : ((unsigned long)mas->tree | MA_ROOT_PARENT));
3969 0 : mas->node = mt_mk_node(node, type);
3970 0 : rcu_assign_pointer(slots[0], entry);
3971 0 : pivots[0] = mas->last;
3972 0 : mas->depth = 1;
3973 0 : mas_set_height(mas);
3974 0 : rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3975 :
3976 : done:
3977 0 : if (xa_is_node(root))
3978 0 : mte_destroy_walk(root, mas->tree);
3979 :
3980 : return 1;
3981 : }
3982 : /*
3983 : * mas_wr_spanning_store() - Create a subtree with the store operation completed
3984 : * and new nodes where necessary, then place the sub-tree in the actual tree.
3985 : * Note that mas is expected to point to the node which caused the store to
3986 : * span.
3987 : * @wr_mas: The maple write state
3988 : *
3989 : * Return: 0 on error, positive on success.
3990 : */
3991 0 : static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
3992 : {
3993 : struct maple_subtree_state mast;
3994 : struct maple_big_node b_node;
3995 : struct ma_state *mas;
3996 : unsigned char height;
3997 :
3998 : /* Left and Right side of spanning store */
3999 0 : MA_STATE(l_mas, NULL, 0, 0);
4000 0 : MA_STATE(r_mas, NULL, 0, 0);
4001 :
4002 0 : MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
4003 0 : MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
4004 :
4005 : /*
4006 : * A store operation that spans multiple nodes is called a spanning
4007 : * store and is handled early in the store call stack by the function
4008 : * mas_is_span_wr(). When a spanning store is identified, the maple
4009 : * state is duplicated. The first maple state walks the left tree path
4010 : * to ``index``, the duplicate walks the right tree path to ``last``.
4011 : * The data in the two nodes are combined into a single node, two nodes,
4012 : * or possibly three nodes (see the 3-way split above). A ``NULL``
4013 : * written to the last entry of a node is considered a spanning store as
4014 : * a rebalance is required for the operation to complete and an overflow
4015 : * of data may happen.
4016 : */
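: /*
: * Worked example (illustrative; the ranges are placeholders): with leaves
: * holding [0, 20] and [21, 50], a store over [15, 30] spans both leaves.
: * The left state walks down to index 15, the duplicated right state walks
: * down to last + 1 (31) to pick up the remainder of the right leaf, and
: * the surviving data is combined and rebalanced into a new subtree.
: */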
4017 0 : mas = wr_mas->mas;
4018 0 : trace_ma_op(__func__, mas);
4019 :
4020 0 : if (unlikely(!mas->index && mas->last == ULONG_MAX))
4021 0 : return mas_new_root(mas, wr_mas->entry);
4022 : /*
4023 : * Node rebalancing may occur due to this store, so there may be three new
4024 : * entries per level plus a new root.
4025 : */
4026 0 : height = mas_mt_height(mas);
4027 0 : mas_node_count(mas, 1 + height * 3);
4028 0 : if (mas_is_err(mas))
4029 : return 0;
4030 :
4031 : /*
4032 : * Set up right side. Need to get to the next offset after the spanning
4033 : * store to ensure it's not NULL and to combine both the next node and
4034 : * the node with the start together.
4035 : */
4036 0 : r_mas = *mas;
4037 : /* Avoid overflow, walk to next slot in the tree. */
4038 0 : if (r_mas.last + 1)
4039 0 : r_mas.last++;
4040 :
4041 0 : r_mas.index = r_mas.last;
4042 0 : mas_wr_walk_index(&r_wr_mas);
4043 0 : r_mas.last = r_mas.index = mas->last;
4044 :
4045 : /* Set up left side. */
4046 0 : l_mas = *mas;
4047 0 : mas_wr_walk_index(&l_wr_mas);
4048 :
4049 0 : if (!wr_mas->entry) {
4050 0 : mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
4051 0 : mas->offset = l_mas.offset;
4052 0 : mas->index = l_mas.index;
4053 0 : mas->last = l_mas.last = r_mas.last;
4054 : }
4055 :
4056 : /* expanding NULLs may make this cover the entire range */
4057 0 : if (!l_mas.index && r_mas.last == ULONG_MAX) {
4058 0 : mas_set_range(mas, 0, ULONG_MAX);
4059 0 : return mas_new_root(mas, wr_mas->entry);
4060 : }
4061 :
4062 0 : memset(&b_node, 0, sizeof(struct maple_big_node));
4063 : /* Copy l_mas and store the value in b_node. */
4064 0 : mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
4065 : /* Copy r_mas into b_node. */
4066 0 : if (r_mas.offset <= r_wr_mas.node_end)
4067 0 : mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
4068 0 : &b_node, b_node.b_end + 1);
4069 : else
4070 0 : b_node.b_end++;
4071 :
4072 : /* Stop spanning searches by searching for just index. */
4073 0 : l_mas.index = l_mas.last = mas->index;
4074 :
4075 0 : mast.bn = &b_node;
4076 0 : mast.orig_l = &l_mas;
4077 0 : mast.orig_r = &r_mas;
4078 : /* Combine l_mas and r_mas and split them up evenly again. */
4079 0 : return mas_spanning_rebalance(mas, &mast, height + 1);
4080 : }
4081 :
4082 : /*
4083 : * mas_wr_node_store() - Attempt to store the value in a node
4084 : * @wr_mas: The maple write state
4085 : *
4086 : * Attempts to reuse the node, but may allocate.
4087 : *
4088 : * Return: True if stored, false otherwise
4089 : */
4090 0 : static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
4091 : {
4092 0 : struct ma_state *mas = wr_mas->mas;
4093 : void __rcu **dst_slots;
4094 : unsigned long *dst_pivots;
4095 : unsigned char dst_offset;
4096 0 : unsigned char new_end = wr_mas->node_end;
4097 : unsigned char offset;
4098 0 : unsigned char node_slots = mt_slots[wr_mas->type];
4099 : struct maple_node reuse, *newnode;
4100 0 : unsigned char copy_size, max_piv = mt_pivots[wr_mas->type];
4101 0 : bool in_rcu = mt_in_rcu(mas->tree);
4102 :
4103 0 : offset = mas->offset;
4104 0 : if (mas->last == wr_mas->r_max) {
4105 : /* runs right to the end of the node */
4106 0 : if (mas->last == mas->max)
4107 0 : new_end = offset;
4108 : /* don't copy this offset */
4109 0 : wr_mas->offset_end++;
4110 0 : } else if (mas->last < wr_mas->r_max) {
4111 : /* new range ends in this range */
4112 0 : if (unlikely(wr_mas->r_max == ULONG_MAX))
4113 0 : mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
4114 :
4115 0 : new_end++;
4116 : } else {
4117 0 : if (wr_mas->end_piv == mas->last)
4118 0 : wr_mas->offset_end++;
4119 :
4120 0 : new_end -= wr_mas->offset_end - offset - 1;
4121 : }
4122 :
4123 : /* new range starts within a range */
4124 0 : if (wr_mas->r_min < mas->index)
4125 0 : new_end++;
4126 :
4127 : /* Not enough room */
4128 0 : if (new_end >= node_slots)
4129 : return false;
4130 :
4131 : /* Not enough data. */
4132 0 : if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
4133 0 : !(mas->mas_flags & MA_STATE_BULK))
4134 : return false;
4135 :
4136 : /* set up node. */
4137 0 : if (in_rcu) {
4138 0 : mas_node_count(mas, 1);
4139 0 : if (mas_is_err(mas))
4140 : return false;
4141 :
4142 0 : newnode = mas_pop_node(mas);
4143 : } else {
4144 0 : memset(&reuse, 0, sizeof(struct maple_node));
4145 0 : newnode = &reuse;
4146 : }
4147 :
4148 0 : newnode->parent = mas_mn(mas)->parent;
4149 0 : dst_pivots = ma_pivots(newnode, wr_mas->type);
4150 0 : dst_slots = ma_slots(newnode, wr_mas->type);
4151 : /* Copy from start to insert point */
4152 0 : memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1));
4153 0 : memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1));
4154 0 : dst_offset = offset;
4155 :
4156 : /* Handle insert of new range starting after old range */
4157 0 : if (wr_mas->r_min < mas->index) {
4158 0 : mas->offset++;
4159 0 : rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content);
4160 0 : dst_pivots[dst_offset++] = mas->index - 1;
4161 : }
4162 :
4163 : /* Store the new entry and range end. */
4164 0 : if (dst_offset < max_piv)
4165 0 : dst_pivots[dst_offset] = mas->last;
4166 0 : mas->offset = dst_offset;
4167 0 : rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);
4168 :
4169 : /*
4170 : * this range wrote to the end of the node or it overwrote the rest of
4171 : * the data
4172 : */
4173 0 : if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) {
4174 : new_end = dst_offset;
4175 : goto done;
4176 : }
4177 :
4178 0 : dst_offset++;
4179 : /* Copy to the end of node if necessary. */
4180 0 : copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
4181 0 : memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
4182 : sizeof(void *) * copy_size);
4183 0 : if (dst_offset < max_piv) {
4184 0 : if (copy_size > max_piv - dst_offset)
4185 0 : copy_size = max_piv - dst_offset;
4186 :
4187 0 : memcpy(dst_pivots + dst_offset,
4188 0 : wr_mas->pivots + wr_mas->offset_end,
4189 : sizeof(unsigned long) * copy_size);
4190 : }
4191 :
4192 0 : if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
4193 0 : dst_pivots[new_end] = mas->max;
4194 :
4195 : done:
4196 0 : mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
4197 0 : if (in_rcu) {
4198 0 : mte_set_node_dead(mas->node);
4199 0 : mas->node = mt_mk_node(newnode, wr_mas->type);
4200 0 : mas_replace(mas, false);
4201 : } else {
4202 0 : memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
4203 : }
4204 0 : trace_ma_write(__func__, mas, 0, wr_mas->entry);
4205 0 : mas_update_gap(mas);
4206 0 : return true;
4207 : }
4208 :
4209 : /*
4210 : * mas_wr_slot_store() - Attempt to store a value in a slot.
4211 : * @wr_mas: the maple write state
4212 : *
4213 : * Return: True if stored, false otherwise
4214 : */
4215 0 : static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
4216 : {
4217 0 : struct ma_state *mas = wr_mas->mas;
4218 : unsigned long lmax; /* Logical max. */
4219 0 : unsigned char offset = mas->offset;
4220 :
4221 0 : if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) ||
4222 0 : (offset != wr_mas->node_end)))
4223 : return false;
4224 :
4225 0 : if (offset == wr_mas->node_end - 1)
4226 0 : lmax = mas->max;
4227 : else
4228 0 : lmax = wr_mas->pivots[offset + 1];
4229 :
4230 : /* going to overwrite too many slots. */
4231 0 : if (lmax < mas->last)
4232 : return false;
4233 :
4234 0 : if (wr_mas->r_min == mas->index) {
4235 : /* overwriting two or more ranges with one. */
4236 0 : if (lmax == mas->last)
4237 : return false;
4238 :
4239 : /* Overwriting all of offset and a portion of offset + 1. */
4240 0 : rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
4241 0 : wr_mas->pivots[offset] = mas->last;
4242 0 : goto done;
4243 : }
4244 :
4245 : /* Doesn't end on the next range end. */
4246 0 : if (lmax != mas->last)
4247 : return false;
4248 :
4249 : /* Overwriting a portion of offset and all of offset + 1 */
4250 0 : if ((offset + 1 < mt_pivots[wr_mas->type]) &&
4251 0 : (wr_mas->entry || wr_mas->pivots[offset + 1]))
4252 0 : wr_mas->pivots[offset + 1] = mas->last;
4253 :
4254 0 : rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
4255 0 : wr_mas->pivots[offset] = mas->index - 1;
4256 0 : mas->offset++; /* Keep mas accurate. */
4257 :
4258 : done:
4259 0 : trace_ma_write(__func__, mas, 0, wr_mas->entry);
4260 0 : mas_update_gap(mas);
4261 0 : return true;
4262 : }
4263 :
4264 : static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4265 : {
4266 0 : while ((wr_mas->mas->last > wr_mas->end_piv) &&
4267 0 : (wr_mas->offset_end < wr_mas->node_end))
4268 0 : wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end];
4269 :
4270 0 : if (wr_mas->mas->last > wr_mas->end_piv)
4271 0 : wr_mas->end_piv = wr_mas->mas->max;
4272 : }
4273 :
4274 0 : static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4275 : {
4276 0 : struct ma_state *mas = wr_mas->mas;
4277 :
4278 0 : if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end])
4279 0 : mas->last = wr_mas->end_piv;
4280 :
4281 : /* Check next slot(s) if we are overwriting the end */
4282 0 : if ((mas->last == wr_mas->end_piv) &&
4283 0 : (wr_mas->node_end != wr_mas->offset_end) &&
4284 0 : !wr_mas->slots[wr_mas->offset_end + 1]) {
4285 0 : wr_mas->offset_end++;
4286 0 : if (wr_mas->offset_end == wr_mas->node_end)
4287 0 : mas->last = mas->max;
4288 : else
4289 0 : mas->last = wr_mas->pivots[wr_mas->offset_end];
4290 0 : wr_mas->end_piv = mas->last;
4291 : }
4292 :
4293 0 : if (!wr_mas->content) {
4294 : /* If this one is null, the next and prev are not */
4295 0 : mas->index = wr_mas->r_min;
4296 : } else {
4297 : /* Check prev slot if we are overwriting the start */
4298 0 : if (mas->index == wr_mas->r_min && mas->offset &&
4299 0 : !wr_mas->slots[mas->offset - 1]) {
4300 0 : mas->offset--;
4301 0 : wr_mas->r_min = mas->index =
4302 0 : mas_safe_min(mas, wr_mas->pivots, mas->offset);
4303 0 : wr_mas->r_max = wr_mas->pivots[mas->offset];
4304 : }
4305 : }
4306 0 : }
4307 :
4308 0 : static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
4309 : {
4310 0 : unsigned char end = wr_mas->node_end;
4311 0 : unsigned char new_end = end + 1;
4312 0 : struct ma_state *mas = wr_mas->mas;
4313 0 : unsigned char node_pivots = mt_pivots[wr_mas->type];
4314 :
4315 0 : if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) {
4316 0 : if (new_end < node_pivots)
4317 0 : wr_mas->pivots[new_end] = wr_mas->pivots[end];
4318 :
4319 0 : if (new_end < node_pivots)
4320 0 : ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4321 :
4322 0 : rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
4323 0 : mas->offset = new_end;
4324 0 : wr_mas->pivots[end] = mas->index - 1;
4325 :
4326 0 : return true;
4327 : }
4328 :
4329 0 : if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) {
4330 0 : if (new_end < node_pivots)
4331 0 : wr_mas->pivots[new_end] = wr_mas->pivots[end];
4332 :
4333 0 : rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
4334 0 : if (new_end < node_pivots)
4335 0 : ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4336 :
4337 0 : wr_mas->pivots[end] = mas->last;
4338 0 : rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
4339 0 : return true;
4340 : }
4341 :
4342 : return false;
4343 : }
4344 :
4345 : /*
4346 : * mas_wr_bnode() - Slow path for a modification.
4347 : * @wr_mas: The maple write state
4348 : *
4349 : * This is where splits and rebalances end up.
4350 : */
4351 0 : static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4352 : {
4353 : struct maple_big_node b_node;
4354 :
4355 0 : trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4356 0 : memset(&b_node, 0, sizeof(struct maple_big_node));
4357 0 : mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4358 0 : mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
4359 0 : }
4360 :
4361 0 : static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4362 : {
4363 : unsigned char node_slots;
4364 : unsigned char node_size;
4365 0 : struct ma_state *mas = wr_mas->mas;
4366 :
4367 : /* Direct replacement */
4368 0 : if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4369 0 : rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4370 0 : if (!!wr_mas->entry ^ !!wr_mas->content)
4371 0 : mas_update_gap(mas);
4372 : return;
4373 : }
4374 :
4375 : /* Attempt to append */
4376 0 : node_slots = mt_slots[wr_mas->type];
4377 0 : node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
4378 0 : if (mas->max == ULONG_MAX)
4379 0 : node_size++;
4380 :
4381 : /* slot and node store will not fit, go to the slow path */
4382 0 : if (unlikely(node_size >= node_slots))
4383 : goto slow_path;
4384 :
4385 0 : if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
4386 0 : (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
4387 0 : if (!wr_mas->content || !wr_mas->entry)
4388 0 : mas_update_gap(mas);
4389 : return;
4390 : }
4391 :
4392 0 : if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
4393 : return;
4394 0 : else if (mas_wr_node_store(wr_mas))
4395 : return;
4396 :
4397 0 : if (mas_is_err(mas))
4398 : return;
4399 :
4400 : slow_path:
4401 0 : mas_wr_bnode(wr_mas);
4402 : }
4403 :
4404 : /*
4405 : * mas_wr_store_entry() - Internal call to store a value
4406 : * @wr_mas: The maple write state
4407 : *
4408 : * Return: The contents that were previously stored at the index.
4410 : */
4411 0 : static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4412 : {
4413 0 : struct ma_state *mas = wr_mas->mas;
4414 :
4415 0 : wr_mas->content = mas_start(mas);
4416 0 : if (mas_is_none(mas) || mas_is_ptr(mas)) {
4417 0 : mas_store_root(mas, wr_mas->entry);
4418 0 : return wr_mas->content;
4419 : }
4420 :
4421 0 : if (unlikely(!mas_wr_walk(wr_mas))) {
4422 0 : mas_wr_spanning_store(wr_mas);
4423 0 : return wr_mas->content;
4424 : }
4425 :
4426 : /* At this point, we are at the leaf node that needs to be altered. */
4427 0 : wr_mas->end_piv = wr_mas->r_max;
4428 0 : mas_wr_end_piv(wr_mas);
4429 :
4430 0 : if (!wr_mas->entry)
4431 0 : mas_wr_extend_null(wr_mas);
4432 :
4433 : /* New root for a single pointer */
4434 0 : if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
4435 0 : mas_new_root(mas, wr_mas->entry);
4436 0 : return wr_mas->content;
4437 : }
4438 :
4439 0 : mas_wr_modify(wr_mas);
4440 0 : return wr_mas->content;
4441 : }
4442 :
4443 : /**
4444 : * mas_insert() - Internal call to insert a value
4445 : * @mas: The maple state
4446 : * @entry: The entry to store
4447 : *
4448 : * Return: %NULL if the entry was inserted, otherwise the contents already
4449 : * stored at the requested index. Check the maple state for error conditions.
4450 : */
4451 0 : static inline void *mas_insert(struct ma_state *mas, void *entry)
4452 : {
4453 0 : MA_WR_STATE(wr_mas, mas, entry);
4454 :
4455 : /*
4456 : * Inserting a new range inserts either 0, 1, or 2 pivots within the
4457 : * tree. If the insert fits exactly into an existing gap with a value
4458 : * of NULL, then the slot only needs to be written with the new value.
4459 : * If the range being inserted is adjacent to another range, then only a
4460 : * single pivot needs to be inserted (as well as writing the entry). If
4461 : * the new range is within a gap but does not touch any other ranges,
4462 : * then two pivots need to be inserted: the start - 1, and the end. As
4463 : * usual, the entry must be written. Most operations require a new node
4464 : * to be allocated and replace an existing node to ensure RCU safety,
4465 : * when in RCU mode. The exception to requiring a newly allocated node
4466 : * is when inserting at the end of a node (appending). When done
4467 : * carefully, appending can reuse the node in place.
4468 : */
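: /*
: * Worked example (illustrative; the values are placeholders): with an
: * existing entry at [10, 20], inserting [21, 30] is adjacent and needs
: * only the end pivot 30 (1 pivot); inserting [40, 50] into the middle of
: * a gap needs the pivots 39 and 50 (2 pivots); inserting into a %NULL
: * slot that exactly matches the new range needs no new pivots (0 pivots).
: */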
4469 0 : wr_mas.content = mas_start(mas);
4470 0 : if (wr_mas.content)
4471 : goto exists;
4472 :
4473 0 : if (mas_is_none(mas) || mas_is_ptr(mas)) {
4474 0 : mas_store_root(mas, entry);
4475 0 : return NULL;
4476 : }
4477 :
4478 : /* spanning writes always overwrite something */
4479 0 : if (!mas_wr_walk(&wr_mas))
4480 : goto exists;
4481 :
4482 : /* At this point, we are at the leaf node that needs to be altered. */
4483 0 : wr_mas.offset_end = mas->offset;
4484 0 : wr_mas.end_piv = wr_mas.r_max;
4485 :
4486 0 : if (wr_mas.content || (mas->last > wr_mas.r_max))
4487 : goto exists;
4488 :
4489 0 : if (!entry)
4490 : return NULL;
4491 :
4492 0 : mas_wr_modify(&wr_mas);
4493 0 : return wr_mas.content;
4494 :
4495 : exists:
4496 0 : mas_set_err(mas, -EEXIST);
4497 0 : return wr_mas.content;
4498 :
4499 : }
4500 :
4501 : /*
4502 : * mas_prev_node() - Find the previous non-null entry at the same level in
4503 : * the tree.
4504 : * @mas: The maple state
4505 : * @min: The lower limit to search
4506 : *
4507 : * The prev node value will be mas->node[mas->offset] or MAS_NONE.
4508 : * Return: 1 if the node is dead, 0 otherwise.
4509 : */
4510 0 : static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
4511 : {
4512 : enum maple_type mt;
4513 : int offset, level;
4514 : void __rcu **slots;
4515 : struct maple_node *node;
4516 : struct maple_enode *enode;
4517 : unsigned long *pivots;
4518 :
4519 0 : if (mas_is_none(mas))
4520 : return 0;
4521 :
4522 : level = 0;
4523 : do {
4524 0 : node = mas_mn(mas);
4525 0 : if (ma_is_root(node))
4526 : goto no_entry;
4527 :
4528 : /* Walk up. */
4529 0 : if (unlikely(mas_ascend(mas)))
4530 : return 1;
4531 0 : offset = mas->offset;
4532 0 : level++;
4533 0 : } while (!offset);
4534 :
4535 0 : offset--;
4536 0 : mt = mte_node_type(mas->node);
4537 0 : node = mas_mn(mas);
4538 0 : slots = ma_slots(node, mt);
4539 0 : pivots = ma_pivots(node, mt);
4540 0 : if (unlikely(ma_dead_node(node)))
4541 : return 1;
4542 :
4543 0 : mas->max = pivots[offset];
4544 0 : if (offset)
4545 0 : mas->min = pivots[offset - 1] + 1;
4546 0 : if (unlikely(ma_dead_node(node)))
4547 : return 1;
4548 :
4549 0 : if (mas->max < min)
4550 : goto no_entry_min;
4551 :
4552 0 : while (level > 1) {
4553 0 : level--;
4554 0 : enode = mas_slot(mas, slots, offset);
4555 0 : if (unlikely(ma_dead_node(node)))
4556 : return 1;
4557 :
4558 0 : mas->node = enode;
4559 0 : mt = mte_node_type(mas->node);
4560 0 : node = mas_mn(mas);
4561 0 : slots = ma_slots(node, mt);
4562 0 : pivots = ma_pivots(node, mt);
4563 0 : offset = ma_data_end(node, mt, pivots, mas->max);
4564 0 : if (unlikely(ma_dead_node(node)))
4565 : return 1;
4566 :
4567 0 : if (offset)
4568 0 : mas->min = pivots[offset - 1] + 1;
4569 :
4570 0 : if (offset < mt_pivots[mt])
4571 0 : mas->max = pivots[offset];
4572 :
4573 0 : if (mas->max < min)
4574 : goto no_entry;
4575 : }
4576 :
4577 0 : mas->node = mas_slot(mas, slots, offset);
4578 0 : if (unlikely(ma_dead_node(node)))
4579 : return 1;
4580 :
4581 0 : mas->offset = mas_data_end(mas);
4582 0 : if (unlikely(mte_dead_node(mas->node)))
4583 : return 1;
4584 :
4585 0 : return 0;
4586 :
4587 : no_entry_min:
4588 0 : mas->offset = offset;
4589 0 : if (offset)
4590 0 : mas->min = pivots[offset - 1] + 1;
4591 : no_entry:
4592 0 : if (unlikely(ma_dead_node(node)))
4593 : return 1;
4594 :
4595 0 : mas->node = MAS_NONE;
4596 0 : return 0;
4597 : }
4598 :
4599 : /*
4600 : * mas_next_node() - Get the next node at the same level in the tree.
4601 : * @mas: The maple state
4601 : * @node: The maple node
4602 : * @max: The maximum pivot value to check.
4603 : *
4604 : * The next value will be mas->node[mas->offset] or MAS_NONE.
4605 : * Return: 1 on dead node, 0 otherwise.
4606 : */
4607 0 : static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
4608 : unsigned long max)
4609 : {
4610 : unsigned long min, pivot;
4611 : unsigned long *pivots;
4612 : struct maple_enode *enode;
4613 0 : int level = 0;
4614 : unsigned char offset;
4615 : unsigned char node_end;
4616 : enum maple_type mt;
4617 : void __rcu **slots;
4618 :
4619 0 : if (mas->max >= max)
4620 : goto no_entry;
4621 :
4622 : level = 0;
4623 : do {
4624 0 : if (ma_is_root(node))
4625 : goto no_entry;
4626 :
4627 0 : min = mas->max + 1;
4628 0 : if (min > max)
4629 : goto no_entry;
4630 :
4631 0 : if (unlikely(mas_ascend(mas)))
4632 : return 1;
4633 :
4634 0 : offset = mas->offset;
4635 0 : level++;
4636 0 : node = mas_mn(mas);
4637 0 : mt = mte_node_type(mas->node);
4638 0 : pivots = ma_pivots(node, mt);
4639 0 : node_end = ma_data_end(node, mt, pivots, mas->max);
4640 0 : if (unlikely(ma_dead_node(node)))
4641 : return 1;
4642 :
4643 0 : } while (unlikely(offset == node_end));
4644 :
4645 0 : slots = ma_slots(node, mt);
4646 0 : pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
4647 0 : while (unlikely(level > 1)) {
4648 : /* Descend, if necessary */
4649 0 : enode = mas_slot(mas, slots, offset);
4650 0 : if (unlikely(ma_dead_node(node)))
4651 : return 1;
4652 :
4653 0 : mas->node = enode;
4654 0 : level--;
4655 0 : node = mas_mn(mas);
4656 0 : mt = mte_node_type(mas->node);
4657 0 : slots = ma_slots(node, mt);
4658 0 : pivots = ma_pivots(node, mt);
4659 0 : if (unlikely(ma_dead_node(node)))
4660 : return 1;
4661 :
4662 0 : offset = 0;
4663 0 : pivot = pivots[0];
4664 : }
4665 :
4666 0 : enode = mas_slot(mas, slots, offset);
4667 0 : if (unlikely(ma_dead_node(node)))
4668 : return 1;
4669 :
4670 0 : mas->node = enode;
4671 0 : mas->min = min;
4672 0 : mas->max = pivot;
4673 0 : return 0;
4674 :
4675 : no_entry:
4676 0 : if (unlikely(ma_dead_node(node)))
4677 : return 1;
4678 :
4679 0 : mas->node = MAS_NONE;
4680 0 : return 0;
4681 : }
4682 :
4683 : /*
4684 : * mas_next_nentry() - Get the next node entry
4685 : * @mas: The maple state
4686 : * @node: The maple node
4687 : * @max: The maximum value to check
4687 : * @type: The maple node type
4688 : *
4689 : * Sets @mas->offset to the offset of the next node entry, @mas->last to the
4690 : * pivot of the entry.
4691 : *
4692 : * Return: The next entry, %NULL otherwise
4693 : */
4694 0 : static inline void *mas_next_nentry(struct ma_state *mas,
4695 : struct maple_node *node, unsigned long max, enum maple_type type)
4696 : {
4697 : unsigned char count;
4698 : unsigned long pivot;
4699 : unsigned long *pivots;
4700 : void __rcu **slots;
4701 : void *entry;
4702 :
4703 0 : if (mas->last == mas->max) {
4704 0 : mas->index = mas->max;
4705 0 : return NULL;
4706 : }
4707 :
4708 0 : slots = ma_slots(node, type);
4709 0 : pivots = ma_pivots(node, type);
4710 0 : count = ma_data_end(node, type, pivots, mas->max);
4711 0 : if (unlikely(ma_dead_node(node)))
4712 : return NULL;
4713 :
4714 0 : mas->index = mas_safe_min(mas, pivots, mas->offset);
4715 0 : if (unlikely(ma_dead_node(node)))
4716 : return NULL;
4717 :
4718 0 : if (mas->index > max)
4719 : return NULL;
4720 :
4721 0 : if (mas->offset > count)
4722 : return NULL;
4723 :
4724 0 : while (mas->offset < count) {
4725 0 : pivot = pivots[mas->offset];
4726 0 : entry = mas_slot(mas, slots, mas->offset);
4727 0 : if (ma_dead_node(node))
4728 : return NULL;
4729 :
4730 0 : if (entry)
4731 : goto found;
4732 :
4733 0 : if (pivot >= max)
4734 : return NULL;
4735 :
4736 0 : mas->index = pivot + 1;
4737 0 : mas->offset++;
4738 : }
4739 :
4740 0 : if (mas->index > mas->max) {
4741 0 : mas->index = mas->last;
4742 0 : return NULL;
4743 : }
4744 :
4745 0 : pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
4746 0 : entry = mas_slot(mas, slots, mas->offset);
4747 0 : if (ma_dead_node(node))
4748 : return NULL;
4749 :
4750 0 : if (!pivot)
4751 : return NULL;
4752 :
4753 0 : if (!entry)
4754 : return NULL;
4755 :
4756 : found:
4757 0 : mas->last = pivot;
4758 0 : return entry;
4759 : }
4760 :
4761 : static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4762 : {
4763 : retry:
4764 0 : mas_set(mas, index);
4765 0 : mas_state_walk(mas);
4766 0 : if (mas_is_start(mas))
4767 : goto retry;
4768 : }
4769 :
4770 : /*
4771 : * mas_next_entry() - Internal function to get the next entry.
4772 : * @mas: The maple state
4773 : * @limit: The maximum range start.
4774 : *
4775 : * Set the @mas->node to the next entry and the range_start to
4776 : * Sets @mas->node to the node of the next entry and @mas->index to the
4777 : * beginning value for the entry. Does not check beyond @limit.
4778 : * Restarts on dead nodes.
4779 : *
4780 : * Return: the next entry or %NULL.
4781 : */
4782 0 : static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4783 : {
4784 0 : void *entry = NULL;
4785 : struct maple_enode *prev_node;
4786 : struct maple_node *node;
4787 : unsigned char offset;
4788 : unsigned long last;
4789 : enum maple_type mt;
4790 :
4791 0 : if (mas->index > limit) {
4792 0 : mas->index = mas->last = limit;
4793 0 : mas_pause(mas);
4794 0 : return NULL;
4795 : }
4796 0 : last = mas->last;
4797 : retry:
4798 0 : offset = mas->offset;
4799 0 : prev_node = mas->node;
4800 0 : node = mas_mn(mas);
4801 0 : mt = mte_node_type(mas->node);
4802 0 : mas->offset++;
4803 0 : if (unlikely(mas->offset >= mt_slots[mt])) {
4804 0 : mas->offset = mt_slots[mt] - 1;
4805 0 : goto next_node;
4806 : }
4807 :
4808 0 : while (!mas_is_none(mas)) {
4809 0 : entry = mas_next_nentry(mas, node, limit, mt);
4810 0 : if (unlikely(ma_dead_node(node))) {
4811 : mas_rewalk(mas, last);
4812 : goto retry;
4813 : }
4814 :
4815 0 : if (likely(entry))
4816 : return entry;
4817 :
4818 0 : if (unlikely((mas->index > limit)))
4819 : break;
4820 :
4821 : next_node:
4822 0 : prev_node = mas->node;
4823 0 : offset = mas->offset;
4824 0 : if (unlikely(mas_next_node(mas, node, limit))) {
4825 : mas_rewalk(mas, last);
4826 : goto retry;
4827 : }
4828 0 : mas->offset = 0;
4829 0 : node = mas_mn(mas);
4830 0 : mt = mte_node_type(mas->node);
4831 : }
4832 :
4833 0 : mas->index = mas->last = limit;
4834 0 : mas->offset = offset;
4835 0 : mas->node = prev_node;
4836 0 : return NULL;
4837 : }
4838 :
4839 : /*
4840 : * mas_prev_nentry() - Get the previous node entry.
4841 : * @mas: The maple state.
4842 : * @limit: The lower limit to check for a value.
4842 : * @index: The index used to rewalk the tree if a dead node is encountered.
4843 : *
4844 : * Return: the entry, %NULL otherwise.
4845 : */
4846 0 : static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit,
4847 : unsigned long index)
4848 : {
4849 : unsigned long pivot, min;
4850 : unsigned char offset;
4851 : struct maple_node *mn;
4852 : enum maple_type mt;
4853 : unsigned long *pivots;
4854 : void __rcu **slots;
4855 : void *entry;
4856 :
4857 : retry:
4858 0 : if (!mas->offset)
4859 : return NULL;
4860 :
4861 0 : mn = mas_mn(mas);
4862 0 : mt = mte_node_type(mas->node);
4863 0 : offset = mas->offset - 1;
4864 0 : if (offset >= mt_slots[mt])
4865 0 : offset = mt_slots[mt] - 1;
4866 :
4867 0 : slots = ma_slots(mn, mt);
4868 0 : pivots = ma_pivots(mn, mt);
4869 0 : if (unlikely(ma_dead_node(mn))) {
4870 : mas_rewalk(mas, index);
4871 : goto retry;
4872 : }
4873 :
4874 0 : if (offset == mt_pivots[mt])
4875 0 : pivot = mas->max;
4876 : else
4877 0 : pivot = pivots[offset];
4878 :
4879 0 : if (unlikely(ma_dead_node(mn))) {
4880 : mas_rewalk(mas, index);
4881 : goto retry;
4882 : }
4883 :
4884 0 : while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) ||
4885 : !pivot))
4886 0 : pivot = pivots[--offset];
4887 :
4888 0 : min = mas_safe_min(mas, pivots, offset);
4889 0 : entry = mas_slot(mas, slots, offset);
4890 0 : if (unlikely(ma_dead_node(mn))) {
4891 : mas_rewalk(mas, index);
4892 : goto retry;
4893 : }
4894 :
4895 0 : if (likely(entry)) {
4896 0 : mas->offset = offset;
4897 0 : mas->last = pivot;
4898 0 : mas->index = min;
4899 : }
4900 : return entry;
4901 : }
4902 :
4903 0 : static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
4904 : {
4905 : void *entry;
4906 :
4907 0 : if (mas->index < min) {
4908 0 : mas->index = mas->last = min;
4909 0 : mas->node = MAS_NONE;
4910 0 : return NULL;
4911 : }
4912 : retry:
4913 0 : while (likely(!mas_is_none(mas))) {
4914 0 : entry = mas_prev_nentry(mas, min, mas->index);
4915 0 : if (unlikely(mas->last < min))
4916 : goto not_found;
4917 :
4918 0 : if (likely(entry))
4919 : return entry;
4920 :
4921 0 : if (unlikely(mas_prev_node(mas, min))) {
4922 0 : mas_rewalk(mas, mas->index);
4923 : goto retry;
4924 : }
4925 :
4926 0 : mas->offset++;
4927 : }
4928 :
4929 0 : mas->offset--;
4930 : not_found:
4931 0 : mas->index = mas->last = min;
4932 0 : return NULL;
4933 : }
4934 :
4935 : /*
4936 : * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
4937 : * highest gap address of a given size in a given node and descend.
4938 : * @mas: The maple state
4939 : * @size: The needed size.
4939 : * @gap_min: Pointer to store the minimum of the gap found in a leaf
4939 : * @gap_max: Pointer to store the maximum of the gap found in a leaf
4940 : *
4941 : * Return: True if found in a leaf, false otherwise.
4942 : *
4943 : */
4944 0 : static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
4945 : unsigned long *gap_min, unsigned long *gap_max)
4946 : {
4947 0 : enum maple_type type = mte_node_type(mas->node);
4948 0 : struct maple_node *node = mas_mn(mas);
4949 : unsigned long *pivots, *gaps;
4950 : void __rcu **slots;
4951 0 : unsigned long gap = 0;
4952 : unsigned long max, min;
4953 : unsigned char offset;
4954 :
4955 0 : if (unlikely(mas_is_err(mas)))
4956 : return true;
4957 :
4958 0 : if (ma_is_dense(type)) {
4959 : /* dense nodes. */
4960 0 : mas->offset = (unsigned char)(mas->index - mas->min);
4961 0 : return true;
4962 : }
4963 :
4964 0 : pivots = ma_pivots(node, type);
4965 0 : slots = ma_slots(node, type);
4966 0 : gaps = ma_gaps(node, type);
4967 0 : offset = mas->offset;
4968 0 : min = mas_safe_min(mas, pivots, offset);
4969 : /* Skip out of bounds. */
4970 0 : while (mas->last < min)
4971 0 : min = mas_safe_min(mas, pivots, --offset);
4972 :
4973 0 : max = mas_safe_pivot(mas, pivots, offset, type);
4974 0 : while (mas->index <= max) {
4975 0 : gap = 0;
4976 0 : if (gaps)
4977 0 : gap = gaps[offset];
4978 0 : else if (!mas_slot(mas, slots, offset))
4979 0 : gap = max - min + 1;
4980 :
4981 0 : if (gap) {
4982 0 : if ((size <= gap) && (size <= mas->last - min + 1))
4983 : break;
4984 :
4985 0 : if (!gaps) {
4986 : /* Skip the next slot, it cannot be a gap. */
4987 0 : if (offset < 2)
4988 : goto ascend;
4989 :
4990 0 : offset -= 2;
4991 0 : max = pivots[offset];
4992 0 : min = mas_safe_min(mas, pivots, offset);
4993 0 : continue;
4994 : }
4995 : }
4996 :
4997 0 : if (!offset)
4998 : goto ascend;
4999 :
5000 0 : offset--;
5001 0 : max = min - 1;
5002 0 : min = mas_safe_min(mas, pivots, offset);
5003 : }
5004 :
5005 0 : if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
5006 : goto no_space;
5007 :
5008 0 : if (unlikely(ma_is_leaf(type))) {
5009 0 : mas->offset = offset;
5010 0 : *gap_min = min;
5011 0 : *gap_max = min + gap - 1;
5012 0 : return true;
5013 : }
5014 :
5015 : /* descend, only happens under lock. */
5016 0 : mas->node = mas_slot(mas, slots, offset);
5017 0 : mas->min = min;
5018 0 : mas->max = max;
5019 0 : mas->offset = mas_data_end(mas);
5020 0 : return false;
5021 :
5022 : ascend:
5023 0 : if (!mte_is_root(mas->node))
5024 : return false;
5025 :
5026 : no_space:
5027 0 : mas_set_err(mas, -EBUSY);
5028 0 : return false;
5029 : }
5030 :
5031 0 : static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
5032 : {
5033 0 : enum maple_type type = mte_node_type(mas->node);
5034 0 : unsigned long pivot, min, gap = 0;
5035 : unsigned char offset, data_end;
5036 : unsigned long *gaps, *pivots;
5037 : void __rcu **slots;
5038 : struct maple_node *node;
5039 0 : bool found = false;
5040 :
5041 0 : if (ma_is_dense(type)) {
5042 0 : mas->offset = (unsigned char)(mas->index - mas->min);
5043 0 : return true;
5044 : }
5045 :
5046 0 : node = mas_mn(mas);
5047 0 : pivots = ma_pivots(node, type);
5048 0 : slots = ma_slots(node, type);
5049 0 : gaps = ma_gaps(node, type);
5050 0 : offset = mas->offset;
5051 0 : min = mas_safe_min(mas, pivots, offset);
5052 0 : data_end = ma_data_end(node, type, pivots, mas->max);
5053 0 : for (; offset <= data_end; offset++) {
5054 0 : pivot = mas_logical_pivot(mas, pivots, offset, type);
5055 :
5056 : /* Not within lower bounds */
5057 0 : if (mas->index > pivot)
5058 : goto next_slot;
5059 :
5060 0 : if (gaps)
5061 0 : gap = gaps[offset];
5062 0 : else if (!mas_slot(mas, slots, offset))
5063 0 : gap = min(pivot, mas->last) - max(mas->index, min) + 1;
5064 : else
5065 : goto next_slot;
5066 :
5067 0 : if (gap >= size) {
5068 0 : if (ma_is_leaf(type)) {
5069 : found = true;
5070 : goto done;
5071 : }
5072 : if (mas->index <= pivot) {
5073 0 : mas->node = mas_slot(mas, slots, offset);
5074 0 : mas->min = min;
5075 0 : mas->max = pivot;
5076 0 : offset = 0;
5077 0 : break;
5078 : }
5079 : }
5080 : next_slot:
5081 0 : min = pivot + 1;
5082 0 : if (mas->last <= pivot) {
5083 0 : mas_set_err(mas, -EBUSY);
5084 0 : return true;
5085 : }
5086 : }
5087 :
5088 0 : if (mte_is_root(mas->node))
5089 0 : found = true;
5090 : done:
5091 0 : mas->offset = offset;
5092 0 : return found;
5093 : }
5094 :
5095 : /**
5096 : * mas_walk() - Search for @mas->index in the tree.
5097 : * @mas: The maple state.
5098 : *
5099 : * mas->index and mas->last will be set to the range if there is a value. If
5100 : * mas->node is MAS_NONE, reset to MAS_START.
5101 : *
5102 : * Return: the entry at the location or %NULL.
5103 : */
5104 0 : void *mas_walk(struct ma_state *mas)
5105 : {
5106 : void *entry;
5107 :
5108 : retry:
5109 0 : entry = mas_state_walk(mas);
5110 0 : if (mas_is_start(mas))
5111 : goto retry;
5112 :
5113 0 : if (mas_is_ptr(mas)) {
5114 0 : if (!mas->index) {
5115 0 : mas->last = 0;
5116 : } else {
5117 0 : mas->index = 1;
5118 0 : mas->last = ULONG_MAX;
5119 : }
5120 : return entry;
5121 : }
5122 :
5123 0 : if (mas_is_none(mas)) {
5124 0 : mas->index = 0;
5125 0 : mas->last = ULONG_MAX;
5126 : }
5127 :
5128 : return entry;
5129 : }
5130 : EXPORT_SYMBOL_GPL(mas_walk);
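: /*
: * Usage sketch (illustrative only; my_tree and index are placeholders).
: * Readers are expected to hold the RCU read lock or the maple tree lock
: * around the walk:
: *
: *	MA_STATE(mas, &my_tree, index, index);
: *	void *entry;
: *
: *	rcu_read_lock();
: *	entry = mas_walk(&mas);
: *	rcu_read_unlock();
: *
: * On return, mas.index and mas.last describe the range holding entry.
: */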
5131 :
5132 0 : static inline bool mas_rewind_node(struct ma_state *mas)
5133 : {
5134 : unsigned char slot;
5135 :
5136 : do {
5137 0 : if (mte_is_root(mas->node)) {
5138 0 : slot = mas->offset;
5139 0 : if (!slot)
5140 : return false;
5141 : } else {
5142 0 : mas_ascend(mas);
5143 0 : slot = mas->offset;
5144 : }
5145 0 : } while (!slot);
5146 :
5147 0 : mas->offset = --slot;
5148 0 : return true;
5149 : }
5150 :
5151 : /*
5152 : * mas_skip_node() - Internal function. Skip over a node.
5153 : * @mas: The maple state.
5154 : *
5155 : * Return: true if there is another node, false otherwise.
5156 : */
5157 0 : static inline bool mas_skip_node(struct ma_state *mas)
5158 : {
5159 0 : if (mas_is_err(mas))
5160 : return false;
5161 :
5162 : do {
5163 0 : if (mte_is_root(mas->node)) {
5164 0 : if (mas->offset >= mas_data_end(mas)) {
5165 0 : mas_set_err(mas, -EBUSY);
5166 0 : return false;
5167 : }
5168 : } else {
5169 0 : mas_ascend(mas);
5170 : }
5171 0 : } while (mas->offset >= mas_data_end(mas));
5172 :
5173 0 : mas->offset++;
5174 0 : return true;
5175 : }
5176 :
5177 : /*
5178 : * mas_awalk() - Allocation walk. Search from low address to high, for a gap of
5179 : * @size
5180 : * @mas: The maple state
5181 : * @size: The size of the gap required
5182 : *
5183 : * Search between @mas->index and @mas->last for a gap of @size.
5184 : */
5185 0 : static inline void mas_awalk(struct ma_state *mas, unsigned long size)
5186 : {
5187 0 : struct maple_enode *last = NULL;
5188 :
5189 : /*
5190 : * There are 4 options:
5191 : * go to child (descend)
5192 : * go back to parent (ascend)
5193 : * no gap found. (return, slot == MAPLE_NODE_SLOTS)
5194 : * found the gap. (return, slot != MAPLE_NODE_SLOTS)
5195 : */
5196 0 : while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
5197 0 : if (last == mas->node)
5198 0 : mas_skip_node(mas);
5199 : else
5200 : last = mas->node;
5201 : }
5202 0 : }
5203 :
5204 : /*
5205 : * mas_fill_gap() - Fill a located gap with @entry.
5206 : * @mas: The maple state
5207 : * @entry: The value to store
5208 : * @slot: The offset into the node to store the @entry
5209 : * @size: The size of the entry
5210 : * @index: The start location
5211 : */
5212 0 : static inline void mas_fill_gap(struct ma_state *mas, void *entry,
5213 : unsigned char slot, unsigned long size, unsigned long *index)
5214 : {
5215 0 : MA_WR_STATE(wr_mas, mas, entry);
5216 0 : unsigned char pslot = mte_parent_slot(mas->node);
5217 0 : struct maple_enode *mn = mas->node;
5218 : unsigned long *pivots;
5219 : enum maple_type ptype;
5220 : /*
5221 : * mas->index is the start address for the search
5222 : * which may no longer be needed.
5223 : * mas->last is the end address for the search
5224 : */
5225 :
5226 0 : *index = mas->index;
5227 0 : mas->last = mas->index + size - 1;
5228 :
5229 : /*
5230 : * It is possible that using mas->max and mas->min to correctly
5231 : * calculate the index and last will cause an issue in the gap
5232 : * calculation, so fix the ma_state here
5233 : */
5234 0 : mas_ascend(mas);
5235 0 : ptype = mte_node_type(mas->node);
5236 0 : pivots = ma_pivots(mas_mn(mas), ptype);
5237 0 : mas->max = mas_safe_pivot(mas, pivots, pslot, ptype);
5238 0 : mas->min = mas_safe_min(mas, pivots, pslot);
5239 0 : mas->node = mn;
5240 0 : mas->offset = slot;
5241 0 : mas_wr_store_entry(&wr_mas);
5242 0 : }
5243 :
5244 : /*
5245 : * mas_sparse_area() - Internal function. Return upper or lower limit when
5246 : * searching for a gap in an empty tree.
5247 : * @mas: The maple state
5248 : * @min: the minimum range
5249 : * @max: The maximum range
5250 : * @size: The size of the gap
5251 : * @fwd: Searching forward or back
5252 : */
5253 : static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
5254 : unsigned long max, unsigned long size, bool fwd)
5255 : {
5256 0 : if (!unlikely(mas_is_none(mas)) && min == 0) {
5257 0 : min++;
5258 : /*
5259 : * Since min has been increased, recheck whether the
5260 : * requested size still fits.
5261 : */
5262 0 : if (min > max || max - min + 1 < size)
5263 : return -EBUSY;
5264 : }
5265 : /* mas_is_ptr */
5266 :
5267 : if (fwd) {
5268 0 : mas->index = min;
5269 0 : mas->last = min + size - 1;
5270 : } else {
5271 0 : mas->last = max;
5272 0 : mas->index = max - size + 1;
5273 : }
5274 : return 0;
5275 : }
5276 :
5277 : /*
5278 : * mas_empty_area() - Get the lowest address within the range that is
5279 : * sufficient for the size requested.
5280 : * @mas: The maple state
5281 : * @min: The lowest value of the range
5282 : * @max: The highest value of the range
5283 : * @size: The size needed
5284 : */
5285 0 : int mas_empty_area(struct ma_state *mas, unsigned long min,
5286 : unsigned long max, unsigned long size)
5287 : {
5288 : unsigned char offset;
5289 : unsigned long *pivots;
5290 : enum maple_type mt;
5291 :
5292 0 : if (min >= max)
5293 : return -EINVAL;
5294 :
5295 0 : if (mas_is_start(mas))
5296 0 : mas_start(mas);
5297 0 : else if (mas->offset >= 2)
5298 0 : mas->offset -= 2;
5299 0 : else if (!mas_skip_node(mas))
5300 : return -EBUSY;
5301 :
5302 : /* Empty set */
5303 0 : if (mas_is_none(mas) || mas_is_ptr(mas))
5304 : return mas_sparse_area(mas, min, max, size, true);
5305 :
5306 : /* The start of the window can only be within these values */
5307 0 : mas->index = min;
5308 0 : mas->last = max;
5309 0 : mas_awalk(mas, size);
5310 :
5311 0 : if (unlikely(mas_is_err(mas)))
5312 0 : return xa_err(mas->node);
5313 :
5314 0 : offset = mas->offset;
5315 0 : if (unlikely(offset == MAPLE_NODE_SLOTS))
5316 : return -EBUSY;
5317 :
5318 0 : mt = mte_node_type(mas->node);
5319 0 : pivots = ma_pivots(mas_mn(mas), mt);
5320 0 : min = mas_safe_min(mas, pivots, offset);
5321 0 : if (mas->index < min)
5322 0 : mas->index = min;
5323 0 : mas->last = mas->index + size - 1;
5324 0 : return 0;
5325 : }
5326 : EXPORT_SYMBOL_GPL(mas_empty_area);
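: /*
: * Usage sketch (illustrative only; my_tree, my_min, my_max and my_size
: * are placeholders). The search is typically done under the tree lock so
: * the gap can be filled before the lock is dropped; on success mas.index
: * through mas.last covers a free area of the requested size:
: *
: *	MA_STATE(mas, &my_tree, 0, 0);
: *	int ret;
: *
: *	mas_lock(&mas);
: *	ret = mas_empty_area(&mas, my_min, my_max, my_size);
: *	mas_unlock(&mas);
: */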
5327 :
5328 : /*
5329 : * mas_empty_area_rev() - Get the highest address within the range that is
5330 : * sufficient for the size requested.
5331 : * @mas: The maple state
5332 : * @min: The lowest value of the range
5333 : * @max: The highest value of the range
5334 : * @size: The size needed
5335 : */
5336 0 : int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5337 : unsigned long max, unsigned long size)
5338 : {
5339 0 : struct maple_enode *last = mas->node;
5340 :
5341 0 : if (min >= max)
5342 : return -EINVAL;
5343 :
5344 0 : if (mas_is_start(mas)) {
5345 0 : mas_start(mas);
5346 0 : mas->offset = mas_data_end(mas);
5347 0 : } else if (mas->offset >= 2) {
5348 0 : mas->offset -= 2;
5349 0 : } else if (!mas_rewind_node(mas)) {
5350 : return -EBUSY;
5351 : }
5352 :
5353 : /* Empty set. */
5354 0 : if (mas_is_none(mas) || mas_is_ptr(mas))
5355 0 : return mas_sparse_area(mas, min, max, size, false);
5356 :
5357 : /* The start of the window can only be within these values. */
5358 0 : mas->index = min;
5359 0 : mas->last = max;
5360 :
5361 0 : while (!mas_rev_awalk(mas, size, &min, &max)) {
5362 0 : if (last == mas->node) {
5363 0 : if (!mas_rewind_node(mas))
5364 : return -EBUSY;
5365 : } else {
5366 : last = mas->node;
5367 : }
5368 : }
5369 :
5370 0 : if (mas_is_err(mas))
5371 0 : return xa_err(mas->node);
5372 :
5373 0 : if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5374 : return -EBUSY;
5375 :
5376 : /* Trim the upper limit to the max. */
5377 0 : if (max <= mas->last)
5378 0 : mas->last = max;
5379 :
5380 0 : mas->index = mas->last - size + 1;
5381 0 : return 0;
5382 : }
5383 : EXPORT_SYMBOL_GPL(mas_empty_area_rev);
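/*
 * Illustrative sketch, not part of the original source: the reverse search
 * supports top-down placement, i.e. taking the highest suitable gap below a
 * limit.  Names are hypothetical; an allocation-range tree is assumed.
 *
 *	MA_STATE(mas, &mt_alloc, 0, 0);
 *
 *	mas_lock(&mas);
 *	if (!mas_empty_area_rev(&mas, low, high, size))
 *		pr_debug("gap found at [%lu, %lu]\n", mas.index, mas.last);
 *	mas_unlock(&mas);
 */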
5384 :
5385 0 : static inline int mas_alloc(struct ma_state *mas, void *entry,
5386 : unsigned long size, unsigned long *index)
5387 : {
5388 : unsigned long min;
5389 :
5390 0 : mas_start(mas);
5391 0 : if (mas_is_none(mas) || mas_is_ptr(mas)) {
5392 0 : mas_root_expand(mas, entry);
5393 0 : if (mas_is_err(mas))
5394 0 : return xa_err(mas->node);
5395 :
5396 0 : if (!mas->index)
5397 0 : return mte_pivot(mas->node, 0);
5398 0 : return mte_pivot(mas->node, 1);
5399 : }
5400 :
5401 : /* Must be walking a tree. */
5402 0 : mas_awalk(mas, size);
5403 0 : if (mas_is_err(mas))
5404 0 : return xa_err(mas->node);
5405 :
5406 0 : if (mas->offset == MAPLE_NODE_SLOTS)
5407 : goto no_gap;
5408 :
5409 : /*
5410 : * At this point, mas->node points to the right node and we have an
5411 : * offset that has a sufficient gap.
5412 : */
5413 0 : min = mas->min;
5414 0 : if (mas->offset)
5415 0 : min = mte_pivot(mas->node, mas->offset - 1) + 1;
5416 :
5417 0 : if (mas->index < min)
5418 0 : mas->index = min;
5419 :
5420 0 : mas_fill_gap(mas, entry, mas->offset, size, index);
5421 0 : return 0;
5422 :
5423 : no_gap:
5424 : return -EBUSY;
5425 : }
5426 :
5427 0 : static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
5428 : unsigned long max, void *entry,
5429 : unsigned long size, unsigned long *index)
5430 : {
5431 0 : int ret = 0;
5432 :
5433 0 : ret = mas_empty_area_rev(mas, min, max, size);
5434 0 : if (ret)
5435 : return ret;
5436 :
5437 0 : if (mas_is_err(mas))
5438 0 : return xa_err(mas->node);
5439 :
5440 0 : if (mas->offset == MAPLE_NODE_SLOTS)
5441 : goto no_gap;
5442 :
5443 0 : mas_fill_gap(mas, entry, mas->offset, size, index);
5444 0 : return 0;
5445 :
5446 : no_gap:
5447 : return -EBUSY;
5448 : }
5449 :
5450 : /*
5451 : * mte_dead_leaves() - Mark all leaves of a node as dead.
5452 : * @enode: The maple encoded node
5453 : * @mt: The maple tree
5454 : * @slots: Pointer to the slot array
5455 : *
5456 : * Must hold the write lock.
5457 : *
5458 : * Return: The number of leaves marked as dead.
5459 : */
5460 : static inline
5461 0 : unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
5462 : void __rcu **slots)
5463 : {
5464 : struct maple_node *node;
5465 : enum maple_type type;
5466 : void *entry;
5467 : int offset;
5468 :
5469 0 : for (offset = 0; offset < mt_slot_count(enode); offset++) {
5470 0 : entry = mt_slot(mt, slots, offset);
5471 0 : type = mte_node_type(entry);
5472 0 : node = mte_to_node(entry);
5473 : /* Use both node and type to catch LE & BE metadata */
5474 0 : if (!node || !type)
5475 : break;
5476 :
5477 0 : mte_set_node_dead(entry);
5478 0 : node->type = type;
5479 0 : rcu_assign_pointer(slots[offset], node);
5480 : }
5481 :
5482 0 : return offset;
5483 : }
5484 :
5485 : /**
5486 : * mte_dead_walk() - Walk down a dead tree to just before the leaves
5487 : * @enode: The maple encoded node
5488 : * @offset: The starting offset
5489 : *
5490 : * Note: This can only be used from the RCU callback context.
5491 : */
5492 : static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
5493 : {
5494 : struct maple_node *node, *next;
5495 0 : void __rcu **slots = NULL;
5496 :
5497 0 : next = mte_to_node(*enode);
5498 : do {
5499 0 : *enode = ma_enode_ptr(next);
5500 0 : node = mte_to_node(*enode);
5501 0 : slots = ma_slots(node, node->type);
5502 0 : next = rcu_dereference_protected(slots[offset],
5503 : lock_is_held(&rcu_callback_map));
5504 0 : offset = 0;
5505 0 : } while (!ma_is_leaf(next->type));
5506 :
5507 : return slots;
5508 : }
5509 :
5510 : /**
5511 : * mt_free_walk() - Walk & free a tree in the RCU callback context
5512 : * @head: The RCU head that's within the node.
5513 : *
5514 : * Note: This can only be used from the RCU callback context.
5515 : */
5516 0 : static void mt_free_walk(struct rcu_head *head)
5517 : {
5518 : void __rcu **slots;
5519 : struct maple_node *node, *start;
5520 : struct maple_enode *enode;
5521 : unsigned char offset;
5522 : enum maple_type type;
5523 :
5524 0 : node = container_of(head, struct maple_node, rcu);
5525 :
5526 0 : if (ma_is_leaf(node->type))
5527 : goto free_leaf;
5528 :
5529 0 : start = node;
5530 0 : enode = mt_mk_node(node, node->type);
5531 : slots = mte_dead_walk(&enode, 0);
5532 : node = mte_to_node(enode);
5533 : do {
5534 0 : mt_free_bulk(node->slot_len, slots);
5535 0 : offset = node->parent_slot + 1;
5536 0 : enode = node->piv_parent;
5537 0 : if (mte_to_node(enode) == node)
5538 : goto free_leaf;
5539 :
5540 0 : type = mte_node_type(enode);
5541 0 : slots = ma_slots(mte_to_node(enode), type);
5542 0 : if ((offset < mt_slots[type]) &&
5543 0 : rcu_dereference_protected(slots[offset],
5544 : lock_is_held(&rcu_callback_map)))
5545 : slots = mte_dead_walk(&enode, offset);
5546 0 : node = mte_to_node(enode);
5547 0 : } while ((node != start) || (node->slot_len < offset));
5548 :
5549 0 : slots = ma_slots(node, node->type);
5550 0 : mt_free_bulk(node->slot_len, slots);
5551 :
5552 : free_leaf:
5553 0 : mt_free_rcu(&node->rcu);
5554 0 : }
5555 :
5556 0 : static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
5557 : struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
5558 : {
5559 : struct maple_node *node;
5560 0 : struct maple_enode *next = *enode;
5561 0 : void __rcu **slots = NULL;
5562 : enum maple_type type;
5563 0 : unsigned char next_offset = 0;
5564 :
5565 : do {
5566 0 : *enode = next;
5567 0 : node = mte_to_node(*enode);
5568 0 : type = mte_node_type(*enode);
5569 0 : slots = ma_slots(node, type);
5570 0 : next = mt_slot_locked(mt, slots, next_offset);
5571 0 : if ((mte_dead_node(next)))
5572 0 : next = mt_slot_locked(mt, slots, ++next_offset);
5573 :
5574 0 : mte_set_node_dead(*enode);
5575 0 : node->type = type;
5576 0 : node->piv_parent = prev;
5577 0 : node->parent_slot = offset;
5578 0 : offset = next_offset;
5579 0 : next_offset = 0;
5580 0 : prev = *enode;
5581 0 : } while (!mte_is_leaf(next));
5582 :
5583 0 : return slots;
5584 : }
5585 :
5586 0 : static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
5587 : bool free)
5588 : {
5589 : void __rcu **slots;
5590 0 : struct maple_node *node = mte_to_node(enode);
5591 : struct maple_enode *start;
5592 :
5593 0 : if (mte_is_leaf(enode)) {
5594 0 : node->type = mte_node_type(enode);
5595 0 : goto free_leaf;
5596 : }
5597 :
5598 0 : start = enode;
5599 0 : slots = mte_destroy_descend(&enode, mt, start, 0);
5600 0 : node = mte_to_node(enode); // Updated in the above call.
5601 : do {
5602 : enum maple_type type;
5603 : unsigned char offset;
5604 : struct maple_enode *parent, *tmp;
5605 :
5606 0 : node->slot_len = mte_dead_leaves(enode, mt, slots);
5607 0 : if (free)
5608 0 : mt_free_bulk(node->slot_len, slots);
5609 0 : offset = node->parent_slot + 1;
5610 0 : enode = node->piv_parent;
5611 0 : if (mte_to_node(enode) == node)
5612 : goto free_leaf;
5613 :
5614 0 : type = mte_node_type(enode);
5615 0 : slots = ma_slots(mte_to_node(enode), type);
5616 0 : if (offset >= mt_slots[type])
5617 : goto next;
5618 :
5619 0 : tmp = mt_slot_locked(mt, slots, offset);
5620 0 : if (mte_node_type(tmp) && mte_to_node(tmp)) {
5621 0 : parent = enode;
5622 0 : enode = tmp;
5623 0 : slots = mte_destroy_descend(&enode, mt, parent, offset);
5624 : }
5625 : next:
5626 0 : node = mte_to_node(enode);
5627 0 : } while (start != enode);
5628 :
5629 0 : node = mte_to_node(enode);
5630 0 : node->slot_len = mte_dead_leaves(enode, mt, slots);
5631 0 : if (free)
5632 0 : mt_free_bulk(node->slot_len, slots);
5633 :
5634 : free_leaf:
5635 0 : if (free)
5636 0 : mt_free_rcu(&node->rcu);
5637 : else
5638 0 : mt_clear_meta(mt, node, node->type);
5639 0 : }
5640 :
5641 : /*
5642 : * mte_destroy_walk() - Free a tree or sub-tree.
5643 : * @enode: the encoded maple node (maple_enode) to start
5644 : * @mt: the tree to free - needed for node types.
5645 : *
5646 : * Must hold the write lock.
5647 : */
5648 0 : static inline void mte_destroy_walk(struct maple_enode *enode,
5649 : struct maple_tree *mt)
5650 : {
5651 0 : struct maple_node *node = mte_to_node(enode);
5652 :
5653 0 : if (mt_in_rcu(mt)) {
5654 0 : mt_destroy_walk(enode, mt, false);
5655 0 : call_rcu(&node->rcu, mt_free_walk);
5656 : } else {
5657 0 : mt_destroy_walk(enode, mt, true);
5658 : }
5659 0 : }
5660 :
5661 0 : static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5662 : {
5663 0 : if (unlikely(mas_is_paused(wr_mas->mas)))
5664 0 : mas_reset(wr_mas->mas);
5665 :
5666 0 : if (!mas_is_start(wr_mas->mas)) {
5667 0 : if (mas_is_none(wr_mas->mas)) {
5668 0 : mas_reset(wr_mas->mas);
5669 : } else {
5670 0 : wr_mas->r_max = wr_mas->mas->max;
5671 0 : wr_mas->type = mte_node_type(wr_mas->mas->node);
5672 0 : if (mas_is_span_wr(wr_mas))
5673 0 : mas_reset(wr_mas->mas);
5674 : }
5675 : }
5676 0 : }
5677 :
5678 : /* Interface */
5679 :
5680 : /**
5681 : * mas_store() - Store an @entry.
5682 : * @mas: The maple state.
5683 : * @entry: The entry to store.
5684 : *
5685 : * The @mas->index and @mas->last is used to set the range for the @entry.
5686 : * Note: The @mas should have pre-allocated entries to ensure there is memory to
5687 : * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
5688 : *
5689 : * Return: the first entry between mas->index and mas->last or %NULL.
5690 : */
5691 0 : void *mas_store(struct ma_state *mas, void *entry)
5692 : {
5693 0 : MA_WR_STATE(wr_mas, mas, entry);
5694 :
5695 0 : trace_ma_write(__func__, mas, 0, entry);
5696 : #ifdef CONFIG_DEBUG_MAPLE_TREE
5697 : if (mas->index > mas->last)
5698 : pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry);
5699 : MT_BUG_ON(mas->tree, mas->index > mas->last);
5700 : if (mas->index > mas->last) {
5701 : mas_set_err(mas, -EINVAL);
5702 : return NULL;
5703 : }
5704 :
5705 : #endif
5706 :
5707 : /*
5708 : * Storing is the same operation as insert with the added caveat that it
5709 : * can overwrite entries. Although this seems simple enough, one may
5710 : * want to examine what happens if a single store operation was to
5711 : * overwrite multiple entries within a self-balancing B-Tree.
5712 : */
5713 0 : mas_wr_store_setup(&wr_mas);
5714 0 : mas_wr_store_entry(&wr_mas);
5715 0 : return wr_mas.content;
5716 : }
5717 : EXPORT_SYMBOL_GPL(mas_store);
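/*
 * Illustrative sketch, not part of the original source: the written range is
 * taken from mas->index and mas->last, so a caller that has preallocated
 * nodes (see mas_preallocate() or mas_expected_entries()) can update a range
 * under the lock without risking an allocation failure.  mas_set_range() is
 * assumed from the public header; the entry name is hypothetical.
 *
 *	mas_set_range(&mas, 0x1000, 0x1fff);
 *	old = mas_store(&mas, entry);
 *
 * The return value is the first entry previously overlapping the range, or
 * NULL if the range was empty.
 */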
5718 :
5719 : /**
5720 : * mas_store_gfp() - Store a value into the tree.
5721 : * @mas: The maple state
5722 : * @entry: The entry to store
5723 : * @gfp: The GFP_FLAGS to use for allocations if necessary.
5724 : *
5725 : * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
5726 : * be allocated.
5727 : */
5728 0 : int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5729 : {
5730 0 : MA_WR_STATE(wr_mas, mas, entry);
5731 :
5732 0 : mas_wr_store_setup(&wr_mas);
5733 0 : trace_ma_write(__func__, mas, 0, entry);
5734 : retry:
5735 0 : mas_wr_store_entry(&wr_mas);
5736 0 : if (unlikely(mas_nomem(mas, gfp)))
5737 : goto retry;
5738 :
5739 0 : if (unlikely(mas_is_err(mas)))
5740 0 : return xa_err(mas->node);
5741 :
5742 : return 0;
5743 : }
5744 : EXPORT_SYMBOL_GPL(mas_store_gfp);
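/*
 * Illustrative sketch, not part of the original source: mas_store_gfp() hides
 * the allocate-and-retry loop shown above, so a simple locked update becomes
 * (names hypothetical):
 *
 *	MA_STATE(mas, &tree, first, last);
 *	int err;
 *
 *	mas_lock(&mas);
 *	err = mas_store_gfp(&mas, entry, GFP_KERNEL);
 *	mas_unlock(&mas);
 *
 * GFP_KERNEL is acceptable here because mas_nomem() drops the tree lock
 * around blocking allocations when the lock is internal.
 */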
5745 :
5746 : /**
5747 : * mas_store_prealloc() - Store a value into the tree using memory
5748 : * preallocated in the maple state.
5749 : * @mas: The maple state
5750 : * @entry: The entry to store.
5751 : */
5752 0 : void mas_store_prealloc(struct ma_state *mas, void *entry)
5753 : {
5754 0 : MA_WR_STATE(wr_mas, mas, entry);
5755 :
5756 0 : mas_wr_store_setup(&wr_mas);
5757 0 : trace_ma_write(__func__, mas, 0, entry);
5758 0 : mas_wr_store_entry(&wr_mas);
5759 0 : BUG_ON(mas_is_err(mas));
5760 0 : mas_destroy(mas);
5761 0 : }
5762 : EXPORT_SYMBOL_GPL(mas_store_prealloc);
5763 :
5764 : /**
5765 : * mas_preallocate() - Preallocate enough nodes for a store operation
5766 : * @mas: The maple state
5767 : * @gfp: The GFP_FLAGS to use for allocations.
5768 : *
5769 : * Return: 0 on success, -ENOMEM if memory could not be allocated.
5770 : */
5771 0 : int mas_preallocate(struct ma_state *mas, gfp_t gfp)
5772 : {
5773 : int ret;
5774 :
5775 0 : mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp);
5776 0 : mas->mas_flags |= MA_STATE_PREALLOC;
5777 0 : if (likely(!mas_is_err(mas)))
5778 : return 0;
5779 :
5780 0 : mas_set_alloc_req(mas, 0);
5781 0 : ret = xa_err(mas->node);
5782 0 : mas_reset(mas);
5783 0 : mas_destroy(mas);
5784 0 : mas_reset(mas);
5785 0 : return ret;
5786 : }
5787 : EXPORT_SYMBOL_GPL(mas_preallocate);
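/*
 * Illustrative sketch, not part of the original source: the preallocation
 * pair is for callers that must not fail or sleep once they start modifying
 * the tree (names hypothetical):
 *
 *	MA_STATE(mas, &tree, first, last);
 *
 *	if (mas_preallocate(&mas, GFP_KERNEL))
 *		return -ENOMEM;
 *	mas_lock(&mas);
 *	mas_store_prealloc(&mas, entry);
 *	mas_unlock(&mas);
 *
 * mas_store_prealloc() calls mas_destroy() itself, freeing any unused nodes.
 */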
5788 :
5789 : /*
5790 : * mas_destroy() - destroy a maple state.
5791 : * @mas: The maple state
5792 : *
5793 : * Upon completion, check the left-most node and rebalance against the node to
5794 : * the right if necessary. Frees any allocated nodes associated with this maple
5795 : * state.
5796 : */
5797 0 : void mas_destroy(struct ma_state *mas)
5798 : {
5799 : struct maple_alloc *node;
5800 : unsigned long total;
5801 :
5802 : /*
5803 : * When using mas_for_each() to insert an expected number of elements,
5804 : * it is possible that the number inserted is less than the expected
5805 : * number. To fix an invalid final node, a check is performed here to
5806 : * rebalance the previous node with the final node.
5807 : */
5808 0 : if (mas->mas_flags & MA_STATE_REBALANCE) {
5809 : unsigned char end;
5810 :
5811 0 : if (mas_is_start(mas))
5812 0 : mas_start(mas);
5813 :
5814 0 : mtree_range_walk(mas);
5815 0 : end = mas_data_end(mas) + 1;
5816 0 : if (end < mt_min_slot_count(mas->node) - 1)
5817 0 : mas_destroy_rebalance(mas, end);
5818 :
5819 0 : mas->mas_flags &= ~MA_STATE_REBALANCE;
5820 : }
5821 0 : mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5822 :
5823 0 : total = mas_allocated(mas);
5824 0 : while (total) {
5825 0 : node = mas->alloc;
5826 0 : mas->alloc = node->slot[0];
5827 0 : if (node->node_count > 1) {
5828 0 : size_t count = node->node_count - 1;
5829 :
5830 0 : mt_free_bulk(count, (void __rcu **)&node->slot[1]);
5831 0 : total -= count;
5832 : }
5833 0 : kmem_cache_free(maple_node_cache, node);
5834 0 : total--;
5835 : }
5836 :
5837 0 : mas->alloc = NULL;
5838 0 : }
5839 : EXPORT_SYMBOL_GPL(mas_destroy);
5840 :
5841 : /*
5842 : * mas_expected_entries() - Set the expected number of entries that will be inserted.
5843 : * @mas: The maple state
5844 : * @nr_entries: The number of expected entries.
5845 : *
5846 : * This will attempt to pre-allocate enough nodes to store the expected number
5847 : * of entries. The allocations will occur using the bulk allocator interface
5848 : * for speed. Please call mas_destroy() on the @mas after inserting the entries
5849 : * to ensure any unused nodes are freed.
5850 : *
5851 : * Return: 0 on success, -ENOMEM if memory could not be allocated.
5852 : */
5853 0 : int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5854 : {
5855 0 : int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5856 0 : struct maple_enode *enode = mas->node;
5857 : int nr_nodes;
5858 : int ret;
5859 :
5860 : /*
5861 : * Sometimes it is necessary to duplicate a tree to a new tree, such as
5862 : * forking a process and duplicating the VMAs from one tree to a new
5863 : * tree. When such a situation arises, it is known that the new tree is
5864 : * not going to be used until the entire tree is populated. For
5865 : * performance reasons, it is best to use a bulk load with RCU disabled.
5866 : * This allows for optimistic splitting that favours the left and reuse
5867 : * of nodes during the operation.
5868 : */
5869 :
5870 : /* Optimize splitting for bulk insert in-order */
5871 0 : mas->mas_flags |= MA_STATE_BULK;
5872 :
5873 : /*
5874 : * Avoid overflow, assume a gap between each entry and a trailing null.
5875 : * If this is wrong, it just means allocation can happen during
5876 : * insertion of entries.
5877 : */
5878 0 : nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5879 0 : if (!mt_is_alloc(mas->tree))
5880 0 : nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5881 :
5882 : /* Leaves; reduce slots to keep space for expansion */
5883 0 : nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5884 : /* Internal nodes */
5885 0 : nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5886 : /* Add working room for split (2 nodes) + new parents */
5887 0 : mas_node_count(mas, nr_nodes + 3);
5888 :
5889 : /* Detect if allocations run out */
5890 0 : mas->mas_flags |= MA_STATE_PREALLOC;
5891 :
5892 0 : if (!mas_is_err(mas))
5893 : return 0;
5894 :
5895 0 : ret = xa_err(mas->node);
5896 0 : mas->node = enode;
5897 0 : mas_destroy(mas);
5898 0 : return ret;
5899 :
5900 : }
5901 : EXPORT_SYMBOL_GPL(mas_expected_entries);
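/*
 * Illustrative sketch, not part of the original source: the bulk-insert
 * pattern used when duplicating a tree, storing ranges in ascending order
 * into a fresh tree.  The source array and names are hypothetical.
 *
 *	MA_STATE(mas, &new_tree, 0, 0);
 *	int i;
 *
 *	mas_lock(&mas);
 *	if (!mas_expected_entries(&mas, nr)) {
 *		for (i = 0; i < nr; i++) {
 *			mas.index = src[i].start;
 *			mas.last = src[i].last;
 *			mas_store(&mas, src[i].entry);
 *		}
 *	}
 *	mas_destroy(&mas);
 *	mas_unlock(&mas);
 */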
5902 :
5903 : /**
5904 : * mas_next() - Get the next entry.
5905 : * @mas: The maple state
5906 : * @max: The maximum index to check.
5907 : *
5908 : * Returns the next entry after @mas->index.
5909 : * Must hold rcu_read_lock or the write lock.
5910 : * Can return the zero entry.
5911 : *
5912 : * Return: The next entry or %NULL
5913 : */
5914 0 : void *mas_next(struct ma_state *mas, unsigned long max)
5915 : {
5916 0 : if (mas_is_none(mas) || mas_is_paused(mas))
5917 0 : mas->node = MAS_START;
5918 :
5919 0 : if (mas_is_start(mas))
5920 0 : mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5921 :
5922 0 : if (mas_is_ptr(mas)) {
5923 0 : if (!mas->index) {
5924 0 : mas->index = 1;
5925 0 : mas->last = ULONG_MAX;
5926 : }
5927 : return NULL;
5928 : }
5929 :
5930 0 : if (mas->last == ULONG_MAX)
5931 : return NULL;
5932 :
5933 : /* Retries on dead nodes handled by mas_next_entry */
5934 0 : return mas_next_entry(mas, max);
5935 : }
5936 : EXPORT_SYMBOL_GPL(mas_next);
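/*
 * Illustrative sketch, not part of the original source: mas_next() continues
 * from the current state, so entries strictly after the starting index can
 * be visited without restarting the walk each time (names hypothetical):
 *
 *	MA_STATE(mas, &tree, index, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_next(&mas, ULONG_MAX)) != NULL)
 *		nr_entries++;
 *	rcu_read_unlock();
 */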
5937 :
5938 : /**
5939 : * mt_next() - get the next value in the maple tree
5940 : * @mt: The maple tree
5941 : * @index: The start index
5942 : * @max: The maximum index to check
5943 : *
5944 : * Return: The entry higher than @index, or %NULL if nothing is found.
5945 : */
5946 0 : void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5947 : {
5948 0 : void *entry = NULL;
5949 0 : MA_STATE(mas, mt, index, index);
5950 :
5951 : rcu_read_lock();
5952 0 : entry = mas_next(&mas, max);
5953 : rcu_read_unlock();
5954 0 : return entry;
5955 : }
5956 : EXPORT_SYMBOL_GPL(mt_next);
5957 :
5958 : /**
5959 : * mas_prev() - Get the previous entry
5960 : * @mas: The maple state
5961 : * @min: The minimum value to check.
5962 : *
5963 : * Must hold rcu_read_lock or the write lock.
5964 : * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on
5965 : * non-searchable nodes.
5966 : *
5967 : * Return: the previous value or %NULL.
5968 : */
5969 0 : void *mas_prev(struct ma_state *mas, unsigned long min)
5970 : {
5971 0 : if (!mas->index) {
5972 : /* Nothing comes before 0 */
5973 0 : mas->last = 0;
5974 0 : mas->node = MAS_NONE;
5975 0 : return NULL;
5976 : }
5977 :
5978 0 : if (unlikely(mas_is_ptr(mas)))
5979 : return NULL;
5980 :
5981 0 : if (mas_is_none(mas) || mas_is_paused(mas))
5982 0 : mas->node = MAS_START;
5983 :
5984 0 : if (mas_is_start(mas)) {
5985 0 : mas_walk(mas);
5986 0 : if (!mas->index)
5987 : return NULL;
5988 : }
5989 :
5990 0 : if (mas_is_ptr(mas)) {
5991 0 : if (!mas->index) {
5992 0 : mas->last = 0;
5993 0 : return NULL;
5994 : }
5995 :
5996 0 : mas->index = mas->last = 0;
5997 0 : return mas_root_locked(mas);
5998 : }
5999 0 : return mas_prev_entry(mas, min);
6000 : }
6001 : EXPORT_SYMBOL_GPL(mas_prev);
6002 :
6003 : /**
6004 : * mt_prev() - get the previous value in the maple tree
6005 : * @mt: The maple tree
6006 : * @index: The start index
6007 : * @min: The minimum index to check
6008 : *
6009 : * Return: The entry lower than @index, or %NULL if nothing is found.
6010 : */
6011 0 : void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
6012 : {
6013 0 : void *entry = NULL;
6014 0 : MA_STATE(mas, mt, index, index);
6015 :
6016 : rcu_read_lock();
6017 0 : entry = mas_prev(&mas, min);
6018 : rcu_read_unlock();
6019 0 : return entry;
6020 : }
6021 : EXPORT_SYMBOL_GPL(mt_prev);
6022 :
6023 : /**
6024 : * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
6025 : * @mas: The maple state to pause
6026 : *
6027 : * Some users need to pause a walk and drop the lock they're holding in
6028 : * order to yield to a higher priority thread or carry out an operation
6029 : * on an entry. Those users should call this function before they drop
6030 : * the lock. It resets the @mas to be suitable for the next iteration
6031 : * of the loop after the user has reacquired the lock. If most entries
6032 : * found during a walk require you to call mas_pause(), the mt_for_each()
6033 : * iterator may be more appropriate.
6034 : *
6035 : */
6036 0 : void mas_pause(struct ma_state *mas)
6037 : {
6038 0 : mas->node = MAS_PAUSE;
6039 0 : }
6040 : EXPORT_SYMBOL_GPL(mas_pause);
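/*
 * Illustrative sketch, not part of the original source: a long walk can
 * yield by pausing the state, dropping the lock and resuming afterwards.
 * The reschedule check and names are hypothetical.
 *
 *	mas_lock(&mas);
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		process(entry);
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			mas_unlock(&mas);
 *			cond_resched();
 *			mas_lock(&mas);
 *		}
 *	}
 *	mas_unlock(&mas);
 */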
6041 :
6042 : /**
6043 : * mas_find() - On the first call, find the entry at or after mas->index up to
6044 : * %max. Otherwise, find the entry after mas->index.
6045 : * @mas: The maple state
6046 : * @max: The maximum value to check.
6047 : *
6048 : * Must hold rcu_read_lock or the write lock.
6049 : * If an entry exists, last and index are updated accordingly.
6050 : * May set @mas->node to MAS_NONE.
6051 : *
6052 : * Return: The entry or %NULL.
6053 : */
6054 0 : void *mas_find(struct ma_state *mas, unsigned long max)
6055 : {
6056 0 : if (unlikely(mas_is_paused(mas))) {
6057 0 : if (unlikely(mas->last == ULONG_MAX)) {
6058 0 : mas->node = MAS_NONE;
6059 0 : return NULL;
6060 : }
6061 0 : mas->node = MAS_START;
6062 0 : mas->index = ++mas->last;
6063 : }
6064 :
6065 0 : if (unlikely(mas_is_none(mas)))
6066 0 : mas->node = MAS_START;
6067 :
6068 0 : if (unlikely(mas_is_start(mas))) {
6069 : /* First run or continue */
6070 : void *entry;
6071 :
6072 0 : if (mas->index > max)
6073 : return NULL;
6074 :
6075 0 : entry = mas_walk(mas);
6076 0 : if (entry)
6077 : return entry;
6078 : }
6079 :
6080 0 : if (unlikely(!mas_searchable(mas)))
6081 : return NULL;
6082 :
6083 : /* Retries on dead nodes handled by mas_next_entry */
6084 0 : return mas_next_entry(mas, max);
6085 : }
6086 : EXPORT_SYMBOL_GPL(mas_find);
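/*
 * Illustrative sketch, not part of the original source: mas_find() is the
 * backbone of the mas_for_each() helper in the public header, which expands
 * to roughly the loop below (names hypothetical):
 *
 *	MA_STATE(mas, &tree, start, start);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_find(&mas, end)) != NULL)
 *		pr_debug("[%lu, %lu] -> %p\n", mas.index, mas.last, entry);
 *	rcu_read_unlock();
 */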
6087 :
6088 : /**
6089 : * mas_find_rev() - On the first call, find the first non-null entry at or below
6090 : * mas->index down to %min. Otherwise find the first non-null entry below
6091 : * mas->index down to %min.
6092 : * @mas: The maple state
6093 : * @min: The minimum value to check.
6094 : *
6095 : * Must hold rcu_read_lock or the write lock.
6096 : * If an entry exists, last and index are updated accordingly.
6097 : * May set @mas->node to MAS_NONE.
6098 : *
6099 : * Return: The entry or %NULL.
6100 : */
6101 0 : void *mas_find_rev(struct ma_state *mas, unsigned long min)
6102 : {
6103 0 : if (unlikely(mas_is_paused(mas))) {
6104 0 : if (unlikely(mas->last == ULONG_MAX)) {
6105 0 : mas->node = MAS_NONE;
6106 0 : return NULL;
6107 : }
6108 0 : mas->node = MAS_START;
6109 0 : mas->last = --mas->index;
6110 : }
6111 :
6112 0 : if (unlikely(mas_is_start(mas))) {
6113 : /* First run or continue */
6114 : void *entry;
6115 :
6116 0 : if (mas->index < min)
6117 : return NULL;
6118 :
6119 0 : entry = mas_walk(mas);
6120 0 : if (entry)
6121 : return entry;
6122 : }
6123 :
6124 0 : if (unlikely(!mas_searchable(mas)))
6125 : return NULL;
6126 :
6127 0 : if (mas->index < min)
6128 : return NULL;
6129 :
6130 : /* Retries on dead nodes handled by mas_prev_entry */
6131 0 : return mas_prev_entry(mas, min);
6132 : }
6133 : EXPORT_SYMBOL_GPL(mas_find_rev);
6134 :
6135 : /**
6136 : * mas_erase() - Find the range in which index resides and erase the entire
6137 : * range.
6138 : * @mas: The maple state
6139 : *
6140 : * Must hold the write lock.
6141 : * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6142 : * erases that range.
6143 : *
6144 : * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6145 : */
6146 0 : void *mas_erase(struct ma_state *mas)
6147 : {
6148 : void *entry;
6149 0 : MA_WR_STATE(wr_mas, mas, NULL);
6150 :
6151 0 : if (mas_is_none(mas) || mas_is_paused(mas))
6152 0 : mas->node = MAS_START;
6153 :
6154 : /* Retry unnecessary when holding the write lock. */
6155 0 : entry = mas_state_walk(mas);
6156 0 : if (!entry)
6157 : return NULL;
6158 :
6159 : write_retry:
6160 : /* Must reset to ensure spanning writes of last slot are detected */
6161 0 : mas_reset(mas);
6162 0 : mas_wr_store_setup(&wr_mas);
6163 0 : mas_wr_store_entry(&wr_mas);
6164 0 : if (mas_nomem(mas, GFP_KERNEL))
6165 : goto write_retry;
6166 :
6167 : return entry;
6168 : }
6169 : EXPORT_SYMBOL_GPL(mas_erase);
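/*
 * Illustrative sketch, not part of the original source: erasing through the
 * advanced API removes whatever range contains the index the state was set
 * to (names hypothetical):
 *
 *	MA_STATE(mas, &tree, index, index);
 *	void *old;
 *
 *	mas_lock(&mas);
 *	old = mas_erase(&mas);
 *	mas_unlock(&mas);
 *
 * old is the removed entry, or NULL if the index was already empty.
 */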
6170 :
6171 : /**
6172 : * mas_nomem() - Check if there was an error allocating and do the allocation
6173 : * if necessary. If there are allocations, then free them.
6174 : * @mas: The maple state
6175 : * @gfp: The GFP_FLAGS to use for allocations
6176 : * Return: true on allocation, false otherwise.
6177 : */
6178 0 : bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6179 : __must_hold(mas->tree->lock)
6180 : {
6181 0 : if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6182 0 : mas_destroy(mas);
6183 0 : return false;
6184 : }
6185 :
6186 0 : if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6187 0 : mtree_unlock(mas->tree);
6188 0 : mas_alloc_nodes(mas, gfp);
6189 0 : mtree_lock(mas->tree);
6190 : } else {
6191 0 : mas_alloc_nodes(mas, gfp);
6192 : }
6193 :
6194 0 : if (!mas_allocated(mas))
6195 : return false;
6196 :
6197 0 : mas->node = MAS_START;
6198 0 : return true;
6199 : }
6200 :
6201 1 : void __init maple_tree_init(void)
6202 : {
6203 1 : maple_node_cache = kmem_cache_create("maple_node",
6204 : sizeof(struct maple_node), sizeof(struct maple_node),
6205 : SLAB_PANIC, NULL);
6206 1 : }
6207 :
6208 : /**
6209 : * mtree_load() - Load a value stored in a maple tree
6210 : * @mt: The maple tree
6211 : * @index: The index to load
6212 : *
6213 : * Return: the entry or %NULL
6214 : */
6215 0 : void *mtree_load(struct maple_tree *mt, unsigned long index)
6216 : {
6217 0 : MA_STATE(mas, mt, index, index);
6218 : void *entry;
6219 :
6220 0 : trace_ma_read(__func__, &mas);
6221 : rcu_read_lock();
6222 : retry:
6223 0 : entry = mas_start(&mas);
6224 0 : if (unlikely(mas_is_none(&mas)))
6225 : goto unlock;
6226 :
6227 0 : if (unlikely(mas_is_ptr(&mas))) {
6228 0 : if (index)
6229 0 : entry = NULL;
6230 :
6231 : goto unlock;
6232 : }
6233 :
6234 0 : entry = mtree_lookup_walk(&mas);
6235 0 : if (!entry && unlikely(mas_is_start(&mas)))
6236 : goto retry;
6237 : unlock:
6238 0 : rcu_read_unlock();
6239 0 : if (xa_is_zero(entry))
6240 : return NULL;
6241 :
6242 0 : return entry;
6243 : }
6244 : EXPORT_SYMBOL(mtree_load);
6245 :
6246 : /**
6247 : * mtree_store_range() - Store an entry at a given range.
6248 : * @mt: The maple tree
6249 : * @index: The start of the range
6250 : * @last: The end of the range
6251 : * @entry: The entry to store
6252 : * @gfp: The GFP_FLAGS to use for allocations
6253 : *
6254 : * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6255 : * be allocated.
6256 : */
6257 0 : int mtree_store_range(struct maple_tree *mt, unsigned long index,
6258 : unsigned long last, void *entry, gfp_t gfp)
6259 : {
6260 0 : MA_STATE(mas, mt, index, last);
6261 0 : MA_WR_STATE(wr_mas, &mas, entry);
6262 :
6263 0 : trace_ma_write(__func__, &mas, 0, entry);
6264 0 : if (WARN_ON_ONCE(xa_is_advanced(entry)))
6265 : return -EINVAL;
6266 :
6267 0 : if (index > last)
6268 : return -EINVAL;
6269 :
6270 0 : mtree_lock(mt);
6271 : retry:
6272 0 : mas_wr_store_entry(&wr_mas);
6273 0 : if (mas_nomem(&mas, gfp))
6274 : goto retry;
6275 :
6276 0 : mtree_unlock(mt);
6277 0 : if (mas_is_err(&mas))
6278 0 : return xa_err(mas.node);
6279 :
6280 : return 0;
6281 : }
6282 : EXPORT_SYMBOL(mtree_store_range);
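/*
 * Illustrative sketch, not part of the original source: the normal API pairs
 * a range store with a point lookup anywhere inside that range.  Encoding the
 * value with xa_mk_value() is one common choice, not a requirement.
 *
 *	DEFINE_MTREE(tree);
 *
 *	mtree_store_range(&tree, 0x40, 0x4f, xa_mk_value(1), GFP_KERNEL);
 *	entry = mtree_load(&tree, 0x45);
 *	mtree_store_range(&tree, 0x40, 0x4f, NULL, GFP_KERNEL);
 *
 * The lookup returns xa_mk_value(1); the final store clears the range again.
 */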
6283 :
6284 : /**
6285 : * mtree_store() - Store an entry at a given index.
6286 : * @mt: The maple tree
6287 : * @index: The index to store the value
6288 : * @entry: The entry to store
6289 : * @gfp: The GFP_FLAGS to use for allocations
6290 : *
6291 : * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6292 : * be allocated.
6293 : */
6294 0 : int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6295 : gfp_t gfp)
6296 : {
6297 0 : return mtree_store_range(mt, index, index, entry, gfp);
6298 : }
6299 : EXPORT_SYMBOL(mtree_store);
6300 :
6301 : /**
6302 : * mtree_insert_range() - Insert an entry at a given range if there is no value.
6303 : * @mt: The maple tree
6304 : * @first: The start of the range
6305 : * @last: The end of the range
6306 : * @entry: The entry to store
6307 : * @gfp: The GFP_FLAGS to use for allocations.
6308 : *
6309 : * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6310 : * request, -ENOMEM if memory could not be allocated.
6311 : */
6312 0 : int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6313 : unsigned long last, void *entry, gfp_t gfp)
6314 : {
6315 0 : MA_STATE(ms, mt, first, last);
6316 :
6317 0 : if (WARN_ON_ONCE(xa_is_advanced(entry)))
6318 : return -EINVAL;
6319 :
6320 0 : if (first > last)
6321 : return -EINVAL;
6322 :
6323 0 : mtree_lock(mt);
6324 : retry:
6325 0 : mas_insert(&ms, entry);
6326 0 : if (mas_nomem(&ms, gfp))
6327 : goto retry;
6328 :
6329 0 : mtree_unlock(mt);
6330 0 : if (mas_is_err(&ms))
6331 0 : return xa_err(ms.node);
6332 :
6333 : return 0;
6334 : }
6335 : EXPORT_SYMBOL(mtree_insert_range);
6336 :
6337 : /**
6338 : * mtree_insert() - Insert an entry at a given index if there is no value.
6339 : * @mt: The maple tree
6340 : * @index: The index to store the value
6341 : * @entry: The entry to store
6342 : * @gfp: The GFP_FLAGS to use for allocations.
6343 : *
6344 : * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6345 : * request, -ENOMEM if memory could not be allocated.
6346 : */
6347 0 : int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6348 : gfp_t gfp)
6349 : {
6350 0 : return mtree_insert_range(mt, index, index, entry, gfp);
6351 : }
6352 : EXPORT_SYMBOL(mtree_insert);
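/*
 * Illustrative sketch, not part of the original source: insert differs from
 * store in that it refuses to overwrite, which makes it useful for reserving
 * an identifier (names hypothetical):
 *
 *	if (mtree_insert(&tree, id, ptr, GFP_KERNEL) == -EEXIST)
 *		return -EBUSY;
 *
 * Nothing is overwritten on failure; the existing entry remains in place.
 */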
6353 :
6354 0 : int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6355 : void *entry, unsigned long size, unsigned long min,
6356 : unsigned long max, gfp_t gfp)
6357 : {
6358 0 : int ret = 0;
6359 :
6360 0 : MA_STATE(mas, mt, min, max - size);
6361 0 : if (!mt_is_alloc(mt))
6362 : return -EINVAL;
6363 :
6364 0 : if (WARN_ON_ONCE(mt_is_reserved(entry)))
6365 : return -EINVAL;
6366 :
6367 0 : if (min > max)
6368 : return -EINVAL;
6369 :
6370 0 : if (max < size)
6371 : return -EINVAL;
6372 :
6373 0 : if (!size)
6374 : return -EINVAL;
6375 :
6376 0 : mtree_lock(mt);
6377 : retry:
6378 0 : mas.offset = 0;
6379 0 : mas.index = min;
6380 0 : mas.last = max - size;
6381 0 : ret = mas_alloc(&mas, entry, size, startp);
6382 0 : if (mas_nomem(&mas, gfp))
6383 : goto retry;
6384 :
6385 0 : mtree_unlock(mt);
6386 0 : return ret;
6387 : }
6388 : EXPORT_SYMBOL(mtree_alloc_range);
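/*
 * Illustrative sketch, not part of the original source: mtree_alloc_range()
 * requires a tree created with MT_FLAGS_ALLOC_RANGE; it finds the lowest free
 * range of the requested size, stores the entry there and reports the start.
 * Names are hypothetical.
 *
 *	struct maple_tree tree;
 *	unsigned long start;
 *
 *	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
 *	if (!mtree_alloc_range(&tree, &start, ptr, 16, 0, ULONG_MAX, GFP_KERNEL))
 *		pr_debug("reserved [%lu, %lu]\n", start, start + 15);
 */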
6389 :
6390 0 : int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6391 : void *entry, unsigned long size, unsigned long min,
6392 : unsigned long max, gfp_t gfp)
6393 : {
6394 0 : int ret = 0;
6395 :
6396 0 : MA_STATE(mas, mt, min, max - size);
6397 0 : if (!mt_is_alloc(mt))
6398 : return -EINVAL;
6399 :
6400 0 : if (WARN_ON_ONCE(mt_is_reserved(entry)))
6401 : return -EINVAL;
6402 :
6403 0 : if (min >= max)
6404 : return -EINVAL;
6405 :
6406 0 : if (max < size - 1)
6407 : return -EINVAL;
6408 :
6409 0 : if (!size)
6410 : return -EINVAL;
6411 :
6412 0 : mtree_lock(mt);
6413 : retry:
6414 0 : ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
6415 0 : if (mas_nomem(&mas, gfp))
6416 : goto retry;
6417 :
6418 0 : mtree_unlock(mt);
6419 0 : return ret;
6420 : }
6421 : EXPORT_SYMBOL(mtree_alloc_rrange);
6422 :
6423 : /**
6424 : * mtree_erase() - Find an index and erase the entire range.
6425 : * @mt: The maple tree
6426 : * @index: The index to erase
6427 : *
6428 : * Erasing is the same as a walk to an entry then a store of a NULL to that
6429 : * ENTIRE range. In fact, it is implemented as such using the advanced API.
6430 : *
6431 : * Return: The entry stored at the @index or %NULL
6432 : */
6433 0 : void *mtree_erase(struct maple_tree *mt, unsigned long index)
6434 : {
6435 0 : void *entry = NULL;
6436 :
6437 0 : MA_STATE(mas, mt, index, index);
6438 0 : trace_ma_op(__func__, &mas);
6439 :
6440 0 : mtree_lock(mt);
6441 0 : entry = mas_erase(&mas);
6442 0 : mtree_unlock(mt);
6443 :
6444 0 : return entry;
6445 : }
6446 : EXPORT_SYMBOL(mtree_erase);
6447 :
6448 : /**
6449 : * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6450 : * @mt: The maple tree
6451 : *
6452 : * Note: Does not handle locking.
6453 : */
6454 0 : void __mt_destroy(struct maple_tree *mt)
6455 : {
6456 0 : void *root = mt_root_locked(mt);
6457 :
6458 0 : rcu_assign_pointer(mt->ma_root, NULL);
6459 0 : if (xa_is_node(root))
6460 0 : mte_destroy_walk(root, mt);
6461 :
6462 0 : mt->ma_flags = 0;
6463 0 : }
6464 : EXPORT_SYMBOL_GPL(__mt_destroy);
6465 :
6466 : /**
6467 : * mtree_destroy() - Destroy a maple tree
6468 : * @mt: The maple tree
6469 : *
6470 : * Frees all resources used by the tree. Handles locking.
6471 : */
6472 0 : void mtree_destroy(struct maple_tree *mt)
6473 : {
6474 0 : mtree_lock(mt);
6475 0 : __mt_destroy(mt);
6476 0 : mtree_unlock(mt);
6477 0 : }
6478 : EXPORT_SYMBOL(mtree_destroy);
6479 :
6480 : /**
6481 : * mt_find() - Search from the start up until an entry is found.
6482 : * @mt: The maple tree
6483 : * @index: Pointer which contains the start location of the search
6484 : * @max: The maximum value to check
6485 : *
6486 : * Handles locking. @index will be incremented to one beyond the range.
6487 : *
6488 : * Return: The entry at or after the @index or %NULL
6489 : */
6490 0 : void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6491 : {
6492 0 : MA_STATE(mas, mt, *index, *index);
6493 : void *entry;
6494 : #ifdef CONFIG_DEBUG_MAPLE_TREE
6495 : unsigned long copy = *index;
6496 : #endif
6497 :
6498 0 : trace_ma_read(__func__, &mas);
6499 :
6500 0 : if ((*index) > max)
6501 : return NULL;
6502 :
6503 : rcu_read_lock();
6504 : retry:
6505 0 : entry = mas_state_walk(&mas);
6506 0 : if (mas_is_start(&mas))
6507 : goto retry;
6508 :
6509 0 : if (unlikely(xa_is_zero(entry)))
6510 0 : entry = NULL;
6511 :
6512 0 : if (entry)
6513 : goto unlock;
6514 :
6515 0 : while (mas_searchable(&mas) && (mas.index < max)) {
6516 0 : entry = mas_next_entry(&mas, max);
6517 0 : if (likely(entry && !xa_is_zero(entry)))
6518 : break;
6519 : }
6520 :
6521 0 : if (unlikely(xa_is_zero(entry)))
6522 0 : entry = NULL;
6523 : unlock:
6524 : rcu_read_unlock();
6525 0 : if (likely(entry)) {
6526 0 : *index = mas.last + 1;
6527 : #ifdef CONFIG_DEBUG_MAPLE_TREE
6528 : if ((*index) && (*index) <= copy)
6529 : pr_err("index not increased! %lx <= %lx\n",
6530 : *index, copy);
6531 : MT_BUG_ON(mt, (*index) && ((*index) <= copy));
6532 : #endif
6533 : }
6534 :
6535 : return entry;
6536 : }
6537 : EXPORT_SYMBOL(mt_find);
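/*
 * Illustrative sketch, not part of the original source: because mt_find()
 * advances *index past the returned range, it composes with mt_find_after()
 * into the mt_for_each() helper from the public header, roughly:
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	mt_for_each(&tree, entry, index, ULONG_MAX)
 *		pr_debug("found %p, next search starts at %lu\n", entry, index);
 */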
6538 :
6539 : /**
6540 : * mt_find_after() - Search from the start up until an entry is found.
6541 : * @mt: The maple tree
6542 : * @index: Pointer which contains the start location of the search
6543 : * @max: The maximum value to check
6544 : *
6545 : * Handles locking, detects wrapping on index == 0
6546 : *
6547 : * Return: The entry at or after the @index or %NULL
6548 : */
6549 0 : void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6550 : unsigned long max)
6551 : {
6552 0 : if (!(*index))
6553 : return NULL;
6554 :
6555 0 : return mt_find(mt, index, max);
6556 : }
6557 : EXPORT_SYMBOL(mt_find_after);
6558 :
6559 : #ifdef CONFIG_DEBUG_MAPLE_TREE
6560 : atomic_t maple_tree_tests_run;
6561 : EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6562 : atomic_t maple_tree_tests_passed;
6563 : EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6564 :
6565 : #ifndef __KERNEL__
6566 : extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6567 : void mt_set_non_kernel(unsigned int val)
6568 : {
6569 : kmem_cache_set_non_kernel(maple_node_cache, val);
6570 : }
6571 :
6572 : extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
6573 : unsigned long mt_get_alloc_size(void)
6574 : {
6575 : return kmem_cache_get_alloc(maple_node_cache);
6576 : }
6577 :
6578 : extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
6579 : void mt_zero_nr_tallocated(void)
6580 : {
6581 : kmem_cache_zero_nr_tallocated(maple_node_cache);
6582 : }
6583 :
6584 : extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
6585 : unsigned int mt_nr_tallocated(void)
6586 : {
6587 : return kmem_cache_nr_tallocated(maple_node_cache);
6588 : }
6589 :
6590 : extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
6591 : unsigned int mt_nr_allocated(void)
6592 : {
6593 : return kmem_cache_nr_allocated(maple_node_cache);
6594 : }
6595 :
6596 : /*
6597 : * mas_dead_node() - Check if the maple state is pointing to a dead node.
6598 : * @mas: The maple state
6599 : * @index: The index to restore in @mas.
6600 : *
6601 : * Used in test code.
6602 : * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
6603 : */
6604 : static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
6605 : {
6606 : if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
6607 : return 0;
6608 :
6609 : if (likely(!mte_dead_node(mas->node)))
6610 : return 0;
6611 :
6612 : mas_rewalk(mas, index);
6613 : return 1;
6614 : }
6615 :
6616 : void mt_cache_shrink(void)
6617 : {
6618 : }
6619 : #else
6620 : /*
6621 : * mt_cache_shrink() - For testing, don't use this.
6622 : *
6623 : * Certain testcases can trigger an OOM when combined with other memory
6624 : * debugging configuration options. This function is used to reduce the
6625 : * possibility of an out of memory event due to kmem_cache objects remaining
6626 : * around for longer than usual.
6627 : */
6628 : void mt_cache_shrink(void)
6629 : {
6630 : kmem_cache_shrink(maple_node_cache);
6631 :
6632 : }
6633 : EXPORT_SYMBOL_GPL(mt_cache_shrink);
6634 :
6635 : #endif /* not defined __KERNEL__ */
6636 : /*
6637 : * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6638 : * @mas: The maple state
6639 : * @offset: The offset into the slot array to fetch.
6640 : *
6641 : * Return: The entry stored at @offset.
6642 : */
6643 : static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
6644 : unsigned char offset)
6645 : {
6646 : return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
6647 : offset);
6648 : }
6649 :
6650 :
6651 : /*
6652 : * mas_first_entry() - Go to the first leaf and find the first entry.
6653 : * @mas: the maple state.
6654 : * @mn: the maple node backing @mas.
6655 : * @limit: the maximum index to check.
6656 : * @mt: the node type of @mn.
6657 : *
6658 : * Sets mas->offset to the offset of the entry and mas->index to the range start.
6659 : * Return: The first entry, or %NULL if there is none (mas->node may be set to MAS_NONE).
6660 : */
6661 : static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
6662 : unsigned long limit, enum maple_type mt)
6663 :
6664 : {
6665 : unsigned long max;
6666 : unsigned long *pivots;
6667 : void __rcu **slots;
6668 : void *entry = NULL;
6669 :
6670 : mas->index = mas->min;
6671 : if (mas->index > limit)
6672 : goto none;
6673 :
6674 : max = mas->max;
6675 : mas->offset = 0;
6676 : while (likely(!ma_is_leaf(mt))) {
6677 : MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6678 : slots = ma_slots(mn, mt);
6679 : entry = mas_slot(mas, slots, 0);
6680 : pivots = ma_pivots(mn, mt);
6681 : if (unlikely(ma_dead_node(mn)))
6682 : return NULL;
6683 : max = pivots[0];
6684 : mas->node = entry;
6685 : mn = mas_mn(mas);
6686 : mt = mte_node_type(mas->node);
6687 : }
6688 : MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6689 :
6690 : mas->max = max;
6691 : slots = ma_slots(mn, mt);
6692 : entry = mas_slot(mas, slots, 0);
6693 : if (unlikely(ma_dead_node(mn)))
6694 : return NULL;
6695 :
6696 : /* Slot 0 or 1 must be set */
6697 : if (mas->index > limit)
6698 : goto none;
6699 :
6700 : if (likely(entry))
6701 : return entry;
6702 :
6703 : mas->offset = 1;
6704 : entry = mas_slot(mas, slots, 1);
6705 : pivots = ma_pivots(mn, mt);
6706 : if (unlikely(ma_dead_node(mn)))
6707 : return NULL;
6708 :
6709 : mas->index = pivots[0] + 1;
6710 : if (mas->index > limit)
6711 : goto none;
6712 :
6713 : if (likely(entry))
6714 : return entry;
6715 :
6716 : none:
6717 : if (likely(!ma_dead_node(mn)))
6718 : mas->node = MAS_NONE;
6719 : return NULL;
6720 : }
6721 :
6722 : /* Depth first search, post-order */
6723 : static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
6724 : {
6725 :
6726 : struct maple_enode *p = MAS_NONE, *mn = mas->node;
6727 : unsigned long p_min, p_max;
6728 :
6729 : mas_next_node(mas, mas_mn(mas), max);
6730 : if (!mas_is_none(mas))
6731 : return;
6732 :
6733 : if (mte_is_root(mn))
6734 : return;
6735 :
6736 : mas->node = mn;
6737 : mas_ascend(mas);
6738 : while (mas->node != MAS_NONE) {
6739 : p = mas->node;
6740 : p_min = mas->min;
6741 : p_max = mas->max;
6742 : mas_prev_node(mas, 0);
6743 : }
6744 :
6745 : if (p == MAS_NONE)
6746 : return;
6747 :
6748 : mas->node = p;
6749 : mas->max = p_max;
6750 : mas->min = p_min;
6751 : }
6752 :
6753 : /* Tree validations */
6754 : static void mt_dump_node(const struct maple_tree *mt, void *entry,
6755 : unsigned long min, unsigned long max, unsigned int depth);
6756 : static void mt_dump_range(unsigned long min, unsigned long max,
6757 : unsigned int depth)
6758 : {
6759 : static const char spaces[] = " ";
6760 :
6761 : if (min == max)
6762 : pr_info("%.*s%lu: ", depth * 2, spaces, min);
6763 : else
6764 : pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
6765 : }
6766 :
6767 : static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
6768 : unsigned int depth)
6769 : {
6770 : mt_dump_range(min, max, depth);
6771 :
6772 : if (xa_is_value(entry))
6773 : pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
6774 : xa_to_value(entry), entry);
6775 : else if (xa_is_zero(entry))
6776 : pr_cont("zero (%ld)\n", xa_to_internal(entry));
6777 : else if (mt_is_reserved(entry))
6778 : pr_cont("UNKNOWN ENTRY (%p)\n", entry);
6779 : else
6780 : pr_cont("%p\n", entry);
6781 : }
6782 :
6783 : static void mt_dump_range64(const struct maple_tree *mt, void *entry,
6784 : unsigned long min, unsigned long max, unsigned int depth)
6785 : {
6786 : struct maple_range_64 *node = &mte_to_node(entry)->mr64;
6787 : bool leaf = mte_is_leaf(entry);
6788 : unsigned long first = min;
6789 : int i;
6790 :
6791 : pr_cont(" contents: ");
6792 : for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++)
6793 : pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6794 : pr_cont("%p\n", node->slot[i]);
6795 : for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
6796 : unsigned long last = max;
6797 :
6798 : if (i < (MAPLE_RANGE64_SLOTS - 1))
6799 : last = node->pivot[i];
6800 : else if (!node->slot[i] && max != mt_node_max(entry))
6801 : break;
6802 : if (last == 0 && i > 0)
6803 : break;
6804 : if (leaf)
6805 : mt_dump_entry(mt_slot(mt, node->slot, i),
6806 : first, last, depth + 1);
6807 : else if (node->slot[i])
6808 : mt_dump_node(mt, mt_slot(mt, node->slot, i),
6809 : first, last, depth + 1);
6810 :
6811 : if (last == max)
6812 : break;
6813 : if (last > max) {
6814 : pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6815 : node, last, max, i);
6816 : break;
6817 : }
6818 : first = last + 1;
6819 : }
6820 : }
6821 :
6822 : static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
6823 : unsigned long min, unsigned long max, unsigned int depth)
6824 : {
6825 : struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
6826 : bool leaf = mte_is_leaf(entry);
6827 : unsigned long first = min;
6828 : int i;
6829 :
6830 : pr_cont(" contents: ");
6831 : for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
6832 : pr_cont("%lu ", node->gap[i]);
6833 : pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
6834 : for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
6835 : pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6836 : pr_cont("%p\n", node->slot[i]);
6837 : for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
6838 : unsigned long last = max;
6839 :
6840 : if (i < (MAPLE_ARANGE64_SLOTS - 1))
6841 : last = node->pivot[i];
6842 : else if (!node->slot[i])
6843 : break;
6844 : if (last == 0 && i > 0)
6845 : break;
6846 : if (leaf)
6847 : mt_dump_entry(mt_slot(mt, node->slot, i),
6848 : first, last, depth + 1);
6849 : else if (node->slot[i])
6850 : mt_dump_node(mt, mt_slot(mt, node->slot, i),
6851 : first, last, depth + 1);
6852 :
6853 : if (last == max)
6854 : break;
6855 : if (last > max) {
6856 : pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6857 : node, last, max, i);
6858 : break;
6859 : }
6860 : first = last + 1;
6861 : }
6862 : }
6863 :
6864 : static void mt_dump_node(const struct maple_tree *mt, void *entry,
6865 : unsigned long min, unsigned long max, unsigned int depth)
6866 : {
6867 : struct maple_node *node = mte_to_node(entry);
6868 : unsigned int type = mte_node_type(entry);
6869 : unsigned int i;
6870 :
6871 : mt_dump_range(min, max, depth);
6872 :
6873 : pr_cont("node %p depth %d type %d parent %p", node, depth, type,
6874 : node ? node->parent : NULL);
6875 : switch (type) {
6876 : case maple_dense:
6877 : pr_cont("\n");
6878 : for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
6879 : if (min + i > max)
6880 : pr_cont("OUT OF RANGE: ");
6881 : mt_dump_entry(mt_slot(mt, node->slot, i),
6882 : min + i, min + i, depth);
6883 : }
6884 : break;
6885 : case maple_leaf_64:
6886 : case maple_range_64:
6887 : mt_dump_range64(mt, entry, min, max, depth);
6888 : break;
6889 : case maple_arange_64:
6890 : mt_dump_arange64(mt, entry, min, max, depth);
6891 : break;
6892 :
6893 : default:
6894 : pr_cont(" UNKNOWN TYPE\n");
6895 : }
6896 : }
6897 :
6898 : void mt_dump(const struct maple_tree *mt)
6899 : {
6900 : void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
6901 :
6902 : pr_info("maple_tree(%p) flags %X, height %u root %p\n",
6903 : mt, mt->ma_flags, mt_height(mt), entry);
6904 : if (!xa_is_node(entry))
6905 : mt_dump_entry(entry, 0, 0, 0);
6906 : else if (entry)
6907 : mt_dump_node(mt, entry, 0, mt_node_max(entry), 0);
6908 : }
6909 : EXPORT_SYMBOL_GPL(mt_dump);
6910 :
6911 : /*
6912 : * Calculate the maximum gap in a node and check if that's what is reported in
6913 : * the parent (unless root).
6914 : */
6915 : static void mas_validate_gaps(struct ma_state *mas)
6916 : {
6917 : struct maple_enode *mte = mas->node;
6918 : struct maple_node *p_mn;
6919 : unsigned long gap = 0, max_gap = 0;
6920 : unsigned long p_end, p_start = mas->min;
6921 : unsigned char p_slot;
6922 : unsigned long *gaps = NULL;
6923 : unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
6924 : int i;
6925 :
6926 : if (ma_is_dense(mte_node_type(mte))) {
6927 : for (i = 0; i < mt_slot_count(mte); i++) {
6928 : if (mas_get_slot(mas, i)) {
6929 : if (gap > max_gap)
6930 : max_gap = gap;
6931 : gap = 0;
6932 : continue;
6933 : }
6934 : gap++;
6935 : }
6936 : goto counted;
6937 : }
6938 :
6939 : gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
6940 : for (i = 0; i < mt_slot_count(mte); i++) {
6941 : p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));
6942 :
6943 : if (!gaps) {
6944 : if (mas_get_slot(mas, i)) {
6945 : gap = 0;
6946 : goto not_empty;
6947 : }
6948 :
6949 : gap += p_end - p_start + 1;
6950 : } else {
6951 : void *entry = mas_get_slot(mas, i);
6952 :
6953 : gap = gaps[i];
6954 : if (!entry) {
6955 : if (gap != p_end - p_start + 1) {
6956 : pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
6957 : mas_mn(mas), i,
6958 : mas_get_slot(mas, i), gap,
6959 : p_end, p_start);
6960 : mt_dump(mas->tree);
6961 :
6962 : MT_BUG_ON(mas->tree,
6963 : gap != p_end - p_start + 1);
6964 : }
6965 : } else {
6966 : if (gap > p_end - p_start + 1) {
6967 : pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
6968 : mas_mn(mas), i, gap, p_end, p_start,
6969 : p_end - p_start + 1);
6970 : MT_BUG_ON(mas->tree,
6971 : gap > p_end - p_start + 1);
6972 : }
6973 : }
6974 : }
6975 :
6976 : if (gap > max_gap)
6977 : max_gap = gap;
6978 : not_empty:
6979 : p_start = p_end + 1;
6980 : if (p_end >= mas->max)
6981 : break;
6982 : }
6983 :
6984 : counted:
6985 : if (mte_is_root(mte))
6986 : return;
6987 :
6988 : p_slot = mte_parent_slot(mas->node);
6989 : p_mn = mte_parent(mte);
6990 : MT_BUG_ON(mas->tree, max_gap > mas->max);
6991 : if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) {
6992 : pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
6993 : mt_dump(mas->tree);
6994 : }
6995 :
6996 : MT_BUG_ON(mas->tree,
6997 : ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap);
6998 : }
6999 :
7000 : static void mas_validate_parent_slot(struct ma_state *mas)
7001 : {
7002 : struct maple_node *parent;
7003 : struct maple_enode *node;
7004 : enum maple_type p_type = mas_parent_enum(mas, mas->node);
7005 : unsigned char p_slot = mte_parent_slot(mas->node);
7006 : void __rcu **slots;
7007 : int i;
7008 :
7009 : if (mte_is_root(mas->node))
7010 : return;
7011 :
7012 : parent = mte_parent(mas->node);
7013 : slots = ma_slots(parent, p_type);
7014 : MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
7015 :
7016 : /* Check prev/next parent slot for duplicate node entry */
7017 :
7018 : for (i = 0; i < mt_slots[p_type]; i++) {
7019 : node = mas_slot(mas, slots, i);
7020 : if (i == p_slot) {
7021 : if (node != mas->node)
7022 : pr_err("parent %p[%u] does not have %p\n",
7023 : parent, i, mas_mn(mas));
7024 : MT_BUG_ON(mas->tree, node != mas->node);
7025 : } else if (node == mas->node) {
7026 : pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
7027 : mas_mn(mas), parent, i, p_slot);
7028 : MT_BUG_ON(mas->tree, node == mas->node);
7029 : }
7030 : }
7031 : }
7032 :
7033 : static void mas_validate_child_slot(struct ma_state *mas)
7034 : {
7035 : enum maple_type type = mte_node_type(mas->node);
7036 : void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7037 : unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
7038 : struct maple_enode *child;
7039 : unsigned char i;
7040 :
7041 : if (mte_is_leaf(mas->node))
7042 : return;
7043 :
7044 : for (i = 0; i < mt_slots[type]; i++) {
7045 : child = mas_slot(mas, slots, i);
7046 : if (!pivots[i] || pivots[i] == mas->max)
7047 : break;
7048 :
7049 : if (!child)
7050 : break;
7051 :
7052 : if (mte_parent_slot(child) != i) {
7053 : pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
7054 : mas_mn(mas), i, mte_to_node(child),
7055 : mte_parent_slot(child));
7056 : MT_BUG_ON(mas->tree, 1);
7057 : }
7058 :
7059 : if (mte_parent(child) != mte_to_node(mas->node)) {
7060 : pr_err("child %p has parent %p not %p\n",
7061 : mte_to_node(child), mte_parent(child),
7062 : mte_to_node(mas->node));
7063 : MT_BUG_ON(mas->tree, 1);
7064 : }
7065 : }
7066 : }
7067 :
7068 : /*
7069 : * Validate all pivots are within mas->min and mas->max.
7070 : */
7071 : static void mas_validate_limits(struct ma_state *mas)
7072 : {
7073 : int i;
7074 : unsigned long prev_piv = 0;
7075 : enum maple_type type = mte_node_type(mas->node);
7076 : void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7077 : unsigned long *pivots = ma_pivots(mas_mn(mas), type);
7078 :
7079 : /* all limits are fine here. */
7080 : if (mte_is_root(mas->node))
7081 : return;
7082 :
7083 : for (i = 0; i < mt_slots[type]; i++) {
7084 : unsigned long piv;
7085 :
7086 : piv = mas_safe_pivot(mas, pivots, i, type);
7087 :
7088 : if (!piv && (i != 0))
7089 : break;
7090 :
7091 : if (!mte_is_leaf(mas->node)) {
7092 : void *entry = mas_slot(mas, slots, i);
7093 :
7094 : if (!entry)
7095 : pr_err("%p[%u] cannot be null\n",
7096 : mas_mn(mas), i);
7097 :
7098 : MT_BUG_ON(mas->tree, !entry);
7099 : }
7100 :
7101 : if (prev_piv > piv) {
7102 : pr_err("%p[%u] piv %lu < prev_piv %lu\n",
7103 : mas_mn(mas), i, piv, prev_piv);
7104 : MT_BUG_ON(mas->tree, piv < prev_piv);
7105 : }
7106 :
7107 : if (piv < mas->min) {
7108 : pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
7109 : piv, mas->min);
7110 : MT_BUG_ON(mas->tree, piv < mas->min);
7111 : }
7112 : if (piv > mas->max) {
7113 : pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
7114 : piv, mas->max);
7115 : MT_BUG_ON(mas->tree, piv > mas->max);
7116 : }
7117 : prev_piv = piv;
7118 : if (piv == mas->max)
7119 : break;
7120 : }
7121 : for (i += 1; i < mt_slots[type]; i++) {
7122 : void *entry = mas_slot(mas, slots, i);
7123 :
7124 : if (entry && (i != mt_slots[type] - 1)) {
7125 : pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
7126 : i, entry);
7127 : MT_BUG_ON(mas->tree, entry != NULL);
7128 : }
7129 :
7130 : if (i < mt_pivots[type]) {
7131 : unsigned long piv = pivots[i];
7132 :
7133 : if (!piv)
7134 : continue;
7135 :
7136 : pr_err("%p[%u] should not have piv %lu\n",
7137 : mas_mn(mas), i, piv);
7138 : MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1);
7139 : }
7140 : }
7141 : }
7142 :
7143 : static void mt_validate_nulls(struct maple_tree *mt)
7144 : {
7145 : void *entry, *last = (void *)1;
7146 : unsigned char offset = 0;
7147 : void __rcu **slots;
7148 : MA_STATE(mas, mt, 0, 0);
7149 :
7150 : mas_start(&mas);
7151 : if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
7152 : return;
7153 :
7154 : while (!mte_is_leaf(mas.node))
7155 : mas_descend(&mas);
7156 :
7157 : slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
7158 : do {
7159 : entry = mas_slot(&mas, slots, offset);
7160 : if (!last && !entry) {
7161 : pr_err("Sequential nulls end at %p[%u]\n",
7162 : mas_mn(&mas), offset);
7163 : }
7164 : MT_BUG_ON(mt, !last && !entry);
7165 : last = entry;
7166 : if (offset == mas_data_end(&mas)) {
7167 : mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
7168 : if (mas_is_none(&mas))
7169 : return;
7170 : offset = 0;
7171 : slots = ma_slots(mte_to_node(mas.node),
7172 : mte_node_type(mas.node));
7173 : } else {
7174 : offset++;
7175 : }
7176 :
7177 : } while (!mas_is_none(&mas));
7178 : }
7179 :
7180 : /*
7181 : * validate a maple tree by checking:
7182 : * 1. The limits (pivots are within mas->min to mas->max)
7183 : * 2. The gap is correctly set in the parents
7184 : */
7185 : void mt_validate(struct maple_tree *mt)
7186 : {
7187 : unsigned char end;
7188 :
7189 : MA_STATE(mas, mt, 0, 0);
7190 : rcu_read_lock();
7191 : mas_start(&mas);
7192 : if (!mas_searchable(&mas))
7193 : goto done;
7194 :
7195 : mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
7196 : while (!mas_is_none(&mas)) {
7197 : MT_BUG_ON(mas.tree, mte_dead_node(mas.node));
7198 : if (!mte_is_root(mas.node)) {
7199 : end = mas_data_end(&mas);
7200 : if ((end < mt_min_slot_count(mas.node)) &&
7201 : (mas.max != ULONG_MAX)) {
7202 : pr_err("Invalid size %u of %p\n", end,
7203 : mas_mn(&mas));
7204 : MT_BUG_ON(mas.tree, 1);
7205 : }
7206 :
7207 : }
7208 : mas_validate_parent_slot(&mas);
7209 : mas_validate_child_slot(&mas);
7210 : mas_validate_limits(&mas);
7211 : if (mt_is_alloc(mt))
7212 : mas_validate_gaps(&mas);
7213 : mas_dfs_postorder(&mas, ULONG_MAX);
7214 : }
7215 : mt_validate_nulls(mt);
7216 : done:
7217 : rcu_read_unlock();
7218 :
7219 : }
7220 : EXPORT_SYMBOL_GPL(mt_validate);
7221 :
7222 : #endif /* CONFIG_DEBUG_MAPLE_TREE */