/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_MAPLE_TREE_H
#define _LINUX_MAPLE_TREE_H
/*
 * Maple Tree - An RCU-safe adaptive tree for storing ranges
 * Copyright (c) 2018-2022 Oracle
 * Authors: Liam R. Howlett <Liam.Howlett@Oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
/* #define CONFIG_MAPLE_RCU_DISABLED */

/*
 * Allocated nodes are mutable until they have been inserted into the tree,
 * at which time they cannot change their type until they have been removed
 * from the tree and an RCU grace period has passed.
 *
 * Removed nodes have their ->parent set to point to themselves. RCU readers
 * check ->parent before relying on the value that they loaded from the
 * slots array. This lets us reuse the slots array for the RCU head.
 *
 * Nodes in the tree point to their parent unless bit 0 is set.
 */
#if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
/* 64bit sizes */
#define MAPLE_NODE_SLOTS	31	/* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS	16	/* 256 bytes */
#define MAPLE_ARANGE64_SLOTS	10	/* 240 bytes */
#define MAPLE_ARANGE64_META_MAX	15	/* Out of range for metadata */
#define MAPLE_ALLOC_SLOTS	(MAPLE_NODE_SLOTS - 1)
#else
/* 32bit sizes */
#define MAPLE_NODE_SLOTS	63	/* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS	32	/* 256 bytes */
#define MAPLE_ARANGE64_SLOTS	21	/* 240 bytes */
#define MAPLE_ARANGE64_META_MAX	31	/* Out of range for metadata */
#define MAPLE_ALLOC_SLOTS	(MAPLE_NODE_SLOTS - 2)
#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */

#define MAPLE_NODE_MASK		255UL

/*
 * The node->parent of the root node has bit 0 set and the rest of the pointer
 * is a pointer to the tree itself. No more bits are available in this pointer
 * (on m68k, the data structure may only be 2-byte aligned).
 *
 * Internal non-root nodes can only have maple_range_* nodes as parents. The
 * parent pointer is 256B aligned like all other tree nodes. When storing 32
 * or 64 bit values, the offset can fit into 4 bits. The 16 bit values need an
 * extra bit to store the offset. This extra bit comes from a reuse of the last
 * bit in the node type. This is possible by using bit 1 to indicate if bit 2
 * is part of the type or the slot.
 *
 * Once the type is decided, the choice between an allocation range type and a
 * plain range type is made by checking the immutable tree flags for
 * MT_FLAGS_ALLOC_RANGE.
 *
 * Node types:
 * 0x??1 = Root
 * 0x?00 = 16 bit nodes
 * 0x010 = 32 bit nodes
 * 0x110 = 64 bit nodes
 *
 * Slot size and location in the parent pointer:
 * type  : slot location
 * 0x??1 : Root
 * 0x?00 : 16 bit values, type in 0-1, slot in 2-6
 * 0x010 : 32 bit values, type in 0-2, slot in 3-6
 * 0x110 : 64 bit values, type in 0-2, slot in 3-6
 */
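
/*
 * An illustrative sketch (not part of the kernel API) of decoding the parent
 * pointer layout described above. The helper name and the open-coded masks
 * are assumptions for demonstration only; the real decoding helpers live in
 * lib/maple_tree.c.
 */
static inline unsigned char mt_example_parent_slot(unsigned long parent)
{
	/* Root pointers (bit 0 set) carry no slot. */
	if (parent & 1UL)
		return 0;

	/* Bit 1 clear: a 16 bit node, type in bits 0-1, slot in bits 2-6. */
	if (!(parent & 2UL))
		return (parent >> 2) & 0x1f;

	/* Bit 1 set: a 32 or 64 bit node, type in bits 0-2, slot in bits 3-6. */
	return (parent >> 3) & 0xf;
}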

/*
 * This metadata is used to optimize the gap updating code and in reverse
 * searching for gaps or any other code that needs to find the end of the data.
 */
struct maple_metadata {
	unsigned char end;
	unsigned char gap;
};

/*
 * Leaf nodes do not store pointers to nodes, they store user data. Users may
 * store almost any bit pattern. As noted above, the optimisation of storing an
 * entry at 0 in the root pointer cannot be done for data which have the bottom
 * two bits set to '10'. We also reserve values with the bottom two bits set to
 * '10' which are below 4096 (ie 2, 6, 10 .. 4094) for internal use. Some APIs
 * return errnos as a negative errno shifted left by two bits with the bottom
 * two bits set to '10', and while choosing to store these values in the array
 * is not an error, it may lead to confusion if you're testing for an error with
 * mas_is_err().
 *
 * Non-leaf nodes store the type of the node pointed to (enum maple_type in bits
 * 3-6); bit 2 is reserved. That leaves bits 0-1 unused for now.
 *
 * In regular B-Tree terms, pivots are called keys. The term pivot is used to
 * indicate that the tree is specifying ranges. Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree. Pivot values are inclusive of the slot with
 * the same index.
 */
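
/*
 * Worked example of pivot semantics (hypothetical contents): in a node whose
 * parent implies the range 0-100, pivots {10, 20} with slots {A, B, C} mean
 * that A covers 0-10, B covers 11-20 and C covers 21-100. The last slot runs
 * to the implied maximum rather than to a stored pivot.
 */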

struct maple_range_64 {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
	union {
		void __rcu *slot[MAPLE_RANGE64_SLOTS];
		struct {
			void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
			struct maple_metadata meta;
		};
	};
};

/*
 * At tree creation time, the user can specify that they're willing to trade off
 * storing fewer entries in a tree in return for storing more information in
 * each node.
 *
 * The maple tree supports recording the largest range of NULL entries available
 * in this node, also called gaps. This optimises the tree for allocating a
 * range.
 */
struct maple_arange_64 {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
	void __rcu *slot[MAPLE_ARANGE64_SLOTS];
	unsigned long gap[MAPLE_ARANGE64_SLOTS];
	struct maple_metadata meta;
};

struct maple_alloc {
	unsigned long total;
	unsigned char node_count;
	unsigned int request_count;
	struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
};

struct maple_topiary {
	struct maple_pnode *parent;
	struct maple_enode *next;	/* Overlaps the pivot */
};

enum maple_type {
	maple_dense,
	maple_leaf_64,
	maple_range_64,
	maple_arange_64,
};

/**
 * DOC: Maple tree flags
 *
 * * MT_FLAGS_ALLOC_RANGE	- Track gaps in this tree
 * * MT_FLAGS_USE_RCU		- Operate in RCU mode
 * * MT_FLAGS_HEIGHT_OFFSET	- The position of the tree height in the flags
 * * MT_FLAGS_HEIGHT_MASK	- The mask for the maple tree height value
 * * MT_FLAGS_LOCK_MASK		- How the mt_lock is used
 * * MT_FLAGS_LOCK_IRQ		- Acquired irq-safe
 * * MT_FLAGS_LOCK_BH		- Acquired bh-safe
 * * MT_FLAGS_LOCK_EXTERN	- mt_lock is not used
 * * MAPLE_HEIGHT_MAX		- The largest height that can be stored
 */
#define MT_FLAGS_ALLOC_RANGE	0x01
#define MT_FLAGS_USE_RCU	0x02
#define MT_FLAGS_HEIGHT_OFFSET	0x02
#define MT_FLAGS_HEIGHT_MASK	0x7C
#define MT_FLAGS_LOCK_MASK	0x300
#define MT_FLAGS_LOCK_IRQ	0x100
#define MT_FLAGS_LOCK_BH	0x200
#define MT_FLAGS_LOCK_EXTERN	0x300

#define MAPLE_HEIGHT_MAX	31

#define MAPLE_NODE_TYPE_MASK	0x0F
#define MAPLE_NODE_TYPE_SHIFT	0x03

#define MAPLE_RESERVED_RANGE	4096

#ifdef CONFIG_LOCKDEP
typedef struct lockdep_map *lockdep_map_p;
#define mt_lock_is_held(mt)	lock_is_held(mt->ma_external_lock)
#define mt_set_external_lock(mt, lock)					\
	(mt)->ma_external_lock = &(lock)->dep_map
#else
typedef struct { /* nothing */ } lockdep_map_p;
#define mt_lock_is_held(mt)	1
#define mt_set_external_lock(mt, lock)	do { } while (0)
#endif

/*
 * If the tree contains a single entry at index 0, it is usually stored in
 * tree->ma_root. To optimise for the page cache, an entry which ends in '00',
 * '01' or '11' is stored in the root, but an entry which ends in '10' will be
 * stored in a node. Bits 3-6 are used to store enum maple_type.
 *
 * The flags are used both to store some immutable information about this tree
 * (set at tree creation time) and dynamic information set under the spinlock.
 *
 * Another use of flags is to indicate global states of the tree. This is the
 * case with the MT_FLAGS_USE_RCU flag, which indicates the tree is currently in
 * RCU mode. This mode was added to allow the tree to reuse nodes instead of
 * re-allocating and RCU freeing nodes when there is a single user.
 */
struct maple_tree {
	union {
		spinlock_t ma_lock;
		lockdep_map_p ma_external_lock;
	};
	void __rcu *ma_root;
	unsigned int ma_flags;
};

/**
 * MTREE_INIT() - Initialize a maple tree
 * @name: The maple tree name
 * @__flags: The maple tree flags
 */
#define MTREE_INIT(name, __flags) {					\
	.ma_lock = __SPIN_LOCK_UNLOCKED((name).ma_lock),		\
	.ma_flags = __flags,						\
	.ma_root = NULL,						\
}

/**
 * MTREE_INIT_EXT() - Initialize a maple tree with an external lock.
 * @name: The tree name
 * @__flags: The maple tree flags
 * @__lock: The external lock
 */
#ifdef CONFIG_LOCKDEP
#define MTREE_INIT_EXT(name, __flags, __lock) {				\
	.ma_external_lock = &(__lock).dep_map,				\
	.ma_flags = (__flags),						\
	.ma_root = NULL,						\
}
#else
#define MTREE_INIT_EXT(name, __flags, __lock)	MTREE_INIT(name, __flags)
#endif

#define DEFINE_MTREE(name)						\
	struct maple_tree name = MTREE_INIT(name, 0)

#define mtree_lock(mt)		spin_lock((&(mt)->ma_lock))
#define mtree_unlock(mt)	spin_unlock((&(mt)->ma_lock))

/*
 * The Maple Tree squeezes various bits in at various points which aren't
 * necessarily obvious. Usually, this is done by observing that pointers are
 * N-byte aligned and thus the bottom log_2(N) bits are available for use. We
 * don't use the high bits of pointers to store additional information because
 * we don't know what bits are unused on any given architecture.
 *
 * Nodes are 256 bytes in size and are also aligned to 256 bytes, giving us 8
 * low bits for our own purposes. Nodes are currently of 4 types:
 * 1. Single pointer (Range is 0-0)
 * 2. Non-leaf Allocation Range nodes
 * 3. Non-leaf Range nodes
 * 4. Leaf Range nodes
 *
 * All nodes consist of a number of node slots, pivots, and a parent pointer.
 */

struct maple_node {
	union {
		struct {
			struct maple_pnode *parent;
			void __rcu *slot[MAPLE_NODE_SLOTS];
		};
		struct {
			void *pad;
			struct rcu_head rcu;
			struct maple_enode *piv_parent;
			unsigned char parent_slot;
			enum maple_type type;
			unsigned char slot_len;
			unsigned int ma_flags;
		};
		struct maple_range_64 mr64;
		struct maple_arange_64 ma64;
		struct maple_alloc alloc;
	};
};

/*
 * More complicated stores can cause two nodes to become one or three and
 * potentially alter the height of the tree. Either half of the tree may need
 * to be rebalanced against the other. The ma_topiary struct is used to track
 * which nodes have been 'cut' from the tree so that the change can be done
 * safely at a later date. This is done to support RCU.
 */
struct ma_topiary {
	struct maple_enode *head;
	struct maple_enode *tail;
	struct maple_tree *mtree;
};

void *mtree_load(struct maple_tree *mt, unsigned long index);

int mtree_insert(struct maple_tree *mt, unsigned long index,
		void *entry, gfp_t gfp);
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp);
int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp);
int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp);

int mtree_store_range(struct maple_tree *mt, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp);
int mtree_store(struct maple_tree *mt, unsigned long index,
		void *entry, gfp_t gfp);
void *mtree_erase(struct maple_tree *mt, unsigned long index);

void mtree_destroy(struct maple_tree *mt);
void __mt_destroy(struct maple_tree *mt);

/**
 * mtree_empty() - Determine if a tree has any present entries.
 * @mt: Maple Tree.
 *
 * Context: Any context.
 * Return: %true if the tree contains only NULL pointers.
 */
static inline bool mtree_empty(const struct maple_tree *mt)
{
	return mt->ma_root == NULL;
}
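
/*
 * A minimal usage sketch of the normal API above (the tree and function
 * names are hypothetical, for illustration only). The normal API takes the
 * tree lock internally, so no explicit locking is needed here.
 */
static DEFINE_MTREE(example_mt);

static inline int example_normal_api(void *item, gfp_t gfp)
{
	int ret;

	/* Store @item over the inclusive range 10-19. */
	ret = mtree_store_range(&example_mt, 10, 19, item, gfp);
	if (ret)
		return ret;

	/* Loading any index within the range returns @item. */
	WARN_ON(mtree_load(&example_mt, 15) != item);

	/* Erasing any index within the range removes the whole entry. */
	mtree_erase(&example_mt, 12);
	return 0;
}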

/* Advanced API */

/*
 * The maple state is defined in the struct ma_state and is used to keep track
 * of information during operations, and even between operations when using the
 * advanced API.
 *
 * If state->node has bit 0 set then it references a tree location which is not
 * a node (eg the root). If bit 1 is set, the rest of the bits are a negative
 * errno. Bit 2 (the 'unallocated slots' bit) is clear. Bits 3-6 indicate the
 * node type.
 *
 * state->alloc either has a requested number of nodes or an allocated node. If
 * state->alloc has a requested number of nodes, the first bit will be set (0x1)
 * and the remaining bits are the value. If state->alloc is a node, then the
 * node will be of type maple_alloc. maple_alloc has MAPLE_ALLOC_SLOTS slots for
 * storing more allocated nodes, a total number of nodes allocated, and the
 * node_count in this node. node_count is the number of allocated nodes in this
 * node. Scaling beyond MAPLE_ALLOC_SLOTS is handled by storing further nodes
 * into state->alloc->slot[0]'s node. Nodes are taken from state->alloc by
 * removing a node from the state->alloc node until state->alloc->node_count
 * is 1, at which point state->alloc is returned and state->alloc->slot[0] is
 * promoted to state->alloc. Nodes are pushed onto state->alloc by putting the
 * current state->alloc into the pushed node's slot[0].
 *
 * The state also contains the implied min/max of the state->node, the depth of
 * this search, and the offset. The implied min/max are either from the parent
 * node or are 0-oo for the root node. The depth is incremented or decremented
 * every time a node is walked down or up. The offset is the slot/pivot of
 * interest in the node - either for reading or writing.
 *
 * When returning a value, the maple state's index and last respectively contain
 * the start and end of the range for the entry. Ranges are inclusive in the
 * Maple Tree.
 */
struct ma_state {
	struct maple_tree *tree;	/* The tree we're operating in */
	unsigned long index;		/* The index we're operating on - range start */
	unsigned long last;		/* The last index we're operating on - range end */
	struct maple_enode *node;	/* The node containing this entry */
	unsigned long min;		/* The minimum index of this node - implied pivot min */
	unsigned long max;		/* The maximum index of this node - implied pivot max */
	struct maple_alloc *alloc;	/* Allocated nodes for this operation */
	unsigned char depth;		/* depth of tree descent during write */
	unsigned char offset;
	unsigned char mas_flags;
};

struct ma_wr_state {
	struct ma_state *mas;
	struct maple_node *node;	/* Decoded mas->node */
	unsigned long r_min;		/* range min */
	unsigned long r_max;		/* range max */
	enum maple_type type;		/* mas->node type */
	unsigned char offset_end;	/* The offset where the write ends */
	unsigned char node_end;		/* mas->node end */
	unsigned long *pivots;		/* mas->node->pivots pointer */
	unsigned long end_piv;		/* The pivot at the offset end */
	void __rcu **slots;		/* mas->node->slots pointer */
	void *entry;			/* The entry to write */
	void *content;			/* The existing entry that is being overwritten */
};

#define mas_lock(mas)		spin_lock(&((mas)->tree->ma_lock))
#define mas_unlock(mas)		spin_unlock(&((mas)->tree->ma_lock))

/*
 * Special values for ma_state.node.
 * MAS_START means we have not searched the tree.
 * MAS_ROOT means we have searched the tree and the entry we found lives in
 * the root of the tree (ie it has index 0, length 1 and is the only entry in
 * the tree).
 * MAS_NONE means we have searched the tree and there is no node in the
 * tree for this entry. For example, we searched for index 1 in an empty
 * tree. Or we have a tree which points to a full leaf node and we
 * searched for an index beyond the range covered by that leaf node.
 * MAS_PAUSE means the walk was paused (see mas_pause()) so that the lock
 * could be dropped; the next search will resume from where it left off.
 * MA_ERROR represents an errno. After dropping the lock and attempting
 * to resolve the error, the walk would have to be restarted from the
 * top of the tree as the tree may have been modified.
 */
#define MAS_START	((struct maple_enode *)1UL)
#define MAS_ROOT	((struct maple_enode *)5UL)
#define MAS_NONE	((struct maple_enode *)9UL)
#define MAS_PAUSE	((struct maple_enode *)17UL)
#define MA_ERROR(err)							\
	((struct maple_enode *)(((unsigned long)err << 2) | 2UL))

#define MA_STATE(name, mt, first, end)					\
	struct ma_state name = {					\
		.tree = mt,						\
		.index = first,						\
		.last = end,						\
		.node = MAS_START,					\
		.min = 0,						\
		.max = ULONG_MAX,					\
		.alloc = NULL,						\
		.mas_flags = 0,						\
	}

#define MA_WR_STATE(name, ma_state, wr_entry)				\
	struct ma_wr_state name = {					\
		.mas = ma_state,					\
		.content = NULL,					\
		.entry = wr_entry,					\
	}

#define MA_TOPIARY(name, tree)						\
	struct ma_topiary name = {					\
		.head = NULL,						\
		.tail = NULL,						\
		.mtree = tree,						\
	}

void *mas_walk(struct ma_state *mas);
void *mas_store(struct ma_state *mas, void *entry);
void *mas_erase(struct ma_state *mas);
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
void mas_store_prealloc(struct ma_state *mas, void *entry);
void *mas_find(struct ma_state *mas, unsigned long max);
void *mas_find_range(struct ma_state *mas, unsigned long max);
void *mas_find_rev(struct ma_state *mas, unsigned long min);
void *mas_find_range_rev(struct ma_state *mas, unsigned long min);
int mas_preallocate(struct ma_state *mas, gfp_t gfp);
bool mas_is_err(struct ma_state *mas);

bool mas_nomem(struct ma_state *mas, gfp_t gfp);
void mas_pause(struct ma_state *mas);
void maple_tree_init(void);
void mas_destroy(struct ma_state *mas);
int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
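
/*
 * Sketch of the advanced-API store pattern (hypothetical function name).
 * mas_nomem() checks for an allocation failure recorded in the state,
 * allocates (dropping the lock if the gfp flags allow blocking) and
 * returns true when the store should be retried.
 */
static inline int example_mas_store(struct ma_state *mas, void *entry,
		gfp_t gfp)
{
	mas_lock(mas);
retry:
	mas_store(mas, entry);
	if (mas_nomem(mas, gfp))
		goto retry;
	mas_unlock(mas);

	/* Simplified: real callers would decode the errno held in mas->node. */
	return mas_is_err(mas) ? -ENOMEM : 0;
}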

void *mas_prev(struct ma_state *mas, unsigned long min);
void *mas_prev_range(struct ma_state *mas, unsigned long min);
void *mas_next(struct ma_state *mas, unsigned long max);
void *mas_next_range(struct ma_state *mas, unsigned long max);

int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
		unsigned long size);
/*
 * This finds an empty area from the highest address to the lowest;
 * AKA the "topdown" version.
 */
int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
		unsigned long max, unsigned long size);
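
/*
 * Sketch of a gap search followed by a store, mirroring what
 * mtree_alloc_range() does internally (the function name here is
 * hypothetical). This requires a tree created with MT_FLAGS_ALLOC_RANGE so
 * that gaps are tracked, and the caller must hold the tree lock.
 */
static inline int example_mas_alloc(struct ma_state *mas, void *entry,
		unsigned long size, gfp_t gfp)
{
	int ret;

	ret = mas_empty_area(mas, 0, ULONG_MAX, size);
	if (ret)
		return ret;

	/* mas->index now holds the start of the gap that was found. */
	mas->last = mas->index + size - 1;
	return mas_store_gfp(mas, entry, gfp);
}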

static inline void mas_init(struct ma_state *mas, struct maple_tree *tree,
		unsigned long addr)
{
	memset(mas, 0, sizeof(struct ma_state));
	mas->tree = tree;
	mas->index = mas->last = addr;
	mas->max = ULONG_MAX;
	mas->node = MAS_START;
}

/* Checks if a mas has not found anything */
static inline bool mas_is_none(const struct ma_state *mas)
{
	return mas->node == MAS_NONE;
}

/* Checks if a mas has been paused */
static inline bool mas_is_paused(const struct ma_state *mas)
{
	return mas->node == MAS_PAUSE;
}

/**
 * mas_reset() - Reset a Maple Tree operation state.
 * @mas: Maple Tree operation state.
 *
 * Resets the error or walk state of the @mas so future walks of the
 * array will start from the root. Use this if you have dropped the
 * lock and want to reuse the ma_state.
 *
 * Context: Any context.
 */
static inline void mas_reset(struct ma_state *mas)
{
	mas->node = MAS_START;
}

/**
 * mas_for_each() - Iterate over a range of the maple tree.
 * @__mas: Maple Tree operation state (maple_state)
 * @__entry: Entry retrieved from the tree
 * @__max: maximum index to retrieve from the tree
 *
 * When returned, mas->index and mas->last will hold the entire range for the
 * entry.
 *
 * Note: may return the zero entry.
 */
#define mas_for_each(__mas, __entry, __max) \
	while (((__entry) = mas_find((__mas), (__max))) != NULL)
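
/*
 * Sketch of an iteration under the tree lock (hypothetical function name):
 * visit every entry with an index at or below 99.
 */
static inline void example_mas_iterate(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);
	void *entry;

	mas_lock(&mas);
	mas_for_each(&mas, entry, 99) {
		/* mas.index and mas.last hold the range of @entry here. */
	}
	mas_unlock(&mas);
}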

/**
 * mas_set_range() - Set up Maple Tree operation state for a different index.
 * @mas: Maple Tree operation state.
 * @start: New start of range in the Maple Tree.
 * @last: New end of range in the Maple Tree.
 *
 * Move the operation state to refer to a different range. This will
 * have the effect of starting a walk from the top; see mas_next()
 * to move to an adjacent index.
 */
static inline
void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
{
	mas->index = start;
	mas->last = last;
	mas->node = MAS_START;
}

/**
 * mas_set() - Set up Maple Tree operation state for a different index.
 * @mas: Maple Tree operation state.
 * @index: New index into the Maple Tree.
 *
 * Move the operation state to refer to a different index. This will
 * have the effect of starting a walk from the top; see mas_next()
 * to move to an adjacent index.
 */
static inline void mas_set(struct ma_state *mas, unsigned long index)
{
	mas_set_range(mas, index, index);
}

static inline bool mt_external_lock(const struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN;
}

/**
 * mt_init_flags() - Initialise an empty maple tree with flags.
 * @mt: Maple Tree
 * @flags: maple tree flags.
 *
 * If you need to initialise a Maple Tree with special flags (eg, an
 * allocation tree), use this function.
 *
 * Context: Any context.
 */
static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags)
{
	mt->ma_flags = flags;
	if (!mt_external_lock(mt))
		spin_lock_init(&mt->ma_lock);
	rcu_assign_pointer(mt->ma_root, NULL);
}

/**
 * mt_init() - Initialise an empty maple tree.
 * @mt: Maple Tree
 *
 * Initialise an empty Maple Tree with no special flags.
 *
 * Context: Any context.
 */
static inline void mt_init(struct maple_tree *mt)
{
	mt_init_flags(mt, 0);
}

static inline bool mt_in_rcu(struct maple_tree *mt)
{
#ifdef CONFIG_MAPLE_RCU_DISABLED
	return false;
#endif
	return mt->ma_flags & MT_FLAGS_USE_RCU;
}

/**
 * mt_clear_in_rcu() - Switch the tree to non-RCU mode.
 * @mt: The Maple Tree
 */
static inline void mt_clear_in_rcu(struct maple_tree *mt)
{
	if (!mt_in_rcu(mt))
		return;

	if (mt_external_lock(mt)) {
		WARN_ON(!mt_lock_is_held(mt));
		mt->ma_flags &= ~MT_FLAGS_USE_RCU;
	} else {
		mtree_lock(mt);
		mt->ma_flags &= ~MT_FLAGS_USE_RCU;
		mtree_unlock(mt);
	}
}

/**
 * mt_set_in_rcu() - Switch the tree to RCU safe mode.
 * @mt: The Maple Tree
 */
static inline void mt_set_in_rcu(struct maple_tree *mt)
{
	if (mt_in_rcu(mt))
		return;

	if (mt_external_lock(mt)) {
		WARN_ON(!mt_lock_is_held(mt));
		mt->ma_flags |= MT_FLAGS_USE_RCU;
	} else {
		mtree_lock(mt);
		mt->ma_flags |= MT_FLAGS_USE_RCU;
		mtree_unlock(mt);
	}
}

static inline unsigned int mt_height(const struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
}

void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
void *mt_find_after(struct maple_tree *mt, unsigned long *index,
		unsigned long max);
void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min);
void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);

/**
 * mt_for_each - Iterate over each entry starting at index until max.
 * @__tree: The Maple Tree
 * @__entry: The current entry
 * @__index: The index to update to track the location in the tree
 * @__max: The maximum limit for @index
 *
 * Note: Will not return the zero entry.
 */
#define mt_for_each(__tree, __entry, __index, __max) \
	for (__entry = mt_find(__tree, &(__index), __max); \
		__entry; __entry = mt_find_after(__tree, &(__index), __max))
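
/*
 * Sketch of external iteration with the normal API (hypothetical function
 * name): count the entries in a tree. @index is advanced by
 * mt_find()/mt_find_after() as the walk progresses.
 */
static inline unsigned long example_mt_count(struct maple_tree *mt)
{
	unsigned long index = 0;
	unsigned long count = 0;
	void *entry;

	mt_for_each(mt, entry, index, ULONG_MAX)
		count++;
	return count;
}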

#ifdef CONFIG_DEBUG_MAPLE_TREE
enum mt_dump_format {
	mt_dump_dec,
	mt_dump_hex,
};

extern atomic_t maple_tree_tests_run;
extern atomic_t maple_tree_tests_passed;

void mt_dump(const struct maple_tree *mt, enum mt_dump_format format);
void mas_dump(const struct ma_state *mas);
void mas_wr_dump(const struct ma_wr_state *wr_mas);
void mt_validate(struct maple_tree *mt);
void mt_cache_shrink(void);
#define MT_BUG_ON(__tree, __x) do {					\
	atomic_inc(&maple_tree_tests_run);				\
	if (__x) {							\
		pr_info("BUG at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mt_dump(__tree, mt_dump_hex);				\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
} while (0)

#define MAS_BUG_ON(__mas, __x) do {					\
	atomic_inc(&maple_tree_tests_run);				\
	if (__x) {							\
		pr_info("BUG at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mas_dump(__mas);					\
		mt_dump((__mas)->tree, mt_dump_hex);			\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
} while (0)

#define MAS_WR_BUG_ON(__wrmas, __x) do {				\
	atomic_inc(&maple_tree_tests_run);				\
	if (__x) {							\
		pr_info("BUG at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mas_wr_dump(__wrmas);					\
		mas_dump((__wrmas)->mas);				\
		mt_dump((__wrmas)->mas->tree, mt_dump_hex);		\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
} while (0)

#define MT_WARN_ON(__tree, __x) ({					\
	int ret = !!(__x);						\
	atomic_inc(&maple_tree_tests_run);				\
	if (ret) {							\
		pr_info("WARN at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mt_dump(__tree, mt_dump_hex);				\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
	unlikely(ret);							\
})

#define MAS_WARN_ON(__mas, __x) ({					\
	int ret = !!(__x);						\
	atomic_inc(&maple_tree_tests_run);				\
	if (ret) {							\
		pr_info("WARN at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mas_dump(__mas);					\
		mt_dump((__mas)->tree, mt_dump_hex);			\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
	unlikely(ret);							\
})

#define MAS_WR_WARN_ON(__wrmas, __x) ({					\
	int ret = !!(__x);						\
	atomic_inc(&maple_tree_tests_run);				\
	if (ret) {							\
		pr_info("WARN at %s:%d (%u)\n",				\
			__func__, __LINE__, __x);			\
		mas_wr_dump(__wrmas);					\
		mas_dump((__wrmas)->mas);				\
		mt_dump((__wrmas)->mas->tree, mt_dump_hex);		\
		pr_info("Pass: %u Run:%u\n",				\
			atomic_read(&maple_tree_tests_passed),		\
			atomic_read(&maple_tree_tests_run));		\
		dump_stack();						\
	} else {							\
		atomic_inc(&maple_tree_tests_passed);			\
	}								\
	unlikely(ret);							\
})
#else
#define MT_BUG_ON(__tree, __x)		BUG_ON(__x)
#define MAS_BUG_ON(__mas, __x)		BUG_ON(__x)
#define MAS_WR_BUG_ON(__mas, __x)	BUG_ON(__x)
#define MT_WARN_ON(__tree, __x)		WARN_ON(__x)
#define MAS_WARN_ON(__mas, __x)		WARN_ON(__x)
#define MAS_WR_WARN_ON(__mas, __x)	WARN_ON(__x)
#endif /* CONFIG_DEBUG_MAPLE_TREE */

#endif /*_LINUX_MAPLE_TREE_H */