Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0+
2 : /*
3 : * XArray implementation
4 : * Copyright (c) 2017-2018 Microsoft Corporation
5 : * Copyright (c) 2018-2020 Oracle
6 : * Author: Matthew Wilcox <willy@infradead.org>
7 : */
8 :
9 : #include <linux/bitmap.h>
10 : #include <linux/export.h>
11 : #include <linux/list.h>
12 : #include <linux/slab.h>
13 : #include <linux/xarray.h>
14 :
15 : #include "radix-tree.h"
16 :
17 : /*
18 : * Coding conventions in this file:
19 : *
20 : * @xa is used to refer to the entire xarray.
21 : * @xas is the 'xarray operation state'. It may be either a pointer to
22 : * an xa_state, or an xa_state stored on the stack. This is an unfortunate
23 : * ambiguity.
24 : * @index is the index of the entry being operated on.
25 : * @mark is an xa_mark_t; a small number indicating one of the mark bits.
26 : * @node refers to an xa_node; usually the primary one being operated on by
27 : * this function.
28 : * @offset is the index into the slots array inside an xa_node.
29 : * @parent refers to the @xa_node closer to the head than @node.
30 : * @entry refers to something stored in a slot in the xarray.
31 : */
32 :
33 : static inline unsigned int xa_lock_type(const struct xarray *xa)
34 : {
35 2 : return (__force unsigned int)xa->xa_flags & 3;
36 : }
37 :
38 : static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
39 : {
40 0 : if (lock_type == XA_LOCK_IRQ)
41 0 : xas_lock_irq(xas);
42 0 : else if (lock_type == XA_LOCK_BH)
43 0 : xas_lock_bh(xas);
44 : else
45 0 : xas_lock(xas);
46 : }
47 :
48 0 : static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
49 : {
50 0 : if (lock_type == XA_LOCK_IRQ)
51 0 : xas_unlock_irq(xas);
52 0 : else if (lock_type == XA_LOCK_BH)
53 0 : xas_unlock_bh(xas);
54 : else
55 0 : xas_unlock(xas);
56 0 : }
57 :
58 : static inline bool xa_track_free(const struct xarray *xa)
59 : {
60 6 : return xa->xa_flags & XA_FLAGS_TRACK_FREE;
61 : }
62 :
63 : static inline bool xa_zero_busy(const struct xarray *xa)
64 : {
65 8 : return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
66 : }
67 :
68 : static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
69 : {
70 0 : if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
71 0 : xa->xa_flags |= XA_FLAGS_MARK(mark);
72 : }
73 :
74 : static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
75 : {
76 0 : if (xa->xa_flags & XA_FLAGS_MARK(mark))
77 0 : xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
78 : }
79 :
80 : static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
81 : {
82 0 : return node->marks[(__force unsigned)mark];
83 : }
84 :
85 : static inline bool node_get_mark(struct xa_node *node,
86 : unsigned int offset, xa_mark_t mark)
87 : {
88 0 : return test_bit(offset, node_marks(node, mark));
89 : }
90 :
91 : /* returns true if the bit was set */
92 : static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
93 : xa_mark_t mark)
94 : {
95 0 : return __test_and_set_bit(offset, node_marks(node, mark));
96 : }
97 :
98 : /* returns true if the bit was set */
99 : static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
100 : xa_mark_t mark)
101 : {
102 0 : return __test_and_clear_bit(offset, node_marks(node, mark));
103 : }
104 :
105 : static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
106 : {
107 0 : return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
108 : }
109 :
110 : static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
111 : {
112 0 : bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
113 : }
114 :
115 : #define mark_inc(mark) do { \
116 : mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
117 : } while (0)
118 :
119 : /*
120 : * xas_squash_marks() - Merge all marks to the first entry
121 : * @xas: Array operation state.
122 : *
123 : * Set a mark on the first entry if any entry has it set. Clear marks on
124 : * all sibling entries.
125 : */
126 0 : static void xas_squash_marks(const struct xa_state *xas)
127 : {
128 0 : unsigned int mark = 0;
129 0 : unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
130 :
131 0 : if (!xas->xa_sibs)
132 : return;
133 :
134 : do {
135 0 : unsigned long *marks = xas->xa_node->marks[mark];
136 0 : if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
137 0 : continue;
138 0 : __set_bit(xas->xa_offset, marks);
139 0 : bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
140 0 : } while (mark++ != (__force unsigned)XA_MARK_MAX);
141 : }
142 :
143 : /* extracts the offset within this node from the index */
144 : static unsigned int get_offset(unsigned long index, struct xa_node *node)
145 : {
146 6 : return (index >> node->shift) & XA_CHUNK_MASK;
147 : }
148 :
149 : static void xas_set_offset(struct xa_state *xas)
150 : {
151 0 : xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
152 : }
153 :
154 : /* move the index either forwards (find) or backwards (sibling slot) */
155 : static void xas_move_index(struct xa_state *xas, unsigned long offset)
156 : {
157 0 : unsigned int shift = xas->xa_node->shift;
158 0 : xas->xa_index &= ~XA_CHUNK_MASK << shift;
159 0 : xas->xa_index += offset << shift;
160 : }
161 :
162 : static void xas_next_offset(struct xa_state *xas)
163 : {
164 0 : xas->xa_offset++;
165 0 : xas_move_index(xas, xas->xa_offset);
166 : }
167 :
168 : static void *set_bounds(struct xa_state *xas)
169 : {
170 1 : xas->xa_node = XAS_BOUNDS;
171 : return NULL;
172 : }
173 :
174 : /*
175 : * Starts a walk. If the @xas is already valid, we assume that it's on
176 : * the right path and just return where we've got to. If we're in an
177 : * error state, return NULL. If the index is outside the current scope
178 : * of the xarray, return NULL without changing @xas->xa_node. Otherwise
179 : * set @xas->xa_node to NULL and return the current head of the array.
180 : */
181 17 : static void *xas_start(struct xa_state *xas)
182 : {
183 : void *entry;
184 :
185 17 : if (xas_valid(xas))
186 : return xas_reload(xas);
187 34 : if (xas_error(xas))
188 : return NULL;
189 :
190 34 : entry = xa_head(xas->xa);
191 17 : if (!xa_is_node(entry)) {
192 16 : if (xas->xa_index)
193 2 : return set_bounds(xas);
194 : } else {
195 2 : if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
196 0 : return set_bounds(xas);
197 : }
198 :
199 16 : xas->xa_node = NULL;
200 16 : return entry;
201 : }
202 :
203 : static void *xas_descend(struct xa_state *xas, struct xa_node *node)
204 : {
205 12 : unsigned int offset = get_offset(xas->xa_index, node);
206 12 : void *entry = xa_entry(xas->xa, node, offset);
207 :
208 6 : xas->xa_node = node;
209 : if (xa_is_sibling(entry)) {
210 : offset = xa_to_sibling(entry);
211 : entry = xa_entry(xas->xa, node, offset);
212 : if (node->shift && xa_is_node(entry))
213 : entry = XA_RETRY_ENTRY;
214 : }
215 :
216 6 : xas->xa_offset = offset;
217 : return entry;
218 : }
219 :
220 : /**
221 : * xas_load() - Load an entry from the XArray (advanced).
222 : * @xas: XArray operation state.
223 : *
224 : * Usually walks the @xas to the appropriate state to load the entry
225 : * stored at xa_index. However, it will do nothing and return %NULL if
226 : * @xas is in an error state. xas_load() will never expand the tree.
227 : *
228 : * If the xa_state is set up to operate on a multi-index entry, xas_load()
229 : * may return %NULL or an internal entry, even if there are entries
230 : * present within the range specified by @xas.
231 : *
232 : * Context: Any context. The caller should hold the xa_lock or the RCU lock.
233 : * Return: Usually an entry in the XArray, but see description for exceptions.
234 : */
235 17 : void *xas_load(struct xa_state *xas)
236 : {
237 17 : void *entry = xas_start(xas);
238 :
239 35 : while (xa_is_node(entry)) {
240 2 : struct xa_node *node = xa_to_node(entry);
241 :
242 2 : if (xas->xa_shift > node->shift)
243 : break;
244 2 : entry = xas_descend(xas, node);
245 2 : if (node->shift == 0)
246 : break;
247 : }
248 17 : return entry;
249 : }
250 : EXPORT_SYMBOL_GPL(xas_load);
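/*
 * Example (editor's sketch, not part of xarray.c): a typical read-side use
 * of xas_load() under RCU, essentially what xa_load() later in this file
 * does. The example_load() helper is hypothetical; xas_retry() restarts
 * the walk if a retry entry is observed.
 */
static void *example_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}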
251 :
252 : #define XA_RCU_FREE ((struct xarray *)1)
253 :
254 : static void xa_node_free(struct xa_node *node)
255 : {
256 : XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
257 0 : node->array = XA_RCU_FREE;
258 0 : call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
259 : }
260 :
261 : /*
262 : * xas_destroy() - Free any resources allocated during the XArray operation.
263 : * @xas: XArray operation state.
264 : *
265 : * Most users will not need to call this function; it is called for you
266 : * by xas_nomem().
267 : */
268 0 : void xas_destroy(struct xa_state *xas)
269 : {
270 221 : struct xa_node *next, *node = xas->xa_alloc;
271 :
272 221 : while (node) {
273 : XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
274 0 : next = rcu_dereference_raw(node->parent);
275 0 : radix_tree_node_rcu_free(&node->rcu_head);
276 0 : xas->xa_alloc = node = next;
277 : }
278 0 : }
279 :
280 : /**
281 : * xas_nomem() - Allocate memory if needed.
282 : * @xas: XArray operation state.
283 : * @gfp: Memory allocation flags.
284 : *
285 : * If we need to add new nodes to the XArray, we try to allocate memory
286 : * with GFP_NOWAIT while holding the lock, which will usually succeed.
287 : * If it fails, @xas is flagged as needing memory to continue. The caller
288 : * should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
289 : * the caller should retry the operation.
290 : *
291 : * Forward progress is guaranteed as one node is allocated here and
292 : * stored in the xa_state where it will be found by xas_alloc(). More
293 : * nodes will likely be found in the slab allocator, but we do not tie
294 : * them up here.
295 : *
296 : * Return: true if memory was needed, and was successfully allocated.
297 : */
298 219 : bool xas_nomem(struct xa_state *xas, gfp_t gfp)
299 : {
300 219 : if (xas->xa_node != XA_ERROR(-ENOMEM)) {
301 : xas_destroy(xas);
302 : return false;
303 : }
304 0 : if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
305 0 : gfp |= __GFP_ACCOUNT;
306 0 : xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
307 0 : if (!xas->xa_alloc)
308 : return false;
309 0 : xas->xa_alloc->parent = NULL;
310 : XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
311 0 : xas->xa_node = XAS_RESTART;
312 0 : return true;
313 : }
314 : EXPORT_SYMBOL_GPL(xas_nomem);
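/*
 * Example (editor's sketch): the canonical retry loop described above. The
 * store runs under the lock with the implicit GFP_NOWAIT allocation; on
 * -ENOMEM, xas_nomem() allocates with the caller's gfp flags outside the
 * lock and the loop retries. example_store() is a hypothetical helper.
 */
static int example_store(struct xarray *xa, unsigned long index, void *item)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}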
315 :
316 : /*
317 : * __xas_nomem() - Drop locks and allocate memory if needed.
318 : * @xas: XArray operation state.
319 : * @gfp: Memory allocation flags.
320 : *
321 : * Internal variant of xas_nomem().
322 : *
323 : * Return: true if memory was needed, and was successfully allocated.
324 : */
325 2 : static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
326 : __must_hold(xas->xa->xa_lock)
327 : {
328 4 : unsigned int lock_type = xa_lock_type(xas->xa);
329 :
330 2 : if (xas->xa_node != XA_ERROR(-ENOMEM)) {
331 : xas_destroy(xas);
332 : return false;
333 : }
334 0 : if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
335 0 : gfp |= __GFP_ACCOUNT;
336 0 : if (gfpflags_allow_blocking(gfp)) {
337 0 : xas_unlock_type(xas, lock_type);
338 0 : xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
339 0 : xas_lock_type(xas, lock_type);
340 : } else {
341 0 : xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
342 : }
343 0 : if (!xas->xa_alloc)
344 : return false;
345 0 : xas->xa_alloc->parent = NULL;
346 : XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
347 0 : xas->xa_node = XAS_RESTART;
348 0 : return true;
349 : }
350 :
351 : static void xas_update(struct xa_state *xas, struct xa_node *node)
352 : {
353 3 : if (xas->xa_update)
354 0 : xas->xa_update(node);
355 : else
356 : XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
357 : }
358 :
359 2 : static void *xas_alloc(struct xa_state *xas, unsigned int shift)
360 : {
361 2 : struct xa_node *parent = xas->xa_node;
362 2 : struct xa_node *node = xas->xa_alloc;
363 :
364 4 : if (xas_invalid(xas))
365 : return NULL;
366 :
367 2 : if (node) {
368 0 : xas->xa_alloc = NULL;
369 : } else {
370 2 : gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;
371 :
372 2 : if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
373 0 : gfp |= __GFP_ACCOUNT;
374 :
375 2 : node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
376 2 : if (!node) {
377 0 : xas_set_err(xas, -ENOMEM);
378 0 : return NULL;
379 : }
380 : }
381 :
382 2 : if (parent) {
383 1 : node->offset = xas->xa_offset;
384 1 : parent->count++;
385 : XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
386 1 : xas_update(xas, parent);
387 : }
388 : XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
389 : XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
390 2 : node->shift = shift;
391 2 : node->count = 0;
392 2 : node->nr_values = 0;
393 2 : RCU_INIT_POINTER(node->parent, xas->xa_node);
394 2 : node->array = xas->xa;
395 :
396 2 : return node;
397 : }
398 :
399 : #ifdef CONFIG_XARRAY_MULTI
400 : /* Returns the number of indices covered by a given xa_state */
401 : static unsigned long xas_size(const struct xa_state *xas)
402 : {
403 : return (xas->xa_sibs + 1UL) << xas->xa_shift;
404 : }
405 : #endif
406 :
407 : /*
408 : * Use this to calculate the maximum index that will need to be created
409 : * in order to add the entry described by @xas. Because we cannot store a
410 : * multi-index entry at index 0, the calculation is a little more complex
411 : * than you might expect.
412 : */
413 : static unsigned long xas_max(struct xa_state *xas)
414 : {
415 112 : unsigned long max = xas->xa_index;
416 :
417 : #ifdef CONFIG_XARRAY_MULTI
418 : if (xas->xa_shift || xas->xa_sibs) {
419 : unsigned long mask = xas_size(xas) - 1;
420 : max |= mask;
421 : if (mask == max)
422 : max++;
423 : }
424 : #endif
425 :
426 : return max;
427 : }
428 :
429 : /* The maximum index that can be contained in the array without expanding it */
430 : static unsigned long max_index(void *entry)
431 : {
432 323 : if (!xa_is_node(entry))
433 : return 0;
434 1 : return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
435 : }
436 :
437 0 : static void xas_shrink(struct xa_state *xas)
438 : {
439 0 : struct xarray *xa = xas->xa;
440 0 : struct xa_node *node = xas->xa_node;
441 :
442 0 : for (;;) {
443 : void *entry;
444 :
445 : XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
446 0 : if (node->count != 1)
447 : break;
448 0 : entry = xa_entry_locked(xa, node, 0);
449 0 : if (!entry)
450 : break;
451 0 : if (!xa_is_node(entry) && node->shift)
452 : break;
453 0 : if (xa_is_zero(entry) && xa_zero_busy(xa))
454 0 : entry = NULL;
455 0 : xas->xa_node = XAS_BOUNDS;
456 :
457 0 : RCU_INIT_POINTER(xa->xa_head, entry);
458 0 : if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
459 0 : xa_mark_clear(xa, XA_FREE_MARK);
460 :
461 0 : node->count = 0;
462 0 : node->nr_values = 0;
463 0 : if (!xa_is_node(entry))
464 0 : RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
465 0 : xas_update(xas, node);
466 0 : xa_node_free(node);
467 0 : if (!xa_is_node(entry))
468 : break;
469 0 : node = xa_to_node(entry);
470 0 : node->parent = NULL;
471 : }
472 0 : }
473 :
474 : /*
475 : * xas_delete_node() - Attempt to delete an xa_node
476 : * @xas: Array operation state.
477 : *
478 : * Attempts to delete @xas->xa_node. This will fail if the node has
479 : * a non-zero reference count.
480 : */
481 0 : static void xas_delete_node(struct xa_state *xas)
482 : {
483 0 : struct xa_node *node = xas->xa_node;
484 :
485 : for (;;) {
486 : struct xa_node *parent;
487 :
488 : XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
489 0 : if (node->count)
490 : break;
491 :
492 0 : parent = xa_parent_locked(xas->xa, node);
493 0 : xas->xa_node = parent;
494 0 : xas->xa_offset = node->offset;
495 0 : xa_node_free(node);
496 :
497 0 : if (!parent) {
498 0 : xas->xa->xa_head = NULL;
499 0 : xas->xa_node = XAS_BOUNDS;
500 0 : return;
501 : }
502 :
503 0 : parent->slots[xas->xa_offset] = NULL;
504 0 : parent->count--;
505 : XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
506 0 : node = parent;
507 0 : xas_update(xas, node);
508 : }
509 :
510 0 : if (!node->parent)
511 0 : xas_shrink(xas);
512 : }
513 :
514 : /**
515 : * xas_free_nodes() - Free this node and all nodes that it references
516 : * @xas: Array operation state.
517 : * @top: Node to free
518 : *
519 : * This node has been removed from the tree. We must now free it and all
520 : * of its subnodes. There may be RCU walkers with references into the tree,
521 : * so we must replace all entries with retry markers.
522 : */
523 0 : static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
524 : {
525 0 : unsigned int offset = 0;
526 0 : struct xa_node *node = top;
527 :
528 : for (;;) {
529 0 : void *entry = xa_entry_locked(xas->xa, node, offset);
530 :
531 0 : if (node->shift && xa_is_node(entry)) {
532 0 : node = xa_to_node(entry);
533 0 : offset = 0;
534 0 : continue;
535 : }
536 0 : if (entry)
537 0 : RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
538 0 : offset++;
539 0 : while (offset == XA_CHUNK_SIZE) {
540 : struct xa_node *parent;
541 :
542 0 : parent = xa_parent_locked(xas->xa, node);
543 0 : offset = node->offset + 1;
544 0 : node->count = 0;
545 0 : node->nr_values = 0;
546 0 : xas_update(xas, node);
547 0 : xa_node_free(node);
548 0 : if (node == top)
549 0 : return;
550 : node = parent;
551 : }
552 : }
553 : }
554 :
555 : /*
556 : * xas_expand() adds nodes to the head of the tree until it has reached
557 : * sufficient height to contain @xas->xa_index.
558 : */
559 112 : static int xas_expand(struct xa_state *xas, void *head)
560 : {
561 112 : struct xarray *xa = xas->xa;
562 112 : struct xa_node *node = NULL;
563 112 : unsigned int shift = 0;
564 224 : unsigned long max = xas_max(xas);
565 :
566 112 : if (!head) {
567 8 : if (max == 0)
568 : return 0;
569 2 : while ((max >> shift) >= XA_CHUNK_SIZE)
570 1 : shift += XA_CHUNK_SHIFT;
571 1 : return shift + XA_CHUNK_SHIFT;
572 104 : } else if (xa_is_node(head)) {
573 1 : node = xa_to_node(head);
574 1 : shift = node->shift + XA_CHUNK_SHIFT;
575 : }
576 104 : xas->xa_node = NULL;
577 :
578 208 : while (max > max_index(head)) {
579 0 : xa_mark_t mark = 0;
580 :
581 : XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
582 0 : node = xas_alloc(xas, shift);
583 0 : if (!node)
584 : return -ENOMEM;
585 :
586 0 : node->count = 1;
587 0 : if (xa_is_value(head))
588 0 : node->nr_values = 1;
589 0 : RCU_INIT_POINTER(node->slots[0], head);
590 :
591 : /* Propagate the aggregated mark info to the new child */
592 : for (;;) {
593 0 : if (xa_track_free(xa) && mark == XA_FREE_MARK) {
594 0 : node_mark_all(node, XA_FREE_MARK);
595 0 : if (!xa_marked(xa, XA_FREE_MARK)) {
596 0 : node_clear_mark(node, 0, XA_FREE_MARK);
597 0 : xa_mark_set(xa, XA_FREE_MARK);
598 : }
599 0 : } else if (xa_marked(xa, mark)) {
600 : node_set_mark(node, 0, mark);
601 : }
602 0 : if (mark == XA_MARK_MAX)
603 : break;
604 0 : mark_inc(mark);
605 : }
606 :
607 : /*
608 : * Now that the new node is fully initialised, we can add
609 : * it to the tree
610 : */
611 0 : if (xa_is_node(head)) {
612 0 : xa_to_node(head)->offset = 0;
613 0 : rcu_assign_pointer(xa_to_node(head)->parent, node);
614 : }
615 0 : head = xa_mk_node(node);
616 0 : rcu_assign_pointer(xa->xa_head, head);
617 0 : xas_update(xas, node);
618 :
619 0 : shift += XA_CHUNK_SHIFT;
620 : }
621 :
622 104 : xas->xa_node = node;
623 104 : return shift;
624 : }
625 :
626 : /*
627 : * xas_create() - Create a slot to store an entry in.
628 : * @xas: XArray operation state.
629 : * @allow_root: %true if we can store the entry in the root directly
630 : *
631 : * Most users will not need to call this function directly, as it is called
632 : * by xas_store(). It is useful for doing conditional store operations
633 : * (see the xa_cmpxchg() implementation for an example).
634 : *
635 : * Return: If the slot already existed, returns the contents of this slot.
636 : * If the slot was newly created, returns %NULL. If it failed to create the
637 : * slot, returns %NULL and indicates the error in @xas.
638 : */
639 112 : static void *xas_create(struct xa_state *xas, bool allow_root)
640 : {
641 112 : struct xarray *xa = xas->xa;
642 : void *entry;
643 : void __rcu **slot;
644 112 : struct xa_node *node = xas->xa_node;
645 : int shift;
646 112 : unsigned int order = xas->xa_shift;
647 :
648 112 : if (xas_top(node)) {
649 112 : entry = xa_head_locked(xa);
650 112 : xas->xa_node = NULL;
651 120 : if (!entry && xa_zero_busy(xa))
652 0 : entry = XA_ZERO_ENTRY;
653 112 : shift = xas_expand(xas, entry);
654 112 : if (shift < 0)
655 : return NULL;
656 112 : if (!shift && !allow_root)
657 0 : shift = XA_CHUNK_SHIFT;
658 112 : entry = xa_head_locked(xa);
659 112 : slot = &xa->xa_head;
660 0 : } else if (xas_error(xas)) {
661 : return NULL;
662 0 : } else if (node) {
663 0 : unsigned int offset = xas->xa_offset;
664 :
665 0 : shift = node->shift;
666 0 : entry = xa_entry_locked(xa, node, offset);
667 0 : slot = &node->slots[offset];
668 : } else {
669 0 : shift = 0;
670 0 : entry = xa_head_locked(xa);
671 0 : slot = &xa->xa_head;
672 : }
673 :
674 116 : while (shift > order) {
675 4 : shift -= XA_CHUNK_SHIFT;
676 4 : if (!entry) {
677 2 : node = xas_alloc(xas, shift);
678 2 : if (!node)
679 : break;
680 4 : if (xa_track_free(xa))
681 : node_mark_all(node, XA_FREE_MARK);
682 2 : rcu_assign_pointer(*slot, xa_mk_node(node));
683 2 : } else if (xa_is_node(entry)) {
684 2 : node = xa_to_node(entry);
685 : } else {
686 : break;
687 : }
688 4 : entry = xas_descend(xas, node);
689 4 : slot = &node->slots[xas->xa_offset];
690 : }
691 :
692 : return entry;
693 : }
694 :
695 : /**
696 : * xas_create_range() - Ensure that stores to this range will succeed
697 : * @xas: XArray operation state.
698 : *
699 : * Creates all of the slots in the range covered by @xas. Sets @xas to
700 : * create single-index entries and positions it at the beginning of the
701 : * range. This is for the benefit of users that have not yet been
702 : * converted to use multi-index entries.
703 : */
704 0 : void xas_create_range(struct xa_state *xas)
705 : {
706 0 : unsigned long index = xas->xa_index;
707 0 : unsigned char shift = xas->xa_shift;
708 0 : unsigned char sibs = xas->xa_sibs;
709 :
710 0 : xas->xa_index |= ((sibs + 1UL) << shift) - 1;
711 0 : if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
712 0 : xas->xa_offset |= sibs;
713 0 : xas->xa_shift = 0;
714 0 : xas->xa_sibs = 0;
715 :
716 : for (;;) {
717 0 : xas_create(xas, true);
718 0 : if (xas_error(xas))
719 : goto restore;
720 0 : if (xas->xa_index <= (index | XA_CHUNK_MASK))
721 : goto success;
722 0 : xas->xa_index -= XA_CHUNK_SIZE;
723 :
724 : for (;;) {
725 0 : struct xa_node *node = xas->xa_node;
726 0 : if (node->shift >= shift)
727 : break;
728 0 : xas->xa_node = xa_parent_locked(xas->xa, node);
729 0 : xas->xa_offset = node->offset - 1;
730 0 : if (node->offset != 0)
731 : break;
732 : }
733 : }
734 :
735 : restore:
736 0 : xas->xa_shift = shift;
737 0 : xas->xa_sibs = sibs;
738 0 : xas->xa_index = index;
739 0 : return;
740 : success:
741 0 : xas->xa_index = index;
742 0 : if (xas->xa_node)
743 : xas_set_offset(xas);
744 : }
745 : EXPORT_SYMBOL_GPL(xas_create_range);
746 :
747 112 : static void update_node(struct xa_state *xas, struct xa_node *node,
748 : int count, int values)
749 : {
750 112 : if (!node || (!count && !values))
751 : return;
752 :
753 2 : node->count += count;
754 2 : node->nr_values += values;
755 : XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
756 : XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
757 4 : xas_update(xas, node);
758 2 : if (count < 0)
759 0 : xas_delete_node(xas);
760 : }
761 :
762 : /**
763 : * xas_store() - Store this entry in the XArray.
764 : * @xas: XArray operation state.
765 : * @entry: New entry.
766 : *
767 : * If @xas is operating on a multi-index entry, the entry returned by this
768 : * function is essentially meaningless (it may be an internal entry or it
769 : * may be %NULL, even if there are non-NULL entries at some of the indices
770 : * covered by the range). This is not a problem for any current users,
771 : * and can be changed if needed.
772 : *
773 : * Return: The old entry at this index.
774 : */
775 112 : void *xas_store(struct xa_state *xas, void *entry)
776 : {
777 : struct xa_node *node;
778 112 : void __rcu **slot = &xas->xa->xa_head;
779 : unsigned int offset, max;
780 112 : int count = 0;
781 112 : int values = 0;
782 : void *first, *next;
783 112 : bool value = xa_is_value(entry);
784 :
785 112 : if (entry) {
786 224 : bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
787 112 : first = xas_create(xas, allow_root);
788 : } else {
789 0 : first = xas_load(xas);
790 : }
791 :
792 224 : if (xas_invalid(xas))
793 : return first;
794 112 : node = xas->xa_node;
795 112 : if (node && (xas->xa_shift < node->shift))
796 0 : xas->xa_sibs = 0;
797 112 : if ((first == entry) && !xas->xa_sibs)
798 : return first;
799 :
800 112 : next = first;
801 112 : offset = xas->xa_offset;
802 112 : max = xas->xa_offset + xas->xa_sibs;
803 112 : if (node) {
804 2 : slot = &node->slots[offset];
805 2 : if (xas->xa_sibs)
806 0 : xas_squash_marks(xas);
807 : }
808 112 : if (!entry)
809 0 : xas_init_marks(xas);
810 :
811 : for (;;) {
812 : /*
813 : * Must clear the marks before setting the entry to NULL,
814 : * otherwise xas_for_each_marked may find a NULL entry and
815 : * stop early. rcu_assign_pointer contains a release barrier
816 : * so the mark clearing will appear to happen before the
817 : * entry is set to NULL.
818 : */
819 112 : rcu_assign_pointer(*slot, entry);
820 112 : if (xa_is_node(next) && (!node || node->shift))
821 0 : xas_free_nodes(xas, xa_to_node(next));
822 112 : if (!node)
823 : break;
824 2 : count += !next - !entry;
825 2 : values += !xa_is_value(first) - !value;
826 2 : if (entry) {
827 2 : if (offset == max)
828 : break;
829 0 : if (!xa_is_sibling(entry))
830 0 : entry = xa_mk_sibling(xas->xa_offset);
831 : } else {
832 0 : if (offset == XA_CHUNK_MASK)
833 : break;
834 : }
835 0 : next = xa_entry_locked(xas->xa, node, ++offset);
836 : if (!xa_is_sibling(next)) {
837 0 : if (!entry && (offset > max))
838 : break;
839 0 : first = next;
840 : }
841 0 : slot++;
842 : }
843 :
844 112 : update_node(xas, node, count, values);
845 112 : return first;
846 : }
847 : EXPORT_SYMBOL_GPL(xas_store);
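/*
 * Example (editor's sketch): exchanging an entry while the caller already
 * holds the xa_lock; the return value is the previous entry, and storing
 * NULL is exactly how __xa_erase() later in this file is implemented. No
 * allocation retry is shown; see xas_nomem() above. example_exchange()
 * is a hypothetical helper.
 */
static void *example_exchange(struct xarray *xa, unsigned long index, void *new)
{
	XA_STATE(xas, xa, index);

	return xas_store(&xas, new);
}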
848 :
849 : /**
850 : * xas_get_mark() - Returns the state of this mark.
851 : * @xas: XArray operation state.
852 : * @mark: Mark number.
853 : *
854 : * Return: true if the mark is set, false if the mark is clear or @xas
855 : * is in an error state.
856 : */
857 0 : bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
858 : {
859 0 : if (xas_invalid(xas))
860 : return false;
861 0 : if (!xas->xa_node)
862 0 : return xa_marked(xas->xa, mark);
863 0 : return node_get_mark(xas->xa_node, xas->xa_offset, mark);
864 : }
865 : EXPORT_SYMBOL_GPL(xas_get_mark);
866 :
867 : /**
868 : * xas_set_mark() - Sets the mark on this entry and its parents.
869 : * @xas: XArray operation state.
870 : * @mark: Mark number.
871 : *
872 : * Sets the specified mark on this entry, and walks up the tree setting it
873 : * on all the ancestor entries. Does nothing if @xas has not been walked to
874 : * an entry, or is in an error state.
875 : */
876 0 : void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
877 : {
878 0 : struct xa_node *node = xas->xa_node;
879 0 : unsigned int offset = xas->xa_offset;
880 :
881 0 : if (xas_invalid(xas))
882 : return;
883 :
884 0 : while (node) {
885 0 : if (node_set_mark(node, offset, mark))
886 : return;
887 0 : offset = node->offset;
888 0 : node = xa_parent_locked(xas->xa, node);
889 : }
890 :
891 0 : if (!xa_marked(xas->xa, mark))
892 0 : xa_mark_set(xas->xa, mark);
893 : }
894 : EXPORT_SYMBOL_GPL(xas_set_mark);
895 :
896 : /**
897 : * xas_clear_mark() - Clears the mark on this entry and its parents.
898 : * @xas: XArray operation state.
899 : * @mark: Mark number.
900 : *
901 : * Clears the specified mark on this entry, and walks back to the head
902 : * attempting to clear it on all the ancestor entries. Does nothing if
903 : * @xas has not been walked to an entry, or is in an error state.
904 : */
905 0 : void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
906 : {
907 0 : struct xa_node *node = xas->xa_node;
908 0 : unsigned int offset = xas->xa_offset;
909 :
910 0 : if (xas_invalid(xas))
911 : return;
912 :
913 0 : while (node) {
914 0 : if (!node_clear_mark(node, offset, mark))
915 : return;
916 0 : if (node_any_mark(node, mark))
917 : return;
918 :
919 0 : offset = node->offset;
920 0 : node = xa_parent_locked(xas->xa, node);
921 : }
922 :
923 0 : if (xa_marked(xas->xa, mark))
924 0 : xa_mark_clear(xas->xa, mark);
925 : }
926 : EXPORT_SYMBOL_GPL(xas_clear_mark);
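/*
 * Example (editor's sketch): setting a mark with the advanced API. The
 * load-then-mark sequence under the lock mirrors __xa_set_mark() later in
 * this file; marks on empty slots are skipped. example_tag() is a
 * hypothetical helper.
 */
static void example_tag(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);

	xas_lock(&xas);
	if (xas_load(&xas))
		xas_set_mark(&xas, XA_MARK_0);
	xas_unlock(&xas);
}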
927 :
928 : /**
929 : * xas_init_marks() - Initialise all marks for the entry
930 : * @xas: Array operations state.
931 : *
932 : * Initialise all marks for the entry specified by @xas. If we're tracking
933 : * free entries with a mark, we need to set it on all entries. All other
934 : * marks are cleared.
935 : *
936 : * This implementation is not as efficient as it could be; we may walk
937 : * up the tree multiple times.
938 : */
939 0 : void xas_init_marks(const struct xa_state *xas)
940 : {
941 0 : xa_mark_t mark = 0;
942 :
943 : for (;;) {
944 0 : if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
945 0 : xas_set_mark(xas, mark);
946 : else
947 0 : xas_clear_mark(xas, mark);
948 0 : if (mark == XA_MARK_MAX)
949 : break;
950 0 : mark_inc(mark);
951 : }
952 0 : }
953 : EXPORT_SYMBOL_GPL(xas_init_marks);
954 :
955 : #ifdef CONFIG_XARRAY_MULTI
956 : static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
957 : {
958 : unsigned int marks = 0;
959 : xa_mark_t mark = XA_MARK_0;
960 :
961 : for (;;) {
962 : if (node_get_mark(node, offset, mark))
963 : marks |= 1 << (__force unsigned int)mark;
964 : if (mark == XA_MARK_MAX)
965 : break;
966 : mark_inc(mark);
967 : }
968 :
969 : return marks;
970 : }
971 :
972 : static void node_set_marks(struct xa_node *node, unsigned int offset,
973 : struct xa_node *child, unsigned int marks)
974 : {
975 : xa_mark_t mark = XA_MARK_0;
976 :
977 : for (;;) {
978 : if (marks & (1 << (__force unsigned int)mark)) {
979 : node_set_mark(node, offset, mark);
980 : if (child)
981 : node_mark_all(child, mark);
982 : }
983 : if (mark == XA_MARK_MAX)
984 : break;
985 : mark_inc(mark);
986 : }
987 : }
988 :
989 : /**
990 : * xas_split_alloc() - Allocate memory for splitting an entry.
991 : * @xas: XArray operation state.
992 : * @entry: New entry which will be stored in the array.
993 : * @order: Current entry order.
994 : * @gfp: Memory allocation flags.
995 : *
996 : * This function should be called before calling xas_split().
997 : * If necessary, it will allocate new nodes (and fill them with @entry)
998 : * to prepare for the upcoming split of an entry of @order size into
999 : * entries of the order stored in the @xas.
1000 : *
1001 : * Context: May sleep if @gfp flags permit.
1002 : */
1003 : void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
1004 : gfp_t gfp)
1005 : {
1006 : unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
1007 : unsigned int mask = xas->xa_sibs;
1008 :
1009 : /* XXX: no support for splitting really large entries yet */
1010 : if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
1011 : goto nomem;
1012 : if (xas->xa_shift + XA_CHUNK_SHIFT > order)
1013 : return;
1014 :
1015 : do {
1016 : unsigned int i;
1017 : void *sibling = NULL;
1018 : struct xa_node *node;
1019 :
1020 : node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
1021 : if (!node)
1022 : goto nomem;
1023 : node->array = xas->xa;
1024 : for (i = 0; i < XA_CHUNK_SIZE; i++) {
1025 : if ((i & mask) == 0) {
1026 : RCU_INIT_POINTER(node->slots[i], entry);
1027 : sibling = xa_mk_sibling(i);
1028 : } else {
1029 : RCU_INIT_POINTER(node->slots[i], sibling);
1030 : }
1031 : }
1032 : RCU_INIT_POINTER(node->parent, xas->xa_alloc);
1033 : xas->xa_alloc = node;
1034 : } while (sibs-- > 0);
1035 :
1036 : return;
1037 : nomem:
1038 : xas_destroy(xas);
1039 : xas_set_err(xas, -ENOMEM);
1040 : }
1041 : EXPORT_SYMBOL_GPL(xas_split_alloc);
1042 :
1043 : /**
1044 : * xas_split() - Split a multi-index entry into smaller entries.
1045 : * @xas: XArray operation state.
1046 : * @entry: New entry to store in the array.
1047 : * @order: Current entry order.
1048 : *
1049 : * The size of the new entries is set in @xas. The value in @entry is
1050 : * copied to all the replacement entries.
1051 : *
1052 : * Context: Any context. The caller should hold the xa_lock.
1053 : */
1054 : void xas_split(struct xa_state *xas, void *entry, unsigned int order)
1055 : {
1056 : unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
1057 : unsigned int offset, marks;
1058 : struct xa_node *node;
1059 : void *curr = xas_load(xas);
1060 : int values = 0;
1061 :
1062 : node = xas->xa_node;
1063 : if (xas_top(node))
1064 : return;
1065 :
1066 : marks = node_get_marks(node, xas->xa_offset);
1067 :
1068 : offset = xas->xa_offset + sibs;
1069 : do {
1070 : if (xas->xa_shift < node->shift) {
1071 : struct xa_node *child = xas->xa_alloc;
1072 :
1073 : xas->xa_alloc = rcu_dereference_raw(child->parent);
1074 : child->shift = node->shift - XA_CHUNK_SHIFT;
1075 : child->offset = offset;
1076 : child->count = XA_CHUNK_SIZE;
1077 : child->nr_values = xa_is_value(entry) ?
1078 : XA_CHUNK_SIZE : 0;
1079 : RCU_INIT_POINTER(child->parent, node);
1080 : node_set_marks(node, offset, child, marks);
1081 : rcu_assign_pointer(node->slots[offset],
1082 : xa_mk_node(child));
1083 : if (xa_is_value(curr))
1084 : values--;
1085 : xas_update(xas, child);
1086 : } else {
1087 : unsigned int canon = offset - xas->xa_sibs;
1088 :
1089 : node_set_marks(node, canon, NULL, marks);
1090 : rcu_assign_pointer(node->slots[canon], entry);
1091 : while (offset > canon)
1092 : rcu_assign_pointer(node->slots[offset--],
1093 : xa_mk_sibling(canon));
1094 : values += (xa_is_value(entry) - xa_is_value(curr)) *
1095 : (xas->xa_sibs + 1);
1096 : }
1097 : } while (offset-- > xas->xa_offset);
1098 :
1099 : node->nr_values += values;
1100 : xas_update(xas, node);
1101 : }
1102 : EXPORT_SYMBOL_GPL(xas_split);
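/*
 * Example (editor's sketch): splitting a multi-index entry, following the
 * xas_split_alloc()/xas_split() pairing used by the page cache. The old
 * order of 6 and the example_split() helper are assumptions for
 * illustration; XA_STATE_ORDER() sets the new (smaller) order.
 */
static int example_split(struct xarray *xa, unsigned long index)
{
	void *entry = xa_load(xa, index);
	XA_STATE_ORDER(xas, xa, index, 0);	/* split into order-0 entries */

	xas_split_alloc(&xas, entry, 6, GFP_KERNEL);	/* may sleep */
	if (xas_error(&xas))
		return xas_error(&xas);

	xas_lock(&xas);
	xas_split(&xas, entry, 6);
	xas_unlock(&xas);
	return 0;
}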
1103 : #endif
1104 :
1105 : /**
1106 : * xas_pause() - Pause a walk to drop a lock.
1107 : * @xas: XArray operation state.
1108 : *
1109 : * Some users need to pause a walk and drop the lock they're holding in
1110 : * order to yield to a higher priority thread or carry out an operation
1111 : * on an entry. Those users should call this function before they drop
1112 : * the lock. It resets the @xas to be suitable for the next iteration
1113 : * of the loop after the user has reacquired the lock. If most entries
1114 : * found during a walk require you to call xas_pause(), the xa_for_each()
1115 : * iterator may be more appropriate.
1116 : *
1117 : * Note that xas_pause() only works for forward iteration. If a user needs
1118 : * to pause a reverse iteration, we will need an xas_pause_rev().
1119 : */
1120 0 : void xas_pause(struct xa_state *xas)
1121 : {
1122 0 : struct xa_node *node = xas->xa_node;
1123 :
1124 0 : if (xas_invalid(xas))
1125 : return;
1126 :
1127 0 : xas->xa_node = XAS_RESTART;
1128 0 : if (node) {
1129 0 : unsigned long offset = xas->xa_offset;
1130 0 : while (++offset < XA_CHUNK_SIZE) {
1131 0 : if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
1132 : break;
1133 : }
1134 0 : xas->xa_index += (offset - xas->xa_offset) << node->shift;
1135 0 : if (xas->xa_index == 0)
1136 0 : xas->xa_node = XAS_BOUNDS;
1137 : } else {
1138 0 : xas->xa_index++;
1139 : }
1140 : }
1141 : EXPORT_SYMBOL_GPL(xas_pause);
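/*
 * Example (editor's sketch): yielding the CPU during a long walk. After
 * xas_pause(), the state may safely be re-walked once the lock is retaken.
 * example_walk() and the commented-out process_entry() are hypothetical.
 */
static void example_walk(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (need_resched()) {
			xas_pause(&xas);
			rcu_read_unlock();
			cond_resched();
			rcu_read_lock();
		}
		/* process_entry(entry); */
	}
	rcu_read_unlock();
}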
1142 :
1143 : /*
1144 : * __xas_prev() - Find the previous entry in the XArray.
1145 : * @xas: XArray operation state.
1146 : *
1147 : * Helper function for xas_prev() which handles all the complex cases
1148 : * out of line.
1149 : */
1150 0 : void *__xas_prev(struct xa_state *xas)
1151 : {
1152 : void *entry;
1153 :
1154 0 : if (!xas_frozen(xas->xa_node))
1155 0 : xas->xa_index--;
1156 0 : if (!xas->xa_node)
1157 0 : return set_bounds(xas);
1158 0 : if (xas_not_node(xas->xa_node))
1159 0 : return xas_load(xas);
1160 :
1161 0 : if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
1162 0 : xas->xa_offset--;
1163 :
1164 0 : while (xas->xa_offset == 255) {
1165 0 : xas->xa_offset = xas->xa_node->offset - 1;
1166 0 : xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1167 0 : if (!xas->xa_node)
1168 0 : return set_bounds(xas);
1169 : }
1170 :
1171 : for (;;) {
1172 0 : entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1173 0 : if (!xa_is_node(entry))
1174 : return entry;
1175 :
1176 0 : xas->xa_node = xa_to_node(entry);
1177 : xas_set_offset(xas);
1178 : }
1179 : }
1180 : EXPORT_SYMBOL_GPL(__xas_prev);
1181 :
1182 : /*
1183 : * __xas_next() - Find the next entry in the XArray.
1184 : * @xas: XArray operation state.
1185 : *
1186 : * Helper function for xas_next() which handles all the complex cases
1187 : * out of line.
1188 : */
1189 0 : void *__xas_next(struct xa_state *xas)
1190 : {
1191 : void *entry;
1192 :
1193 0 : if (!xas_frozen(xas->xa_node))
1194 0 : xas->xa_index++;
1195 0 : if (!xas->xa_node)
1196 0 : return set_bounds(xas);
1197 0 : if (xas_not_node(xas->xa_node))
1198 0 : return xas_load(xas);
1199 :
1200 0 : if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
1201 0 : xas->xa_offset++;
1202 :
1203 0 : while (xas->xa_offset == XA_CHUNK_SIZE) {
1204 0 : xas->xa_offset = xas->xa_node->offset + 1;
1205 0 : xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1206 0 : if (!xas->xa_node)
1207 0 : return set_bounds(xas);
1208 : }
1209 :
1210 : for (;;) {
1211 0 : entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1212 0 : if (!xa_is_node(entry))
1213 : return entry;
1214 :
1215 0 : xas->xa_node = xa_to_node(entry);
1216 : xas_set_offset(xas);
1217 : }
1218 : }
1219 : EXPORT_SYMBOL_GPL(__xas_next);
1220 :
1221 : /**
1222 : * xas_find() - Find the next present entry in the XArray.
1223 : * @xas: XArray operation state.
1224 : * @max: Highest index to return.
1225 : *
1226 : * If the @xas has not yet been walked to an entry, return the entry
1227 : * which has an index >= xas.xa_index. If it has been walked, the entry
1228 : * currently being pointed at has been processed, and so we move to the
1229 : * next entry.
1230 : *
1231 : * If no entry is found and the array is smaller than @max, the iterator
1232 : * is set to the smallest index not yet in the array. This allows @xas
1233 : * to be immediately passed to xas_store().
1234 : *
1235 : * Return: The entry, if found, otherwise %NULL.
1236 : */
1237 5 : void *xas_find(struct xa_state *xas, unsigned long max)
1238 : {
1239 : void *entry;
1240 :
1241 10 : if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
1242 : return NULL;
1243 5 : if (xas->xa_index > max)
1244 0 : return set_bounds(xas);
1245 :
1246 5 : if (!xas->xa_node) {
1247 0 : xas->xa_index = 1;
1248 0 : return set_bounds(xas);
1249 5 : } else if (xas->xa_node == XAS_RESTART) {
1250 5 : entry = xas_load(xas);
1251 10 : if (entry || xas_not_node(xas->xa_node))
1252 : return entry;
1253 0 : } else if (!xas->xa_node->shift &&
1254 0 : xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
1255 0 : xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
1256 : }
1257 :
1258 : xas_next_offset(xas);
1259 :
1260 0 : while (xas->xa_node && (xas->xa_index <= max)) {
1261 0 : if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
1262 0 : xas->xa_offset = xas->xa_node->offset + 1;
1263 0 : xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1264 0 : continue;
1265 : }
1266 :
1267 0 : entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1268 0 : if (xa_is_node(entry)) {
1269 0 : xas->xa_node = xa_to_node(entry);
1270 0 : xas->xa_offset = 0;
1271 0 : continue;
1272 : }
1273 0 : if (entry && !xa_is_sibling(entry))
1274 : return entry;
1275 :
1276 : xas_next_offset(xas);
1277 : }
1278 :
1279 0 : if (!xas->xa_node)
1280 0 : xas->xa_node = XAS_BOUNDS;
1281 : return NULL;
1282 : }
1283 : EXPORT_SYMBOL_GPL(xas_find);
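/*
 * Example (editor's sketch): xas_find() is the engine behind the
 * xas_for_each() iterator; calling it repeatedly visits each present
 * entry in turn. example_count() is a hypothetical helper.
 */
static unsigned long example_count(struct xarray *xa, unsigned long max)
{
	XA_STATE(xas, xa, 0);
	unsigned long count = 0;
	void *entry;

	rcu_read_lock();
	for (entry = xas_find(&xas, max); entry; entry = xas_find(&xas, max)) {
		if (!xas_retry(&xas, entry))
			count++;
	}
	rcu_read_unlock();
	return count;
}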
1284 :
1285 : /**
1286 : * xas_find_marked() - Find the next marked entry in the XArray.
1287 : * @xas: XArray operation state.
1288 : * @max: Highest index to return.
1289 : * @mark: Mark number to search for.
1290 : *
1291 : * If the @xas has not yet been walked to an entry, return the marked entry
1292 : * which has an index >= xas.xa_index. If it has been walked, the entry
1293 : * currently being pointed at has been processed, and so we return the
1294 : * first marked entry with an index > xas.xa_index.
1295 : *
1296 : * If no marked entry is found and the array is smaller than @max, @xas is
1297 : * set to the bounds state and xas->xa_index is set to the smallest index
1298 : * not yet in the array. This allows @xas to be immediately passed to
1299 : * xas_store().
1300 : *
1301 : * If no entry is found before @max is reached, @xas is set to the restart
1302 : * state.
1303 : *
1304 : * Return: The entry, if found, otherwise %NULL.
1305 : */
1306 219 : void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
1307 : {
1308 219 : bool advance = true;
1309 : unsigned int offset;
1310 : void *entry;
1311 :
1312 438 : if (xas_error(xas))
1313 : return NULL;
1314 219 : if (xas->xa_index > max)
1315 : goto max;
1316 :
1317 219 : if (!xas->xa_node) {
1318 0 : xas->xa_index = 1;
1319 0 : goto out;
1320 219 : } else if (xas_top(xas->xa_node)) {
1321 219 : advance = false;
1322 438 : entry = xa_head(xas->xa);
1323 219 : xas->xa_node = NULL;
1324 438 : if (xas->xa_index > max_index(entry))
1325 : goto out;
1326 219 : if (!xa_is_node(entry)) {
1327 438 : if (xa_marked(xas->xa, mark))
1328 : return entry;
1329 0 : xas->xa_index = 1;
1330 0 : goto out;
1331 : }
1332 0 : xas->xa_node = xa_to_node(entry);
1333 0 : xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
1334 : }
1335 :
1336 0 : while (xas->xa_index <= max) {
1337 0 : if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
1338 0 : xas->xa_offset = xas->xa_node->offset + 1;
1339 0 : xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1340 0 : if (!xas->xa_node)
1341 : break;
1342 0 : advance = false;
1343 0 : continue;
1344 : }
1345 :
1346 0 : if (!advance) {
1347 0 : entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1348 : if (xa_is_sibling(entry)) {
1349 : xas->xa_offset = xa_to_sibling(entry);
1350 : xas_move_index(xas, xas->xa_offset);
1351 : }
1352 : }
1353 :
1354 0 : offset = xas_find_chunk(xas, advance, mark);
1355 0 : if (offset > xas->xa_offset) {
1356 0 : advance = false;
1357 0 : xas_move_index(xas, offset);
1358 : /* Mind the wrap */
1359 0 : if ((xas->xa_index - 1) >= max)
1360 : goto max;
1361 0 : xas->xa_offset = offset;
1362 0 : if (offset == XA_CHUNK_SIZE)
1363 0 : continue;
1364 : }
1365 :
1366 0 : entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1367 0 : if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
1368 0 : continue;
1369 0 : if (!xa_is_node(entry))
1370 : return entry;
1371 0 : xas->xa_node = xa_to_node(entry);
1372 : xas_set_offset(xas);
1373 : }
1374 :
1375 : out:
1376 0 : if (xas->xa_index > max)
1377 : goto max;
1378 0 : return set_bounds(xas);
1379 : max:
1380 0 : xas->xa_node = XAS_RESTART;
1381 0 : return NULL;
1382 : }
1383 : EXPORT_SYMBOL_GPL(xas_find_marked);
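/*
 * Example (editor's sketch): iterating only over marked entries with
 * xas_for_each_marked(), which is built on xas_find_marked().
 * example_count_tagged() is a hypothetical helper.
 */
static unsigned long example_count_tagged(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long count = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		count++;
	rcu_read_unlock();
	return count;
}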
1384 :
1385 : /**
1386 : * xas_find_conflict() - Find the next present entry in a range.
1387 : * @xas: XArray operation state.
1388 : *
1389 : * The @xas describes both a range and a position within that range.
1390 : *
1391 : * Context: Any context. Expects xa_lock to be held.
1392 : * Return: The next entry in the range covered by @xas or %NULL.
1393 : */
1394 0 : void *xas_find_conflict(struct xa_state *xas)
1395 : {
1396 : void *curr;
1397 :
1398 0 : if (xas_error(xas))
1399 : return NULL;
1400 :
1401 0 : if (!xas->xa_node)
1402 : return NULL;
1403 :
1404 0 : if (xas_top(xas->xa_node)) {
1405 0 : curr = xas_start(xas);
1406 0 : if (!curr)
1407 : return NULL;
1408 0 : while (xa_is_node(curr)) {
1409 0 : struct xa_node *node = xa_to_node(curr);
1410 0 : curr = xas_descend(xas, node);
1411 : }
1412 0 : if (curr)
1413 : return curr;
1414 : }
1415 :
1416 0 : if (xas->xa_node->shift > xas->xa_shift)
1417 : return NULL;
1418 :
1419 : for (;;) {
1420 0 : if (xas->xa_node->shift == xas->xa_shift) {
1421 0 : if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
1422 : break;
1423 0 : } else if (xas->xa_offset == XA_CHUNK_MASK) {
1424 0 : xas->xa_offset = xas->xa_node->offset;
1425 0 : xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
1426 0 : if (!xas->xa_node)
1427 : break;
1428 0 : continue;
1429 : }
1430 0 : curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
1431 : if (xa_is_sibling(curr))
1432 : continue;
1433 0 : while (xa_is_node(curr)) {
1434 0 : xas->xa_node = xa_to_node(curr);
1435 0 : xas->xa_offset = 0;
1436 0 : curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
1437 : }
1438 0 : if (curr)
1439 : return curr;
1440 : }
1441 0 : xas->xa_offset -= xas->xa_sibs;
1442 0 : return NULL;
1443 : }
1444 : EXPORT_SYMBOL_GPL(xas_find_conflict);
1445 :
1446 : /**
1447 : * xa_load() - Load an entry from an XArray.
1448 : * @xa: XArray.
1449 : * @index: index into array.
1450 : *
1451 : * Context: Any context. Takes and releases the RCU lock.
1452 : * Return: The entry at @index in @xa.
1453 : */
1454 2 : void *xa_load(struct xarray *xa, unsigned long index)
1455 : {
1456 2 : XA_STATE(xas, xa, index);
1457 : void *entry;
1458 :
1459 : rcu_read_lock();
1460 : do {
1461 2 : entry = xas_load(&xas);
1462 2 : if (xa_is_zero(entry))
1463 0 : entry = NULL;
1464 2 : } while (xas_retry(&xas, entry));
1465 : rcu_read_unlock();
1466 :
1467 2 : return entry;
1468 : }
1469 : EXPORT_SYMBOL(xa_load);
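/*
 * Example (editor's sketch): the normal API needs no locking from the
 * caller; xa_load() takes the RCU lock itself. example_present() is a
 * hypothetical helper.
 */
static bool example_present(struct xarray *xa, unsigned long index)
{
	return xa_load(xa, index) != NULL;
}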
1470 :
1471 : static void *xas_result(struct xa_state *xas, void *curr)
1472 : {
1473 2 : if (xa_is_zero(curr))
1474 : return NULL;
1475 4 : if (xas_error(xas))
1476 0 : curr = xas->xa_node;
1477 : return curr;
1478 : }
1479 :
1480 : /**
1481 : * __xa_erase() - Erase this entry from the XArray while locked.
1482 : * @xa: XArray.
1483 : * @index: Index into array.
1484 : *
1485 : * After this function returns, loading from @index will return %NULL.
1486 : * If the index is part of a multi-index entry, all indices will be erased
1487 : * and none of the entries will be part of a multi-index entry.
1488 : *
1489 : * Context: Any context. Expects xa_lock to be held on entry.
1490 : * Return: The entry which used to be at this index.
1491 : */
1492 0 : void *__xa_erase(struct xarray *xa, unsigned long index)
1493 : {
1494 0 : XA_STATE(xas, xa, index);
1495 0 : return xas_result(&xas, xas_store(&xas, NULL));
1496 : }
1497 : EXPORT_SYMBOL(__xa_erase);
1498 :
1499 : /**
1500 : * xa_erase() - Erase this entry from the XArray.
1501 : * @xa: XArray.
1502 : * @index: Index of entry.
1503 : *
1504 : * After this function returns, loading from @index will return %NULL.
1505 : * If the index is part of a multi-index entry, all indices will be erased
1506 : * and none of the entries will be part of a multi-index entry.
1507 : *
1508 : * Context: Any context. Takes and releases the xa_lock.
1509 : * Return: The entry which used to be at this index.
1510 : */
1511 0 : void *xa_erase(struct xarray *xa, unsigned long index)
1512 : {
1513 : void *entry;
1514 :
1515 0 : xa_lock(xa);
1516 0 : entry = __xa_erase(xa, index);
1517 0 : xa_unlock(xa);
1518 :
1519 0 : return entry;
1520 : }
1521 : EXPORT_SYMBOL(xa_erase);
1522 :
1523 : /**
1524 : * __xa_store() - Store this entry in the XArray.
1525 : * @xa: XArray.
1526 : * @index: Index into array.
1527 : * @entry: New entry.
1528 : * @gfp: Memory allocation flags.
1529 : *
1530 : * You must already be holding the xa_lock when calling this function.
1531 : * It will drop the lock if needed to allocate memory, and then reacquire
1532 : * it afterwards.
1533 : *
1534 : * Context: Any context. Expects xa_lock to be held on entry. May
1535 : * release and reacquire xa_lock if @gfp flags permit.
1536 : * Return: The old entry at this index or xa_err() if an error happened.
1537 : */
1538 2 : void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1539 : {
1540 2 : XA_STATE(xas, xa, index);
1541 : void *curr;
1542 :
1543 2 : if (WARN_ON_ONCE(xa_is_advanced(entry)))
1544 : return XA_ERROR(-EINVAL);
1545 4 : if (xa_track_free(xa) && !entry)
1546 0 : entry = XA_ZERO_ENTRY;
1547 :
1548 : do {
1549 2 : curr = xas_store(&xas, entry);
1550 4 : if (xa_track_free(xa))
1551 0 : xas_clear_mark(&xas, XA_FREE_MARK);
1552 2 : } while (__xas_nomem(&xas, gfp));
1553 :
1554 : return xas_result(&xas, curr);
1555 : }
1556 : EXPORT_SYMBOL(__xa_store);
1557 :
1558 : /**
1559 : * xa_store() - Store this entry in the XArray.
1560 : * @xa: XArray.
1561 : * @index: Index into array.
1562 : * @entry: New entry.
1563 : * @gfp: Memory allocation flags.
1564 : *
1565 : * After this function returns, loads from this index will return @entry.
1566 : * Storing into an existing multi-index entry updates the entry of every index.
1567 : * The marks associated with @index are unaffected unless @entry is %NULL.
1568 : *
1569 : * Context: Any context. Takes and releases the xa_lock.
1570 : * May sleep if the @gfp flags permit.
1571 : * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
1572 : * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
1573 : * failed.
1574 : */
1575 2 : void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1576 : {
1577 : void *curr;
1578 :
1579 4 : xa_lock(xa);
1580 2 : curr = __xa_store(xa, index, entry, gfp);
1581 4 : xa_unlock(xa);
1582 :
1583 2 : return curr;
1584 : }
1585 : EXPORT_SYMBOL(xa_store);
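/*
 * Example (editor's sketch): storing a pointer and decoding failure with
 * xa_err(). Storing NULL is equivalent to xa_erase(). example_put() is a
 * hypothetical helper.
 */
static int example_put(struct xarray *xa, unsigned long index, void *item)
{
	void *old = xa_store(xa, index, item, GFP_KERNEL);

	return xa_err(old);	/* 0 on success, negative errno on failure */
}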
1586 :
1587 : /**
1588 : * __xa_cmpxchg() - Store this entry in the XArray.
1589 : * @xa: XArray.
1590 : * @index: Index into array.
1591 : * @old: Old value to test against.
1592 : * @entry: New entry.
1593 : * @gfp: Memory allocation flags.
1594 : *
1595 : * You must already be holding the xa_lock when calling this function.
1596 : * It will drop the lock if needed to allocate memory, and then reacquire
1597 : * it afterwards.
1598 : *
1599 : * Context: Any context. Expects xa_lock to be held on entry. May
1600 : * release and reacquire xa_lock if @gfp flags permit.
1601 : * Return: The old entry at this index or xa_err() if an error happened.
1602 : */
1603 0 : void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
1604 : void *old, void *entry, gfp_t gfp)
1605 : {
1606 0 : XA_STATE(xas, xa, index);
1607 : void *curr;
1608 :
1609 0 : if (WARN_ON_ONCE(xa_is_advanced(entry)))
1610 : return XA_ERROR(-EINVAL);
1611 :
1612 : do {
1613 0 : curr = xas_load(&xas);
1614 0 : if (curr == old) {
1615 0 : xas_store(&xas, entry);
1616 0 : if (xa_track_free(xa) && entry && !curr)
1617 0 : xas_clear_mark(&xas, XA_FREE_MARK);
1618 : }
1619 0 : } while (__xas_nomem(&xas, gfp));
1620 :
1621 : return xas_result(&xas, curr);
1622 : }
1623 : EXPORT_SYMBOL(__xa_cmpxchg);
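/*
 * Example (editor's sketch): insert-if-absent using the unlocked wrapper
 * xa_cmpxchg() from <linux/xarray.h>; whichever writer stored first wins.
 * example_get_or_put() is a hypothetical helper.
 */
static void *example_get_or_put(struct xarray *xa, unsigned long index,
				void *item)
{
	void *curr = xa_cmpxchg(xa, index, NULL, item, GFP_KERNEL);

	if (xa_is_err(curr))
		return NULL;		/* allocation failed */
	return curr ? curr : item;	/* the entry now in the array */
}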
1624 :
1625 : /**
1626 : * __xa_insert() - Store this entry in the XArray if no entry is present.
1627 : * @xa: XArray.
1628 : * @index: Index into array.
1629 : * @entry: New entry.
1630 : * @gfp: Memory allocation flags.
1631 : *
1632 : * Inserting a NULL entry will store a reserved entry (like xa_reserve())
1633 : * if no entry is present. Inserting will fail if a reserved entry is
1634 : * present, even though loading from this index will return NULL.
1635 : *
1636 : * Context: Any context. Expects xa_lock to be held on entry. May
1637 : * release and reacquire xa_lock if @gfp flags permit.
1638 : * Return: 0 if the store succeeded. -EBUSY if another entry was present.
1639 : * -ENOMEM if memory could not be allocated.
1640 : */
1641 0 : int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1642 : {
1643 0 : XA_STATE(xas, xa, index);
1644 : void *curr;
1645 :
1646 0 : if (WARN_ON_ONCE(xa_is_advanced(entry)))
1647 : return -EINVAL;
1648 0 : if (!entry)
1649 0 : entry = XA_ZERO_ENTRY;
1650 :
1651 : do {
1652 0 : curr = xas_load(&xas);
1653 0 : if (!curr) {
1654 0 : xas_store(&xas, entry);
1655 0 : if (xa_track_free(xa))
1656 0 : xas_clear_mark(&xas, XA_FREE_MARK);
1657 : } else {
1658 0 : xas_set_err(&xas, -EBUSY);
1659 : }
1660 0 : } while (__xas_nomem(&xas, gfp));
1661 :
1662 0 : return xas_error(&xas);
1663 : }
1664 : EXPORT_SYMBOL(__xa_insert);
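/*
 * Example (editor's sketch): claiming an index with the locking wrapper
 * xa_insert() from <linux/xarray.h>, which returns -EBUSY rather than
 * overwriting. example_claim() is a hypothetical helper.
 */
static int example_claim(struct xarray *xa, unsigned long index, void *item)
{
	int err = xa_insert(xa, index, item, GFP_KERNEL);

	if (err == -EBUSY)
		pr_debug("index %lu already claimed\n", index);
	return err;
}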
1665 :
1666 : #ifdef CONFIG_XARRAY_MULTI
1667 : static void xas_set_range(struct xa_state *xas, unsigned long first,
1668 : unsigned long last)
1669 : {
1670 : unsigned int shift = 0;
1671 : unsigned long sibs = last - first;
1672 : unsigned int offset = XA_CHUNK_MASK;
1673 :
1674 : xas_set(xas, first);
1675 :
1676 : while ((first & XA_CHUNK_MASK) == 0) {
1677 : if (sibs < XA_CHUNK_MASK)
1678 : break;
1679 : if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
1680 : break;
1681 : shift += XA_CHUNK_SHIFT;
1682 : if (offset == XA_CHUNK_MASK)
1683 : offset = sibs & XA_CHUNK_MASK;
1684 : sibs >>= XA_CHUNK_SHIFT;
1685 : first >>= XA_CHUNK_SHIFT;
1686 : }
1687 :
1688 : offset = first & XA_CHUNK_MASK;
1689 : if (offset + sibs > XA_CHUNK_MASK)
1690 : sibs = XA_CHUNK_MASK - offset;
1691 : if ((((first + sibs + 1) << shift) - 1) > last)
1692 : sibs -= 1;
1693 :
1694 : xas->xa_shift = shift;
1695 : xas->xa_sibs = sibs;
1696 : }
1697 :
1698 : /**
1699 : * xa_store_range() - Store this entry at a range of indices in the XArray.
1700 : * @xa: XArray.
1701 : * @first: First index to affect.
1702 : * @last: Last index to affect.
1703 : * @entry: New entry.
1704 : * @gfp: Memory allocation flags.
1705 : *
1706 : * After this function returns, loads from any index between @first and @last,
1707 : * inclusive, will return @entry.
1708 : * Storing into an existing multi-index entry updates the entry of every index.
1709 : * The marks associated with these indices are unaffected unless @entry is %NULL.
1710 : *
1711 : * Context: Process context. Takes and releases the xa_lock. May sleep
1712 : * if the @gfp flags permit.
1713 : * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
1714 : * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
1715 : */
1716 : void *xa_store_range(struct xarray *xa, unsigned long first,
1717 : unsigned long last, void *entry, gfp_t gfp)
1718 : {
1719 : XA_STATE(xas, xa, 0);
1720 :
1721 : if (WARN_ON_ONCE(xa_is_internal(entry)))
1722 : return XA_ERROR(-EINVAL);
1723 : if (last < first)
1724 : return XA_ERROR(-EINVAL);
1725 :
1726 : do {
1727 : xas_lock(&xas);
1728 : if (entry) {
1729 : unsigned int order = BITS_PER_LONG;
1730 : if (last + 1)
1731 : order = __ffs(last + 1);
1732 : xas_set_order(&xas, last, order);
1733 : xas_create(&xas, true);
1734 : if (xas_error(&xas))
1735 : goto unlock;
1736 : }
1737 : do {
1738 : xas_set_range(&xas, first, last);
1739 : xas_store(&xas, entry);
1740 : if (xas_error(&xas))
1741 : goto unlock;
1742 : first += xas_size(&xas);
1743 : } while (first <= last);
1744 : unlock:
1745 : xas_unlock(&xas);
1746 : } while (xas_nomem(&xas, gfp));
1747 :
1748 : return xas_result(&xas, NULL);
1749 : }
1750 : EXPORT_SYMBOL(xa_store_range);
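/*
 * Example (editor's sketch): covering indices 16-31 with a single entry;
 * afterwards a load from any index in that range returns @item.
 * example_span() is a hypothetical helper.
 */
static int example_span(struct xarray *xa, void *item)
{
	return xa_err(xa_store_range(xa, 16, 31, item, GFP_KERNEL));
}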
1751 :
1752 : /**
1753 : * xa_get_order() - Get the order of an entry.
1754 : * @xa: XArray.
1755 : * @index: Index of the entry.
1756 : *
1757 : * Return: A number between 0 and 63 indicating the order of the entry.
1758 : */
1759 : int xa_get_order(struct xarray *xa, unsigned long index)
1760 : {
1761 : XA_STATE(xas, xa, index);
1762 : void *entry;
1763 : int order = 0;
1764 :
1765 : rcu_read_lock();
1766 : entry = xas_load(&xas);
1767 :
1768 : if (!entry)
1769 : goto unlock;
1770 :
1771 : if (!xas.xa_node)
1772 : goto unlock;
1773 :
1774 : for (;;) {
1775 : unsigned int slot = xas.xa_offset + (1 << order);
1776 :
1777 : if (slot >= XA_CHUNK_SIZE)
1778 : break;
1779 : if (!xa_is_sibling(xas.xa_node->slots[slot]))
1780 : break;
1781 : order++;
1782 : }
1783 :
1784 : order += xas.xa_node->shift;
1785 : unlock:
1786 : rcu_read_unlock();
1787 :
1788 : return order;
1789 : }
1790 : EXPORT_SYMBOL(xa_get_order);
1791 : #endif /* CONFIG_XARRAY_MULTI */
1792 :
1793 : /**
1794 : * __xa_alloc() - Find somewhere to store this entry in the XArray.
1795 : * @xa: XArray.
1796 : * @id: Pointer to ID.
1797 : * @limit: Range for allocated ID.
1798 : * @entry: New entry.
1799 : * @gfp: Memory allocation flags.
1800 : *
1801 : * Finds an empty entry in @xa between @limit.min and @limit.max,
1802 : * stores the index into the @id pointer, then stores the entry at
1803 : * that index. A concurrent lookup will not see an uninitialised @id.
1804 : *
1805 : * Context: Any context. Expects xa_lock to be held on entry. May
1806 : * release and reacquire xa_lock if @gfp flags permit.
1807 : * Return: 0 on success, -ENOMEM if memory could not be allocated, or
1808 : * -EBUSY if there are no free entries in @limit.
1809 : */
1810 0 : int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
1811 : struct xa_limit limit, gfp_t gfp)
1812 : {
1813 0 : XA_STATE(xas, xa, 0);
1814 :
1815 0 : if (WARN_ON_ONCE(xa_is_advanced(entry)))
1816 : return -EINVAL;
1817 0 : if (WARN_ON_ONCE(!xa_track_free(xa)))
1818 : return -EINVAL;
1819 :
1820 0 : if (!entry)
1821 0 : entry = XA_ZERO_ENTRY;
1822 :
1823 : do {
1824 0 : xas.xa_index = limit.min;
1825 0 : xas_find_marked(&xas, limit.max, XA_FREE_MARK);
1826 0 : if (xas.xa_node == XAS_RESTART)
1827 0 : xas_set_err(&xas, -EBUSY);
1828 : else
1829 0 : *id = xas.xa_index;
1830 0 : xas_store(&xas, entry);
1831 0 : xas_clear_mark(&xas, XA_FREE_MARK);
1832 0 : } while (__xas_nomem(&xas, gfp));
1833 :
1834 0 : return xas_error(&xas);
1835 : }
1836 : EXPORT_SYMBOL(__xa_alloc);
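/*
 * Example (editor's sketch): allocating an ID under the caller's lock.
 * "sessions" and session_insert() are hypothetical; the array must be
 * defined with DEFINE_XARRAY_ALLOC() (or XA_FLAGS_ALLOC) so free-entry
 * tracking is enabled.
 *
 *	static DEFINE_XARRAY_ALLOC(sessions);
 *
 *	static int session_insert(void *session, u32 *id)
 *	{
 *		int err;
 *
 *		xa_lock(&sessions);
 *		err = __xa_alloc(&sessions, id, session, xa_limit_32b,
 *				 GFP_KERNEL);
 *		xa_unlock(&sessions);
 *		return err;
 *	}
 *
 * This is essentially what the xa_alloc() convenience wrapper does.  With
 * GFP_KERNEL the lock may be dropped and retaken internally to allocate
 * nodes; callers that cannot tolerate that should pass GFP_NOWAIT or
 * GFP_ATOMIC instead.
 */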
1837 :
1838 : /**
1839 : * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
1840 : * @xa: XArray.
1841 : * @id: Pointer to ID.
1842 : * @entry: New entry.
1843 : * @limit: Range of allocated ID.
1844 : * @next: Pointer to next ID to allocate.
1845 : * @gfp: Memory allocation flags.
1846 : *
1847 : * Finds an empty entry in @xa between @limit.min and @limit.max,
1848 : * stores the index into the @id pointer, then stores the entry at
1849 : * that index. A concurrent lookup will not see an uninitialised @id.
1850 : * The search for an empty entry will start at @next and will wrap
1851 : * around if necessary.
1852 : *
1853 : * Context: Any context. Expects xa_lock to be held on entry. May
1854 : * release and reacquire xa_lock if @gfp flags permit.
1855 : * Return: 0 if the allocation succeeded without wrapping, 1 if the
1856 : * allocation succeeded after wrapping, -ENOMEM if memory could not be
1857 : * allocated, or -EBUSY if there are no free entries in @limit.
1858 : */
1859 0 : int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
1860 : struct xa_limit limit, u32 *next, gfp_t gfp)
1861 : {
1862 0 : u32 min = limit.min;
1863 : int ret;
1864 :
1865 0 : limit.min = max(min, *next);
1866 0 : ret = __xa_alloc(xa, id, entry, limit, gfp);
1867 0 : if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
1868 0 : xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
1869 0 : ret = 1;
1870 : }
1871 :
1872 0 : if (ret < 0 && limit.min > min) {
1873 0 : limit.min = min;
1874 0 : ret = __xa_alloc(xa, id, entry, limit, gfp);
1875 0 : if (ret == 0)
1876 0 : ret = 1;
1877 : }
1878 :
1879 0 : if (ret >= 0) {
1880 0 : *next = *id + 1;
1881 0 : if (*next == 0)
1882 0 : xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
1883 : }
1884 0 : return ret;
1885 : }
1886 : EXPORT_SYMBOL(__xa_alloc_cyclic);
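/*
 * Example (editor's sketch): cyclic ID allocation, where each new ID is
 * higher than the last until the limit wraps.  "clients" and the helper
 * below are hypothetical names.
 *
 *	static DEFINE_XARRAY_ALLOC(clients);
 *	static u32 client_next;
 *
 *	static int client_insert(void *client, u32 *id)
 *	{
 *		int ret;
 *
 *		xa_lock(&clients);
 *		ret = __xa_alloc_cyclic(&clients, id, client, xa_limit_32b,
 *					&client_next, GFP_KERNEL);
 *		xa_unlock(&clients);
 *		return ret < 0 ? ret : 0;
 *	}
 *
 * A return of 1 (allocation succeeded after wrapping) is folded into 0
 * here; callers that care about wrapping can propagate it instead.  The
 * xa_alloc_cyclic() wrapper performs the same locking for you.
 */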
1887 :
1888 : /**
1889 : * __xa_set_mark() - Set this mark on this entry while locked.
1890 : * @xa: XArray.
1891 : * @index: Index of entry.
1892 : * @mark: Mark number.
1893 : *
1894 : * Attempting to set a mark on a %NULL entry does not succeed.
1895 : *
1896 : * Context: Any context. Expects xa_lock to be held on entry.
1897 : */
1898 0 : void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1899 : {
1900 0 : XA_STATE(xas, xa, index);
1901 0 : void *entry = xas_load(&xas);
1902 :
1903 0 : if (entry)
1904 0 : xas_set_mark(&xas, mark);
1905 0 : }
1906 : EXPORT_SYMBOL(__xa_set_mark);
1907 :
1908 : /**
1909 : * __xa_clear_mark() - Clear this mark on this entry while locked.
1910 : * @xa: XArray.
1911 : * @index: Index of entry.
1912 : * @mark: Mark number.
1913 : *
1914 : * Context: Any context. Expects xa_lock to be held on entry.
1915 : */
1916 0 : void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1917 : {
1918 0 : XA_STATE(xas, xa, index);
1919 0 : void *entry = xas_load(&xas);
1920 :
1921 0 : if (entry)
1922 0 : xas_clear_mark(&xas, mark);
1923 0 : }
1924 : EXPORT_SYMBOL(__xa_clear_mark);
1925 :
1926 : /**
1927 : * xa_get_mark() - Inquire whether this mark is set on this entry.
1928 : * @xa: XArray.
1929 : * @index: Index of entry.
1930 : * @mark: Mark number.
1931 : *
1932 : * This function uses the RCU read lock, so the result may be out of date
1933 : * by the time it returns. If you need the result to be stable, use a lock.
1934 : *
1935 : * Context: Any context. Takes and releases the RCU lock.
1936 : * Return: True if the entry at @index has this mark set, false if it doesn't.
1937 : */
1938 0 : bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1939 : {
1940 0 : XA_STATE(xas, xa, index);
1941 : void *entry;
1942 :
1943 : rcu_read_lock();
1944 0 : entry = xas_start(&xas);
1945 0 : while (xas_get_mark(&xas, mark)) {
1946 0 : if (!xa_is_node(entry))
1947 : goto found;
1948 0 : entry = xas_descend(&xas, xa_to_node(entry));
1949 : }
1950 : rcu_read_unlock();
1951 0 : return false;
1952 : found:
1953 : rcu_read_unlock();
1954 0 : return true;
1955 : }
1956 : EXPORT_SYMBOL(xa_get_mark);
1957 :
1958 : /**
1959 : * xa_set_mark() - Set this mark on this entry.
1960 : * @xa: XArray.
1961 : * @index: Index of entry.
1962 : * @mark: Mark number.
1963 : *
1964 : * Attempting to set a mark on a %NULL entry does not succeed.
1965 : *
1966 : * Context: Process context. Takes and releases the xa_lock.
1967 : */
1968 0 : void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1969 : {
1970 0 : xa_lock(xa);
1971 0 : __xa_set_mark(xa, index, mark);
1972 0 : xa_unlock(xa);
1973 0 : }
1974 : EXPORT_SYMBOL(xa_set_mark);
1975 :
1976 : /**
1977 : * xa_clear_mark() - Clear this mark on this entry.
1978 : * @xa: XArray.
1979 : * @index: Index of entry.
1980 : * @mark: Mark number.
1981 : *
1982 : * Clearing a mark always succeeds.
1983 : *
1984 : * Context: Process context. Takes and releases the xa_lock.
1985 : */
1986 0 : void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1987 : {
1988 0 : xa_lock(xa);
1989 0 : __xa_clear_mark(xa, index, mark);
1990 0 : xa_unlock(xa);
1991 0 : }
1992 : EXPORT_SYMBOL(xa_clear_mark);
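/*
 * Example (editor's sketch): using a mark as a "dirty" bit on the
 * hypothetical "tags" array.  XA_MARK_0 is one of the three mark bits
 * every XArray provides.
 *
 *	xa_set_mark(&tags, 5, XA_MARK_0);
 *	if (xa_get_mark(&tags, 5, XA_MARK_0))
 *		pr_debug("index 5 is dirty\n");
 *	xa_clear_mark(&tags, 5, XA_MARK_0);
 *
 * xa_set_mark() silently does nothing if index 5 holds no entry, so the
 * usual pattern is to set marks immediately after a successful store.
 */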
1993 :
1994 : /**
1995 : * xa_find() - Search the XArray for an entry.
1996 : * @xa: XArray.
1997 : * @indexp: Pointer to an index.
1998 : * @max: Maximum index to search to.
1999 : * @filter: Selection criterion.
2000 : *
2001 : * Finds the entry in @xa which matches the @filter, and has the lowest
2002 : * index that is at least @indexp and no more than @max.
2003 : * If an entry is found, @indexp is updated to be the index of the entry.
2004 : * This function is protected by the RCU read lock, so it may not find
2005 : * entries which are being simultaneously added. It will not return an
2006 : * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
2007 : *
2008 : * Context: Any context. Takes and releases the RCU lock.
2009 : * Return: The entry, if found, otherwise %NULL.
2010 : */
2011 0 : void *xa_find(struct xarray *xa, unsigned long *indexp,
2012 : unsigned long max, xa_mark_t filter)
2013 : {
2014 0 : XA_STATE(xas, xa, *indexp);
2015 : void *entry;
2016 :
2017 : rcu_read_lock();
2018 : do {
2019 0 : if ((__force unsigned int)filter < XA_MAX_MARKS)
2020 0 : entry = xas_find_marked(&xas, max, filter);
2021 : else
2022 0 : entry = xas_find(&xas, max);
2023 0 : } while (xas_retry(&xas, entry));
2024 : rcu_read_unlock();
2025 :
2026 0 : if (entry)
2027 0 : *indexp = xas.xa_index;
2028 0 : return entry;
2029 : }
2030 : EXPORT_SYMBOL(xa_find);
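/*
 * Example (editor's sketch): finding the first present entry at or above a
 * starting index in the hypothetical "tags" array.
 *
 *	unsigned long index = 0;
 *	void *entry = xa_find(&tags, &index, ULONG_MAX, XA_PRESENT);
 *
 * On success @entry is non-NULL and @index has been updated to the entry's
 * index.  Passing XA_MARK_0 instead of XA_PRESENT restricts the search to
 * entries with that mark set.
 */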
2031 :
2032 : static bool xas_sibling(struct xa_state *xas)
2033 : {
2034 0 : struct xa_node *node = xas->xa_node;
2035 : unsigned long mask;
2036 :
2037 : if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
2038 : return false;
2039 : mask = (XA_CHUNK_SIZE << node->shift) - 1;
2040 : return (xas->xa_index & mask) >
2041 : ((unsigned long)xas->xa_offset << node->shift);
2042 : }
2043 :
2044 : /**
2045 : * xa_find_after() - Search the XArray for a present entry.
2046 : * @xa: XArray.
2047 : * @indexp: Pointer to an index.
2048 : * @max: Maximum index to search to.
2049 : * @filter: Selection criterion.
2050 : *
2051 : * Finds the entry in @xa which matches the @filter and has the lowest
2052 : * index that is above @indexp and no more than @max.
2053 : * If an entry is found, @indexp is updated to be the index of the entry.
2054 : * This function is protected by the RCU read lock, so it may miss entries
2055 : * which are being simultaneously added. It will not return an
2056 : * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
2057 : *
2058 : * Context: Any context. Takes and releases the RCU lock.
2059 : * Return: The entry, if found, otherwise %NULL.
2060 : */
2061 0 : void *xa_find_after(struct xarray *xa, unsigned long *indexp,
2062 : unsigned long max, xa_mark_t filter)
2063 : {
2064 0 : XA_STATE(xas, xa, *indexp + 1);
2065 : void *entry;
2066 :
2067 0 : if (xas.xa_index == 0)	/* *indexp was ULONG_MAX; no index lies above it */
2068 : return NULL;
2069 :
2070 : rcu_read_lock();
2071 : for (;;) {
2072 0 : if ((__force unsigned int)filter < XA_MAX_MARKS)
2073 0 : entry = xas_find_marked(&xas, max, filter);
2074 : else
2075 0 : entry = xas_find(&xas, max);
2076 :
2077 0 : if (xas_invalid(&xas))
2078 : break;
2079 0 : if (xas_sibling(&xas))
2080 : continue;
2081 0 : if (!xas_retry(&xas, entry))
2082 : break;
2083 : }
2084 : rcu_read_unlock();
2085 :
2086 0 : if (entry)
2087 0 : *indexp = xas.xa_index;
2088 : return entry;
2089 : }
2090 : EXPORT_SYMBOL(xa_find_after);
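/*
 * Example (editor's sketch): xa_find() and xa_find_after() combine into the
 * canonical iteration loop -- this is essentially what the xa_for_each()
 * macro in <linux/xarray.h> expands to.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	for (entry = xa_find(&tags, &index, ULONG_MAX, XA_PRESENT);
 *	     entry;
 *	     entry = xa_find_after(&tags, &index, ULONG_MAX, XA_PRESENT)) {
 *		(process @entry, found at @index)
 *	}
 */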
2091 :
2092 0 : static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
2093 : unsigned long max, unsigned int n)
2094 : {
2095 : void *entry;
2096 0 : unsigned int i = 0;
2097 :
2098 : rcu_read_lock();
2099 0 : xas_for_each(xas, entry, max) {
2100 0 : if (xas_retry(xas, entry))
2101 0 : continue;
2102 0 : dst[i++] = entry;
2103 0 : if (i == n)
2104 : break;
2105 : }
2106 : rcu_read_unlock();
2107 :
2108 0 : return i;
2109 : }
2110 :
2111 0 : static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
2112 : unsigned long max, unsigned int n, xa_mark_t mark)
2113 : {
2114 : void *entry;
2115 0 : unsigned int i = 0;
2116 :
2117 : rcu_read_lock();
2118 0 : xas_for_each_marked(xas, entry, max, mark) {
2119 0 : if (xas_retry(xas, entry))
2120 0 : continue;
2121 0 : dst[i++] = entry;
2122 0 : if (i == n)
2123 : break;
2124 : }
2125 : rcu_read_unlock();
2126 :
2127 0 : return i;
2128 : }
2129 :
2130 : /**
2131 : * xa_extract() - Copy selected entries from the XArray into a normal array.
2132 : * @xa: The source XArray to copy from.
2133 : * @dst: The buffer to copy entries into.
2134 : * @start: The first index in the XArray eligible to be selected.
2135 : * @max: The last index in the XArray eligible to be selected.
2136 : * @n: The maximum number of entries to copy.
2137 : * @filter: Selection criterion.
2138 : *
2139 : * Copies up to @n entries that match @filter from the XArray. The
2140 : * copied entries will have indices between @start and @max, inclusive.
2141 : *
2142 : * The @filter may be an XArray mark value, in which case entries which are
2143 : * marked with that mark will be copied. It may also be %XA_PRESENT, in
2144 : * which case all entries which are not %NULL will be copied.
2145 : *
2146 : * The entries returned may not represent a snapshot of the XArray at a
2147 : * moment in time. For example, if another thread stores to index 5, then
2148 : * index 10, calling xa_extract() may return the old contents of index 5
2149 : * and the new contents of index 10. Indices not modified while this
2150 : * function is running will not be skipped.
2151 : *
2152 : * If you need stronger guarantees, holding the xa_lock across calls to this
2153 : * function will prevent concurrent modification.
2154 : *
2155 : * Context: Any context. Takes and releases the RCU lock.
2156 : * Return: The number of entries copied.
2157 : */
2158 0 : unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
2159 : unsigned long max, unsigned int n, xa_mark_t filter)
2160 : {
2161 0 : XA_STATE(xas, xa, start);
2162 :
2163 0 : if (!n)
2164 : return 0;
2165 :
2166 0 : if ((__force unsigned int)filter < XA_MAX_MARKS)
2167 0 : return xas_extract_marked(&xas, dst, max, n, filter);
2168 0 : return xas_extract_present(&xas, dst, max, n);
2169 : }
2170 : EXPORT_SYMBOL(xa_extract);
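/*
 * Example (editor's sketch): copying up to a fixed batch of present entries
 * out of the hypothetical "tags" array.
 *
 *	void *batch[16];
 *	unsigned int count;
 *
 *	count = xa_extract(&tags, batch, 0, ULONG_MAX, 16, XA_PRESENT);
 *
 * @count entries (at most 16) are now in @batch.  Note that the indices are
 * not reported; use xa_for_each() when the index of each entry matters.
 */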
2171 :
2172 : /**
2173 : * xa_delete_node() - Private interface for workingset code.
2174 : * @node: Node to be removed from the tree.
2175 : * @update: Function to call to update ancestor nodes.
2176 : *
2177 : * Context: xa_lock must be held on entry and will not be released.
2178 : */
2179 0 : void xa_delete_node(struct xa_node *node, xa_update_node_t update)
2180 : {
2181 0 : struct xa_state xas = {
2182 0 : .xa = node->array,
2183 0 : .xa_index = (unsigned long)node->offset <<
2184 0 : (node->shift + XA_CHUNK_SHIFT),
2185 0 : .xa_shift = node->shift + XA_CHUNK_SHIFT,
2186 : .xa_offset = node->offset,
2187 0 : .xa_node = xa_parent_locked(node->array, node),
2188 : .xa_update = update,
2189 : };
2190 :
2191 0 : xas_store(&xas, NULL);
2192 0 : }
2193 : EXPORT_SYMBOL_GPL(xa_delete_node); /* For the benefit of the test suite */
2194 :
2195 : /**
2196 : * xa_destroy() - Free all internal data structures.
2197 : * @xa: XArray.
2198 : *
2199 : * After calling this function, the XArray is empty and has freed all memory
2200 : * allocated for its internal data structures. You are responsible for
2201 : * freeing the objects referenced by the XArray.
2202 : *
2203 : * Context: Any context. Takes and releases the xa_lock, interrupt-safe.
2204 : */
2205 0 : void xa_destroy(struct xarray *xa)
2206 : {
2207 0 : XA_STATE(xas, xa, 0);
2208 : unsigned long flags;
2209 : void *entry;
2210 :
2211 0 : xas.xa_node = NULL;
2212 0 : xas_lock_irqsave(&xas, flags);
2213 0 : entry = xa_head_locked(xa);
2214 0 : RCU_INIT_POINTER(xa->xa_head, NULL);
2215 0 : xas_init_marks(&xas);
2216 0 : if (xa_zero_busy(xa))
2217 0 : xa_mark_clear(xa, XA_FREE_MARK);
2218 : /* lockdep checks we're still holding the lock in xas_free_nodes() */
2219 0 : if (xa_is_node(entry))
2220 0 : xas_free_nodes(&xas, xa_to_node(entry));
2221 0 : xas_unlock_irqrestore(&xas, flags);
2222 0 : }
2223 : EXPORT_SYMBOL(xa_destroy);
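/*
 * Example (editor's sketch): tearing down an XArray whose entries were
 * kmalloc()ed by the caller.  xa_destroy() frees only the internal nodes,
 * so the referenced objects must be released first.
 *
 *	unsigned long index;
 *	void *entry;
 *
 *	xa_for_each(&tags, index, entry)
 *		kfree(entry);
 *	xa_destroy(&tags);
 */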
2224 :
2225 : #ifdef XA_DEBUG
2226 : void xa_dump_node(const struct xa_node *node)
2227 : {
2228 : unsigned i, j;
2229 :
2230 : if (!node)
2231 : return;
2232 : if ((unsigned long)node & 3) {
2233 : pr_cont("node %px\n", node);
2234 : return;
2235 : }
2236 :
2237 : pr_cont("node %px %s %d parent %px shift %d count %d values %d "
2238 : "array %px list %px %px marks",
2239 : node, node->parent ? "offset" : "max", node->offset,
2240 : node->parent, node->shift, node->count, node->nr_values,
2241 : node->array, node->private_list.prev, node->private_list.next);
2242 : for (i = 0; i < XA_MAX_MARKS; i++)
2243 : for (j = 0; j < XA_MARK_LONGS; j++)
2244 : pr_cont(" %lx", node->marks[i][j]);
2245 : pr_cont("\n");
2246 : }
2247 :
2248 : void xa_dump_index(unsigned long index, unsigned int shift)
2249 : {
2250 : if (!shift)
2251 : pr_info("%lu: ", index);
2252 : else if (shift >= BITS_PER_LONG)
2253 : pr_info("0-%lu: ", ~0UL);
2254 : else
2255 : pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
2256 : }
2257 :
2258 : void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
2259 : {
2260 : if (!entry)
2261 : return;
2262 :
2263 : xa_dump_index(index, shift);
2264 :
2265 : if (xa_is_node(entry)) {
2266 : if (shift == 0) {
2267 : pr_cont("%px\n", entry);
2268 : } else {
2269 : unsigned long i;
2270 : struct xa_node *node = xa_to_node(entry);
2271 : xa_dump_node(node);
2272 : for (i = 0; i < XA_CHUNK_SIZE; i++)
2273 : xa_dump_entry(node->slots[i],
2274 : index + (i << node->shift), node->shift);
2275 : }
2276 : } else if (xa_is_value(entry))
2277 : pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
2278 : xa_to_value(entry), entry);
2279 : else if (!xa_is_internal(entry))
2280 : pr_cont("%px\n", entry);
2281 : else if (xa_is_retry(entry))
2282 : pr_cont("retry (%ld)\n", xa_to_internal(entry));
2283 : else if (xa_is_sibling(entry))
2284 : pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
2285 : else if (xa_is_zero(entry))
2286 : pr_cont("zero (%ld)\n", xa_to_internal(entry));
2287 : else
2288 : pr_cont("UNKNOWN ENTRY (%px)\n", entry);
2289 : }
2290 :
2291 : void xa_dump(const struct xarray *xa)
2292 : {
2293 : void *entry = xa->xa_head;
2294 : unsigned int shift = 0;
2295 :
2296 : pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
2297 : xa->xa_flags, xa_marked(xa, XA_MARK_0),
2298 : xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
2299 : if (xa_is_node(entry))
2300 : shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
2301 : xa_dump_entry(entry, 0, shift);
2302 : }
2303 : #endif
|