// SPDX-License-Identifier: GPL-2.0+
/*
 * XArray implementation
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Copyright (c) 2018-2020 Oracle
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/*
 * Coding conventions in this file:
 *
 * @xa is used to refer to the entire xarray.
 * @xas is the 'xarray operation state'.  It may be either a pointer to
 * an xa_state, or an xa_state stored on the stack.  This is an unfortunate
 * ambiguity.
 * @index is the index of the entry being operated on
 * @mark is an xa_mark_t; a small number indicating one of the mark bits.
 * @node refers to an xa_node; usually the primary one being operated on by
 * this function.
 * @offset is the index into the slots array inside an xa_node.
 * @parent refers to the @xa_node closer to the head than @node.
 * @entry refers to something stored in a slot in the xarray
 */

static inline unsigned int xa_lock_type(const struct xarray *xa)
{
        return (__force unsigned int)xa->xa_flags & 3;
}

static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
        if (lock_type == XA_LOCK_IRQ)
                xas_lock_irq(xas);
        else if (lock_type == XA_LOCK_BH)
                xas_lock_bh(xas);
        else
                xas_lock(xas);
}

static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
        if (lock_type == XA_LOCK_IRQ)
                xas_unlock_irq(xas);
        else if (lock_type == XA_LOCK_BH)
                xas_unlock_bh(xas);
        else
                xas_unlock(xas);
}

static inline bool xa_track_free(const struct xarray *xa)
{
        return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}

static inline bool xa_zero_busy(const struct xarray *xa)
{
        return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
}

static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
        if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
                xa->xa_flags |= XA_FLAGS_MARK(mark);
}

static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
        if (xa->xa_flags & XA_FLAGS_MARK(mark))
                xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}

static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
        return node->marks[(__force unsigned)mark];
}

static inline bool node_get_mark(struct xa_node *node,
                unsigned int offset, xa_mark_t mark)
{
        return test_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
                                xa_mark_t mark)
{
        return __test_and_set_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
                                xa_mark_t mark)
{
        return __test_and_clear_bit(offset, node_marks(node, mark));
}

static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
        return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}

static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
{
        bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
}

#define mark_inc(mark) do { \
        mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)

/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set.  Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
        unsigned int mark = 0;
        unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

        if (!xas->xa_sibs)
                return;

        do {
                unsigned long *marks = xas->xa_node->marks[mark];
                if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
                        continue;
                __set_bit(xas->xa_offset, marks);
                bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
        } while (mark++ != (__force unsigned)XA_MARK_MAX);
}

/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
        return (index >> node->shift) & XA_CHUNK_MASK;
}

static void xas_set_offset(struct xa_state *xas)
{
        xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}

/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
        unsigned int shift = xas->xa_node->shift;
        xas->xa_index &= ~XA_CHUNK_MASK << shift;
        xas->xa_index += offset << shift;
}

static void xas_next_offset(struct xa_state *xas)
{
        xas->xa_offset++;
        xas_move_index(xas, xas->xa_offset);
}

static void *set_bounds(struct xa_state *xas)
{
        xas->xa_node = XAS_BOUNDS;
        return NULL;
}

/*
 * Starts a walk.  If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to.  If we're in an
 * error state, return NULL.  If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
        void *entry;

        if (xas_valid(xas))
                return xas_reload(xas);
        if (xas_error(xas))
                return NULL;

        entry = xa_head(xas->xa);
        if (!xa_is_node(entry)) {
                if (xas->xa_index)
                        return set_bounds(xas);
        } else {
                if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
                        return set_bounds(xas);
        }

        xas->xa_node = NULL;
        return entry;
}

static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
        unsigned int offset = get_offset(xas->xa_index, node);
        void *entry = xa_entry(xas->xa, node, offset);

        xas->xa_node = node;
        if (xa_is_sibling(entry)) {
                offset = xa_to_sibling(entry);
                entry = xa_entry(xas->xa, node, offset);
                if (node->shift && xa_is_node(entry))
                        entry = XA_RETRY_ENTRY;
        }

        xas->xa_offset = offset;
        return entry;
}

/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index.  However, it will do nothing and return %NULL if
 * @xas is in an error state.  xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context.  The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
        void *entry = xas_start(xas);

        while (xa_is_node(entry)) {
                struct xa_node *node = xa_to_node(entry);

                if (xas->xa_shift > node->shift)
                        break;
                entry = xas_descend(xas, node);
                if (node->shift == 0)
                        break;
        }
        return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
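
/*
 * Example (illustrative sketch): a typical lookup with the advanced API
 * under the RCU read lock.  The caller my_lookup() is hypothetical.
 *
 *	static void *my_lookup(struct xarray *xa, unsigned long index)
 *	{
 *		XA_STATE(xas, xa, index);
 *		void *entry;
 *
 *		rcu_read_lock();
 *		do {
 *			entry = xas_load(&xas);
 *		} while (xas_retry(&xas, entry));
 *		rcu_read_unlock();
 *		return entry;
 *	}
 */
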
249 :
250 : /* Move the radix tree node cache here */
251 : extern struct kmem_cache *radix_tree_node_cachep;
252 : extern void radix_tree_node_rcu_free(struct rcu_head *head);
253 :
254 : #define XA_RCU_FREE ((struct xarray *)1)
255 :
256 : static void xa_node_free(struct xa_node *node)
257 : {
258 : XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
259 0 : node->array = XA_RCU_FREE;
260 0 : call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
261 : }
262 :
263 : /*
264 : * xas_destroy() - Free any resources allocated during the XArray operation.
265 : * @xas: XArray operation state.
266 : *
267 : * Most users will not need to call this function; it is called for you
268 : * by xas_nomem().
269 : */
270 0 : void xas_destroy(struct xa_state *xas)
271 : {
272 268 : struct xa_node *next, *node = xas->xa_alloc;
273 :
274 268 : while (node) {
275 : XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
276 0 : next = rcu_dereference_raw(node->parent);
277 0 : radix_tree_node_rcu_free(&node->rcu_head);
278 0 : xas->xa_alloc = node = next;
279 : }
280 0 : }
281 :
282 : /**
283 : * xas_nomem() - Allocate memory if needed.
284 : * @xas: XArray operation state.
285 : * @gfp: Memory allocation flags.
286 : *
287 : * If we need to add new nodes to the XArray, we try to allocate memory
288 : * with GFP_NOWAIT while holding the lock, which will usually succeed.
289 : * If it fails, @xas is flagged as needing memory to continue. The caller
290 : * should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
291 : * the caller should retry the operation.
292 : *
293 : * Forward progress is guaranteed as one node is allocated here and
294 : * stored in the xa_state where it will be found by xas_alloc(). More
295 : * nodes will likely be found in the slab allocator, but we do not tie
296 : * them up here.
297 : *
298 : * Return: true if memory was needed, and was successfully allocated.
299 : */
300 266 : bool xas_nomem(struct xa_state *xas, gfp_t gfp)
301 : {
302 266 : if (xas->xa_node != XA_ERROR(-ENOMEM)) {
303 : xas_destroy(xas);
304 : return false;
305 : }
306 0 : if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
307 0 : gfp |= __GFP_ACCOUNT;
308 0 : xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
309 0 : if (!xas->xa_alloc)
310 : return false;
311 0 : xas->xa_alloc->parent = NULL;
312 : XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
313 0 : xas->xa_node = XAS_RESTART;
314 0 : return true;
315 : }
316 : EXPORT_SYMBOL_GPL(xas_nomem);
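
/*
 * Example (illustrative sketch): the drop-the-lock-and-retry pattern
 * described above.  my_store() is a hypothetical caller.
 *
 *	static int my_store(struct xarray *xa, unsigned long index, void *entry)
 *	{
 *		XA_STATE(xas, xa, index);
 *
 *		do {
 *			xas_lock(&xas);
 *			xas_store(&xas, entry);
 *			xas_unlock(&xas);
 *		} while (xas_nomem(&xas, GFP_KERNEL));
 *		return xas_error(&xas);
 *	}
 */
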
/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
        __must_hold(xas->xa->xa_lock)
{
        unsigned int lock_type = xa_lock_type(xas->xa);

        if (xas->xa_node != XA_ERROR(-ENOMEM)) {
                xas_destroy(xas);
                return false;
        }
        if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
                gfp |= __GFP_ACCOUNT;
        if (gfpflags_allow_blocking(gfp)) {
                xas_unlock_type(xas, lock_type);
                xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
                xas_lock_type(xas, lock_type);
        } else {
                xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
        }
        if (!xas->xa_alloc)
                return false;
        xas->xa_alloc->parent = NULL;
        XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
        xas->xa_node = XAS_RESTART;
        return true;
}

static void xas_update(struct xa_state *xas, struct xa_node *node)
{
        if (xas->xa_update)
                xas->xa_update(node);
        else
                XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}

static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
        struct xa_node *parent = xas->xa_node;
        struct xa_node *node = xas->xa_alloc;

        if (xas_invalid(xas))
                return NULL;

        if (node) {
                xas->xa_alloc = NULL;
        } else {
                gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;

                if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
                        gfp |= __GFP_ACCOUNT;

                node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
                if (!node) {
                        xas_set_err(xas, -ENOMEM);
                        return NULL;
                }
        }

        if (parent) {
                node->offset = xas->xa_offset;
                parent->count++;
                XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
                xas_update(xas, parent);
        }
        XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
        XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
        node->shift = shift;
        node->count = 0;
        node->nr_values = 0;
        RCU_INIT_POINTER(node->parent, xas->xa_node);
        node->array = xas->xa;

        return node;
}

#ifdef CONFIG_XARRAY_MULTI
/* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
        return (xas->xa_sibs + 1UL) << xas->xa_shift;
}
#endif

/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas.  Because we cannot store a
 * multi-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
        unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
        if (xas->xa_shift || xas->xa_sibs) {
                unsigned long mask = xas_size(xas) - 1;
                max |= mask;
                if (mask == max)
                        max++;
        }
#endif

        return max;
}

/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
        if (!xa_is_node(entry))
                return 0;
        return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}

static void xas_shrink(struct xa_state *xas)
{
        struct xarray *xa = xas->xa;
        struct xa_node *node = xas->xa_node;

        for (;;) {
                void *entry;

                XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
                if (node->count != 1)
                        break;
                entry = xa_entry_locked(xa, node, 0);
                if (!entry)
                        break;
                if (!xa_is_node(entry) && node->shift)
                        break;
                if (xa_is_zero(entry) && xa_zero_busy(xa))
                        entry = NULL;
                xas->xa_node = XAS_BOUNDS;

                RCU_INIT_POINTER(xa->xa_head, entry);
                if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
                        xa_mark_clear(xa, XA_FREE_MARK);

                node->count = 0;
                node->nr_values = 0;
                if (!xa_is_node(entry))
                        RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
                xas_update(xas, node);
                xa_node_free(node);
                if (!xa_is_node(entry))
                        break;
                node = xa_to_node(entry);
                node->parent = NULL;
        }
}

/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node.  This will fail if @xas->xa_node
 * has a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
        struct xa_node *node = xas->xa_node;

        for (;;) {
                struct xa_node *parent;

                XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
                if (node->count)
                        break;

                parent = xa_parent_locked(xas->xa, node);
                xas->xa_node = parent;
                xas->xa_offset = node->offset;
                xa_node_free(node);

                if (!parent) {
                        xas->xa->xa_head = NULL;
                        xas->xa_node = XAS_BOUNDS;
                        return;
                }

                parent->slots[xas->xa_offset] = NULL;
                parent->count--;
                XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
                node = parent;
                xas_update(xas, node);
        }

        if (!node->parent)
                xas_shrink(xas);
}

/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree.  We must now free it and all
 * of its subnodes.  There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
        unsigned int offset = 0;
        struct xa_node *node = top;

        for (;;) {
                void *entry = xa_entry_locked(xas->xa, node, offset);

                if (node->shift && xa_is_node(entry)) {
                        node = xa_to_node(entry);
                        offset = 0;
                        continue;
                }
                if (entry)
                        RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
                offset++;
                while (offset == XA_CHUNK_SIZE) {
                        struct xa_node *parent;

                        parent = xa_parent_locked(xas->xa, node);
                        offset = node->offset + 1;
                        node->count = 0;
                        node->nr_values = 0;
                        xas_update(xas, node);
                        xa_node_free(node);
                        if (node == top)
                                return;
                        node = parent;
                }
        }
}

/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index.
 */
static int xas_expand(struct xa_state *xas, void *head)
{
        struct xarray *xa = xas->xa;
        struct xa_node *node = NULL;
        unsigned int shift = 0;
        unsigned long max = xas_max(xas);

        if (!head) {
                if (max == 0)
                        return 0;
                while ((max >> shift) >= XA_CHUNK_SIZE)
                        shift += XA_CHUNK_SHIFT;
                return shift + XA_CHUNK_SHIFT;
        } else if (xa_is_node(head)) {
                node = xa_to_node(head);
                shift = node->shift + XA_CHUNK_SHIFT;
        }
        xas->xa_node = NULL;

        while (max > max_index(head)) {
                xa_mark_t mark = 0;

                XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
                node = xas_alloc(xas, shift);
                if (!node)
                        return -ENOMEM;

                node->count = 1;
                if (xa_is_value(head))
                        node->nr_values = 1;
                RCU_INIT_POINTER(node->slots[0], head);

                /* Propagate the aggregated mark info to the new child */
                for (;;) {
                        if (xa_track_free(xa) && mark == XA_FREE_MARK) {
                                node_mark_all(node, XA_FREE_MARK);
                                if (!xa_marked(xa, XA_FREE_MARK)) {
                                        node_clear_mark(node, 0, XA_FREE_MARK);
                                        xa_mark_set(xa, XA_FREE_MARK);
                                }
                        } else if (xa_marked(xa, mark)) {
                                node_set_mark(node, 0, mark);
                        }
                        if (mark == XA_MARK_MAX)
                                break;
                        mark_inc(mark);
                }

                /*
                 * Now that the new node is fully initialised, we can add
                 * it to the tree
                 */
                if (xa_is_node(head)) {
                        xa_to_node(head)->offset = 0;
                        rcu_assign_pointer(xa_to_node(head)->parent, node);
                }
                head = xa_mk_node(node);
                rcu_assign_pointer(xa->xa_head, head);
                xas_update(xas, node);

                shift += XA_CHUNK_SHIFT;
        }

        xas->xa_node = node;
        return shift;
}

/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 * @allow_root: %true if we can store the entry in the root directly
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store().  It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns %NULL.  If it failed to create the
 * slot, returns %NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas, bool allow_root)
{
        struct xarray *xa = xas->xa;
        void *entry;
        void __rcu **slot;
        struct xa_node *node = xas->xa_node;
        int shift;
        unsigned int order = xas->xa_shift;

        if (xas_top(node)) {
                entry = xa_head_locked(xa);
                xas->xa_node = NULL;
                if (!entry && xa_zero_busy(xa))
                        entry = XA_ZERO_ENTRY;
                shift = xas_expand(xas, entry);
                if (shift < 0)
                        return NULL;
                if (!shift && !allow_root)
                        shift = XA_CHUNK_SHIFT;
                entry = xa_head_locked(xa);
                slot = &xa->xa_head;
        } else if (xas_error(xas)) {
                return NULL;
        } else if (node) {
                unsigned int offset = xas->xa_offset;

                shift = node->shift;
                entry = xa_entry_locked(xa, node, offset);
                slot = &node->slots[offset];
        } else {
                shift = 0;
                entry = xa_head_locked(xa);
                slot = &xa->xa_head;
        }

        while (shift > order) {
                shift -= XA_CHUNK_SHIFT;
                if (!entry) {
                        node = xas_alloc(xas, shift);
                        if (!node)
                                break;
                        if (xa_track_free(xa))
                                node_mark_all(node, XA_FREE_MARK);
                        rcu_assign_pointer(*slot, xa_mk_node(node));
                } else if (xa_is_node(entry)) {
                        node = xa_to_node(entry);
                } else {
                        break;
                }
                entry = xas_descend(xas, node);
                slot = &node->slots[xas->xa_offset];
        }

        return entry;
}

/**
 * xas_create_range() - Ensure that stores to this range will succeed
 * @xas: XArray operation state.
 *
 * Creates all of the slots in the range covered by @xas.  Sets @xas to
 * create single-index entries and positions it at the beginning of the
 * range.  This is for the benefit of users that have not yet been
 * converted to use multi-index entries.
 */
void xas_create_range(struct xa_state *xas)
{
        unsigned long index = xas->xa_index;
        unsigned char shift = xas->xa_shift;
        unsigned char sibs = xas->xa_sibs;

        xas->xa_index |= ((sibs + 1UL) << shift) - 1;
        if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
                xas->xa_offset |= sibs;
        xas->xa_shift = 0;
        xas->xa_sibs = 0;

        for (;;) {
                xas_create(xas, true);
                if (xas_error(xas))
                        goto restore;
                if (xas->xa_index <= (index | XA_CHUNK_MASK))
                        goto success;
                xas->xa_index -= XA_CHUNK_SIZE;

                for (;;) {
                        struct xa_node *node = xas->xa_node;
                        if (node->shift >= shift)
                                break;
                        xas->xa_node = xa_parent_locked(xas->xa, node);
                        xas->xa_offset = node->offset - 1;
                        if (node->offset != 0)
                                break;
                }
        }

restore:
        xas->xa_shift = shift;
        xas->xa_sibs = sibs;
        xas->xa_index = index;
        return;
success:
        xas->xa_index = index;
        if (xas->xa_node)
                xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);

static void update_node(struct xa_state *xas, struct xa_node *node,
                int count, int values)
{
        if (!node || (!count && !values))
                return;

        node->count += count;
        node->nr_values += values;
        XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
        XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
        xas_update(xas, node);
        if (count < 0)
                xas_delete_node(xas);
}

/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range).  This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
        struct xa_node *node;
        void __rcu **slot = &xas->xa->xa_head;
        unsigned int offset, max;
        int count = 0;
        int values = 0;
        void *first, *next;
        bool value = xa_is_value(entry);

        if (entry) {
                bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
                first = xas_create(xas, allow_root);
        } else {
                first = xas_load(xas);
        }

        if (xas_invalid(xas))
                return first;
        node = xas->xa_node;
        if (node && (xas->xa_shift < node->shift))
                xas->xa_sibs = 0;
        if ((first == entry) && !xas->xa_sibs)
                return first;

        next = first;
        offset = xas->xa_offset;
        max = xas->xa_offset + xas->xa_sibs;
        if (node) {
                slot = &node->slots[offset];
                if (xas->xa_sibs)
                        xas_squash_marks(xas);
        }
        if (!entry)
                xas_init_marks(xas);

        for (;;) {
                /*
                 * Must clear the marks before setting the entry to NULL,
                 * otherwise xas_for_each_marked may find a NULL entry and
                 * stop early.  rcu_assign_pointer contains a release barrier
                 * so the mark clearing will appear to happen before the
                 * entry is set to NULL.
                 */
                rcu_assign_pointer(*slot, entry);
                if (xa_is_node(next) && (!node || node->shift))
                        xas_free_nodes(xas, xa_to_node(next));
                if (!node)
                        break;
                count += !next - !entry;
                values += !xa_is_value(first) - !value;
                if (entry) {
                        if (offset == max)
                                break;
                        if (!xa_is_sibling(entry))
                                entry = xa_mk_sibling(xas->xa_offset);
                } else {
                        if (offset == XA_CHUNK_MASK)
                                break;
                }
                next = xa_entry_locked(xas->xa, node, ++offset);
                if (!xa_is_sibling(next)) {
                        if (!entry && (offset > max))
                                break;
                        first = next;
                }
                slot++;
        }

        update_node(xas, node, count, values);
        return first;
}
EXPORT_SYMBOL_GPL(xas_store);
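
/*
 * Example (illustrative sketch): replacing every entry in a range while
 * holding the lock.  new_entry() is a hypothetical helper computing the
 * replacement for each old entry.
 *
 *	XA_STATE(xas, xa, 0);
 *	void *entry;
 *
 *	xas_lock(&xas);
 *	xas_for_each(&xas, entry, ULONG_MAX)
 *		xas_store(&xas, new_entry(entry));
 *	xas_unlock(&xas);
 */
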
/**
 * xas_get_mark() - Returns the state of this mark.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Return: true if the mark is set, false if the mark is clear or @xas
 * is in an error state.
 */
bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
{
        if (xas_invalid(xas))
                return false;
        if (!xas->xa_node)
                return xa_marked(xas->xa, mark);
        return node_get_mark(xas->xa_node, xas->xa_offset, mark);
}
EXPORT_SYMBOL_GPL(xas_get_mark);

/**
 * xas_set_mark() - Sets the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Sets the specified mark on this entry, and walks up the tree setting it
 * on all the ancestor entries.  Does nothing if @xas has not been walked to
 * an entry, or is in an error state.
 */
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
        struct xa_node *node = xas->xa_node;
        unsigned int offset = xas->xa_offset;

        if (xas_invalid(xas))
                return;

        while (node) {
                if (node_set_mark(node, offset, mark))
                        return;
                offset = node->offset;
                node = xa_parent_locked(xas->xa, node);
        }

        if (!xa_marked(xas->xa, mark))
                xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);

/**
 * xas_clear_mark() - Clears the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Clears the specified mark on this entry, and walks back to the head
 * attempting to clear it on all the ancestor entries.  Does nothing if
 * @xas has not been walked to an entry, or is in an error state.
 */
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
        struct xa_node *node = xas->xa_node;
        unsigned int offset = xas->xa_offset;

        if (xas_invalid(xas))
                return;

        while (node) {
                if (!node_clear_mark(node, offset, mark))
                        return;
                if (node_any_mark(node, mark))
                        return;

                offset = node->offset;
                node = xa_parent_locked(xas->xa, node);
        }

        if (xa_marked(xas->xa, mark))
                xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);

/**
 * xas_init_marks() - Initialise all marks for the entry
 * @xas: Array operations state.
 *
 * Initialise all marks for the entry specified by @xas.  If we're tracking
 * free entries with a mark, we need to set it on all entries.  All other
 * marks are cleared.
 *
 * This implementation is not as efficient as it could be; we may walk
 * up the tree multiple times.
 */
void xas_init_marks(const struct xa_state *xas)
{
        xa_mark_t mark = 0;

        for (;;) {
                if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
                        xas_set_mark(xas, mark);
                else
                        xas_clear_mark(xas, mark);
                if (mark == XA_MARK_MAX)
                        break;
                mark_inc(mark);
        }
}
EXPORT_SYMBOL_GPL(xas_init_marks);

#ifdef CONFIG_XARRAY_MULTI
static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
{
        unsigned int marks = 0;
        xa_mark_t mark = XA_MARK_0;

        for (;;) {
                if (node_get_mark(node, offset, mark))
                        marks |= 1 << (__force unsigned int)mark;
                if (mark == XA_MARK_MAX)
                        break;
                mark_inc(mark);
        }

        return marks;
}

static void node_set_marks(struct xa_node *node, unsigned int offset,
                        struct xa_node *child, unsigned int marks)
{
        xa_mark_t mark = XA_MARK_0;

        for (;;) {
                if (marks & (1 << (__force unsigned int)mark)) {
                        node_set_mark(node, offset, mark);
                        if (child)
                                node_mark_all(child, mark);
                }
                if (mark == XA_MARK_MAX)
                        break;
                mark_inc(mark);
        }
}

/**
 * xas_split_alloc() - Allocate memory for splitting an entry.
 * @xas: XArray operation state.
 * @entry: New entry which will be stored in the array.
 * @order: Current entry order.
 * @gfp: Memory allocation flags.
 *
 * This function should be called before calling xas_split().
 * If necessary, it will allocate new nodes (and fill them with @entry)
 * to prepare for the upcoming split of an entry of @order size into
 * entries of the order stored in the @xas.
 *
 * Context: May sleep if @gfp flags permit.
 */
void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
                gfp_t gfp)
{
        unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
        unsigned int mask = xas->xa_sibs;

        /* XXX: no support for splitting really large entries yet */
        if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
                goto nomem;
        if (xas->xa_shift + XA_CHUNK_SHIFT > order)
                return;

        do {
                unsigned int i;
                void *sibling = NULL;
                struct xa_node *node;

                node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
                if (!node)
                        goto nomem;
                node->array = xas->xa;
                for (i = 0; i < XA_CHUNK_SIZE; i++) {
                        if ((i & mask) == 0) {
                                RCU_INIT_POINTER(node->slots[i], entry);
                                sibling = xa_mk_sibling(i);
                        } else {
                                RCU_INIT_POINTER(node->slots[i], sibling);
                        }
                }
                RCU_INIT_POINTER(node->parent, xas->xa_alloc);
                xas->xa_alloc = node;
        } while (sibs-- > 0);

        return;
nomem:
        xas_destroy(xas);
        xas_set_err(xas, -ENOMEM);
}
EXPORT_SYMBOL_GPL(xas_split_alloc);

/**
 * xas_split() - Split a multi-index entry into smaller entries.
 * @xas: XArray operation state.
 * @entry: New entry to store in the array.
 * @order: Current entry order.
 *
 * The size of the new entries is set in @xas.  The value in @entry is
 * copied to all the replacement entries.
 *
 * Context: Any context.  The caller should hold the xa_lock.
 */
void xas_split(struct xa_state *xas, void *entry, unsigned int order)
{
        unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
        unsigned int offset, marks;
        struct xa_node *node;
        void *curr = xas_load(xas);
        int values = 0;

        node = xas->xa_node;
        if (xas_top(node))
                return;

        marks = node_get_marks(node, xas->xa_offset);

        offset = xas->xa_offset + sibs;
        do {
                if (xas->xa_shift < node->shift) {
                        struct xa_node *child = xas->xa_alloc;

                        xas->xa_alloc = rcu_dereference_raw(child->parent);
                        child->shift = node->shift - XA_CHUNK_SHIFT;
                        child->offset = offset;
                        child->count = XA_CHUNK_SIZE;
                        child->nr_values = xa_is_value(entry) ?
                                        XA_CHUNK_SIZE : 0;
                        RCU_INIT_POINTER(child->parent, node);
                        node_set_marks(node, offset, child, marks);
                        rcu_assign_pointer(node->slots[offset],
                                        xa_mk_node(child));
                        if (xa_is_value(curr))
                                values--;
                        xas_update(xas, child);
                } else {
                        unsigned int canon = offset - xas->xa_sibs;

                        node_set_marks(node, canon, NULL, marks);
                        rcu_assign_pointer(node->slots[canon], entry);
                        while (offset > canon)
                                rcu_assign_pointer(node->slots[offset--],
                                                xa_mk_sibling(canon));
                        values += (xa_is_value(entry) - xa_is_value(curr)) *
                                        (xas->xa_sibs + 1);
                }
        } while (offset-- > xas->xa_offset);

        node->nr_values += values;
        xas_update(xas, node);
}
EXPORT_SYMBOL_GPL(xas_split);
#endif

/**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry.  Those users should call this function before they drop
 * the lock.  It resets the @xas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock.  If most entries
 * found during a walk require you to call xas_pause(), the xa_for_each()
 * iterator may be more appropriate.
 *
 * Note that xas_pause() only works for forward iteration.  If a user needs
 * to pause a reverse iteration, we will need an xas_pause_rev().
 */
void xas_pause(struct xa_state *xas)
{
        struct xa_node *node = xas->xa_node;

        if (xas_invalid(xas))
                return;

        xas->xa_node = XAS_RESTART;
        if (node) {
                unsigned long offset = xas->xa_offset;
                while (++offset < XA_CHUNK_SIZE) {
                        if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
                                break;
                }
                xas->xa_index += (offset - xas->xa_offset) << node->shift;
                if (xas->xa_index == 0)
                        xas->xa_node = XAS_BOUNDS;
        } else {
                xas->xa_index++;
        }
}
EXPORT_SYMBOL_GPL(xas_pause);
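
/*
 * Example (illustrative sketch): yielding the lock periodically during a
 * long walk.  process() is a hypothetical per-entry helper.  xas_pause()
 * treats the current entry as processed, so it is called after the entry
 * has been handled.
 *
 *	XA_STATE(xas, xa, 0);
 *	void *entry;
 *
 *	xas_lock(&xas);
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		process(entry);
 *		if (need_resched()) {
 *			xas_pause(&xas);
 *			xas_unlock(&xas);
 *			cond_resched();
 *			xas_lock(&xas);
 *		}
 *	}
 *	xas_unlock(&xas);
 */
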
/*
 * __xas_prev() - Find the previous entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_prev() which handles all the complex cases
 * out of line.
 */
void *__xas_prev(struct xa_state *xas)
{
        void *entry;

        if (!xas_frozen(xas->xa_node))
                xas->xa_index--;
        if (!xas->xa_node)
                return set_bounds(xas);
        if (xas_not_node(xas->xa_node))
                return xas_load(xas);

        if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
                xas->xa_offset--;

        while (xas->xa_offset == 255) {
                xas->xa_offset = xas->xa_node->offset - 1;
                xas->xa_node = xa_parent(xas->xa, xas->xa_node);
                if (!xas->xa_node)
                        return set_bounds(xas);
        }

        for (;;) {
                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                if (!xa_is_node(entry))
                        return entry;

                xas->xa_node = xa_to_node(entry);
                xas_set_offset(xas);
        }
}
EXPORT_SYMBOL_GPL(__xas_prev);

/*
 * __xas_next() - Find the next entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_next() which handles all the complex cases
 * out of line.
 */
void *__xas_next(struct xa_state *xas)
{
        void *entry;

        if (!xas_frozen(xas->xa_node))
                xas->xa_index++;
        if (!xas->xa_node)
                return set_bounds(xas);
        if (xas_not_node(xas->xa_node))
                return xas_load(xas);

        if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
                xas->xa_offset++;

        while (xas->xa_offset == XA_CHUNK_SIZE) {
                xas->xa_offset = xas->xa_node->offset + 1;
                xas->xa_node = xa_parent(xas->xa, xas->xa_node);
                if (!xas->xa_node)
                        return set_bounds(xas);
        }

        for (;;) {
                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                if (!xa_is_node(entry))
                        return entry;

                xas->xa_node = xa_to_node(entry);
                xas_set_offset(xas);
        }
}
EXPORT_SYMBOL_GPL(__xas_next);

/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array.  This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
        void *entry;

        if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
                return NULL;
        if (xas->xa_index > max)
                return set_bounds(xas);

        if (!xas->xa_node) {
                xas->xa_index = 1;
                return set_bounds(xas);
        } else if (xas->xa_node == XAS_RESTART) {
                entry = xas_load(xas);
                if (entry || xas_not_node(xas->xa_node))
                        return entry;
        } else if (!xas->xa_node->shift &&
                    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
                xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
        }

        xas_next_offset(xas);

        while (xas->xa_node && (xas->xa_index <= max)) {
                if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
                        xas->xa_offset = xas->xa_node->offset + 1;
                        xas->xa_node = xa_parent(xas->xa, xas->xa_node);
                        continue;
                }

                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                if (xa_is_node(entry)) {
                        xas->xa_node = xa_to_node(entry);
                        xas->xa_offset = 0;
                        continue;
                }
                if (entry && !xa_is_sibling(entry))
                        return entry;

                xas_next_offset(xas);
        }

        if (!xas->xa_node)
                xas->xa_node = XAS_BOUNDS;
        return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
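
/*
 * Example (illustrative sketch): open-coding the loop that xas_for_each()
 * normally provides.  Because xas_find() treats an already-walked @xas as
 * "current entry processed", calling it repeatedly advances the walk.
 *
 *	XA_STATE(xas, xa, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	for (entry = xas_find(&xas, ULONG_MAX); entry;
 *	     entry = xas_find(&xas, ULONG_MAX)) {
 *		if (xas_retry(&xas, entry))
 *			continue;
 *		// use entry
 *	}
 *	rcu_read_unlock();
 */
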
/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array.  This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
        bool advance = true;
        unsigned int offset;
        void *entry;

        if (xas_error(xas))
                return NULL;
        if (xas->xa_index > max)
                goto max;

        if (!xas->xa_node) {
                xas->xa_index = 1;
                goto out;
        } else if (xas_top(xas->xa_node)) {
                advance = false;
                entry = xa_head(xas->xa);
                xas->xa_node = NULL;
                if (xas->xa_index > max_index(entry))
                        goto out;
                if (!xa_is_node(entry)) {
                        if (xa_marked(xas->xa, mark))
                                return entry;
                        xas->xa_index = 1;
                        goto out;
                }
                xas->xa_node = xa_to_node(entry);
                xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
        }

        while (xas->xa_index <= max) {
                if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
                        xas->xa_offset = xas->xa_node->offset + 1;
                        xas->xa_node = xa_parent(xas->xa, xas->xa_node);
                        if (!xas->xa_node)
                                break;
                        advance = false;
                        continue;
                }

                if (!advance) {
                        entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                        if (xa_is_sibling(entry)) {
                                xas->xa_offset = xa_to_sibling(entry);
                                xas_move_index(xas, xas->xa_offset);
                        }
                }

                offset = xas_find_chunk(xas, advance, mark);
                if (offset > xas->xa_offset) {
                        advance = false;
                        xas_move_index(xas, offset);
                        /* Mind the wrap */
                        if ((xas->xa_index - 1) >= max)
                                goto max;
                        xas->xa_offset = offset;
                        if (offset == XA_CHUNK_SIZE)
                                continue;
                }

                entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
                if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
                        continue;
                if (!xa_is_node(entry))
                        return entry;
                xas->xa_node = xa_to_node(entry);
                xas_set_offset(xas);
        }

out:
        if (xas->xa_index > max)
                goto max;
        return set_bounds(xas);
max:
        xas->xa_node = XAS_RESTART;
        return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);

/**
 * xas_find_conflict() - Find the next present entry in a range.
 * @xas: XArray operation state.
 *
 * The @xas describes both a range and a position within that range.
 *
 * Context: Any context.  Expects xa_lock to be held.
 * Return: The next entry in the range covered by @xas or %NULL.
 */
void *xas_find_conflict(struct xa_state *xas)
{
        void *curr;

        if (xas_error(xas))
                return NULL;

        if (!xas->xa_node)
                return NULL;

        if (xas_top(xas->xa_node)) {
                curr = xas_start(xas);
                if (!curr)
                        return NULL;
                while (xa_is_node(curr)) {
                        struct xa_node *node = xa_to_node(curr);
                        curr = xas_descend(xas, node);
                }
                if (curr)
                        return curr;
        }

        if (xas->xa_node->shift > xas->xa_shift)
                return NULL;

        for (;;) {
                if (xas->xa_node->shift == xas->xa_shift) {
                        if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
                                break;
                } else if (xas->xa_offset == XA_CHUNK_MASK) {
                        xas->xa_offset = xas->xa_node->offset;
                        xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
                        if (!xas->xa_node)
                                break;
                        continue;
                }
                curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
                if (xa_is_sibling(curr))
                        continue;
                while (xa_is_node(curr)) {
                        xas->xa_node = xa_to_node(curr);
                        xas->xa_offset = 0;
                        curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
                }
                if (curr)
                        return curr;
        }
        xas->xa_offset -= xas->xa_sibs;
        return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);

/**
 * xa_load() - Load an entry from an XArray.
 * @xa: XArray.
 * @index: index into array.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry at @index in @xa.
 */
void *xa_load(struct xarray *xa, unsigned long index)
{
        XA_STATE(xas, xa, index);
        void *entry;

        rcu_read_lock();
        do {
                entry = xas_load(&xas);
                if (xa_is_zero(entry))
                        entry = NULL;
        } while (xas_retry(&xas, entry));
        rcu_read_unlock();

        return entry;
}
EXPORT_SYMBOL(xa_load);
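
/*
 * Example (illustrative sketch): the normal API wraps the RCU locking
 * itself, so a lookup is a single call.  my_array is a hypothetical
 * DEFINE_XARRAY() array.
 *
 *	void *item = xa_load(&my_array, 42);
 *	if (item)
 *		// an entry was present at index 42
 */
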
static void *xas_result(struct xa_state *xas, void *curr)
{
        if (xa_is_zero(curr))
                return NULL;
        if (xas_error(xas))
                curr = xas->xa_node;
        return curr;
}

/**
 * __xa_erase() - Erase this entry from the XArray while locked.
 * @xa: XArray.
 * @index: Index into array.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 * Return: The entry which used to be at this index.
 */
void *__xa_erase(struct xarray *xa, unsigned long index)
{
        XA_STATE(xas, xa, index);
        return xas_result(&xas, xas_store(&xas, NULL));
}
EXPORT_SYMBOL(__xa_erase);

/**
 * xa_erase() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * Return: The entry which used to be at this index.
 */
void *xa_erase(struct xarray *xa, unsigned long index)
{
        void *entry;

        xa_lock(xa);
        entry = __xa_erase(xa, index);
        xa_unlock(xa);

        return entry;
}
EXPORT_SYMBOL(xa_erase);

/**
 * __xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
        XA_STATE(xas, xa, index);
        void *curr;

        if (WARN_ON_ONCE(xa_is_advanced(entry)))
                return XA_ERROR(-EINVAL);
        if (xa_track_free(xa) && !entry)
                entry = XA_ZERO_ENTRY;

        do {
                curr = xas_store(&xas, entry);
                if (xa_track_free(xa))
                        xas_clear_mark(&xas, XA_FREE_MARK);
        } while (__xas_nomem(&xas, gfp));

        return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_store);

/**
 * xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from this index will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * May sleep if the @gfp flags permit.
 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
 * failed.
 */
void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
        void *curr;

        xa_lock(xa);
        curr = __xa_store(xa, index, entry, gfp);
        xa_unlock(xa);

        return curr;
}
EXPORT_SYMBOL(xa_store);
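
/*
 * Example (illustrative sketch): storing a value entry and checking the
 * result with xa_err().
 *
 *	void *old = xa_store(xa, index, xa_mk_value(v), GFP_KERNEL);
 *	int err = xa_err(old);
 *
 *	if (err)
 *		return err;	// -ENOMEM or -EINVAL
 */
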
/**
 * __xa_cmpxchg() - Conditionally store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
                        void *old, void *entry, gfp_t gfp)
{
        XA_STATE(xas, xa, index);
        void *curr;

        if (WARN_ON_ONCE(xa_is_advanced(entry)))
                return XA_ERROR(-EINVAL);

        do {
                curr = xas_load(&xas);
                if (curr == old) {
                        xas_store(&xas, entry);
                        if (xa_track_free(xa) && entry && !curr)
                                xas_clear_mark(&xas, XA_FREE_MARK);
                }
        } while (__xas_nomem(&xas, gfp));

        return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);
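
/*
 * Example (illustrative sketch): most callers use the locked wrapper
 * xa_cmpxchg().  The store happens only if the slot still holds the
 * expected old value; the return value tells the caller which case
 * occurred.
 *
 *	void *curr = xa_cmpxchg(xa, index, NULL, item, GFP_KERNEL);
 *
 *	if (xa_err(curr))
 *		return xa_err(curr);	// allocation failed
 *	if (curr)
 *		// somebody else installed an entry first; item not stored
 */
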
/**
 * __xa_insert() - Store this entry in the XArray if no entry is present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present.  Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
        XA_STATE(xas, xa, index);
        void *curr;

        if (WARN_ON_ONCE(xa_is_advanced(entry)))
                return -EINVAL;
        if (!entry)
                entry = XA_ZERO_ENTRY;

        do {
                curr = xas_load(&xas);
                if (!curr) {
                        xas_store(&xas, entry);
                        if (xa_track_free(xa))
                                xas_clear_mark(&xas, XA_FREE_MARK);
                } else {
                        xas_set_err(&xas, -EBUSY);
                }
        } while (__xas_nomem(&xas, gfp));

        return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_insert);

#ifdef CONFIG_XARRAY_MULTI
static void xas_set_range(struct xa_state *xas, unsigned long first,
                unsigned long last)
{
        unsigned int shift = 0;
        unsigned long sibs = last - first;
        unsigned int offset = XA_CHUNK_MASK;

        xas_set(xas, first);

        while ((first & XA_CHUNK_MASK) == 0) {
                if (sibs < XA_CHUNK_MASK)
                        break;
                if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
                        break;
                shift += XA_CHUNK_SHIFT;
                if (offset == XA_CHUNK_MASK)
                        offset = sibs & XA_CHUNK_MASK;
                sibs >>= XA_CHUNK_SHIFT;
                first >>= XA_CHUNK_SHIFT;
        }

        offset = first & XA_CHUNK_MASK;
        if (offset + sibs > XA_CHUNK_MASK)
                sibs = XA_CHUNK_MASK - offset;
        if ((((first + sibs + 1) << shift) - 1) > last)
                sibs -= 1;

        xas->xa_shift = shift;
        xas->xa_sibs = sibs;
}

/**
 * xa_store_range() - Store this entry at a range of indices in the XArray.
 * @xa: XArray.
 * @first: First index to affect.
 * @last: Last index to affect.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from any index between @first and @last,
 * inclusive, will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with these indices are unaffected unless @entry is
 * %NULL.
 *
 * Context: Process context.  Takes and releases the xa_lock.  May sleep
 * if the @gfp flags permit.
 * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
 * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
 */
void *xa_store_range(struct xarray *xa, unsigned long first,
                unsigned long last, void *entry, gfp_t gfp)
{
        XA_STATE(xas, xa, 0);

        if (WARN_ON_ONCE(xa_is_internal(entry)))
                return XA_ERROR(-EINVAL);
        if (last < first)
                return XA_ERROR(-EINVAL);

        do {
                xas_lock(&xas);
                if (entry) {
                        unsigned int order = BITS_PER_LONG;
                        if (last + 1)
                                order = __ffs(last + 1);
                        xas_set_order(&xas, last, order);
                        xas_create(&xas, true);
                        if (xas_error(&xas))
                                goto unlock;
                }
                do {
                        xas_set_range(&xas, first, last);
                        xas_store(&xas, entry);
                        if (xas_error(&xas))
                                goto unlock;
                        first += xas_size(&xas);
                } while (first <= last);
unlock:
                xas_unlock(&xas);
        } while (xas_nomem(&xas, gfp));

        return xas_result(&xas, NULL);
}
EXPORT_SYMBOL(xa_store_range);
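
/*
 * Example (illustrative sketch): making indices 0-15 all resolve to the
 * same pointer.
 *
 *	void *ret = xa_store_range(xa, 0, 15, item, GFP_KERNEL);
 *
 *	if (xa_err(ret))
 *		return xa_err(ret);
 */
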
1753 :
1754 : /**
1755 : * xa_get_order() - Get the order of an entry.
1756 : * @xa: XArray.
1757 : * @index: Index of the entry.
1758 : *
1759 : * Return: A number between 0 and 63 indicating the order of the entry.
1760 : */
1761 : int xa_get_order(struct xarray *xa, unsigned long index)
1762 : {
1763 : XA_STATE(xas, xa, index);
1764 : void *entry;
1765 : int order = 0;
1766 :
1767 : rcu_read_lock();
1768 : entry = xas_load(&xas);
1769 :
1770 : if (!entry)
1771 : goto unlock;
1772 :
1773 : if (!xas.xa_node)
1774 : goto unlock;
1775 :
1776 : for (;;) {
1777 : unsigned int slot = xas.xa_offset + (1 << order);
1778 :
1779 : if (slot >= XA_CHUNK_SIZE)
1780 : break;
1781 : if (!xa_is_sibling(xas.xa_node->slots[slot]))
1782 : break;
1783 : order++;
1784 : }
1785 :
1786 : order += xas.xa_node->shift;
1787 : unlock:
1788 : rcu_read_unlock();
1789 :
1790 : return order;
1791 : }
1792 : EXPORT_SYMBOL(xa_get_order);
1793 : #endif /* CONFIG_XARRAY_MULTI */
1794 :
1795 : /**
1796 : * __xa_alloc() - Find somewhere to store this entry in the XArray.
1797 : * @xa: XArray.
1798 : * @id: Pointer to ID.
1799 : * @limit: Range for allocated ID.
1800 : * @entry: New entry.
1801 : * @gfp: Memory allocation flags.
1802 : *
1803 : * Finds an empty entry in @xa between @limit.min and @limit.max,
1804 : * stores the index into the @id pointer, then stores the entry at
1805 : * that index. A concurrent lookup will not see an uninitialised @id.
1806 : *
1807 : * Context: Any context. Expects xa_lock to be held on entry. May
1808 : * release and reacquire xa_lock if @gfp flags permit.
1809 : * Return: 0 on success, -ENOMEM if memory could not be allocated or
1810 : * -EBUSY if there are no free entries in @limit.
1811 : */
1812 0 : int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
1813 : struct xa_limit limit, gfp_t gfp)
1814 : {
1815 0 : XA_STATE(xas, xa, 0);
1816 :
1817 0 : if (WARN_ON_ONCE(xa_is_advanced(entry)))
1818 : return -EINVAL;
1819 0 : if (WARN_ON_ONCE(!xa_track_free(xa)))
1820 : return -EINVAL;
1821 :
1822 0 : if (!entry)
1823 0 : entry = XA_ZERO_ENTRY;
1824 :
1825 : do {
1826 0 : xas.xa_index = limit.min;
1827 0 : xas_find_marked(&xas, limit.max, XA_FREE_MARK);
1828 0 : if (xas.xa_node == XAS_RESTART)
1829 0 : xas_set_err(&xas, -EBUSY);
1830 : else
1831 0 : *id = xas.xa_index;
1832 0 : xas_store(&xas, entry);
1833 0 : xas_clear_mark(&xas, XA_FREE_MARK);
1834 0 : } while (__xas_nomem(&xas, gfp));
1835 :
1836 0 : return xas_error(&xas);
1837 : }
1838 : EXPORT_SYMBOL(__xa_alloc);
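 : 
 : /*
 : * Usage sketch (illustrative; 'objects' and 'obj' are hypothetical):
 : * allocate an ID for a new object while already holding the xa_lock.
 : * The array must have been defined with DEFINE_XARRAY_ALLOC().
 : *
 : *    DEFINE_XARRAY_ALLOC(objects);
 : *    u32 id;
 : *    int err;
 : *
 : *    xa_lock(&objects);
 : *    err = __xa_alloc(&objects, &id, obj, xa_limit_32b, GFP_KERNEL);
 : *    xa_unlock(&objects);
 : *
 : * GFP_KERNEL is acceptable here because, as noted in the Context
 : * section above, the lock may be dropped and reacquired around an
 : * allocation that sleeps.
 : */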
1839 :
1840 : /**
1841 : * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
1842 : * @xa: XArray.
1843 : * @id: Pointer to ID.
1844 : * @entry: New entry.
1845 : * @limit: Range for allocated ID.
1846 : * @next: Pointer to next ID to allocate.
1847 : * @gfp: Memory allocation flags.
1848 : *
1849 : * Finds an empty entry in @xa between @limit.min and @limit.max,
1850 : * stores the index into the @id pointer, then stores the entry at
1851 : * that index. A concurrent lookup will not see an uninitialised @id.
1852 : * The search for an empty entry will start at @next and will wrap
1853 : * around if necessary.
1854 : *
1855 : * Context: Any context. Expects xa_lock to be held on entry. May
1856 : * release and reacquire xa_lock if @gfp flags permit.
1857 : * Return: 0 if the allocation succeeded without wrapping, 1 if the
1858 : * allocation succeeded after wrapping, -ENOMEM if memory could not be
1859 : * allocated, or -EBUSY if there are no free entries in @limit.
1860 : */
1861 0 : int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
1862 : struct xa_limit limit, u32 *next, gfp_t gfp)
1863 : {
1864 0 : u32 min = limit.min;
1865 : int ret;
1866 :
1867 0 : limit.min = max(min, *next);
1868 0 : ret = __xa_alloc(xa, id, entry, limit, gfp);
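 : /*
 : * XA_FLAGS_ALLOC_WRAPPED records that a previous allocation
 : * wrapped *next around to zero; report that wrap (return 1) on
 : * the first success afterwards.
 : */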
1869 0 : if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
1870 0 : xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
1871 0 : ret = 1;
1872 : }
1873 :
1874 0 : if (ret < 0 && limit.min > min) {
1875 0 : limit.min = min;
1876 0 : ret = __xa_alloc(xa, id, entry, limit, gfp);
1877 0 : if (ret == 0)
1878 0 : ret = 1;
1879 : }
1880 :
1881 0 : if (ret >= 0) {
1882 0 : *next = *id + 1;
1883 0 : if (*next == 0)
1884 0 : xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
1885 : }
1886 0 : return ret;
1887 : }
1888 : EXPORT_SYMBOL(__xa_alloc_cyclic);
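 : 
 : /*
 : * Usage sketch (illustrative; 'objects', 'obj' and 'next_id' are
 : * hypothetical): hand out IDs that keep increasing until the limit
 : * is reached, then wrap back to limit.min.
 : *
 : *    static u32 next_id;
 : *    u32 id;
 : *    int ret;
 : *
 : *    xa_lock(&objects);
 : *    ret = __xa_alloc_cyclic(&objects, &id, obj, xa_limit_16b,
 : *                            &next_id, GFP_KERNEL);
 : *    xa_unlock(&objects);
 : *    if (ret == 1)
 : *        pr_debug("ID space wrapped\n");
 : */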
1889 :
1890 : /**
1891 : * __xa_set_mark() - Set this mark on this entry while locked.
1892 : * @xa: XArray.
1893 : * @index: Index of entry.
1894 : * @mark: Mark number.
1895 : *
1896 : * Attempting to set a mark on a %NULL entry does not succeed.
1897 : *
1898 : * Context: Any context. Expects xa_lock to be held on entry.
1899 : */
1900 0 : void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1901 : {
1902 0 : XA_STATE(xas, xa, index);
1903 0 : void *entry = xas_load(&xas);
1904 :
1905 0 : if (entry)
1906 0 : xas_set_mark(&xas, mark);
1907 0 : }
1908 : EXPORT_SYMBOL(__xa_set_mark);
1909 :
1910 : /**
1911 : * __xa_clear_mark() - Clear this mark on this entry while locked.
1912 : * @xa: XArray.
1913 : * @index: Index of entry.
1914 : * @mark: Mark number.
1915 : *
1916 : * Context: Any context. Expects xa_lock to be held on entry.
1917 : */
1918 0 : void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1919 : {
1920 0 : XA_STATE(xas, xa, index);
1921 0 : void *entry = xas_load(&xas);
1922 :
1923 0 : if (entry)
1924 0 : xas_clear_mark(&xas, mark);
1925 0 : }
1926 : EXPORT_SYMBOL(__xa_clear_mark);
1927 :
1928 : /**
1929 : * xa_get_mark() - Inquire whether this mark is set on this entry.
1930 : * @xa: XArray.
1931 : * @index: Index of entry.
1932 : * @mark: Mark number.
1933 : *
1934 : * This function uses the RCU read lock, so the result may be out of date
1935 : * by the time it returns. If you need the result to be stable, use a lock.
1936 : *
1937 : * Context: Any context. Takes and releases the RCU lock.
1938 : * Return: True if the entry at @index has this mark set, false if it doesn't.
1939 : */
1940 0 : bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1941 : {
1942 0 : XA_STATE(xas, xa, index);
1943 : void *entry;
1944 :
1945 : rcu_read_lock();
1946 0 : entry = xas_start(&xas);
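 : /*
 : * A mark bit on an interior node is set if the mark is set
 : * anywhere in the subtree below it, so stop descending as soon
 : * as the bit is clear at the current level.
 : */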
1947 0 : while (xas_get_mark(&xas, mark)) {
1948 0 : if (!xa_is_node(entry))
1949 : goto found;
1950 0 : entry = xas_descend(&xas, xa_to_node(entry));
1951 : }
1952 : rcu_read_unlock();
1953 0 : return false;
1954 : found:
1955 : rcu_read_unlock();
1956 0 : return true;
1957 : }
1958 : EXPORT_SYMBOL(xa_get_mark);
1959 :
1960 : /**
1961 : * xa_set_mark() - Set this mark on this entry.
1962 : * @xa: XArray.
1963 : * @index: Index of entry.
1964 : * @mark: Mark number.
1965 : *
1966 : * Attempting to set a mark on a %NULL entry does not succeed.
1967 : *
1968 : * Context: Process context. Takes and releases the xa_lock.
1969 : */
1970 0 : void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1971 : {
1972 0 : xa_lock(xa);
1973 0 : __xa_set_mark(xa, index, mark);
1974 0 : xa_unlock(xa);
1975 0 : }
1976 : EXPORT_SYMBOL(xa_set_mark);
1977 :
1978 : /**
1979 : * xa_clear_mark() - Clear this mark on this entry.
1980 : * @xa: XArray.
1981 : * @index: Index of entry.
1982 : * @mark: Mark number.
1983 : *
1984 : * Clearing a mark always succeeds.
1985 : *
1986 : * Context: Process context. Takes and releases the xa_lock.
1987 : */
1988 0 : void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1989 : {
1990 0 : xa_lock(xa);
1991 0 : __xa_clear_mark(xa, index, mark);
1992 0 : xa_unlock(xa);
1993 0 : }
1994 : EXPORT_SYMBOL(xa_clear_mark);
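 : 
 : /*
 : * Usage sketch (illustrative; 'cache', 'item' and writeback() are
 : * hypothetical): a mark works as a cheap per-entry flag, here used
 : * to track dirty entries.
 : *
 : *    xa_store(&cache, index, item, GFP_KERNEL);
 : *    xa_set_mark(&cache, index, XA_MARK_0);
 : *
 : *    if (xa_get_mark(&cache, index, XA_MARK_0)) {
 : *        writeback(item);
 : *        xa_clear_mark(&cache, index, XA_MARK_0);
 : *    }
 : */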
1995 :
1996 : /**
1997 : * xa_find() - Search the XArray for an entry.
1998 : * @xa: XArray.
1999 : * @indexp: Pointer to an index.
2000 : * @max: Maximum index to search to.
2001 : * @filter: Selection criterion.
2002 : *
2003 : * Finds the entry in @xa which matches the @filter, and has the lowest
2004 : * index that is at least @indexp and no more than @max.
2005 : * If an entry is found, @indexp is updated to be the index of the entry.
2006 : * This function is protected by the RCU read lock, so it may not find
2007 : * entries which are being simultaneously added. It will not return an
2008 : * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
2009 : *
2010 : * Context: Any context. Takes and releases the RCU lock.
2011 : * Return: The entry, if found, otherwise %NULL.
2012 : */
2013 0 : void *xa_find(struct xarray *xa, unsigned long *indexp,
2014 : unsigned long max, xa_mark_t filter)
2015 : {
2016 0 : XA_STATE(xas, xa, *indexp);
2017 : void *entry;
2018 :
2019 : rcu_read_lock();
2020 : do {
2021 0 : if ((__force unsigned int)filter < XA_MAX_MARKS)
2022 0 : entry = xas_find_marked(&xas, max, filter);
2023 : else
2024 0 : entry = xas_find(&xas, max);
2025 0 : } while (xas_retry(&xas, entry));
2026 : rcu_read_unlock();
2027 :
2028 0 : if (entry)
2029 0 : *indexp = xas.xa_index;
2030 0 : return entry;
2031 : }
2032 : EXPORT_SYMBOL(xa_find);
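 : 
 : /*
 : * Usage sketch (illustrative; 'cache' is hypothetical): find the
 : * first entry marked with XA_MARK_0 at or above index 0. @filter
 : * may also be %XA_PRESENT to accept any non-NULL entry.
 : *
 : *    unsigned long index = 0;
 : *    void *entry;
 : *
 : *    entry = xa_find(&cache, &index, ULONG_MAX, XA_MARK_0);
 : *    if (entry)
 : *        pr_debug("first marked entry at %lu\n", index);
 : */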
2033 :
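 : /*
 : * Return true if @xas is positioned part-way through a multi-index
 : * entry, i.e. the index lies past the entry's first index, so
 : * xa_find_after() should skip it and keep searching.
 : */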
2034 : static bool xas_sibling(struct xa_state *xas)
2035 : {
2036 0 : struct xa_node *node = xas->xa_node;
2037 : unsigned long mask;
2038 :
2039 : if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
2040 : return false;
2041 : mask = (XA_CHUNK_SIZE << node->shift) - 1;
2042 : return (xas->xa_index & mask) >
2043 : ((unsigned long)xas->xa_offset << node->shift);
2044 : }
2045 :
2046 : /**
2047 : * xa_find_after() - Search the XArray for a present entry.
2048 : * @xa: XArray.
2049 : * @indexp: Pointer to an index.
2050 : * @max: Maximum index to search to.
2051 : * @filter: Selection criterion.
2052 : *
2053 : * Finds the entry in @xa which matches the @filter and has the lowest
2054 : * index that is above @indexp and no more than @max.
2055 : * If an entry is found, @indexp is updated to be the index of the entry.
2056 : * This function is protected by the RCU read lock, so it may miss entries
2057 : * which are being simultaneously added. It will not return an
2058 : * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
2059 : *
2060 : * Context: Any context. Takes and releases the RCU lock.
2061 : * Return: The entry, if found, otherwise %NULL.
2062 : */
2063 0 : void *xa_find_after(struct xarray *xa, unsigned long *indexp,
2064 : unsigned long max, xa_mark_t filter)
2065 : {
2066 0 : XA_STATE(xas, xa, *indexp + 1);
2067 : void *entry;
2068 :
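 : /* If *indexp was ULONG_MAX, there is no index after it. */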
2069 0 : if (xas.xa_index == 0)
2070 : return NULL;
2071 :
2072 : rcu_read_lock();
2073 : for (;;) {
2074 0 : if ((__force unsigned int)filter < XA_MAX_MARKS)
2075 0 : entry = xas_find_marked(&xas, max, filter);
2076 : else
2077 0 : entry = xas_find(&xas, max);
2078 :
2079 0 : if (xas_invalid(&xas))
2080 : break;
2081 0 : if (xas_sibling(&xas))
2082 : continue;
2083 0 : if (!xas_retry(&xas, entry))
2084 : break;
2085 : }
2086 : rcu_read_unlock();
2087 :
2088 0 : if (entry)
2089 0 : *indexp = xas.xa_index;
2090 : return entry;
2091 : }
2092 : EXPORT_SYMBOL(xa_find_after);
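 : 
 : /*
 : * Usage sketch (illustrative; 'cache' and process() are
 : * hypothetical): walk every present entry. This is roughly what the
 : * xa_for_each() family of iterators expands to.
 : *
 : *    unsigned long index = 0;
 : *    void *entry;
 : *
 : *    entry = xa_find(&cache, &index, ULONG_MAX, XA_PRESENT);
 : *    while (entry) {
 : *        process(entry);
 : *        entry = xa_find_after(&cache, &index, ULONG_MAX, XA_PRESENT);
 : *    }
 : */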
2093 :
2094 0 : static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
2095 : unsigned long max, unsigned int n)
2096 : {
2097 : void *entry;
2098 0 : unsigned int i = 0;
2099 :
2100 : rcu_read_lock();
2101 0 : xas_for_each(xas, entry, max) {
2102 0 : if (xas_retry(xas, entry))
2103 0 : continue;
2104 0 : dst[i++] = entry;
2105 0 : if (i == n)
2106 : break;
2107 : }
2108 : rcu_read_unlock();
2109 :
2110 0 : return i;
2111 : }
2112 :
2113 0 : static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
2114 : unsigned long max, unsigned int n, xa_mark_t mark)
2115 : {
2116 : void *entry;
2117 0 : unsigned int i = 0;
2118 :
2119 : rcu_read_lock();
2120 0 : xas_for_each_marked(xas, entry, max, mark) {
2121 0 : if (xas_retry(xas, entry))
2122 0 : continue;
2123 0 : dst[i++] = entry;
2124 0 : if (i == n)
2125 : break;
2126 : }
2127 : rcu_read_unlock();
2128 :
2129 0 : return i;
2130 : }
2131 :
2132 : /**
2133 : * xa_extract() - Copy selected entries from the XArray into a normal array.
2134 : * @xa: The source XArray to copy from.
2135 : * @dst: The buffer to copy entries into.
2136 : * @start: The first index in the XArray eligible to be selected.
2137 : * @max: The last index in the XArray eligible to be selected.
2138 : * @n: The maximum number of entries to copy.
2139 : * @filter: Selection criterion.
2140 : *
2141 : * Copies up to @n entries that match @filter from the XArray. The
2142 : * copied entries will have indices between @start and @max, inclusive.
2143 : *
2144 : * The @filter may be an XArray mark value, in which case entries which are
2145 : * marked with that mark will be copied. It may also be %XA_PRESENT, in
2146 : * which case all entries which are not %NULL will be copied.
2147 : *
2148 : * The entries returned may not represent a snapshot of the XArray at a
2149 : * moment in time. For example, if another thread stores to index 5, then
2150 : * index 10, calling xa_extract() may return the old contents of index 5
2151 : * and the new contents of index 10. Indices not modified while this
2152 : * function is running will not be skipped.
2153 : *
2154 : * If you need stronger guarantees, holding the xa_lock across calls to this
2155 : * function will prevent concurrent modification.
2156 : *
2157 : * Context: Any context. Takes and releases the RCU lock.
2158 : * Return: The number of entries copied.
2159 : */
2160 0 : unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
2161 : unsigned long max, unsigned int n, xa_mark_t filter)
2162 : {
2163 0 : XA_STATE(xas, xa, start);
2164 :
2165 0 : if (!n)
2166 : return 0;
2167 :
2168 0 : if ((__force unsigned int)filter < XA_MAX_MARKS)
2169 0 : return xas_extract_marked(&xas, dst, max, n, filter);
2170 0 : return xas_extract_present(&xas, dst, max, n);
2171 : }
2172 : EXPORT_SYMBOL(xa_extract);
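 : 
 : /*
 : * Usage sketch (illustrative; 'cache' is hypothetical): copy up to
 : * 16 present entries starting at index 0 into an on-stack array.
 : *
 : *    void *batch[16];
 : *    unsigned int count;
 : *
 : *    count = xa_extract(&cache, batch, 0, ULONG_MAX, 16, XA_PRESENT);
 : */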
2173 :
2174 : /**
2175 : * xa_delete_node() - Private interface for workingset code.
2176 : * @node: Node to be removed from the tree.
2177 : * @update: Function to call to update ancestor nodes.
2178 : *
2179 : * Context: xa_lock must be held on entry and will not be released.
2180 : */
2181 0 : void xa_delete_node(struct xa_node *node, xa_update_node_t update)
2182 : {
2183 0 : struct xa_state xas = {
2184 0 : .xa = node->array,
2185 0 : .xa_index = (unsigned long)node->offset <<
2186 0 : (node->shift + XA_CHUNK_SHIFT),
2187 0 : .xa_shift = node->shift + XA_CHUNK_SHIFT,
2188 : .xa_offset = node->offset,
2189 0 : .xa_node = xa_parent_locked(node->array, node),
2190 : .xa_update = update,
2191 : };
2192 :
2193 0 : xas_store(&xas, NULL);
2194 0 : }
2195 : EXPORT_SYMBOL_GPL(xa_delete_node); /* For the benefit of the test suite */
2196 :
2197 : /**
2198 : * xa_destroy() - Free all internal data structures.
2199 : * @xa: XArray.
2200 : *
2201 : * After calling this function, the XArray is empty and has freed all memory
2202 : * allocated for its internal data structures. You are responsible for
2203 : * freeing the objects referenced by the XArray.
2204 : *
2205 : * Context: Any context. Takes and releases the xa_lock, interrupt-safe.
2206 : */
2207 0 : void xa_destroy(struct xarray *xa)
2208 : {
2209 0 : XA_STATE(xas, xa, 0);
2210 : unsigned long flags;
2211 : void *entry;
2212 :
2213 0 : xas.xa_node = NULL;
2214 0 : xas_lock_irqsave(&xas, flags);
2215 0 : entry = xa_head_locked(xa);
2216 0 : RCU_INIT_POINTER(xa->xa_head, NULL);
2217 0 : xas_init_marks(&xas);
2218 0 : if (xa_zero_busy(xa))
2219 0 : xa_mark_clear(xa, XA_FREE_MARK);
2220 : /* lockdep checks we're still holding the lock in xas_free_nodes() */
2221 0 : if (xa_is_node(entry))
2222 0 : xas_free_nodes(&xas, xa_to_node(entry));
2223 0 : xas_unlock_irqrestore(&xas, flags);
2224 0 : }
2225 : EXPORT_SYMBOL(xa_destroy);
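 : 
 : /*
 : * Usage sketch (illustrative; 'cache' is hypothetical): the XArray
 : * never frees what its entries point to, so free the objects first,
 : * then tear down the tree itself.
 : *
 : *    unsigned long index;
 : *    void *entry;
 : *
 : *    xa_for_each(&cache, index, entry)
 : *        kfree(entry);
 : *    xa_destroy(&cache);
 : */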
2226 :
2227 : #ifdef XA_DEBUG
2228 : void xa_dump_node(const struct xa_node *node)
2229 : {
2230 : unsigned i, j;
2231 :
2232 : if (!node)
2233 : return;
2234 : if ((unsigned long)node & 3) {
2235 : pr_cont("node %px\n", node);
2236 : return;
2237 : }
2238 :
2239 : pr_cont("node %px %s %d parent %px shift %d count %d values %d "
2240 : "array %px list %px %px marks",
2241 : node, node->parent ? "offset" : "max", node->offset,
2242 : node->parent, node->shift, node->count, node->nr_values,
2243 : node->array, node->private_list.prev, node->private_list.next);
2244 : for (i = 0; i < XA_MAX_MARKS; i++)
2245 : for (j = 0; j < XA_MARK_LONGS; j++)
2246 : pr_cont(" %lx", node->marks[i][j]);
2247 : pr_cont("\n");
2248 : }
2249 :
2250 : void xa_dump_index(unsigned long index, unsigned int shift)
2251 : {
2252 : if (!shift)
2253 : pr_info("%lu: ", index);
2254 : else if (shift >= BITS_PER_LONG)
2255 : pr_info("0-%lu: ", ~0UL);
2256 : else
2257 : pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
2258 : }
2259 :
2260 : void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
2261 : {
2262 : if (!entry)
2263 : return;
2264 :
2265 : xa_dump_index(index, shift);
2266 :
2267 : if (xa_is_node(entry)) {
2268 : if (shift == 0) {
2269 : pr_cont("%px\n", entry);
2270 : } else {
2271 : unsigned long i;
2272 : struct xa_node *node = xa_to_node(entry);
2273 : xa_dump_node(node);
2274 : for (i = 0; i < XA_CHUNK_SIZE; i++)
2275 : xa_dump_entry(node->slots[i],
2276 : index + (i << node->shift), node->shift);
2277 : }
2278 : } else if (xa_is_value(entry))
2279 : pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
2280 : xa_to_value(entry), entry);
2281 : else if (!xa_is_internal(entry))
2282 : pr_cont("%px\n", entry);
2283 : else if (xa_is_retry(entry))
2284 : pr_cont("retry (%ld)\n", xa_to_internal(entry));
2285 : else if (xa_is_sibling(entry))
2286 : pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
2287 : else if (xa_is_zero(entry))
2288 : pr_cont("zero (%ld)\n", xa_to_internal(entry));
2289 : else
2290 : pr_cont("UNKNOWN ENTRY (%px)\n", entry);
2291 : }
2292 :
2293 : void xa_dump(const struct xarray *xa)
2294 : {
2295 : void *entry = xa->xa_head;
2296 : unsigned int shift = 0;
2297 :
2298 : pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
2299 : xa->xa_flags, xa_marked(xa, XA_MARK_0),
2300 : xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
2301 : if (xa_is_node(entry))
2302 : shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
2303 : xa_dump_entry(entry, 0, shift);
2304 : }
2305 : #endif