/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_XARRAY_H
#define _LINUX_XARRAY_H
/*
 * eXtensible Arrays
 * Copyright (c) 2017 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 *
 * See Documentation/core-api/xarray.rst for how to use the XArray.
 */

#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * The bottom two bits of the entry determine how the XArray interprets
 * the contents:
 *
 * 00: Pointer entry
 * 10: Internal entry
 * x1: Value entry or tagged pointer
 *
 * Attempting to store internal entries in the XArray is a bug.
 *
 * Most internal entries are pointers to the next node in the tree.
 * The following internal entries have a special meaning:
 *
 * 0-62: Sibling entries
 * 256: Retry entry
 * 257: Zero entry
 *
 * Errors are also represented as internal entries, but use the negative
 * space (-4094 to -2). They're never stored in the slots array; only
 * returned by the normal API.
 */

#define BITS_PER_XA_VALUE	(BITS_PER_LONG - 1)

/**
 * xa_mk_value() - Create an XArray entry from an integer.
 * @v: Value to store in XArray.
 *
 * Context: Any context.
 * Return: An entry suitable for storing in the XArray.
 */
static inline void *xa_mk_value(unsigned long v)
{
	WARN_ON((long)v < 0);
	return (void *)((v << 1) | 1);
}

/**
 * xa_to_value() - Get value stored in an XArray entry.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: The value stored in the XArray entry.
 */
static inline unsigned long xa_to_value(const void *entry)
{
	return (unsigned long)entry >> 1;
}

/**
 * xa_is_value() - Determine if an entry is a value.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: True if the entry is a value, false if it is a pointer.
 */
static inline bool xa_is_value(const void *entry)
{
	return (unsigned long)entry & 1;
}
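
/*
 * Example (editor's illustrative sketch, not part of this header):
 * round-tripping a small integer through the value-entry encoding
 * described above. 42 is stored as (42 << 1) | 1.
 */
static inline bool xa_example_value_roundtrip(void)
{
	void *entry = xa_mk_value(42);

	return xa_is_value(entry) && xa_to_value(entry) == 42;
}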

/**
 * xa_tag_pointer() - Create an XArray entry for a tagged pointer.
 * @p: Plain pointer.
 * @tag: Tag value (0, 1 or 3).
 *
 * If the user of the XArray prefers, they can tag their pointers instead
 * of storing value entries. Three tags are available (0, 1 and 3).
 * These are distinct from the xa_mark_t as they are not replicated up
 * through the array and cannot be searched for.
 *
 * Context: Any context.
 * Return: An XArray entry.
 */
static inline void *xa_tag_pointer(void *p, unsigned long tag)
{
	return (void *)((unsigned long)p | tag);
}

/**
 * xa_untag_pointer() - Turn an XArray entry into a plain pointer.
 * @entry: XArray entry.
 *
 * If you have stored a tagged pointer in the XArray, call this function
 * to get the untagged version of the pointer.
 *
 * Context: Any context.
 * Return: A pointer.
 */
static inline void *xa_untag_pointer(void *entry)
{
	return (void *)((unsigned long)entry & ~3UL);
}

/**
 * xa_pointer_tag() - Get the tag stored in an XArray entry.
 * @entry: XArray entry.
 *
 * If you have stored a tagged pointer in the XArray, call this function
 * to get the tag of that pointer.
 *
 * Context: Any context.
 * Return: A tag.
 */
static inline unsigned int xa_pointer_tag(void *entry)
{
	return (unsigned long)entry & 3UL;
}
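
/*
 * Example (editor's illustrative sketch, not part of this header):
 * tagging a pointer with tag 1 and recovering both the tag and the
 * original pointer. Assumes @p is at least 4-byte aligned, so the
 * bottom two bits are free to hold the tag.
 */
static inline bool xa_example_tag_roundtrip(void *p)
{
	void *entry = xa_tag_pointer(p, 1);

	return xa_pointer_tag(entry) == 1 && xa_untag_pointer(entry) == p;
}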

/*
 * xa_mk_internal() - Create an internal entry.
 * @v: Value to turn into an internal entry.
 *
 * Internal entries are used for a number of purposes. Entries 0-255 are
 * used for sibling entries (only 0-62 are used by the current code). 256
 * is used for the retry entry. 257 is used for the reserved / zero entry.
 * Negative internal entries are used to represent errnos. Node pointers
 * are also tagged as internal entries in some situations.
 *
 * Context: Any context.
 * Return: An XArray internal entry corresponding to this value.
 */
static inline void *xa_mk_internal(unsigned long v)
{
	return (void *)((v << 2) | 2);
}

/*
 * xa_to_internal() - Extract the value from an internal entry.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: The value which was stored in the internal entry.
 */
static inline unsigned long xa_to_internal(const void *entry)
{
	return (unsigned long)entry >> 2;
}

/*
 * xa_is_internal() - Is the entry an internal entry?
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: %true if the entry is an internal entry.
 */
static inline bool xa_is_internal(const void *entry)
{
	return ((unsigned long)entry & 3) == 2;
}

#define XA_ZERO_ENTRY		xa_mk_internal(257)

/**
 * xa_is_zero() - Is the entry a zero entry?
 * @entry: Entry retrieved from the XArray
 *
 * The normal API will return NULL as the contents of a slot containing
 * a zero entry. You can only see zero entries by using the advanced API.
 *
 * Return: %true if the entry is a zero entry.
 */
static inline bool xa_is_zero(const void *entry)
{
	return unlikely(entry == XA_ZERO_ENTRY);
}

/**
 * xa_is_err() - Report whether an XArray operation returned an error
 * @entry: Result from calling an XArray function
 *
 * If an XArray operation cannot complete, it will return a special
 * value indicating an error. This function tells you whether an error
 * occurred; xa_err() tells you which error occurred.
 *
 * Context: Any context.
 * Return: %true if the entry indicates an error.
 */
static inline bool xa_is_err(const void *entry)
{
	return unlikely(xa_is_internal(entry) &&
			entry >= xa_mk_internal(-MAX_ERRNO));
}

/**
 * xa_err() - Turn an XArray result into an errno.
 * @entry: Result from calling an XArray function.
 *
 * If an XArray operation cannot complete, it will return a special
 * pointer value which encodes an errno. This function extracts the
 * errno from the pointer value, or returns 0 if the pointer does not
 * represent an errno.
 *
 * Context: Any context.
 * Return: A negative errno or 0.
 */
static inline int xa_err(void *entry)
{
	/* xa_to_internal() would not do sign extension. */
	if (xa_is_err(entry))
		return (long)entry >> 2;
	return 0;
}
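
/*
 * Example (editor's illustrative sketch, not part of this header):
 * converting the result of an XArray operation into a 0-or-negative
 * errno, the usual pattern at a store call site.
 */
static inline int xa_example_result_to_errno(void *result)
{
	if (xa_is_err(result))		/* e.g. an encoded -ENOMEM */
		return xa_err(result);
	return 0;			/* a real entry (possibly NULL) */
}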

/**
 * struct xa_limit - Represents a range of IDs.
 * @min: The lowest ID to allocate (inclusive).
 * @max: The maximum ID to allocate (inclusive).
 *
 * This structure is used either directly or via the XA_LIMIT() macro
 * to communicate the range of IDs that are valid for allocation.
 * Three common ranges are predefined for you:
 * * xa_limit_32b	- [0 - UINT_MAX]
 * * xa_limit_31b	- [0 - INT_MAX]
 * * xa_limit_16b	- [0 - USHRT_MAX]
 */
struct xa_limit {
	u32 max;
	u32 min;
};

#define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max }

#define xa_limit_32b	XA_LIMIT(0, UINT_MAX)
#define xa_limit_31b	XA_LIMIT(0, INT_MAX)
#define xa_limit_16b	XA_LIMIT(0, USHRT_MAX)

typedef unsigned __bitwise xa_mark_t;
#define XA_MARK_0		((__force xa_mark_t)0U)
#define XA_MARK_1		((__force xa_mark_t)1U)
#define XA_MARK_2		((__force xa_mark_t)2U)
#define XA_PRESENT		((__force xa_mark_t)8U)
#define XA_MARK_MAX		XA_MARK_2
#define XA_FREE_MARK		XA_MARK_0

enum xa_lock_type {
	XA_LOCK_IRQ = 1,
	XA_LOCK_BH = 2,
};

/*
 * Values for xa_flags. The radix tree stores its GFP flags in the xa_flags,
 * and we remain compatible with that.
 */
#define XA_FLAGS_LOCK_IRQ	((__force gfp_t)XA_LOCK_IRQ)
#define XA_FLAGS_LOCK_BH	((__force gfp_t)XA_LOCK_BH)
#define XA_FLAGS_TRACK_FREE	((__force gfp_t)4U)
#define XA_FLAGS_ZERO_BUSY	((__force gfp_t)8U)
#define XA_FLAGS_ALLOC_WRAPPED	((__force gfp_t)16U)
#define XA_FLAGS_ACCOUNT	((__force gfp_t)32U)
#define XA_FLAGS_MARK(mark)	((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
						(__force unsigned)(mark)))

/* ALLOC is for a normal 0-based alloc. ALLOC1 is for a 1-based alloc */
#define XA_FLAGS_ALLOC	(XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK))
#define XA_FLAGS_ALLOC1	(XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY)

/**
 * struct xarray - The anchor of the XArray.
 * @xa_lock: Lock that protects the contents of the XArray.
 *
 * To use the xarray, define it statically or embed it in your data structure.
 * It is a very small data structure, so it does not usually make sense to
 * allocate it separately and keep a pointer to it in your data structure.
 *
 * You may use the xa_lock to protect your own data structures as well.
 */
/*
 * If all of the entries in the array are NULL, @xa_head is a NULL pointer.
 * If the only non-NULL entry in the array is at index 0, @xa_head is that
 * entry. If any other entry in the array is non-NULL, @xa_head points
 * to an @xa_node.
 */
struct xarray {
	spinlock_t	xa_lock;
/* private: The rest of the data structure is not to be used directly. */
	gfp_t		xa_flags;
	void __rcu *	xa_head;
};

#define XARRAY_INIT(name, flags) {			\
	.xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock),	\
	.xa_flags = flags,				\
	.xa_head = NULL,				\
}

/**
 * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
 * @name: A string that names your XArray.
 * @flags: XA_FLAG values.
 *
 * This is intended for file scope definitions of XArrays. It declares
 * and initialises an empty XArray with the chosen name and flags. It is
 * equivalent to calling xa_init_flags() on the array, but it does the
 * initialisation at compile time instead of runtime.
 */
#define DEFINE_XARRAY_FLAGS(name, flags)		\
	struct xarray name = XARRAY_INIT(name, flags)

/**
 * DEFINE_XARRAY() - Define an XArray.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of XArrays. It declares
 * and initialises an empty XArray with the chosen name. It is equivalent
 * to calling xa_init() on the array, but it does the initialisation at
 * compile time instead of runtime.
 */
#define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)

/**
 * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of allocating XArrays.
 * See also DEFINE_XARRAY().
 */
#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)

/**
 * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of allocating XArrays.
 * See also DEFINE_XARRAY().
 */
#define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1)
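
/*
 * Example (editor's illustrative sketch, not part of this header):
 * file-scope definitions. "example_xa" and "example_ids" are made-up
 * names used by the other sketches in this file.
 */
static DEFINE_XARRAY(example_xa);		/* ordinary array */
static DEFINE_XARRAY_ALLOC(example_ids);	/* allocates IDs from 0 */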

void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *xa_erase(struct xarray *, unsigned long index);
void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
			void *entry, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
void *xa_find(struct xarray *xa, unsigned long *index,
		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
void *xa_find_after(struct xarray *xa, unsigned long *index,
		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
			unsigned long max, unsigned int n, xa_mark_t);
void xa_destroy(struct xarray *);
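
/*
 * Example (editor's illustrative sketch, not part of this header): the
 * basic store/load/erase cycle on the illustrative "example_xa".
 */
static inline void *xa_example_store_load(void *item)
{
	void *old = xa_store(&example_xa, 5, item, GFP_KERNEL);

	if (xa_is_err(old))		/* allocation failed, etc. */
		return old;
	item = xa_load(&example_xa, 5);	/* returns what we just stored */
	xa_erase(&example_xa, 5);	/* index 5 reads as NULL again */
	return item;
}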

/**
 * xa_init_flags() - Initialise an empty XArray with flags.
 * @xa: XArray.
 * @flags: XA_FLAG values.
 *
 * If you need to initialise an XArray with special flags (eg you need
 * to take the lock from interrupt context), use this function instead
 * of xa_init().
 *
 * Context: Any context.
 */
static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
{
	spin_lock_init(&xa->xa_lock);
	xa->xa_flags = flags;
	xa->xa_head = NULL;
}

/**
 * xa_init() - Initialise an empty XArray.
 * @xa: XArray.
 *
 * An empty XArray is full of NULL entries.
 *
 * Context: Any context.
 */
static inline void xa_init(struct xarray *xa)
{
	xa_init_flags(xa, 0);
}

/**
 * xa_empty() - Determine if an array has any present entries.
 * @xa: XArray.
 *
 * Context: Any context.
 * Return: %true if the array contains only NULL pointers.
 */
static inline bool xa_empty(const struct xarray *xa)
{
	return xa->xa_head == NULL;
}

/**
 * xa_marked() - Inquire whether any entry in this array has a mark set
 * @xa: Array
 * @mark: Mark value
 *
 * Context: Any context.
 * Return: %true if any entry has this mark set.
 */
static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
{
	return xa->xa_flags & XA_FLAGS_MARK(mark);
}

/**
 * xa_for_each_range() - Iterate over a portion of an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @start: First index to retrieve from array.
 * @last: Last index to retrieve from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index. You may modify @index during the iteration if you
 * want to skip or reprocess indices. It is safe to modify the array
 * during the iteration. At the end of the iteration, @entry will be set
 * to NULL and @index will have a value less than or equal to @last.
 *
 * xa_for_each_range() is O(n.log(n)) while xas_for_each() is O(n). You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_range() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each() iterator instead.
 * The xas_for_each() iterator will expand into more inline code than
 * xa_for_each_range().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each_range(xa, index, entry, start, last)		\
	for (index = start,						\
	     entry = xa_find(xa, &index, last, XA_PRESENT);		\
	     entry;							\
	     entry = xa_find_after(xa, &index, last, XA_PRESENT))

/**
 * xa_for_each_start() - Iterate over a portion of an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @start: First index to retrieve from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index. You may modify @index during the iteration if you
 * want to skip or reprocess indices. It is safe to modify the array
 * during the iteration. At the end of the iteration, @entry will be set
 * to NULL and @index will have a value less than or equal to max.
 *
 * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_start() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each() iterator instead.
 * The xas_for_each() iterator will expand into more inline code than
 * xa_for_each_start().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each_start(xa, index, entry, start) \
	xa_for_each_range(xa, index, entry, start, ULONG_MAX)

/**
 * xa_for_each() - Iterate over present entries in an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index. You may modify @index during the iteration if you want
 * to skip or reprocess indices. It is safe to modify the array during the
 * iteration. At the end of the iteration, @entry will be set to NULL and
 * @index will have a value less than or equal to max.
 *
 * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)). xa_for_each()
 * will spin if it hits a retry entry; if you intend to see retry entries,
 * you should use the xas_for_each() iterator instead. The xas_for_each()
 * iterator will expand into more inline code than xa_for_each().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each(xa, index, entry) \
	xa_for_each_start(xa, index, entry, 0)
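
/*
 * Example (editor's illustrative sketch, not part of this header):
 * counting the present entries in the illustrative "example_xa".
 */
static inline unsigned long xa_example_count(void)
{
	unsigned long index, count = 0;
	void *entry;

	xa_for_each(&example_xa, index, entry)
		count++;		/* @entry is never NULL in here */
	return count;
}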

/**
 * xa_for_each_marked() - Iterate over marked entries in an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @filter: Selection criterion.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index. The iteration will skip all entries in the array
 * which do not match @filter. You may modify @index during the iteration
 * if you want to skip or reprocess indices. It is safe to modify the array
 * during the iteration. At the end of the iteration, @entry will be set to
 * NULL and @index will have a value less than or equal to max.
 *
 * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
 * You have to handle your own locking with xas_for_each(), and if you have
 * to unlock after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each_marked() iterator
 * instead. The xas_for_each_marked() iterator will expand into more inline
 * code than xa_for_each_marked().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each_marked(xa, index, entry, filter) \
	for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
	     entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
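
/*
 * Example (editor's illustrative sketch, not part of this header):
 * marking one entry and then visiting only marked entries.
 */
static inline unsigned long xa_example_count_marked(void)
{
	unsigned long index, count = 0;
	void *entry;

	xa_set_mark(&example_xa, 5, XA_MARK_0);	/* no-op if index 5 is empty */
	xa_for_each_marked(&example_xa, index, entry, XA_MARK_0)
		count++;
	return count;
}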

#define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
#define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
#define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
#define xa_lock_bh(xa)		spin_lock_bh(&(xa)->xa_lock)
#define xa_unlock_bh(xa)	spin_unlock_bh(&(xa)->xa_lock)
#define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
#define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)
#define xa_lock_irqsave(xa, flags) \
				spin_lock_irqsave(&(xa)->xa_lock, flags)
#define xa_unlock_irqrestore(xa, flags) \
				spin_unlock_irqrestore(&(xa)->xa_lock, flags)
#define xa_lock_nested(xa, subclass) \
				spin_lock_nested(&(xa)->xa_lock, subclass)
#define xa_lock_bh_nested(xa, subclass) \
				spin_lock_bh_nested(&(xa)->xa_lock, subclass)
#define xa_lock_irq_nested(xa, subclass) \
				spin_lock_irq_nested(&(xa)->xa_lock, subclass)
#define xa_lock_irqsave_nested(xa, flags, subclass) \
		spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)

/*
 * Versions of the normal API which require the caller to hold the
 * xa_lock. If the GFP flags allow it, they will drop the lock to
 * allocate memory, then reacquire it afterwards. These functions
 * may also re-enable interrupts if the XArray flags indicate the
 * locking should be interrupt safe.
 */
void *__xa_erase(struct xarray *, unsigned long index);
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
		void *entry, gfp_t);
int __must_check __xa_insert(struct xarray *, unsigned long index,
		void *entry, gfp_t);
int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
		struct xa_limit, gfp_t);
int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
		struct xa_limit, u32 *next, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
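
/*
 * Example (editor's illustrative sketch, not part of this header):
 * updating two indices atomically by holding the xa_lock across both
 * __xa_store() calls. GFP_ATOMIC keeps __xa_store() from dropping the
 * lock to sleep for memory.
 */
static inline int xa_example_store_pair(void *a, void *b)
{
	void *curr;

	xa_lock(&example_xa);
	curr = __xa_store(&example_xa, 0, a, GFP_ATOMIC);
	if (!xa_is_err(curr))
		curr = __xa_store(&example_xa, 1, b, GFP_ATOMIC);
	xa_unlock(&example_xa);

	return xa_err(curr);
}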

/**
 * xa_store_bh() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_store() except it disables softirqs
 * while holding the array lock.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_bh(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock_bh(xa);

	return curr;
}

/**
 * xa_store_irq() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_store() except it disables interrupts
 * while holding the array lock.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_irq(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock_irq(xa);

	return curr;
}

/**
 * xa_erase_bh() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_bh(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_bh(xa);

	return entry;
}

/**
 * xa_erase_irq() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_irq(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_irq(xa);

	return entry;
}

/**
 * xa_cmpxchg() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * If the entry at @index is the same as @old, replace it with @entry.
 * If the return value is equal to @old, then the exchange was successful.
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep
 * if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
		void *old, void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock(xa);

	return curr;
}

/**
 * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_cmpxchg() except it disables softirqs
 * while holding the array lock.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
		void *old, void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_bh(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock_bh(xa);

	return curr;
}

/**
 * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_cmpxchg() except it disables interrupts
 * while holding the array lock.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
		void *old, void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_irq(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock_irq(xa);

	return curr;
}

/**
 * xa_insert() - Store this entry in the XArray unless another entry is
 * already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present. Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep if
 * the @gfp flags permit.
 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock(xa);

	return err;
}

/**
 * xa_insert_bh() - Store this entry in the XArray unless another entry is
 * already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present. Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert_bh(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_bh(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_insert_irq() - Store this entry in the XArray unless another entry is
 * already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present. Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert_irq(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_irq(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_alloc() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep if
 * the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock(xa);

	return err;
}
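
/*
 * Example (editor's illustrative sketch, not part of this header):
 * allocating an ID for @item from the illustrative "example_ids".
 * On success, *id is set before @item becomes visible to lookups.
 */
static inline int xa_example_alloc_id(void *item, u32 *id)
{
	return xa_alloc(&example_ids, id, item, xa_limit_32b, GFP_KERNEL);
}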

/**
 * xa_alloc_bh() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_bh(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_alloc_irq() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_irq(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep if
 * the @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping. 1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock(xa);

	return err;
}
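
/*
 * Example (editor's illustrative sketch, not part of this header):
 * cyclic allocation that hands out ascending IDs and wraps within
 * xa_limit_31b. The static "next" cursor is for illustration only;
 * real users normally embed the cursor in their own structure.
 */
static inline int xa_example_alloc_cyclic_id(void *item, u32 *id)
{
	static u32 next;

	/* Returns 1 rather than 0 if the search wrapped around. */
	return xa_alloc_cyclic(&example_ids, id, item, xa_limit_31b,
			&next, GFP_KERNEL);
}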

/**
 * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping. 1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_bh(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping. 1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_irq(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_reserve() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * Ensures there is somewhere to store an entry at @index in the array.
 * If there is already something stored at @index, this function does
 * nothing. If there was nothing there, the entry is marked as reserved.
 * Loading from a reserved entry returns a %NULL pointer.
 *
 * If you do not use the entry that you have reserved, call xa_release()
 * or xa_erase() to free any unnecessary memory.
 *
 * Context: Any context. Takes and releases the xa_lock.
 * May sleep if the @gfp flags permit.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_reserve_bh() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * A softirq-disabling version of xa_reserve().
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_reserve_irq() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * An interrupt-disabling version of xa_reserve().
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_release() - Release a reserved entry.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After calling xa_reserve(), you can call this function to release the
 * reservation. If the entry at @index has been stored to, this function
 * will do nothing.
 */
static inline void xa_release(struct xarray *xa, unsigned long index)
{
	xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0);
}
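
/*
 * Example (editor's illustrative sketch, not part of this header):
 * reserving an index before publishing an entry, releasing the
 * reservation on the failure path.
 */
static inline int xa_example_publish(void *item)
{
	int err = xa_reserve(&example_xa, 5, GFP_KERNEL);

	if (err)
		return err;
	if (!item) {			/* nothing to publish after all */
		xa_release(&example_xa, 5);
		return -EINVAL;
	}
	return xa_err(xa_store(&example_xa, 5, item, GFP_KERNEL));
}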

/* Everything below here is the Advanced API. Proceed with caution. */

/*
 * The xarray is constructed out of a set of 'chunks' of pointers. Choosing
 * the best chunk size requires some tradeoffs. A power of two recommends
 * itself so that we can walk the tree based purely on shifts and masks.
 * Generally, the larger the better; as the number of slots per level of the
 * tree increases, the less tall the tree needs to be. But that needs to be
 * balanced against the memory consumption of each node. On a 64-bit system,
 * xa_node is currently 576 bytes, and we get 7 of them per 4kB page. If we
 * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
 */
#ifndef XA_CHUNK_SHIFT
#define XA_CHUNK_SHIFT		(CONFIG_BASE_SMALL ? 4 : 6)
#endif
#define XA_CHUNK_SIZE		(1UL << XA_CHUNK_SHIFT)
#define XA_CHUNK_MASK		(XA_CHUNK_SIZE - 1)
#define XA_MAX_MARKS		3
#define XA_MARK_LONGS		DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG)

/*
 * @count is the count of every non-NULL element in the ->slots array
 * whether that is a value entry, a retry entry, a user pointer,
 * a sibling entry or a pointer to the next level of the tree.
 * @nr_values is the count of every element in ->slots which is
 * either a value entry or a sibling of a value entry.
 */
struct xa_node {
	unsigned char	shift;		/* Bits remaining in each slot */
	unsigned char	offset;		/* Slot offset in parent */
	unsigned char	count;		/* Total entry count */
	unsigned char	nr_values;	/* Value entry count */
	struct xa_node __rcu *parent;	/* NULL at top of tree */
	struct xarray	*array;		/* The array we belong to */
	union {
		struct list_head private_list;	/* For tree user */
		struct rcu_head	rcu_head;	/* Used when freeing node */
	};
	void __rcu	*slots[XA_CHUNK_SIZE];
	union {
		unsigned long	tags[XA_MAX_MARKS][XA_MARK_LONGS];
		unsigned long	marks[XA_MAX_MARKS][XA_MARK_LONGS];
	};
};

void xa_dump(const struct xarray *);
void xa_dump_node(const struct xa_node *);

#ifdef XA_DEBUG
#define XA_BUG_ON(xa, x) do {				\
		if (x) {				\
			xa_dump(xa);			\
			BUG();				\
		}					\
	} while (0)
#define XA_NODE_BUG_ON(node, x) do {			\
		if (x) {				\
			if (node) xa_dump_node(node);	\
			BUG();				\
		}					\
	} while (0)
#else
#define XA_BUG_ON(xa, x)	do { } while (0)
#define XA_NODE_BUG_ON(node, x)	do { } while (0)
#endif

/* Private */
static inline void *xa_head(const struct xarray *xa)
{
	return rcu_dereference_check(xa->xa_head,
				lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_head_locked(const struct xarray *xa)
{
	return rcu_dereference_protected(xa->xa_head,
				lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_entry(const struct xarray *xa,
			const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_check(node->slots[offset],
				lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_entry_locked(const struct xarray *xa,
			const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_protected(node->slots[offset],
				lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline struct xa_node *xa_parent(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_check(node->parent,
				lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_protected(node->parent,
				lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_mk_node(const struct xa_node *node)
{
	return (void *)((unsigned long)node | 2);
}

/* Private */
static inline struct xa_node *xa_to_node(const void *entry)
{
	return (struct xa_node *)((unsigned long)entry - 2);
}

/* Private */
static inline bool xa_is_node(const void *entry)
{
	return xa_is_internal(entry) && (unsigned long)entry > 4096;
}

/* Private */
static inline void *xa_mk_sibling(unsigned int offset)
{
	return xa_mk_internal(offset);
}

/* Private */
static inline unsigned long xa_to_sibling(const void *entry)
{
	return xa_to_internal(entry);
}

/**
 * xa_is_sibling() - Is the entry a sibling entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a sibling entry.
 */
static inline bool xa_is_sibling(const void *entry)
{
	return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
}

#define XA_RETRY_ENTRY		xa_mk_internal(256)

/**
 * xa_is_retry() - Is the entry a retry entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a retry entry.
 */
static inline bool xa_is_retry(const void *entry)
{
	return unlikely(entry == XA_RETRY_ENTRY);
}

/**
 * xa_is_advanced() - Is the entry only permitted for the advanced API?
 * @entry: Entry to be stored in the XArray.
 *
 * Return: %true if the entry cannot be stored by the normal API.
 */
static inline bool xa_is_advanced(const void *entry)
{
	return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
}

/**
 * typedef xa_update_node_t - A callback function from the XArray.
 * @node: The node which is being processed
 *
 * This function is called every time the XArray updates the count of
 * present and value entries in a node. It allows advanced users to
 * maintain the private_list in the node.
 *
 * Context: The xa_lock is held and interrupts may be disabled.
 * Implementations should not drop the xa_lock, nor re-enable
 * interrupts.
 */
typedef void (*xa_update_node_t)(struct xa_node *node);

void xa_delete_node(struct xa_node *, xa_update_node_t);

/*
 * The xa_state is opaque to its users. It contains various different pieces
 * of state involved in the current operation on the XArray. It should be
 * declared on the stack and passed between the various internal routines.
 * The various elements in it should not be accessed directly, but only
 * through the provided accessor functions. The below documentation is for
 * the benefit of those working on the code, not for users of the XArray.
 *
 * @xa_node usually points to the xa_node containing the slot we're operating
 * on (and @xa_offset is the offset in the slots array). If there is a
 * single entry in the array at index 0, there are no allocated xa_nodes to
 * point to, and so we store %NULL in @xa_node. @xa_node is set to
 * the value %XAS_RESTART if the xa_state is not walked to the correct
 * position in the tree of nodes for this operation. If an error occurs
 * during an operation, it is set to an %XAS_ERROR value. If we run off the
 * end of the allocated nodes, it is set to %XAS_BOUNDS.
 */
struct xa_state {
	struct xarray *xa;
	unsigned long xa_index;
	unsigned char xa_shift;
	unsigned char xa_sibs;
	unsigned char xa_offset;
	unsigned char xa_pad;		/* Helps gcc generate better code */
	struct xa_node *xa_node;
	struct xa_node *xa_alloc;
	xa_update_node_t xa_update;
	struct list_lru *xa_lru;
};

/*
 * We encode errnos in the xas->xa_node. If an error has happened, we need to
 * drop the lock to fix it, and once we've done so the xa_state is invalid.
 */
#define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL))
#define XAS_BOUNDS	((struct xa_node *)1UL)
#define XAS_RESTART	((struct xa_node *)3UL)

#define __XA_STATE(array, index, shift, sibs)  {	\
	.xa = array,					\
	.xa_index = index,				\
	.xa_shift = shift,				\
	.xa_sibs = sibs,				\
	.xa_offset = 0,					\
	.xa_pad = 0,					\
	.xa_node = XAS_RESTART,				\
	.xa_alloc = NULL,				\
	.xa_update = NULL,				\
	.xa_lru = NULL,					\
}

/**
 * XA_STATE() - Declare an XArray operation state.
 * @name: Name of this operation state (usually xas).
 * @array: Array to operate on.
 * @index: Initial index of interest.
 *
 * Declare and initialise an xa_state on the stack.
 */
#define XA_STATE(name, array, index)				\
	struct xa_state name = __XA_STATE(array, index, 0, 0)

/**
 * XA_STATE_ORDER() - Declare an XArray operation state.
 * @name: Name of this operation state (usually xas).
 * @array: Array to operate on.
 * @index: Initial index of interest.
 * @order: Order of entry.
 *
 * Declare and initialise an xa_state on the stack. This variant of
 * XA_STATE() allows you to specify the 'order' of the element you
 * want to operate on.
 */
#define XA_STATE_ORDER(name, array, index, order)		\
	struct xa_state name = __XA_STATE(array,		\
			(index >> order) << order,		\
			order - (order % XA_CHUNK_SHIFT),	\
			(1U << (order % XA_CHUNK_SHIFT)) - 1)

#define xas_marked(xas, mark)	xa_marked((xas)->xa, (mark))
#define xas_trylock(xas)	xa_trylock((xas)->xa)
#define xas_lock(xas)		xa_lock((xas)->xa)
#define xas_unlock(xas)		xa_unlock((xas)->xa)
#define xas_lock_bh(xas)	xa_lock_bh((xas)->xa)
#define xas_unlock_bh(xas)	xa_unlock_bh((xas)->xa)
#define xas_lock_irq(xas)	xa_lock_irq((xas)->xa)
#define xas_unlock_irq(xas)	xa_unlock_irq((xas)->xa)
#define xas_lock_irqsave(xas, flags) \
				xa_lock_irqsave((xas)->xa, flags)
#define xas_unlock_irqrestore(xas, flags) \
				xa_unlock_irqrestore((xas)->xa, flags)

/**
 * xas_error() - Return an errno stored in the xa_state.
 * @xas: XArray operation state.
 *
 * Return: 0 if no error has been noted. A negative errno if one has.
 */
static inline int xas_error(const struct xa_state *xas)
{
	return xa_err(xas->xa_node);
}

/**
 * xas_set_err() - Note an error in the xa_state.
 * @xas: XArray operation state.
 * @err: Negative error number.
 *
 * Only call this function with a negative @err; zero or positive errors
 * will probably not behave the way you think they should. If you want
 * to clear the error from an xa_state, use xas_reset().
 */
static inline void xas_set_err(struct xa_state *xas, long err)
{
	xas->xa_node = XA_ERROR(err);
}

/**
 * xas_invalid() - Is the xas in a retry or error state?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas cannot be used for operations.
 */
static inline bool xas_invalid(const struct xa_state *xas)
{
	return (unsigned long)xas->xa_node & 3;
}

/**
 * xas_valid() - Is the xas a valid cursor into the array?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas can be used for operations.
 */
static inline bool xas_valid(const struct xa_state *xas)
{
	return !xas_invalid(xas);
}

/**
 * xas_is_node() - Does the xas point to a node?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas currently references a node.
 */
static inline bool xas_is_node(const struct xa_state *xas)
{
	return xas_valid(xas) && xas->xa_node;
}

/* True if the pointer is something other than a node */
static inline bool xas_not_node(struct xa_node *node)
{
	return ((unsigned long)node & 3) || !node;
}

/* True if the node represents RESTART or an error */
static inline bool xas_frozen(struct xa_node *node)
{
	return (unsigned long)node & 2;
}

/* True if the node represents head-of-tree, RESTART or BOUNDS */
static inline bool xas_top(struct xa_node *node)
{
	return node <= XAS_RESTART;
}

/**
 * xas_reset() - Reset an XArray operation state.
 * @xas: XArray operation state.
 *
 * Resets the error or walk state of the @xas so future walks of the
 * array will start from the root. Use this if you have dropped the
 * xarray lock and want to reuse the xa_state.
 *
 * Context: Any context.
 */
static inline void xas_reset(struct xa_state *xas)
{
	xas->xa_node = XAS_RESTART;
}

/**
 * xas_retry() - Retry the operation if appropriate.
 * @xas: XArray operation state.
 * @entry: Entry from xarray.
 *
 * The advanced functions may sometimes return an internal entry, such as
 * a retry entry or a zero entry. This function sets up the @xas to restart
 * the walk from the head of the array if needed.
 *
 * Context: Any context.
 * Return: true if the operation needs to be retried.
 */
static inline bool xas_retry(struct xa_state *xas, const void *entry)
{
	if (xa_is_zero(entry))
		return true;
	if (!xa_is_retry(entry))
		return false;
	xas_reset(xas);
	return true;
}

void *xas_load(struct xa_state *);
void *xas_store(struct xa_state *, void *entry);
void *xas_find(struct xa_state *, unsigned long max);
void *xas_find_conflict(struct xa_state *);
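
/*
 * Example (editor's illustrative sketch, not part of this header): an
 * RCU-protected lookup which restarts if it races with a node being
 * freed, using xas_retry() as described above.
 */
static inline void *xa_example_rcu_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}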

bool xas_get_mark(const struct xa_state *, xa_mark_t);
void xas_set_mark(const struct xa_state *, xa_mark_t);
void xas_clear_mark(const struct xa_state *, xa_mark_t);
void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
void xas_init_marks(const struct xa_state *);

bool xas_nomem(struct xa_state *, gfp_t);
void xas_destroy(struct xa_state *);
void xas_pause(struct xa_state *);
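
/*
 * Example (editor's illustrative sketch, not part of this header): the
 * canonical advanced-API store loop. If the store needs memory,
 * xas_nomem() allocates it after the lock is dropped and the loop
 * retries the store.
 */
static inline int xa_example_xas_store(struct xarray *xa, unsigned long index,
		void *item)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}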
1528 :
1529 : void xas_create_range(struct xa_state *);
1530 :
1531 : #ifdef CONFIG_XARRAY_MULTI
1532 : int xa_get_order(struct xarray *, unsigned long index);
1533 : void xas_split(struct xa_state *, void *entry, unsigned int order);
1534 : void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
1535 : #else
1536 : static inline int xa_get_order(struct xarray *xa, unsigned long index)
1537 : {
1538 : return 0;
1539 : }
1540 :
1541 : static inline void xas_split(struct xa_state *xas, void *entry,
1542 : unsigned int order)
1543 : {
1544 : xas_store(xas, entry);
1545 : }
1546 :
1547 : static inline void xas_split_alloc(struct xa_state *xas, void *entry,
1548 : unsigned int order, gfp_t gfp)
1549 : {
1550 : }
1551 : #endif
1552 :
/**
 * xas_reload() - Refetch an entry from the xarray.
 * @xas: XArray operation state.
 *
 * Use this function to check that a previously loaded entry still has
 * the same value. This is useful for the lockless pagecache lookup where
 * we walk the array with only the RCU lock to protect us, lock the page,
 * then check that the page hasn't moved since we looked it up.
 *
 * The caller guarantees that @xas is still valid. If it may be in an
 * error or restart state, call xas_load() instead.
 *
 * Return: The entry at this location in the xarray.
 */
static inline void *xas_reload(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;
	void *entry;
	char offset;

	if (!node)
		return xa_head(xas->xa);
	if (IS_ENABLED(CONFIG_XARRAY_MULTI)) {
		offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;
		entry = xa_entry(xas->xa, node, offset);
		if (!xa_is_sibling(entry))
			return entry;
		offset = xa_to_sibling(entry);
	} else {
		offset = xas->xa_offset;
	}
	return xa_entry(xas->xa, node, offset);
}

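/*
 * Illustrative sketch (editorial addition), modelled on the page cache
 * lookup the comment above describes. It assumes the array holds only
 * folio pointers; folio_try_get()/folio_put() stand in for whatever
 * reference the caller takes on the object:
 *
 *	XA_STATE(xas, &mapping->i_pages, index);
 *	struct folio *folio;
 *
 *	rcu_read_lock();
 * repeat:
 *	xas_reset(&xas);
 *	folio = xas_load(&xas);
 *	if (xas_retry(&xas, folio))
 *		goto repeat;
 *	if (folio && !folio_try_get(folio))
 *		goto repeat;
 *	if (folio && folio != xas_reload(&xas)) {
 *		folio_put(folio);	// the slot changed under us; retry
 *		goto repeat;
 *	}
 *	rcu_read_unlock();
 */
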
/**
 * xas_set() - Set up XArray operation state for a different index.
 * @xas: XArray operation state.
 * @index: New index into the XArray.
 *
 * Move the operation state to refer to a different index. This will
 * have the effect of starting a walk from the top; see xas_next()
 * to move to an adjacent index.
 */
static inline void xas_set(struct xa_state *xas, unsigned long index)
{
	xas->xa_index = index;
	xas->xa_node = XAS_RESTART;
}

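/*
 * Illustrative sketch (editorial addition): one xa_state can service
 * several lookups. xas_set() rewinds it to the root for the new index,
 * whereas assigning xa_index directly would leave it pointing into the
 * wrong node:
 *
 *	XA_STATE(xas, &array, 0);
 *	void *first, *second;
 *
 *	rcu_read_lock();
 *	first = xas_load(&xas);
 *	xas_set(&xas, 1000);	// restart the walk at index 1000
 *	second = xas_load(&xas);
 *	rcu_read_unlock();
 */
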
/**
 * xas_advance() - Skip over sibling entries.
 * @xas: XArray operation state.
 * @index: Index of last sibling entry.
 *
 * Move the operation state to refer to the last sibling entry.
 * This is useful for loops that normally want to see sibling
 * entries but sometimes want to skip them. Use xas_set() if you
 * want to move to an index which is not part of this entry.
 */
static inline void xas_advance(struct xa_state *xas, unsigned long index)
{
	unsigned char shift = xas_is_node(xas) ? xas->xa_node->shift : 0;

	xas->xa_index = index;
	xas->xa_offset = (index >> shift) & XA_CHUNK_MASK;
}

/**
 * xas_set_order() - Set up XArray operation state for a multislot entry.
 * @xas: XArray operation state.
 * @index: Target of the operation.
 * @order: Entry occupies 2^@order indices.
 */
static inline void xas_set_order(struct xa_state *xas, unsigned long index,
					unsigned int order)
{
#ifdef CONFIG_XARRAY_MULTI
	xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
	xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
	xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	xas->xa_node = XAS_RESTART;
#else
	BUG_ON(order > 0);
	xas_set(xas, index);
#endif
}

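/*
 * Illustrative sketch (editorial addition, requires CONFIG_XARRAY_MULTI):
 * storing one entry that covers 2^4 = 16 consecutive indices; @array and
 * @item are caller-provided. Because the index rounds down to a multiple
 * of 16, the entry occupies indices 64-79 and a load of any of them
 * returns it:
 *
 *	XA_STATE(xas, &array, 0);
 *
 *	xas_set_order(&xas, 70, 4);	// index 70 rounds down to 64
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, item);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 */
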
/**
 * xas_set_update() - Set up XArray operation state for a callback.
 * @xas: XArray operation state.
 * @update: Function to call when updating a node.
 *
 * The XArray can notify a caller after it has updated an xa_node.
 * This is advanced functionality and is only needed by the page
 * cache and swap cache.
 */
static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
{
	xas->xa_update = update;
}

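/* Nodes allocated on behalf of this xa_state are charged to @lru. */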
static inline void xas_set_lru(struct xa_state *xas, struct list_lru *lru)
{
	xas->xa_lru = lru;
}

/**
 * xas_next_entry() - Advance iterator to next present entry.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * xas_next_entry() is an inline function to optimise xarray traversal for
 * speed. It is equivalent to calling xas_find(), and will call xas_find()
 * for all the hard cases.
 *
 * Return: The next present entry after the one currently referred to by @xas.
 */
static inline void *xas_next_entry(struct xa_state *xas, unsigned long max)
{
	struct xa_node *node = xas->xa_node;
	void *entry;

	if (unlikely(xas_not_node(node) || node->shift ||
			xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))
		return xas_find(xas, max);

	do {
		if (unlikely(xas->xa_index >= max))
			return xas_find(xas, max);
		if (unlikely(xas->xa_offset == XA_CHUNK_MASK))
			return xas_find(xas, max);
		entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
		if (unlikely(xa_is_internal(entry)))
			return xas_find(xas, max);
		xas->xa_offset++;
		xas->xa_index++;
	} while (!entry);

	return entry;
}

/* Private */
static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance,
		xa_mark_t mark)
{
	unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark];
	unsigned int offset = xas->xa_offset;

	if (advance)
		offset++;
	if (XA_CHUNK_SIZE == BITS_PER_LONG) {
		if (offset < XA_CHUNK_SIZE) {
			unsigned long data = *addr & (~0UL << offset);
			if (data)
				return __ffs(data);
		}
		return XA_CHUNK_SIZE;
	}

	return find_next_bit(addr, XA_CHUNK_SIZE, offset);
}

/**
 * xas_next_marked() - Advance iterator to next marked entry.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark to search for.
 *
 * xas_next_marked() is an inline function to optimise xarray traversal for
 * speed. It is equivalent to calling xas_find_marked(), and will call
 * xas_find_marked() for all the hard cases.
 *
 * Return: The next marked entry after the one currently referred to by @xas.
 */
static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
		xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	void *entry;
	unsigned int offset;

	if (unlikely(xas_not_node(node) || node->shift))
		return xas_find_marked(xas, max, mark);
	offset = xas_find_chunk(xas, true, mark);
	xas->xa_offset = offset;
	xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;
	if (xas->xa_index > max)
		return NULL;
	if (offset == XA_CHUNK_SIZE)
		return xas_find_marked(xas, max, mark);
	entry = xa_entry(xas->xa, node, offset);
	if (!entry)
		return xas_find_marked(xas, max, mark);
	return entry;
}

/*
 * If iterating while holding a lock, drop the lock and reschedule
 * every %XA_CHECK_SCHED loops.
 */
enum {
	XA_CHECK_SCHED = 4096,
};

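/*
 * Illustrative sketch (editorial addition) of the pattern this constant
 * exists for, as used by long-running locked iterations in the kernel:
 *
 *	XA_STATE(xas, &array, 0);
 *	void *entry;
 *	unsigned int i = 0;
 *
 *	xas_lock_irq(&xas);
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		// ... process entry ...
 *		if (++i % XA_CHECK_SCHED)
 *			continue;
 *		xas_pause(&xas);	// make it safe to drop the lock
 *		xas_unlock_irq(&xas);
 *		cond_resched();
 *		xas_lock_irq(&xas);
 *	}
 *	xas_unlock_irq(&xas);
 */
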
/**
 * xas_for_each() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 *
 * The loop body will be executed for each entry present in the xarray
 * between the current xas position and @max. @entry will be set to
 * the entry retrieved from the xarray. It is safe to delete entries
 * from the array in the loop body. You should hold either the RCU lock
 * or the xa_lock while iterating. If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each(xas, entry, max) \
	for (entry = xas_find(xas, max); entry; \
	     entry = xas_next_entry(xas, max))

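/*
 * Illustrative sketch (editorial addition): counting the entries present
 * in a range. The iteration itself only requires the RCU lock because
 * nothing is modified:
 *
 *	XA_STATE(xas, &array, start);
 *	void *entry;
 *	unsigned long count = 0;
 *
 *	rcu_read_lock();
 *	xas_for_each(&xas, entry, end) {
 *		if (xas_retry(&xas, entry))
 *			continue;
 *		count++;
 *	}
 *	rcu_read_unlock();
 */
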
/**
 * xas_for_each_marked() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 * @mark: Mark to search for.
 *
 * The loop body will be executed for each marked entry in the xarray
 * between the current xas position and @max. @entry will be set to
 * the entry retrieved from the xarray. It is safe to delete entries
 * from the array in the loop body. You should hold either the RCU lock
 * or the xa_lock while iterating. If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each_marked(xas, entry, max, mark) \
	for (entry = xas_find_marked(xas, max, mark); entry; \
	     entry = xas_next_marked(xas, max, mark))

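/*
 * Illustrative sketch (editorial addition): visiting only the entries
 * carrying a given mark, here %XA_MARK_0, and clearing it as we go.
 * Modifying marks requires holding the xa_lock:
 *
 *	XA_STATE(xas, &array, 0);
 *	void *entry;
 *
 *	xas_lock(&xas);
 *	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
 *		xas_clear_mark(&xas, XA_MARK_0);
 *	xas_unlock(&xas);
 */
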
/**
 * xas_for_each_conflict() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 *
 * The loop body will be executed for each entry in the XArray that
 * lies within the range specified by @xas. If the loop terminates
 * normally, @entry will be %NULL. The user may break out of the loop,
 * which will leave @entry set to the conflicting entry. The caller
 * may also call xas_set_err() to exit the loop while setting an error
 * to record the reason.
 */
#define xas_for_each_conflict(xas, entry) \
	while ((entry = xas_find_conflict(xas)))

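/*
 * Illustrative sketch (editorial addition), modelled on how the page
 * cache inserts @new only if the range is empty or holds nothing but
 * value entries; setting an error terminates the loop, as documented
 * above:
 *
 *	XA_STATE(xas, &array, index);
 *	void *entry;
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_for_each_conflict(&xas, entry) {
 *			if (!xa_is_value(entry))
 *				xas_set_err(&xas, -EEXIST);
 *		}
 *		if (!xas_error(&xas))
 *			xas_store(&xas, new);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 *
 *	return xas_error(&xas);
 */
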
void *__xas_next(struct xa_state *);
void *__xas_prev(struct xa_state *);

/**
 * xas_prev() - Move iterator to previous index.
 * @xas: XArray operation state.
 *
 * If the @xas was in an error state, it will remain in an error state
 * and this function will return %NULL. If the @xas has never been walked,
 * it will have the effect of calling xas_load(). Otherwise one will be
 * subtracted from the index and the state will be walked to the correct
 * location in the array for the next operation.
 *
 * If the iterator was referencing index 0, this function wraps
 * around to %ULONG_MAX.
 *
 * Return: The entry at the new index. This may be %NULL or an internal
 * entry.
 */
static inline void *xas_prev(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (unlikely(xas_not_node(node) || node->shift ||
				xas->xa_offset == 0))
		return __xas_prev(xas);

	xas->xa_index--;
	xas->xa_offset--;
	return xa_entry(xas->xa, node, xas->xa_offset);
}

/**
 * xas_next() - Move state to next index.
 * @xas: XArray operation state.
 *
 * If the @xas was in an error state, it will remain in an error state
 * and this function will return %NULL. If the @xas has never been walked,
 * it will have the effect of calling xas_load(). Otherwise one will be
 * added to the index and the state will be walked to the correct
 * location in the array for the next operation.
 *
 * If the iterator was referencing index %ULONG_MAX, this function wraps
 * around to 0.
 *
 * Return: The entry at the new index. This may be %NULL or an internal
 * entry.
 */
static inline void *xas_next(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (unlikely(xas_not_node(node) || node->shift ||
				xas->xa_offset == XA_CHUNK_MASK))
		return __xas_next(xas);

	xas->xa_index++;
	xas->xa_offset++;
	return xa_entry(xas->xa, node, xas->xa_offset);
}

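/*
 * Illustrative sketch (editorial addition): unlike xas_for_each(),
 * xas_next() visits every slot, including empty ones. Per the comment
 * above, the first call on a fresh xa_state behaves like xas_load();
 * each later call advances by one index:
 *
 *	XA_STATE(xas, &array, 0);
 *	void *entry;
 *	int i;
 *
 *	rcu_read_lock();
 *	for (i = 0; i < 16; i++) {
 *		entry = xas_next(&xas);
 *		// slot i: may be NULL or an internal entry; check before use
 *	}
 *	rcu_read_unlock();
 */
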
#endif /* _LINUX_XARRAY_H */