Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0-only */
2 : /*
3 : * include/linux/idr.h
4 : *
5 : * 2002-10-18 written by Jim Houston jim.houston@ccur.com
6 : * Copyright (C) 2002 by Concurrent Computer Corporation
7 : *
8 : * Small id to pointer translation service avoiding fixed sized
9 : * tables.
10 : */
11 :
12 : #ifndef __IDR_H__
13 : #define __IDR_H__
14 :
15 : #include <linux/radix-tree.h>
16 : #include <linux/gfp.h>
17 : #include <linux/percpu.h>
18 :
struct idr {
	struct radix_tree_root idr_rt;	/* the tree backing the id -> pointer map */
	unsigned int idr_base;		/* lowest ID handed out (see idr_init_base()) */
	unsigned int idr_next;		/* cursor used by idr_alloc_cyclic() */
};
24 :
/*
 * The IDR API does not expose the tagging functionality of the radix tree
 * to users. Use tag 0 to track whether a node has free space below it.
 */
#define IDR_FREE 0

/* Set the IDR flag and the IDR_FREE tag */
#define IDR_RT_MARKER (ROOT_IS_IDR | (__force gfp_t) \
(1 << (ROOT_TAG_SHIFT + IDR_FREE)))

/**
 * IDR_INIT_BASE() - Initialise an IDR with a given base.
 * @name: Name of IDR.
 * @base: Lowest ID which will be allocated.
 *
 * Static initialiser; idr_init_base() is the runtime equivalent.
 */
#define IDR_INIT_BASE(name, base) { \
.idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER), \
.idr_base = (base), \
.idr_next = 0, \
}

/**
 * IDR_INIT() - Initialise an IDR.
 * @name: Name of IDR.
 *
 * A freshly-initialised IDR contains no IDs.
 */
#define IDR_INIT(name) IDR_INIT_BASE(name, 0)

/**
 * DEFINE_IDR() - Define a statically-allocated IDR.
 * @name: Name of IDR.
 *
 * An IDR defined using this macro is ready for use with no additional
 * initialisation required. It contains no IDs.
 */
#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
57 :
58 : /**
59 : * idr_get_cursor - Return the current position of the cyclic allocator
60 : * @idr: idr handle
61 : *
62 : * The value returned is the value that will be next returned from
63 : * idr_alloc_cyclic() if it is free (otherwise the search will start from
64 : * this position).
65 : */
66 : static inline unsigned int idr_get_cursor(const struct idr *idr)
67 : {
68 348 : return READ_ONCE(idr->idr_next);
69 : }
70 :
71 : /**
72 : * idr_set_cursor - Set the current position of the cyclic allocator
73 : * @idr: idr handle
74 : * @val: new position
75 : *
76 : * The next call to idr_alloc_cyclic() will return @val if it is free
77 : * (otherwise the search will start from this position).
78 : */
79 : static inline void idr_set_cursor(struct idr *idr, unsigned int val)
80 : {
81 0 : WRITE_ONCE(idr->idr_next, val);
82 : }
83 :
/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() is able to be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */

/* Locking wrappers; these take the xa_lock embedded in the radix tree root. */
#define idr_lock(idr) xa_lock(&(idr)->idr_rt)
#define idr_unlock(idr) xa_unlock(&(idr)->idr_rt)
#define idr_lock_bh(idr) xa_lock_bh(&(idr)->idr_rt)
#define idr_unlock_bh(idr) xa_unlock_bh(&(idr)->idr_rt)
#define idr_lock_irq(idr) xa_lock_irq(&(idr)->idr_rt)
#define idr_unlock_irq(idr) xa_unlock_irq(&(idr)->idr_rt)
#define idr_lock_irqsave(idr, flags) \
xa_lock_irqsave(&(idr)->idr_rt, flags)
#define idr_unlock_irqrestore(idr, flags) \
xa_unlock_irqrestore(&(idr)->idr_rt, flags)

void idr_preload(gfp_t gfp_mask);

/* Out-of-line IDR API; kernel-doc lives with the definitions. */
int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
unsigned long max, gfp_t);
int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
void *idr_remove(struct idr *, unsigned long id);
void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ul(struct idr *, unsigned long *nextid);
void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);
126 :
127 : /**
128 : * idr_init_base() - Initialise an IDR.
129 : * @idr: IDR handle.
130 : * @base: The base value for the IDR.
131 : *
132 : * This variation of idr_init() creates an IDR which will allocate IDs
133 : * starting at %base.
134 : */
135 : static inline void idr_init_base(struct idr *idr, int base)
136 : {
137 74 : INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
138 37 : idr->idr_base = base;
139 37 : idr->idr_next = 0;
140 : }
141 :
/**
 * idr_init() - Initialise an IDR.
 * @idr: IDR handle.
 *
 * Initialise a dynamically allocated IDR. To initialise a
 * statically allocated IDR, use DEFINE_IDR().
 */
static inline void idr_init(struct idr *idr)
{
	/* Equivalent to idr_init_base() with the default base of 0. */
	idr_init_base(idr, 0);
}
153 :
/**
 * idr_is_empty() - Are there any IDs allocated?
 * @idr: IDR handle.
 *
 * Return: %true if no IDs are currently allocated in this IDR, i.e. the
 * tree holds no entries and the root still carries the IDR_FREE tag.
 * (The previous comment claimed the opposite sense; the name and the
 * checks below show this tests for emptiness.)
 */
static inline bool idr_is_empty(const struct idr *idr)
{
	return radix_tree_empty(&idr->idr_rt) &&
		radix_tree_tagged(&idr->idr_rt, IDR_FREE);
}
165 :
/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function. See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
	/* Drops the per-CPU lock taken by idr_preload(). */
	local_unlock(&radix_tree_preloads.lock);
}
176 :
/**
 * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL. This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idr, entry, id) \
for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)

/**
 * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor.
 * @tmp: A temporary placeholder for ID.
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL. This
 * is convenient for a "not found" value.
 *
 * The "tmp <= id" condition stops the loop if incrementing @id wrapped
 * past ULONG_MAX back to 0.
 */
#define idr_for_each_entry_ul(idr, entry, tmp, id) \
for (tmp = 0, id = 0; \
tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
tmp = id, ++id)

/**
 * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
 * @idr: IDR handle.
 * @entry: The type * to use as a cursor.
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
 */
#define idr_for_each_entry_continue(idr, entry, id) \
for ((entry) = idr_get_next((idr), &(id)); \
entry; \
++id, (entry) = idr_get_next((idr), &(id)))

/**
 * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type
 * @idr: IDR handle.
 * @entry: The type * to use as a cursor.
 * @tmp: A temporary placeholder for ID.
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
 * As with idr_for_each_entry_ul(), @tmp guards against @id wrap-around.
 */
#define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \
for (tmp = id; \
tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
tmp = id, ++id)
232 :
/*
 * IDA - ID Allocator, use when translation from id to pointer isn't necessary.
 */
#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8)

/* A fixed-size bitmap covering IDA_BITMAP_BITS IDs. */
struct ida_bitmap {
	unsigned long bitmap[IDA_BITMAP_LONGS];
};

struct ida {
	struct xarray xa;	/* allocated IDs are tracked in an XArray */
};

#define IDA_INIT_FLAGS (XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)

/* Static initialiser for an IDA; ida_init() is the runtime equivalent. */
#define IDA_INIT(name) { \
.xa = XARRAY_INIT(name, IDA_INIT_FLAGS) \
}
#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)

/* Out-of-line IDA API; kernel-doc lives with the definitions. */
int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);
258 :
259 : /**
260 : * ida_alloc() - Allocate an unused ID.
261 : * @ida: IDA handle.
262 : * @gfp: Memory allocation flags.
263 : *
264 : * Allocate an ID between 0 and %INT_MAX, inclusive.
265 : *
266 : * Context: Any context. It is safe to call this function without
267 : * locking in your code.
268 : * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
269 : * or %-ENOSPC if there are no free IDs.
270 : */
271 : static inline int ida_alloc(struct ida *ida, gfp_t gfp)
272 : {
273 32 : return ida_alloc_range(ida, 0, ~0, gfp);
274 : }
275 :
276 : /**
277 : * ida_alloc_min() - Allocate an unused ID.
278 : * @ida: IDA handle.
279 : * @min: Lowest ID to allocate.
280 : * @gfp: Memory allocation flags.
281 : *
282 : * Allocate an ID between @min and %INT_MAX, inclusive.
283 : *
284 : * Context: Any context. It is safe to call this function without
285 : * locking in your code.
286 : * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
287 : * or %-ENOSPC if there are no free IDs.
288 : */
289 : static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
290 : {
291 12 : return ida_alloc_range(ida, min, ~0, gfp);
292 : }
293 :
294 : /**
295 : * ida_alloc_max() - Allocate an unused ID.
296 : * @ida: IDA handle.
297 : * @max: Highest ID to allocate.
298 : * @gfp: Memory allocation flags.
299 : *
300 : * Allocate an ID between 0 and @max, inclusive.
301 : *
302 : * Context: Any context. It is safe to call this function without
303 : * locking in your code.
304 : * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
305 : * or %-ENOSPC if there are no free IDs.
306 : */
307 : static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
308 : {
309 13 : return ida_alloc_range(ida, 0, max, gfp);
310 : }
311 :
/**
 * ida_init() - Initialise a dynamically allocated IDA.
 * @ida: IDA handle.
 *
 * To initialise a statically allocated IDA, use DEFINE_IDA().
 */
static inline void ida_init(struct ida *ida)
{
	xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}
316 :
/*
 * ida_simple_get() and ida_simple_remove() are deprecated. Use
 * ida_alloc() and ida_free() instead respectively.
 *
 * Note the off-by-one: the legacy "end" was exclusive, while
 * ida_alloc_range()'s "max" is inclusive, hence (end) - 1.
 */
#define ida_simple_get(ida, start, end, gfp) \
ida_alloc_range(ida, start, (end) - 1, gfp)
#define ida_simple_remove(ida, id) ida_free(ida, id)
324 :
/**
 * ida_is_empty() - Are there any IDs allocated?
 * @ida: IDA handle.
 *
 * Return: %true if no IDs are currently allocated.
 */
static inline bool ida_is_empty(const struct ida *ida)
{
	return xa_empty(&ida->xa);
}
329 : #endif /* __IDR_H__ */
|