Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * drivers/base/devres.c - device resource management
4 : *
5 : * Copyright (c) 2006 SUSE Linux Products GmbH
6 : * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
7 : */
8 :
9 : #include <linux/device.h>
10 : #include <linux/module.h>
11 : #include <linux/slab.h>
12 : #include <linux/percpu.h>
13 :
14 : #include <asm/sections.h>
15 :
16 : #include "base.h"
17 : #include "trace.h"
18 :
/*
 * Bookkeeping header shared by every managed resource.
 * @entry:   links the node into dev->devres_head
 * @release: callback invoked with the payload on teardown
 * @name:    debug identifier, set via set_node_dbginfo()
 * @size:    payload size for debug/trace output
 */
struct devres_node {
	struct list_head entry;
	dr_release_t release;
	const char *name;
	size_t size;
};
25 :
/* A devres_node immediately followed by the caller-visible payload. */
struct devres {
	struct devres_node node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_DMA_MINALIGN for data[] which will force the same
	 * alignment for struct devres when allocated by kmalloc().
	 */
	u8 __aligned(ARCH_DMA_MINALIGN) data[];
};
37 :
/*
 * A devres group is delimited by two marker nodes: node[0] is the
 * opening marker, node[1] the closing one.  @id identifies the group;
 * @color is scratch state used only by remove_nodes() while scanning.
 */
struct devres_group {
	struct devres_node node[2];
	void *id;
	int color;
	/* -- 8 pointers */
};
44 :
/* Record the debug name and size reported by devres_log() for @node. */
static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}
51 :
#ifdef CONFIG_DEBUG_DEVRES
/* Writable "log" module parameter: when non-zero, dump every op to dmesg. */
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

/* Print one devres operation (ADD/REM/REL/...) for @node on @dev. */
static void devres_dbg(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
			op, node, node->name, node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define devres_dbg(dev, node, op) do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */
66 :
/*
 * Record a devres operation: always emits the devres_log tracepoint,
 * and additionally logs to dmesg when CONFIG_DEBUG_DEVRES logging is on.
 */
static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	trace_devres_log(dev, op, node, node->name, node->size);
	devres_dbg(dev, node, op);
}
73 :
/*
 * Release functions for devres group. These callbacks are used only
 * for identification: node_to_group() and find_group() compare node
 * release pointers against these addresses to recognize group markers.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}
87 :
88 : static struct devres_group * node_to_group(struct devres_node *node)
89 : {
90 5 : if (node->release == &group_open_release)
91 : return container_of(node, struct devres_group, node[0]);
92 5 : if (node->release == &group_close_release)
93 0 : return container_of(node, struct devres_group, node[1]);
94 : return NULL;
95 : }
96 :
/*
 * Compute the total allocation size (header + payload) for a devres of
 * @size payload bytes into *@tot_size.  Returns false if the addition
 * would overflow SIZE_MAX; otherwise rounds the total up to the actual
 * kmalloc bucket size so the full allocation is usable.
 */
static bool check_dr_size(size_t size, size_t *tot_size)
{
	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres),
					size, tot_size)))
		return false;

	/* Actually allocate the full kmalloc bucket size. */
	*tot_size = kmalloc_size_roundup(*tot_size);

	return true;
}
109 :
/*
 * Allocate a struct devres with @size payload bytes on NUMA node @nid.
 * Only the header is cleared explicitly; when @gfp contains __GFP_ZERO
 * the allocator has already zeroed the whole chunk.  The node's entry
 * list head is initialized and @release recorded; the node is NOT yet
 * attached to any device (see add_dr()).
 */
static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	if (!check_dr_size(size, &tot_size))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	/* No need to clear memory twice */
	if (!(gfp & __GFP_ZERO))
		memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}
131 :
/*
 * Append @node to @dev's resource list.  Callers hold dev->devres_lock
 * (every caller in this file takes it around this call).  The node must
 * not already be on a list.
 */
static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}
138 :
/*
 * Replace @old with @new in-place on @dev's resource list, preserving
 * the release ordering position.  Caller holds dev->devres_lock; @new
 * must not already be on a list.  Used by devm_krealloc().
 */
static void replace_dr(struct device *dev,
		       struct devres_node *old, struct devres_node *new)
{
	devres_log(dev, old, "REPLACE");
	BUG_ON(!list_empty(&new->entry));
	list_replace(&old->entry, &new->entry);
}
146 :
147 : /**
148 : * __devres_alloc_node - Allocate device resource data
149 : * @release: Release function devres will be associated with
150 : * @size: Allocation size
151 : * @gfp: Allocation flags
152 : * @nid: NUMA node
153 : * @name: Name of the resource
154 : *
155 : * Allocate devres of @size bytes. The allocated area is zeroed, then
156 : * associated with @release. The returned pointer can be passed to
157 : * other devres_*() functions.
158 : *
159 : * RETURNS:
160 : * Pointer to allocated devres on success, NULL on failure.
161 : */
162 5 : void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
163 : const char *name)
164 : {
165 : struct devres *dr;
166 :
167 10 : dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
168 5 : if (unlikely(!dr))
169 : return NULL;
170 10 : set_node_dbginfo(&dr->node, name, size);
171 5 : return dr->data;
172 : }
173 : EXPORT_SYMBOL_GPL(__devres_alloc_node);
174 :
/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * Note: @match and @fn are invoked with dev->devres_lock held and
 * interrupts disabled, so they must not sleep.
 *
 * RETURNS:
 * void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	/* Walk newest-to-oldest; _safe lets @fn remove the current entry. */
	list_for_each_entry_safe_reverse(node, tmp,
					 &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);
216 :
217 : /**
218 : * devres_free - Free device resource data
219 : * @res: Pointer to devres data to free
220 : *
221 : * Free devres created with devres_alloc().
222 : */
223 0 : void devres_free(void *res)
224 : {
225 0 : if (res) {
226 0 : struct devres *dr = container_of(res, struct devres, data);
227 :
228 0 : BUG_ON(!list_empty(&dr->node.entry));
229 0 : kfree(dr);
230 : }
231 0 : }
232 : EXPORT_SYMBOL_GPL(devres_free);
233 :
/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev. @res should have been allocated
 * using devres_alloc(). On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	/* Serialize against concurrent list modification. */
	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);
253 :
/*
 * Find the most recently added devres of @dev associated with @release
 * and matching (@match, @match_data); a NULL @match matches everything.
 * Caller holds dev->devres_lock.  Returns NULL when nothing matches.
 */
static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	/* Reverse walk so the latest registration wins. */
	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}
271 :
272 : /**
273 : * devres_find - Find device resource
274 : * @dev: Device to lookup resource from
275 : * @release: Look for resources associated with this release function
276 : * @match: Match function (optional)
277 : * @match_data: Data for the match function
278 : *
279 : * Find the latest devres of @dev which is associated with @release
280 : * and for which @match returns 1. If @match is NULL, it's considered
281 : * to match all.
282 : *
283 : * RETURNS:
284 : * Pointer to found devres, NULL if not found.
285 : */
286 0 : void * devres_find(struct device *dev, dr_release_t release,
287 : dr_match_t match, void *match_data)
288 : {
289 : struct devres *dr;
290 : unsigned long flags;
291 :
292 0 : spin_lock_irqsave(&dev->devres_lock, flags);
293 0 : dr = find_dr(dev, release, match, match_data);
294 0 : spin_unlock_irqrestore(&dev->devres_lock, flags);
295 :
296 0 : if (dr)
297 0 : return dr->data;
298 : return NULL;
299 : }
300 : EXPORT_SYMBOL_GPL(devres_find);
301 :
/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match return 1. If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void * devres_get(struct device *dev, void *new_res,
		  dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		/* Not found: register @new_res and keep it (don't free). */
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	/* Found case: new_res is still set and gets freed here. */
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);
336 :
337 : /**
338 : * devres_remove - Find a device resource and remove it
339 : * @dev: Device to find resource from
340 : * @release: Look for resources associated with this release function
341 : * @match: Match function (optional)
342 : * @match_data: Data for the match function
343 : *
344 : * Find the latest devres of @dev associated with @release and for
345 : * which @match returns 1. If @match is NULL, it's considered to
346 : * match all. If found, the resource is removed atomically and
347 : * returned.
348 : *
349 : * RETURNS:
350 : * Pointer to removed devres on success, NULL if not found.
351 : */
352 0 : void * devres_remove(struct device *dev, dr_release_t release,
353 : dr_match_t match, void *match_data)
354 : {
355 : struct devres *dr;
356 : unsigned long flags;
357 :
358 0 : spin_lock_irqsave(&dev->devres_lock, flags);
359 0 : dr = find_dr(dev, release, match, match_data);
360 0 : if (dr) {
361 0 : list_del_init(&dr->node.entry);
362 0 : devres_log(dev, &dr->node, "REM");
363 : }
364 0 : spin_unlock_irqrestore(&dev->devres_lock, flags);
365 :
366 0 : if (dr)
367 0 : return dr->data;
368 : return NULL;
369 : }
370 : EXPORT_SYMBOL_GPL(devres_remove);
371 :
372 : /**
373 : * devres_destroy - Find a device resource and destroy it
374 : * @dev: Device to find resource from
375 : * @release: Look for resources associated with this release function
376 : * @match: Match function (optional)
377 : * @match_data: Data for the match function
378 : *
379 : * Find the latest devres of @dev associated with @release and for
380 : * which @match returns 1. If @match is NULL, it's considered to
381 : * match all. If found, the resource is removed atomically and freed.
382 : *
383 : * Note that the release function for the resource will not be called,
384 : * only the devres-allocated data will be freed. The caller becomes
385 : * responsible for freeing any other data.
386 : *
387 : * RETURNS:
388 : * 0 if devres is found and freed, -ENOENT if not found.
389 : */
390 0 : int devres_destroy(struct device *dev, dr_release_t release,
391 : dr_match_t match, void *match_data)
392 : {
393 : void *res;
394 :
395 0 : res = devres_remove(dev, release, match, match_data);
396 0 : if (unlikely(!res))
397 : return -ENOENT;
398 :
399 0 : devres_free(res);
400 0 : return 0;
401 : }
402 : EXPORT_SYMBOL_GPL(devres_destroy);
403 :
404 :
405 : /**
406 : * devres_release - Find a device resource and destroy it, calling release
407 : * @dev: Device to find resource from
408 : * @release: Look for resources associated with this release function
409 : * @match: Match function (optional)
410 : * @match_data: Data for the match function
411 : *
412 : * Find the latest devres of @dev associated with @release and for
413 : * which @match returns 1. If @match is NULL, it's considered to
414 : * match all. If found, the resource is removed atomically, the
415 : * release function called and the resource freed.
416 : *
417 : * RETURNS:
418 : * 0 if devres is found and freed, -ENOENT if not found.
419 : */
420 0 : int devres_release(struct device *dev, dr_release_t release,
421 : dr_match_t match, void *match_data)
422 : {
423 : void *res;
424 :
425 0 : res = devres_remove(dev, release, match, match_data);
426 0 : if (unlikely(!res))
427 : return -ENOENT;
428 :
429 0 : (*release)(dev, res);
430 0 : devres_free(res);
431 0 : return 0;
432 : }
433 : EXPORT_SYMBOL_GPL(devres_release);
434 :
/*
 * Detach all nodes in [first, end) from @dev's resource list onto
 * @todo, handling group markers correctly.  Caller holds
 * dev->devres_lock.  Returns the number of regular (non-group) devres
 * entries moved.
 */
static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	struct devres_node *node, *n;
	int cnt = 0, nr_groups = 0;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	/* Only group markers remain in the range once the first pass ran. */
	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them. A group gets
	 * color value of two iff the group is wholly contained in
	 * [current node, end). That is, for a closed group, both opening
	 * and closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		/* One increment per marker seen; +1 extra if never closed. */
		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update current node or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}
495 :
/*
 * Invoke the release callback for every node on @todo (in reverse,
 * i.e. newest-first) and free the backing memory.  Called without
 * dev->devres_lock held, so release callbacks may sleep.
 */
static void release_nodes(struct device *dev, struct list_head *todo)
{
	struct devres *dr, *tmp;

	/* Release. Note that both devres and devres_group are
	 * handled as devres in the following loop. This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}
}
509 :
/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev. This function is
 * called on driver detach.
 *
 * RETURNS:
 * The number of released non-group resources, 0 if the list was
 * empty, or -ENODEV for an uninitialized device structure.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;

	/* Nothing to release if list is empty */
	if (list_empty(&dev->devres_head))
		return 0;

	/* Detach everything under the lock, then release unlocked so
	 * that release callbacks are free to sleep.
	 */
	spin_lock_irqsave(&dev->devres_lock, flags);
	cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	release_nodes(dev, &todo);
	return cnt;
}
538 :
/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id. For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended. If @id is NULL, address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	/* The two marker nodes are identified by their release callbacks. */
	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	/* Default to the group's own address as a unique ID. */
	grp->id = grp;
	if (id)
		grp->id = id;

	/* Only the opening marker is registered now; the closing marker
	 * is added by devres_close_group().
	 */
	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);
577 :
/* Find devres group with ID @id. If @id is NULL, look for the latest
 * still-open group (one whose closing marker is not yet registered).
 * Caller holds dev->devres_lock.
 */
static struct devres_group * find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	/* Reverse walk: most recently opened group is found first. */
	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}
600 :
/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id. If @id is NULL, the latest open
 * group is selected.  Closing registers the group's closing marker;
 * a WARN is emitted if no matching group exists.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);
625 :
/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id. If @id is NULL, the latest
 * open group is selected. Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		/* Unlink both markers; resources inside the group stay. */
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* grp may be NULL here; kfree(NULL) is a no-op. */
	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);
655 :
/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id. If @id is
 * NULL, the latest open group is selected. The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	LIST_HEAD(todo);
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		/* For a closed group, stop just past the closing marker;
		 * for an open one, go to the end of the list.
		 */
		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = remove_nodes(dev, first, end, &todo);
		spin_unlock_irqrestore(&dev->devres_lock, flags);

		/* Run release callbacks without the lock held. */
		release_nodes(dev, &todo);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);
697 :
/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

/* Payload for devm_add_action(): the callback plus its argument. */
struct action_devres {
	void *data;
	void (*action)(void *);
};
707 :
708 0 : static int devm_action_match(struct device *dev, void *res, void *p)
709 : {
710 0 : struct action_devres *devres = res;
711 0 : struct action_devres *target = p;
712 :
713 0 : return devres->action == target->action &&
714 0 : devres->data == target->data;
715 : }
716 :
/* Devres release callback: run the user's action with its data. */
static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}
723 :
/**
 * __devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 * @name: Name of the resource (for debugging purposes)
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 *
 * RETURNS:
 * 0 on success, -ENOMEM if the action devres could not be allocated.
 */
int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name)
{
	struct action_devres *devres;

	devres = __devres_alloc_node(devm_action_release, sizeof(struct action_devres),
				     GFP_KERNEL, NUMA_NO_NODE, name);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(__devm_add_action);
750 :
/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 * The action is NOT executed (contrast devm_release_action()); a WARN
 * fires if no matching entry exists.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	/* Stack-local key used only for matching, never registered. */
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);
771 :
/**
 * devm_release_action() - release previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Releases and removes instance of @action previously added by
 * devm_add_action(). Both action and data should match one of the
 * existing entries.  Unlike devm_remove_action(), the action IS
 * executed before the entry is freed; a WARN fires if none matches.
 */
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
	/* Stack-local key used only for matching, never registered. */
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
			       &devres));

}
EXPORT_SYMBOL_GPL(devm_release_action);
794 :
/*
 * Managed kmalloc/kfree
 */

/* No extra teardown needed: the devres chunk itself IS the allocation. */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

/* Match by payload address, as handed out to the devm_kmalloc() caller. */
static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}
807 :
808 : /**
809 : * devm_kmalloc - Resource-managed kmalloc
810 : * @dev: Device to allocate memory for
811 : * @size: Allocation size
812 : * @gfp: Allocation gfp flags
813 : *
814 : * Managed kmalloc. Memory allocated with this function is
815 : * automatically freed on driver detach. Like all other devres
816 : * resources, guaranteed alignment is unsigned long long.
817 : *
818 : * RETURNS:
819 : * Pointer to allocated memory on success, NULL on failure.
820 : */
821 0 : void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
822 : {
823 : struct devres *dr;
824 :
825 0 : if (unlikely(!size))
826 : return ZERO_SIZE_PTR;
827 :
828 : /* use raw alloc_dr for kmalloc caller tracing */
829 0 : dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
830 0 : if (unlikely(!dr))
831 : return NULL;
832 :
833 : /*
834 : * This is named devm_kzalloc_release for historical reasons
835 : * The initial implementation did not support kmalloc, only kzalloc
836 : */
837 0 : set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
838 0 : devres_add(dev, dr->data);
839 0 : return dr->data;
840 : }
841 : EXPORT_SYMBOL_GPL(devm_kmalloc);
842 :
/**
 * devm_krealloc - Resource-managed krealloc()
 * @dev: Device to re-allocate memory for
 * @ptr: Pointer to the memory chunk to re-allocate
 * @new_size: New allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed krealloc(). Resizes the memory chunk allocated with devm_kmalloc().
 * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
 * it's the equivalent of devm_kmalloc(). If new_size is zero, it frees the
 * previously allocated memory and returns ZERO_SIZE_PTR. This function doesn't
 * change the order in which the release callback for the re-alloc'ed devres
 * will be called (except when falling back to devm_kmalloc() or when freeing
 * resources when new_size is zero). The contents of the memory are preserved
 * up to the lesser of new and old sizes.
 */
void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
{
	size_t total_new_size, total_old_size;
	struct devres *old_dr, *new_dr;
	unsigned long flags;

	if (unlikely(!new_size)) {
		devm_kfree(dev, ptr);
		return ZERO_SIZE_PTR;
	}

	if (unlikely(ZERO_OR_NULL_PTR(ptr)))
		return devm_kmalloc(dev, new_size, gfp);

	if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
		/*
		 * We cannot reliably realloc a const string returned by
		 * devm_kstrdup_const().
		 */
		return NULL;

	if (!check_dr_size(new_size, &total_new_size))
		return NULL;

	/* ksize() reports the usable size of the whole devres chunk. */
	total_old_size = ksize(container_of(ptr, struct devres, data));
	if (total_old_size == 0) {
		WARN(1, "Pointer doesn't point to dynamically allocated memory.");
		return NULL;
	}

	/*
	 * If new size is smaller or equal to the actual number of bytes
	 * allocated previously - just return the same pointer.
	 */
	if (total_new_size <= total_old_size)
		return ptr;

	/*
	 * Otherwise: allocate new, larger chunk. We need to allocate before
	 * taking the lock as most probably the caller uses GFP_KERNEL.
	 */
	new_dr = alloc_dr(devm_kmalloc_release,
			  total_new_size, gfp, dev_to_node(dev));
	if (!new_dr)
		return NULL;

	/*
	 * The spinlock protects the linked list against concurrent
	 * modifications but not the resource itself.
	 */
	spin_lock_irqsave(&dev->devres_lock, flags);

	old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
	if (!old_dr) {
		spin_unlock_irqrestore(&dev->devres_lock, flags);
		kfree(new_dr);
		WARN(1, "Memory chunk not managed or managed by a different device.");
		return NULL;
	}

	/* Swap in-place so the release ordering position is preserved. */
	replace_dr(dev, &old_dr->node, &new_dr->node);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/*
	 * We can copy the memory contents after releasing the lock as we're
	 * no longer modifying the list links.
	 */
	memcpy(new_dr->data, old_dr->data,
	       total_old_size - offsetof(struct devres, data));
	/*
	 * Same for releasing the old devres - it's now been removed from the
	 * list. This is also the reason why we must not use devm_kfree() - the
	 * links are no longer valid.
	 */
	kfree(old_dr);

	return new_dr->data;
}
EXPORT_SYMBOL_GPL(devm_krealloc);
939 :
940 : /**
941 : * devm_kstrdup - Allocate resource managed space and
942 : * copy an existing string into that.
943 : * @dev: Device to allocate memory for
944 : * @s: the string to duplicate
945 : * @gfp: the GFP mask used in the devm_kmalloc() call when
946 : * allocating memory
947 : * RETURNS:
948 : * Pointer to allocated string on success, NULL on failure.
949 : */
950 0 : char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
951 : {
952 : size_t size;
953 : char *buf;
954 :
955 0 : if (!s)
956 : return NULL;
957 :
958 0 : size = strlen(s) + 1;
959 0 : buf = devm_kmalloc(dev, size, gfp);
960 0 : if (buf)
961 0 : memcpy(buf, s, size);
962 : return buf;
963 : }
964 : EXPORT_SYMBOL_GPL(devm_kstrdup);
965 :
/**
 * devm_kstrdup_const - resource managed conditional string duplication
 * @dev: device for which to duplicate the string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings allocated by devm_kstrdup_const will be automatically freed when
 * the associated device is detached.
 *
 * RETURNS:
 * Source string if it is in .rodata section otherwise it falls back to
 * devm_kstrdup.
 */
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
	/* .rodata strings live forever; no copy or tracking needed. */
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);
987 :
/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *                   into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	/* First pass on a copy of @ap measures the required length. */
	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	/* NOTE(review): a negative vsnprintf() return would wrap to a huge
	 * unsigned len here - presumably relied on never to happen with
	 * kernel vsnprintf; confirm before changing.
	 */
	p = devm_kmalloc(dev, len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);
1019 :
1020 : /**
1021 : * devm_kasprintf - Allocate resource managed space and format a string
1022 : * into that.
1023 : * @dev: Device to allocate memory for
1024 : * @gfp: the GFP mask used in the devm_kmalloc() call when
1025 : * allocating memory
1026 : * @fmt: The printf()-style format string
1027 : * @...: Arguments for the format string
1028 : * RETURNS:
1029 : * Pointer to allocated string on success, NULL on failure.
1030 : */
1031 0 : char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
1032 : {
1033 : va_list ap;
1034 : char *p;
1035 :
1036 0 : va_start(ap, fmt);
1037 0 : p = devm_kvasprintf(dev, gfp, fmt, ap);
1038 0 : va_end(ap);
1039 :
1040 0 : return p;
1041 : }
1042 : EXPORT_SYMBOL_GPL(devm_kasprintf);
1043 :
1044 : /**
1045 : * devm_kfree - Resource-managed kfree
1046 : * @dev: Device this memory belongs to
1047 : * @p: Memory to free
1048 : *
1049 : * Free memory allocated with devm_kmalloc().
1050 : */
1051 0 : void devm_kfree(struct device *dev, const void *p)
1052 : {
1053 : int rc;
1054 :
1055 : /*
1056 : * Special cases: pointer to a string in .rodata returned by
1057 : * devm_kstrdup_const() or NULL/ZERO ptr.
1058 : */
1059 0 : if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
1060 : return;
1061 :
1062 0 : rc = devres_destroy(dev, devm_kmalloc_release,
1063 : devm_kmalloc_match, (void *)p);
1064 0 : WARN_ON(rc);
1065 : }
1066 : EXPORT_SYMBOL_GPL(devm_kfree);
1067 :
1068 : /**
1069 : * devm_kmemdup - Resource-managed kmemdup
1070 : * @dev: Device this memory belongs to
1071 : * @src: Memory region to duplicate
1072 : * @len: Memory region length
1073 : * @gfp: GFP mask to use
1074 : *
1075 : * Duplicate region of a memory using resource managed kmalloc
1076 : */
1077 0 : void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
1078 : {
1079 : void *p;
1080 :
1081 0 : p = devm_kmalloc(dev, len, gfp);
1082 0 : if (p)
1083 0 : memcpy(p, src, len);
1084 :
1085 0 : return p;
1086 : }
1087 : EXPORT_SYMBOL_GPL(devm_kmemdup);
1088 :
/* Bookkeeping for one devm-managed __get_free_pages() allocation. */
struct pages_devres {
	unsigned long addr;	/* start address of the page run */
	unsigned int order;	/* allocation is (1 << order) pages */
};

/* devres match callback: @p is a template holding the address to find. */
static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *cur = res;
	struct pages_devres *tmpl = p;

	return cur->addr == tmpl->addr;
}
1101 :
1102 0 : static void devm_pages_release(struct device *dev, void *res)
1103 : {
1104 0 : struct pages_devres *devres = res;
1105 :
1106 0 : free_pages(devres->addr, devres->order);
1107 0 : }
1108 :
1109 : /**
1110 : * devm_get_free_pages - Resource-managed __get_free_pages
1111 : * @dev: Device to allocate memory for
1112 : * @gfp_mask: Allocation gfp flags
1113 : * @order: Allocation size is (1 << order) pages
1114 : *
1115 : * Managed get_free_pages. Memory allocated with this function is
1116 : * automatically freed on driver detach.
1117 : *
1118 : * RETURNS:
1119 : * Address of allocated memory on success, 0 on failure.
1120 : */
1121 :
1122 0 : unsigned long devm_get_free_pages(struct device *dev,
1123 : gfp_t gfp_mask, unsigned int order)
1124 : {
1125 : struct pages_devres *devres;
1126 : unsigned long addr;
1127 :
1128 0 : addr = __get_free_pages(gfp_mask, order);
1129 :
1130 0 : if (unlikely(!addr))
1131 : return 0;
1132 :
1133 0 : devres = devres_alloc(devm_pages_release,
1134 : sizeof(struct pages_devres), GFP_KERNEL);
1135 0 : if (unlikely(!devres)) {
1136 0 : free_pages(addr, order);
1137 0 : return 0;
1138 : }
1139 :
1140 0 : devres->addr = addr;
1141 0 : devres->order = order;
1142 :
1143 0 : devres_add(dev, devres);
1144 0 : return addr;
1145 : }
1146 : EXPORT_SYMBOL_GPL(devm_get_free_pages);
1147 :
1148 : /**
1149 : * devm_free_pages - Resource-managed free_pages
1150 : * @dev: Device this memory belongs to
1151 : * @addr: Memory to free
1152 : *
1153 : * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
1154 : * there is no need to supply the @order.
1155 : */
1156 0 : void devm_free_pages(struct device *dev, unsigned long addr)
1157 : {
1158 0 : struct pages_devres devres = { .addr = addr };
1159 :
1160 0 : WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
1161 : &devres));
1162 0 : }
1163 : EXPORT_SYMBOL_GPL(devm_free_pages);
1164 :
1165 0 : static void devm_percpu_release(struct device *dev, void *pdata)
1166 : {
1167 : void __percpu *p;
1168 :
1169 0 : p = *(void __percpu **)pdata;
1170 0 : free_percpu(p);
1171 0 : }
1172 :
/*
 * devres match callback for devm_free_percpu(): @data is the devres
 * payload (data[] of the enclosing struct devres, which stores the
 * percpu pointer) and @p is the percpu pointer being searched for.
 */
static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	/* Recover the enclosing struct devres from its data[] member. */
	struct devres *devr = container_of(data, struct devres, data);

	/* Payload holds exactly one pointer: the percpu area itself. */
	return *(void **)devr->data == p;
}
1179 :
1180 : /**
1181 : * __devm_alloc_percpu - Resource-managed alloc_percpu
1182 : * @dev: Device to allocate per-cpu memory for
1183 : * @size: Size of per-cpu memory to allocate
1184 : * @align: Alignment of per-cpu memory to allocate
1185 : *
1186 : * Managed alloc_percpu. Per-cpu memory allocated with this function is
1187 : * automatically freed on driver detach.
1188 : *
1189 : * RETURNS:
1190 : * Pointer to allocated memory on success, NULL on failure.
1191 : */
1192 0 : void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
1193 : size_t align)
1194 : {
1195 : void *p;
1196 : void __percpu *pcpu;
1197 :
1198 0 : pcpu = __alloc_percpu(size, align);
1199 0 : if (!pcpu)
1200 : return NULL;
1201 :
1202 0 : p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
1203 0 : if (!p) {
1204 0 : free_percpu(pcpu);
1205 0 : return NULL;
1206 : }
1207 :
1208 0 : *(void __percpu **)p = pcpu;
1209 :
1210 0 : devres_add(dev, p);
1211 :
1212 0 : return pcpu;
1213 : }
1214 : EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
1215 :
1216 : /**
1217 : * devm_free_percpu - Resource-managed free_percpu
1218 : * @dev: Device this memory belongs to
1219 : * @pdata: Per-cpu memory to free
1220 : *
1221 : * Free memory allocated with devm_alloc_percpu().
1222 : */
1223 0 : void devm_free_percpu(struct device *dev, void __percpu *pdata)
1224 : {
1225 0 : WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
1226 : (__force void *)pdata));
1227 0 : }
1228 : EXPORT_SYMBOL_GPL(devm_free_percpu);
|