Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * drivers/base/devres.c - device resource management
4 : *
5 : * Copyright (c) 2006 SUSE Linux Products GmbH
6 : * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
7 : */
8 :
9 : #include <linux/device.h>
10 : #include <linux/module.h>
11 : #include <linux/slab.h>
12 : #include <linux/percpu.h>
13 :
14 : #include <asm/sections.h>
15 :
16 : #include "base.h"
17 : #include "trace.h"
18 :
/* Common bookkeeping embedded in every managed resource. */
struct devres_node {
	struct list_head entry;		/* link in dev->devres_head */
	dr_release_t release;		/* called on teardown; also used to identify group markers */
	const char *name;		/* debug name, see set_node_dbginfo() */
	size_t size;			/* payload size for debug/trace output */
};
25 :
/* A regular managed resource: node header followed by caller payload. */
struct devres {
	struct devres_node node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};
37 :
/*
 * A devres group: node[0] is the opening marker, node[1] the closing
 * marker.  Both are linked into the device's devres list to bracket
 * the member resources.
 */
struct devres_group {
	struct devres_node node[2];
	void *id;	/* caller-supplied ID, or the group itself if NULL was passed */
	int color;	/* scratch state used by remove_nodes() second pass */
	/* -- 8 pointers */
};
44 :
/* Record name/size on a node for trace and debug output. */
static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}
51 :
#ifdef CONFIG_DEBUG_DEVRES
/* Runtime switch for verbose devres logging (module param "log"). */
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

/* Emit one line per devres operation when logging is enabled. */
static void devres_dbg(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
			op, node, node->name, node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define devres_dbg(dev, node, op) do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */
66 :
/* Trace and (optionally) log a devres operation @op on @node. */
static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	trace_devres_log(dev, op, node, node->name, node->size);
	devres_dbg(dev, node, op);
}
73 :
/*
 * Release functions for devres group. These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

/*
 * Map a devres_node back to its enclosing devres_group, or return NULL
 * if the node is a regular devres entry.  Identification is purely by
 * the marker release callbacks installed in devres_open_group().
 */
static struct devres_group * node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}
96 :
/*
 * Compute the total allocation size (header + payload) for a devres of
 * @size bytes into *@tot_size.  Returns false if the addition would
 * overflow.  The result is rounded up to the actual kmalloc bucket size
 * so callers can make use of the full allocation.
 */
static bool check_dr_size(size_t size, size_t *tot_size)
{
	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres),
					size, tot_size)))
		return false;

	/* Actually allocate the full kmalloc bucket size. */
	*tot_size = kmalloc_size_roundup(*tot_size);

	return true;
}
109 :
/*
 * Allocate a devres with a @size-byte payload on NUMA node @nid and
 * initialize its node header.  Only the header is zeroed unless the
 * caller requested __GFP_ZERO.  Returns NULL on overflow or allocation
 * failure.  __always_inline so kmalloc caller tracking attributes the
 * allocation to the devres user, not to this helper.
 */
static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	if (!check_dr_size(size, &tot_size))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	/* No need to clear memory twice */
	if (!(gfp & __GFP_ZERO))
		memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}
131 :
/* Link @node at the tail of @dev's devres list.  Caller holds devres_lock. */
static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	/* A node being added must not already be on a list. */
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}
138 :
/*
 * Replace @old with @new in place on @dev's devres list, preserving the
 * release ordering position.  Caller holds devres_lock; @old is left
 * unlinked but not freed.
 */
static void replace_dr(struct device *dev,
		       struct devres_node *old, struct devres_node *new)
{
	devres_log(dev, old, "REPLACE");
	BUG_ON(!list_empty(&new->entry));
	list_replace(&old->entry, &new->entry);
}
146 :
147 : /**
148 : * __devres_alloc_node - Allocate device resource data
149 : * @release: Release function devres will be associated with
150 : * @size: Allocation size
151 : * @gfp: Allocation flags
152 : * @nid: NUMA node
153 : * @name: Name of the resource
154 : *
155 : * Allocate devres of @size bytes. The allocated area is zeroed, then
156 : * associated with @release. The returned pointer can be passed to
157 : * other devres_*() functions.
158 : *
159 : * RETURNS:
160 : * Pointer to allocated devres on success, NULL on failure.
161 : */
162 17 : void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
163 : const char *name)
164 : {
165 : struct devres *dr;
166 :
167 34 : dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
168 17 : if (unlikely(!dr))
169 : return NULL;
170 34 : set_node_dbginfo(&dr->node, name, size);
171 17 : return dr->data;
172 : }
173 : EXPORT_SYMBOL_GPL(__devres_alloc_node);
174 :
175 : /**
176 : * devres_for_each_res - Resource iterator
177 : * @dev: Device to iterate resource from
178 : * @release: Look for resources associated with this release function
179 : * @match: Match function (optional)
180 : * @match_data: Data for the match function
181 : * @fn: Function to be called for each matched resource.
182 : * @data: Data for @fn, the 3rd parameter of @fn
183 : *
184 : * Call @fn for each devres of @dev which is associated with @release
185 : * and for which @match returns 1.
186 : *
187 : * RETURNS:
188 : * void
189 : */
190 0 : void devres_for_each_res(struct device *dev, dr_release_t release,
191 : dr_match_t match, void *match_data,
192 : void (*fn)(struct device *, void *, void *),
193 : void *data)
194 : {
195 : struct devres_node *node;
196 : struct devres_node *tmp;
197 : unsigned long flags;
198 :
199 0 : if (!fn)
200 : return;
201 :
202 0 : spin_lock_irqsave(&dev->devres_lock, flags);
203 0 : list_for_each_entry_safe_reverse(node, tmp,
204 : &dev->devres_head, entry) {
205 0 : struct devres *dr = container_of(node, struct devres, node);
206 :
207 0 : if (node->release != release)
208 0 : continue;
209 0 : if (match && !match(dev, dr->data, match_data))
210 0 : continue;
211 0 : fn(dev, dr->data, data);
212 : }
213 0 : spin_unlock_irqrestore(&dev->devres_lock, flags);
214 : }
215 : EXPORT_SYMBOL_GPL(devres_for_each_res);
216 :
217 : /**
218 : * devres_free - Free device resource data
219 : * @res: Pointer to devres data to free
220 : *
221 : * Free devres created with devres_alloc().
222 : */
223 0 : void devres_free(void *res)
224 : {
225 0 : if (res) {
226 0 : struct devres *dr = container_of(res, struct devres, data);
227 :
228 0 : BUG_ON(!list_empty(&dr->node.entry));
229 0 : kfree(dr);
230 : }
231 0 : }
232 : EXPORT_SYMBOL_GPL(devres_free);
233 :
234 : /**
235 : * devres_add - Register device resource
236 : * @dev: Device to add resource to
237 : * @res: Resource to register
238 : *
239 : * Register devres @res to @dev. @res should have been allocated
240 : * using devres_alloc(). On driver detach, the associated release
241 : * function will be invoked and devres will be freed automatically.
242 : */
243 17 : void devres_add(struct device *dev, void *res)
244 : {
245 17 : struct devres *dr = container_of(res, struct devres, data);
246 : unsigned long flags;
247 :
248 17 : spin_lock_irqsave(&dev->devres_lock, flags);
249 17 : add_dr(dev, &dr->node);
250 34 : spin_unlock_irqrestore(&dev->devres_lock, flags);
251 17 : }
252 : EXPORT_SYMBOL_GPL(devres_add);
253 :
/*
 * Find the most recently added devres of @dev matching @release (and
 * @match, if given).  Searches newest-first.  Caller must hold
 * dev->devres_lock.  Returns NULL if nothing matches.
 */
static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}
271 :
272 : /**
273 : * devres_find - Find device resource
274 : * @dev: Device to lookup resource from
275 : * @release: Look for resources associated with this release function
276 : * @match: Match function (optional)
277 : * @match_data: Data for the match function
278 : *
279 : * Find the latest devres of @dev which is associated with @release
280 : * and for which @match returns 1. If @match is NULL, it's considered
281 : * to match all.
282 : *
283 : * RETURNS:
284 : * Pointer to found devres, NULL if not found.
285 : */
286 0 : void * devres_find(struct device *dev, dr_release_t release,
287 : dr_match_t match, void *match_data)
288 : {
289 : struct devres *dr;
290 : unsigned long flags;
291 :
292 0 : spin_lock_irqsave(&dev->devres_lock, flags);
293 0 : dr = find_dr(dev, release, match, match_data);
294 0 : spin_unlock_irqrestore(&dev->devres_lock, flags);
295 :
296 0 : if (dr)
297 0 : return dr->data;
298 : return NULL;
299 : }
300 : EXPORT_SYMBOL_GPL(devres_find);
301 :
302 : /**
303 : * devres_get - Find devres, if non-existent, add one atomically
304 : * @dev: Device to lookup or add devres for
305 : * @new_res: Pointer to new initialized devres to add if not found
306 : * @match: Match function (optional)
307 : * @match_data: Data for the match function
308 : *
309 : * Find the latest devres of @dev which has the same release function
310 : * as @new_res and for which @match return 1. If found, @new_res is
311 : * freed; otherwise, @new_res is added atomically.
312 : *
313 : * RETURNS:
314 : * Pointer to found or added devres.
315 : */
316 0 : void * devres_get(struct device *dev, void *new_res,
317 : dr_match_t match, void *match_data)
318 : {
319 0 : struct devres *new_dr = container_of(new_res, struct devres, data);
320 : struct devres *dr;
321 : unsigned long flags;
322 :
323 0 : spin_lock_irqsave(&dev->devres_lock, flags);
324 0 : dr = find_dr(dev, new_dr->node.release, match, match_data);
325 0 : if (!dr) {
326 0 : add_dr(dev, &new_dr->node);
327 0 : dr = new_dr;
328 0 : new_res = NULL;
329 : }
330 0 : spin_unlock_irqrestore(&dev->devres_lock, flags);
331 0 : devres_free(new_res);
332 :
333 0 : return dr->data;
334 : }
335 : EXPORT_SYMBOL_GPL(devres_get);
336 :
337 : /**
338 : * devres_remove - Find a device resource and remove it
339 : * @dev: Device to find resource from
340 : * @release: Look for resources associated with this release function
341 : * @match: Match function (optional)
342 : * @match_data: Data for the match function
343 : *
344 : * Find the latest devres of @dev associated with @release and for
345 : * which @match returns 1. If @match is NULL, it's considered to
346 : * match all. If found, the resource is removed atomically and
347 : * returned.
348 : *
349 : * RETURNS:
350 : * Pointer to removed devres on success, NULL if not found.
351 : */
352 0 : void * devres_remove(struct device *dev, dr_release_t release,
353 : dr_match_t match, void *match_data)
354 : {
355 : struct devres *dr;
356 : unsigned long flags;
357 :
358 0 : spin_lock_irqsave(&dev->devres_lock, flags);
359 0 : dr = find_dr(dev, release, match, match_data);
360 0 : if (dr) {
361 0 : list_del_init(&dr->node.entry);
362 0 : devres_log(dev, &dr->node, "REM");
363 : }
364 0 : spin_unlock_irqrestore(&dev->devres_lock, flags);
365 :
366 0 : if (dr)
367 0 : return dr->data;
368 : return NULL;
369 : }
370 : EXPORT_SYMBOL_GPL(devres_remove);
371 :
372 : /**
373 : * devres_destroy - Find a device resource and destroy it
374 : * @dev: Device to find resource from
375 : * @release: Look for resources associated with this release function
376 : * @match: Match function (optional)
377 : * @match_data: Data for the match function
378 : *
379 : * Find the latest devres of @dev associated with @release and for
380 : * which @match returns 1. If @match is NULL, it's considered to
381 : * match all. If found, the resource is removed atomically and freed.
382 : *
383 : * Note that the release function for the resource will not be called,
384 : * only the devres-allocated data will be freed. The caller becomes
385 : * responsible for freeing any other data.
386 : *
387 : * RETURNS:
388 : * 0 if devres is found and freed, -ENOENT if not found.
389 : */
390 0 : int devres_destroy(struct device *dev, dr_release_t release,
391 : dr_match_t match, void *match_data)
392 : {
393 : void *res;
394 :
395 0 : res = devres_remove(dev, release, match, match_data);
396 0 : if (unlikely(!res))
397 : return -ENOENT;
398 :
399 0 : devres_free(res);
400 0 : return 0;
401 : }
402 : EXPORT_SYMBOL_GPL(devres_destroy);
403 :
404 :
405 : /**
406 : * devres_release - Find a device resource and destroy it, calling release
407 : * @dev: Device to find resource from
408 : * @release: Look for resources associated with this release function
409 : * @match: Match function (optional)
410 : * @match_data: Data for the match function
411 : *
412 : * Find the latest devres of @dev associated with @release and for
413 : * which @match returns 1. If @match is NULL, it's considered to
414 : * match all. If found, the resource is removed atomically, the
415 : * release function called and the resource freed.
416 : *
417 : * RETURNS:
418 : * 0 if devres is found and freed, -ENOENT if not found.
419 : */
420 0 : int devres_release(struct device *dev, dr_release_t release,
421 : dr_match_t match, void *match_data)
422 : {
423 : void *res;
424 :
425 0 : res = devres_remove(dev, release, match, match_data);
426 0 : if (unlikely(!res))
427 : return -ENOENT;
428 :
429 0 : (*release)(dev, res);
430 0 : devres_free(res);
431 0 : return 0;
432 : }
433 : EXPORT_SYMBOL_GPL(devres_release);
434 :
/*
 * Detach devres nodes in [@first, @end) from @dev's devres list,
 * collecting the ones to release on @todo.
 *
 * Group markers need special treatment: a group is released only when
 * it is wholly contained in the range (both markers for a closed group,
 * just the open marker for a still-open group).  That containment is
 * decided with the two-pass "coloring" scheme below.
 *
 * Caller must hold dev->devres_lock.
 *
 * Returns the number of regular (non-group) devres entries moved to
 * @todo.
 */
static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	struct devres_node *node, *n;
	int cnt = 0, nr_groups = 0;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	/* Only group markers remain in the range now; done if none. */
	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them. A group gets
	 * color value of two iff the group is wholly contained in
	 * [current node, end). That is, for a closed group, both opening
	 * and closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	node = list_entry(first, struct devres_node, entry);
	list_for_each_entry_safe_from(node, n, end, entry) {
		struct devres_group *grp;

		grp = node_to_group(node);
		/* Pass one removed all non-markers, so this must be a group
		 * whose open marker is linked in. */
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		/* An open group (close marker never added) counts as
		 * contained with just its open marker. */
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update current node or end. The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}
495 :
/*
 * Invoke release callbacks for and free every node on @todo, newest
 * first (reverse of registration order).  Must be called without
 * dev->devres_lock held, since release callbacks may sleep.
 */
static void release_nodes(struct device *dev, struct list_head *todo)
{
	struct devres *dr, *tmp;

	/* Release. Note that both devres and devres_group are
	 * handled as devres in the following loop. This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}
}
509 :
510 : /**
511 : * devres_release_all - Release all managed resources
512 : * @dev: Device to release resources for
513 : *
514 : * Release all resources associated with @dev. This function is
515 : * called on driver detach.
516 : */
517 51 : int devres_release_all(struct device *dev)
518 : {
519 : unsigned long flags;
520 51 : LIST_HEAD(todo);
521 : int cnt;
522 :
523 : /* Looks like an uninitialized device structure */
524 51 : if (WARN_ON(dev->devres_head.next == NULL))
525 : return -ENODEV;
526 :
527 : /* Nothing to release if list is empty */
528 102 : if (list_empty(&dev->devres_head))
529 : return 0;
530 :
531 17 : spin_lock_irqsave(&dev->devres_lock, flags);
532 17 : cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
533 34 : spin_unlock_irqrestore(&dev->devres_lock, flags);
534 :
535 17 : release_nodes(dev, &todo);
536 17 : return cnt;
537 : }
538 :
539 : /**
540 : * devres_open_group - Open a new devres group
541 : * @dev: Device to open devres group for
542 : * @id: Separator ID
543 : * @gfp: Allocation flags
544 : *
545 : * Open a new devres group for @dev with @id. For @id, using a
546 : * pointer to an object which won't be used for another group is
547 : * recommended. If @id is NULL, address-wise unique ID is created.
548 : *
549 : * RETURNS:
550 : * ID of the new group, NULL on failure.
551 : */
552 0 : void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
553 : {
554 : struct devres_group *grp;
555 : unsigned long flags;
556 :
557 0 : grp = kmalloc(sizeof(*grp), gfp);
558 0 : if (unlikely(!grp))
559 : return NULL;
560 :
561 0 : grp->node[0].release = &group_open_release;
562 0 : grp->node[1].release = &group_close_release;
563 0 : INIT_LIST_HEAD(&grp->node[0].entry);
564 0 : INIT_LIST_HEAD(&grp->node[1].entry);
565 0 : set_node_dbginfo(&grp->node[0], "grp<", 0);
566 0 : set_node_dbginfo(&grp->node[1], "grp>", 0);
567 0 : grp->id = grp;
568 0 : if (id)
569 0 : grp->id = id;
570 :
571 0 : spin_lock_irqsave(&dev->devres_lock, flags);
572 0 : add_dr(dev, &grp->node[0]);
573 0 : spin_unlock_irqrestore(&dev->devres_lock, flags);
574 0 : return grp->id;
575 : }
576 : EXPORT_SYMBOL_GPL(devres_open_group);
577 :
/* Find devres group with ID @id. If @id is NULL, look for the latest.
 * Searches newest-first; "latest" with a NULL @id means the most
 * recently opened group that is still open (no close marker linked).
 * Caller must hold dev->devres_lock.
 */
static struct devres_group * find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		/* Only opening markers identify a group's position. */
		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}
600 :
601 : /**
602 : * devres_close_group - Close a devres group
603 : * @dev: Device to close devres group for
604 : * @id: ID of target group, can be NULL
605 : *
606 : * Close the group identified by @id. If @id is NULL, the latest open
607 : * group is selected.
608 : */
609 0 : void devres_close_group(struct device *dev, void *id)
610 : {
611 : struct devres_group *grp;
612 : unsigned long flags;
613 :
614 0 : spin_lock_irqsave(&dev->devres_lock, flags);
615 :
616 0 : grp = find_group(dev, id);
617 0 : if (grp)
618 0 : add_dr(dev, &grp->node[1]);
619 : else
620 0 : WARN_ON(1);
621 :
622 0 : spin_unlock_irqrestore(&dev->devres_lock, flags);
623 0 : }
624 : EXPORT_SYMBOL_GPL(devres_close_group);
625 :
626 : /**
627 : * devres_remove_group - Remove a devres group
628 : * @dev: Device to remove group for
629 : * @id: ID of target group, can be NULL
630 : *
631 : * Remove the group identified by @id. If @id is NULL, the latest
632 : * open group is selected. Note that removing a group doesn't affect
633 : * any other resources.
634 : */
635 0 : void devres_remove_group(struct device *dev, void *id)
636 : {
637 : struct devres_group *grp;
638 : unsigned long flags;
639 :
640 0 : spin_lock_irqsave(&dev->devres_lock, flags);
641 :
642 0 : grp = find_group(dev, id);
643 0 : if (grp) {
644 0 : list_del_init(&grp->node[0].entry);
645 0 : list_del_init(&grp->node[1].entry);
646 0 : devres_log(dev, &grp->node[0], "REM");
647 : } else
648 0 : WARN_ON(1);
649 :
650 0 : spin_unlock_irqrestore(&dev->devres_lock, flags);
651 :
652 0 : kfree(grp);
653 0 : }
654 : EXPORT_SYMBOL_GPL(devres_remove_group);
655 :
656 : /**
657 : * devres_release_group - Release resources in a devres group
658 : * @dev: Device to release group for
659 : * @id: ID of target group, can be NULL
660 : *
661 : * Release all resources in the group identified by @id. If @id is
662 : * NULL, the latest open group is selected. The selected group and
663 : * groups properly nested inside the selected group are removed.
664 : *
665 : * RETURNS:
666 : * The number of released non-group resources.
667 : */
668 0 : int devres_release_group(struct device *dev, void *id)
669 : {
670 : struct devres_group *grp;
671 : unsigned long flags;
672 0 : LIST_HEAD(todo);
673 0 : int cnt = 0;
674 :
675 0 : spin_lock_irqsave(&dev->devres_lock, flags);
676 :
677 0 : grp = find_group(dev, id);
678 0 : if (grp) {
679 0 : struct list_head *first = &grp->node[0].entry;
680 0 : struct list_head *end = &dev->devres_head;
681 :
682 0 : if (!list_empty(&grp->node[1].entry))
683 0 : end = grp->node[1].entry.next;
684 :
685 0 : cnt = remove_nodes(dev, first, end, &todo);
686 0 : spin_unlock_irqrestore(&dev->devres_lock, flags);
687 :
688 0 : release_nodes(dev, &todo);
689 : } else {
690 0 : WARN_ON(1);
691 0 : spin_unlock_irqrestore(&dev->devres_lock, flags);
692 : }
693 :
694 0 : return cnt;
695 : }
696 : EXPORT_SYMBOL_GPL(devres_release_group);
697 :
698 : /*
699 : * Custom devres actions allow inserting a simple function call
700 : * into the teardown sequence.
701 : */
702 :
703 : struct action_devres {
704 : void *data;
705 : void (*action)(void *);
706 : };
707 :
708 0 : static int devm_action_match(struct device *dev, void *res, void *p)
709 : {
710 0 : struct action_devres *devres = res;
711 0 : struct action_devres *target = p;
712 :
713 0 : return devres->action == target->action &&
714 0 : devres->data == target->data;
715 : }
716 :
/* Devres release callback: run the stored action on its argument. */
static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}
723 :
724 : /**
725 : * devm_add_action() - add a custom action to list of managed resources
726 : * @dev: Device that owns the action
727 : * @action: Function that should be called
728 : * @data: Pointer to data passed to @action implementation
729 : *
730 : * This adds a custom action to the list of managed resources so that
731 : * it gets executed as part of standard resource unwinding.
732 : */
733 17 : int devm_add_action(struct device *dev, void (*action)(void *), void *data)
734 : {
735 : struct action_devres *devres;
736 :
737 17 : devres = devres_alloc(devm_action_release,
738 : sizeof(struct action_devres), GFP_KERNEL);
739 17 : if (!devres)
740 : return -ENOMEM;
741 :
742 17 : devres->data = data;
743 17 : devres->action = action;
744 :
745 17 : devres_add(dev, devres);
746 17 : return 0;
747 : }
748 : EXPORT_SYMBOL_GPL(devm_add_action);
749 :
750 : /**
751 : * devm_remove_action() - removes previously added custom action
752 : * @dev: Device that owns the action
753 : * @action: Function implementing the action
754 : * @data: Pointer to data passed to @action implementation
755 : *
756 : * Removes instance of @action previously added by devm_add_action().
757 : * Both action and data should match one of the existing entries.
758 : */
759 0 : void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
760 : {
761 0 : struct action_devres devres = {
762 : .data = data,
763 : .action = action,
764 : };
765 :
766 0 : WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
767 : &devres));
768 0 : }
769 : EXPORT_SYMBOL_GPL(devm_remove_action);
770 :
771 : /**
772 : * devm_release_action() - release previously added custom action
773 : * @dev: Device that owns the action
774 : * @action: Function implementing the action
775 : * @data: Pointer to data passed to @action implementation
776 : *
777 : * Releases and removes instance of @action previously added by
778 : * devm_add_action(). Both action and data should match one of the
779 : * existing entries.
780 : */
781 0 : void devm_release_action(struct device *dev, void (*action)(void *), void *data)
782 : {
783 0 : struct action_devres devres = {
784 : .data = data,
785 : .action = action,
786 : };
787 :
788 0 : WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
789 : &devres));
790 :
791 0 : }
792 : EXPORT_SYMBOL_GPL(devm_release_action);
793 :
794 : /*
795 : * Managed kmalloc/kfree
796 : */
797 0 : static void devm_kmalloc_release(struct device *dev, void *res)
798 : {
799 : /* noop */
800 0 : }
801 :
802 0 : static int devm_kmalloc_match(struct device *dev, void *res, void *data)
803 : {
804 0 : return res == data;
805 : }
806 :
807 : /**
808 : * devm_kmalloc - Resource-managed kmalloc
809 : * @dev: Device to allocate memory for
810 : * @size: Allocation size
811 : * @gfp: Allocation gfp flags
812 : *
813 : * Managed kmalloc. Memory allocated with this function is
814 : * automatically freed on driver detach. Like all other devres
815 : * resources, guaranteed alignment is unsigned long long.
816 : *
817 : * RETURNS:
818 : * Pointer to allocated memory on success, NULL on failure.
819 : */
820 0 : void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
821 : {
822 : struct devres *dr;
823 :
824 0 : if (unlikely(!size))
825 : return ZERO_SIZE_PTR;
826 :
827 : /* use raw alloc_dr for kmalloc caller tracing */
828 0 : dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
829 0 : if (unlikely(!dr))
830 : return NULL;
831 :
832 : /*
833 : * This is named devm_kzalloc_release for historical reasons
834 : * The initial implementation did not support kmalloc, only kzalloc
835 : */
836 0 : set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
837 0 : devres_add(dev, dr->data);
838 0 : return dr->data;
839 : }
840 : EXPORT_SYMBOL_GPL(devm_kmalloc);
841 :
842 : /**
843 : * devm_krealloc - Resource-managed krealloc()
844 : * @dev: Device to re-allocate memory for
845 : * @ptr: Pointer to the memory chunk to re-allocate
846 : * @new_size: New allocation size
847 : * @gfp: Allocation gfp flags
848 : *
849 : * Managed krealloc(). Resizes the memory chunk allocated with devm_kmalloc().
850 : * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
851 : * it's the equivalent of devm_kmalloc(). If new_size is zero, it frees the
852 : * previously allocated memory and returns ZERO_SIZE_PTR. This function doesn't
853 : * change the order in which the release callback for the re-alloc'ed devres
854 : * will be called (except when falling back to devm_kmalloc() or when freeing
855 : * resources when new_size is zero). The contents of the memory are preserved
856 : * up to the lesser of new and old sizes.
857 : */
858 0 : void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
859 : {
860 : size_t total_new_size, total_old_size;
861 : struct devres *old_dr, *new_dr;
862 : unsigned long flags;
863 :
864 0 : if (unlikely(!new_size)) {
865 0 : devm_kfree(dev, ptr);
866 0 : return ZERO_SIZE_PTR;
867 : }
868 :
869 0 : if (unlikely(ZERO_OR_NULL_PTR(ptr)))
870 0 : return devm_kmalloc(dev, new_size, gfp);
871 :
872 0 : if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
873 : /*
874 : * We cannot reliably realloc a const string returned by
875 : * devm_kstrdup_const().
876 : */
877 : return NULL;
878 :
879 0 : if (!check_dr_size(new_size, &total_new_size))
880 : return NULL;
881 :
882 0 : total_old_size = ksize(container_of(ptr, struct devres, data));
883 0 : if (total_old_size == 0) {
884 0 : WARN(1, "Pointer doesn't point to dynamically allocated memory.");
885 0 : return NULL;
886 : }
887 :
888 : /*
889 : * If new size is smaller or equal to the actual number of bytes
890 : * allocated previously - just return the same pointer.
891 : */
892 0 : if (total_new_size <= total_old_size)
893 : return ptr;
894 :
895 : /*
896 : * Otherwise: allocate new, larger chunk. We need to allocate before
897 : * taking the lock as most probably the caller uses GFP_KERNEL.
898 : */
899 0 : new_dr = alloc_dr(devm_kmalloc_release,
900 : total_new_size, gfp, dev_to_node(dev));
901 0 : if (!new_dr)
902 : return NULL;
903 :
904 : /*
905 : * The spinlock protects the linked list against concurrent
906 : * modifications but not the resource itself.
907 : */
908 0 : spin_lock_irqsave(&dev->devres_lock, flags);
909 :
910 0 : old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
911 0 : if (!old_dr) {
912 0 : spin_unlock_irqrestore(&dev->devres_lock, flags);
913 0 : kfree(new_dr);
914 0 : WARN(1, "Memory chunk not managed or managed by a different device.");
915 0 : return NULL;
916 : }
917 :
918 0 : replace_dr(dev, &old_dr->node, &new_dr->node);
919 :
920 0 : spin_unlock_irqrestore(&dev->devres_lock, flags);
921 :
922 : /*
923 : * We can copy the memory contents after releasing the lock as we're
924 : * no longer modifying the list links.
925 : */
926 0 : memcpy(new_dr->data, old_dr->data,
927 : total_old_size - offsetof(struct devres, data));
928 : /*
929 : * Same for releasing the old devres - it's now been removed from the
930 : * list. This is also the reason why we must not use devm_kfree() - the
931 : * links are no longer valid.
932 : */
933 0 : kfree(old_dr);
934 :
935 0 : return new_dr->data;
936 : }
937 : EXPORT_SYMBOL_GPL(devm_krealloc);
938 :
939 : /**
940 : * devm_kstrdup - Allocate resource managed space and
941 : * copy an existing string into that.
942 : * @dev: Device to allocate memory for
943 : * @s: the string to duplicate
944 : * @gfp: the GFP mask used in the devm_kmalloc() call when
945 : * allocating memory
946 : * RETURNS:
947 : * Pointer to allocated string on success, NULL on failure.
948 : */
949 0 : char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
950 : {
951 : size_t size;
952 : char *buf;
953 :
954 0 : if (!s)
955 : return NULL;
956 :
957 0 : size = strlen(s) + 1;
958 0 : buf = devm_kmalloc(dev, size, gfp);
959 0 : if (buf)
960 0 : memcpy(buf, s, size);
961 : return buf;
962 : }
963 : EXPORT_SYMBOL_GPL(devm_kstrdup);
964 :
965 : /**
966 : * devm_kstrdup_const - resource managed conditional string duplication
967 : * @dev: device for which to duplicate the string
968 : * @s: the string to duplicate
969 : * @gfp: the GFP mask used in the kmalloc() call when allocating memory
970 : *
971 : * Strings allocated by devm_kstrdup_const will be automatically freed when
972 : * the associated device is detached.
973 : *
974 : * RETURNS:
975 : * Source string if it is in .rodata section otherwise it falls back to
976 : * devm_kstrdup.
977 : */
978 0 : const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
979 : {
980 0 : if (is_kernel_rodata((unsigned long)s))
981 : return s;
982 :
983 0 : return devm_kstrdup(dev, s, gfp);
984 : }
985 : EXPORT_SYMBOL_GPL(devm_kstrdup_const);
986 :
987 : /**
988 : * devm_kvasprintf - Allocate resource managed space and format a string
989 : * into that.
990 : * @dev: Device to allocate memory for
991 : * @gfp: the GFP mask used in the devm_kmalloc() call when
992 : * allocating memory
993 : * @fmt: The printf()-style format string
994 : * @ap: Arguments for the format string
995 : * RETURNS:
996 : * Pointer to allocated string on success, NULL on failure.
997 : */
998 0 : char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
999 : va_list ap)
1000 : {
1001 : unsigned int len;
1002 : char *p;
1003 : va_list aq;
1004 :
1005 0 : va_copy(aq, ap);
1006 0 : len = vsnprintf(NULL, 0, fmt, aq);
1007 0 : va_end(aq);
1008 :
1009 0 : p = devm_kmalloc(dev, len+1, gfp);
1010 0 : if (!p)
1011 : return NULL;
1012 :
1013 0 : vsnprintf(p, len+1, fmt, ap);
1014 :
1015 0 : return p;
1016 : }
1017 : EXPORT_SYMBOL(devm_kvasprintf);
1018 :
1019 : /**
1020 : * devm_kasprintf - Allocate resource managed space and format a string
1021 : * into that.
1022 : * @dev: Device to allocate memory for
1023 : * @gfp: the GFP mask used in the devm_kmalloc() call when
1024 : * allocating memory
1025 : * @fmt: The printf()-style format string
1026 : * @...: Arguments for the format string
1027 : * RETURNS:
1028 : * Pointer to allocated string on success, NULL on failure.
1029 : */
1030 0 : char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
1031 : {
1032 : va_list ap;
1033 : char *p;
1034 :
1035 0 : va_start(ap, fmt);
1036 0 : p = devm_kvasprintf(dev, gfp, fmt, ap);
1037 0 : va_end(ap);
1038 :
1039 0 : return p;
1040 : }
1041 : EXPORT_SYMBOL_GPL(devm_kasprintf);
1042 :
1043 : /**
1044 : * devm_kfree - Resource-managed kfree
1045 : * @dev: Device this memory belongs to
1046 : * @p: Memory to free
1047 : *
1048 : * Free memory allocated with devm_kmalloc().
1049 : */
1050 0 : void devm_kfree(struct device *dev, const void *p)
1051 : {
1052 : int rc;
1053 :
1054 : /*
1055 : * Special cases: pointer to a string in .rodata returned by
1056 : * devm_kstrdup_const() or NULL/ZERO ptr.
1057 : */
1058 0 : if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
1059 : return;
1060 :
1061 0 : rc = devres_destroy(dev, devm_kmalloc_release,
1062 : devm_kmalloc_match, (void *)p);
1063 0 : WARN_ON(rc);
1064 : }
1065 : EXPORT_SYMBOL_GPL(devm_kfree);
1066 :
1067 : /**
1068 : * devm_kmemdup - Resource-managed kmemdup
1069 : * @dev: Device this memory belongs to
1070 : * @src: Memory region to duplicate
1071 : * @len: Memory region length
1072 : * @gfp: GFP mask to use
1073 : *
1074 : * Duplicate region of a memory using resource managed kmalloc
1075 : */
1076 0 : void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
1077 : {
1078 : void *p;
1079 :
1080 0 : p = devm_kmalloc(dev, len, gfp);
1081 0 : if (p)
1082 0 : memcpy(p, src, len);
1083 :
1084 0 : return p;
1085 : }
1086 : EXPORT_SYMBOL_GPL(devm_kmemdup);
1087 :
/* devres payload tracking one __get_free_pages() allocation */
struct pages_devres {
	unsigned long addr;	/* start address returned by __get_free_pages() */
	unsigned int order;	/* allocation size is (1 << order) pages */
};
1092 :
1093 0 : static int devm_pages_match(struct device *dev, void *res, void *p)
1094 : {
1095 0 : struct pages_devres *devres = res;
1096 0 : struct pages_devres *target = p;
1097 :
1098 0 : return devres->addr == target->addr;
1099 : }
1100 :
1101 0 : static void devm_pages_release(struct device *dev, void *res)
1102 : {
1103 0 : struct pages_devres *devres = res;
1104 :
1105 0 : free_pages(devres->addr, devres->order);
1106 0 : }
1107 :
1108 : /**
1109 : * devm_get_free_pages - Resource-managed __get_free_pages
1110 : * @dev: Device to allocate memory for
1111 : * @gfp_mask: Allocation gfp flags
1112 : * @order: Allocation size is (1 << order) pages
1113 : *
1114 : * Managed get_free_pages. Memory allocated with this function is
1115 : * automatically freed on driver detach.
1116 : *
1117 : * RETURNS:
1118 : * Address of allocated memory on success, 0 on failure.
1119 : */
1120 :
1121 0 : unsigned long devm_get_free_pages(struct device *dev,
1122 : gfp_t gfp_mask, unsigned int order)
1123 : {
1124 : struct pages_devres *devres;
1125 : unsigned long addr;
1126 :
1127 0 : addr = __get_free_pages(gfp_mask, order);
1128 :
1129 0 : if (unlikely(!addr))
1130 : return 0;
1131 :
1132 0 : devres = devres_alloc(devm_pages_release,
1133 : sizeof(struct pages_devres), GFP_KERNEL);
1134 0 : if (unlikely(!devres)) {
1135 0 : free_pages(addr, order);
1136 0 : return 0;
1137 : }
1138 :
1139 0 : devres->addr = addr;
1140 0 : devres->order = order;
1141 :
1142 0 : devres_add(dev, devres);
1143 0 : return addr;
1144 : }
1145 : EXPORT_SYMBOL_GPL(devm_get_free_pages);
1146 :
1147 : /**
1148 : * devm_free_pages - Resource-managed free_pages
1149 : * @dev: Device this memory belongs to
1150 : * @addr: Memory to free
1151 : *
1152 : * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
1153 : * there is no need to supply the @order.
1154 : */
1155 0 : void devm_free_pages(struct device *dev, unsigned long addr)
1156 : {
1157 0 : struct pages_devres devres = { .addr = addr };
1158 :
1159 0 : WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
1160 : &devres));
1161 0 : }
1162 : EXPORT_SYMBOL_GPL(devm_free_pages);
1163 :
1164 0 : static void devm_percpu_release(struct device *dev, void *pdata)
1165 : {
1166 : void __percpu *p;
1167 :
1168 0 : p = *(void __percpu **)pdata;
1169 0 : free_percpu(p);
1170 0 : }
1171 :
1172 0 : static int devm_percpu_match(struct device *dev, void *data, void *p)
1173 : {
1174 0 : struct devres *devr = container_of(data, struct devres, data);
1175 :
1176 0 : return *(void **)devr->data == p;
1177 : }
1178 :
1179 : /**
1180 : * __devm_alloc_percpu - Resource-managed alloc_percpu
1181 : * @dev: Device to allocate per-cpu memory for
1182 : * @size: Size of per-cpu memory to allocate
1183 : * @align: Alignment of per-cpu memory to allocate
1184 : *
1185 : * Managed alloc_percpu. Per-cpu memory allocated with this function is
1186 : * automatically freed on driver detach.
1187 : *
1188 : * RETURNS:
1189 : * Pointer to allocated memory on success, NULL on failure.
1190 : */
1191 0 : void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
1192 : size_t align)
1193 : {
1194 : void *p;
1195 : void __percpu *pcpu;
1196 :
1197 0 : pcpu = __alloc_percpu(size, align);
1198 0 : if (!pcpu)
1199 : return NULL;
1200 :
1201 0 : p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
1202 0 : if (!p) {
1203 0 : free_percpu(pcpu);
1204 0 : return NULL;
1205 : }
1206 :
1207 0 : *(void __percpu **)p = pcpu;
1208 :
1209 0 : devres_add(dev, p);
1210 :
1211 0 : return pcpu;
1212 : }
1213 : EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
1214 :
1215 : /**
1216 : * devm_free_percpu - Resource-managed free_percpu
1217 : * @dev: Device this memory belongs to
1218 : * @pdata: Per-cpu memory to free
1219 : *
1220 : * Free memory allocated with devm_alloc_percpu().
1221 : */
1222 0 : void devm_free_percpu(struct device *dev, void __percpu *pdata)
1223 : {
1224 0 : WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
1225 : (__force void *)pdata));
1226 0 : }
1227 : EXPORT_SYMBOL_GPL(devm_free_percpu);
|