// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device. It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple. The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages. Each page in the page_list is split into blocks of at
 * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page. Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
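
/*
 * Illustrative sketch of that free list (added for exposition, not part
 * of the original file): with 'size' = 256 and 'allocation' = 4096, a
 * freshly initialised page stores the offset of the next free block in
 * the first bytes of each block, so the chain reads
 *
 *	page->offset = 0 -> 256 -> 512 -> ... -> 3840 -> 4096
 *
 * and page->offset == allocation means the page has no free blocks left.
 */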

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR_RO(pools);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory. Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives. The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary. This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
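
/*
 * Example usage (an illustrative sketch, not part of this file; 'dev' is
 * a hypothetical struct device owned by the caller): create a pool of
 * 64-byte blocks, 16-byte aligned, none of which may cross a 4KiB
 * boundary:
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("descriptors", dev, 64, 16, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 */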

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;

		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
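
/*
 * Worked example of the boundary handling above (illustrative): with
 * size = 96, boundary = 256 and allocation = 4096, the offsets chained
 * through the page run 0, 96, 256, 352, 512, and so on; the block that
 * would start at 192 is skipped because it would straddle the 256-byte
 * boundary, which is exactly what the (next + pool->size) check detects.
 */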

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
					pool->name, page->vaddr);
			else
				pr_err("%s %s, %p busy\n", __func__,
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
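
/*
 * Example teardown (an illustrative sketch): a driver returns every block
 * with dma_pool_free() and only then destroys the pool, typically from
 * its remove path:
 *
 *	dma_pool_free(pool, cpu_addr, dma_handle);
 *	dma_pool_destroy(pool);
 */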

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
					__func__, pool->name, retval);
			else
				pr_err("%s %s, %p (corrupted)\n",
				       __func__, pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				       data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
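
/*
 * Example usage (an illustrative sketch; 'pool' comes from the
 * dma_pool_create() sketch earlier): allocate one zeroed block and
 * retrieve its DMA address:
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL | __GFP_ZERO, &dma_handle);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 */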

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p/%pad (bad dma)\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p (bad vaddr)/%pad\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;

		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "%s %s, dma %pad already free\n",
					__func__, pool->name, &dma);
			else
				pr_err("%s %s, dma %pad already free\n",
				       __func__, pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
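
/*
 * Example usage (an illustrative sketch, matching the dma_pool_alloc()
 * sketch above): hand the block back once the hardware is done with it:
 *
 *	dma_pool_free(pool, cpu_addr, dma_handle);
 */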

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create(). DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
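
/*
 * Example usage (an illustrative sketch; 'pdev' is a hypothetical
 * platform or PCI device): a pool created in probe() with
 * dmam_pool_create() needs no matching destroy on the error or remove
 * paths, since devres tears it down on driver detach:
 *
 *	pool = dmam_pool_create("rx_bufs", &pdev->dev, 2048, 64, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */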

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);