// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks across all pages.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
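
/*
 * A minimal usage sketch (hypothetical driver code, error handling
 * trimmed): create a pool, allocate and free same-sized DMA-consistent
 * blocks from it, then destroy it:
 *
 *        struct dma_pool *pool;
 *        dma_addr_t dma;
 *        void *buf;
 *
 *        pool = dma_pool_create("mydev-desc", dev, 64, 8, 0);
 *        buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *        ... use 'buf' from the CPU, hand 'dma' to the device ...
 *        dma_pool_free(pool, buf, dma);
 *        dma_pool_destroy(pool);
 */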

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

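/*
 * While a block sits on the free list, its first bytes are reused to
 * hold this link; allocated blocks carry no per-block metadata at all.
 */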
struct dma_block {
        struct dma_block *next_block;
        dma_addr_t dma;
};

struct dma_pool {                /* the pool */
        struct list_head page_list;
        spinlock_t lock;
        struct dma_block *next_block;
        size_t nr_blocks;
        size_t nr_active;
        size_t nr_pages;
        struct device *dev;
        unsigned int size;
        unsigned int allocation;
        unsigned int boundary;
        char name[32];
        struct list_head pools;
};

struct dma_page {                /* cacheable header for 'allocation' bytes */
        struct list_head page_list;
        void *vaddr;
        dma_addr_t dma;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_pool *pool;
        unsigned int size;

        size = sysfs_emit(buf, "poolinfo - 0.1\n");

        mutex_lock(&pools_lock);
        list_for_each_entry(pool, &dev->dma_pools, pools) {
                /* per-pool info, no real statistics yet */
                size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2zu\n",
                                      pool->name, pool->nr_active,
                                      pool->nr_blocks, pool->size,
                                      pool->nr_pages);
        }
        mutex_unlock(&pools_lock);

        return size;
}

static DEVICE_ATTR_RO(pools);
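
/*
 * Reading the resulting "pools" attribute produces one line per pool in
 * the format above: name, active blocks, total blocks, block size and
 * page count.  Hypothetical sample output (numbers ours, consistent
 * with a 64-byte pool spanning two PAGE_SIZE pages):
 *
 *        poolinfo - 0.1
 *        mydev-desc         12  128   64  2
 */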

#ifdef DMAPOOL_DEBUG
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
                             gfp_t mem_flags)
{
        u8 *data = (void *)block;
        int i;

        for (i = sizeof(struct dma_block); i < pool->size; i++) {
                if (data[i] == POOL_POISON_FREED)
                        continue;
                dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
                        pool->name, block);

                /*
                 * Dump the whole block, starting from its first byte,
                 * even though some of it may still hold the expected
                 * POOL_POISON_FREED pattern.
                 */
                print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
                               data, pool->size, 1);
                break;
        }

        if (!want_init_on_alloc(mem_flags))
                memset(block, POOL_POISON_ALLOCATED, pool->size);
}

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
        struct dma_page *page;

        list_for_each_entry(page, &pool->page_list, page_list) {
                if (dma < page->dma)
                        continue;
                if ((dma - page->dma) < pool->allocation)
                        return page;
        }
        return NULL;
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
        struct dma_block *block = pool->next_block;
        struct dma_page *page;

        page = pool_find_page(pool, dma);
        if (!page) {
                dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
                        __func__, pool->name, vaddr, &dma);
                return true;
        }

        while (block) {
                if (block != vaddr) {
                        block = block->next_block;
                        continue;
                }
                dev_err(pool->dev, "%s %s, dma %pad already free\n",
                        __func__, pool->name, &dma);
                return true;
        }

        memset(vaddr, POOL_POISON_FREED, pool->size);
        return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
        memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
}
#else
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
                             gfp_t mem_flags)
{
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
        if (want_init_on_free())
                memset(vaddr, 0, pool->size);
        return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
}
#endif

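/*
 * Pop one block off the pool-wide free list and account it as active.
 * Caller must hold pool->lock.
 */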
static struct dma_block *pool_block_pop(struct dma_pool *pool)
{
        struct dma_block *block = pool->next_block;

        if (block) {
                pool->next_block = block->next_block;
                pool->nr_active++;
        }
        return block;
}

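/*
 * Push a freed block back onto the pool-wide free list, remembering the
 * DMA address it will report when popped again.  Caller must hold
 * pool->lock.
 */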
static void pool_block_push(struct dma_pool *pool, struct dma_block *block,
                            dma_addr_t dma)
{
        block->dma = dma;
        block->next_block = pool->next_block;
        pool->next_block = block;
}

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc() may be used to allocate
 * memory.  Such memory will all have "consistent" DMA mappings,
 * accessible by the device and its driver without using cache flushing
 * primitives.  The actual size of blocks allocated may be larger than
 * requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not
 * crossing 4 KiB boundaries.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
                                 size_t size, size_t align, size_t boundary)
{
        struct dma_pool *retval;
        size_t allocation;
        bool empty;

        if (!dev)
                return NULL;

        if (align == 0)
                align = 1;
        else if (align & (align - 1))
                return NULL;

        if (size == 0 || size > INT_MAX)
                return NULL;
        if (size < sizeof(struct dma_block))
                size = sizeof(struct dma_block);

        size = ALIGN(size, align);
        allocation = max_t(size_t, size, PAGE_SIZE);

        if (!boundary)
                boundary = allocation;
        else if ((boundary < size) || (boundary & (boundary - 1)))
                return NULL;

        boundary = min(boundary, allocation);

        retval = kzalloc(sizeof(*retval), GFP_KERNEL);
        if (!retval)
                return retval;

        strscpy(retval->name, name, sizeof(retval->name));

        retval->dev = dev;

        INIT_LIST_HEAD(&retval->page_list);
        spin_lock_init(&retval->lock);
        retval->size = size;
        retval->boundary = boundary;
        retval->allocation = allocation;
        INIT_LIST_HEAD(&retval->pools);

        /*
         * pools_lock ensures that the ->dma_pools list does not get
         * corrupted.  pools_reg_lock ensures that there is no race between
         * dma_pool_create() and dma_pool_destroy(), or within
         * dma_pool_create() itself, where a first invocation fails on
         * device_create_file() while a second one assumes the file has
         * already been created.  The window is short, but it exists.
         */
        mutex_lock(&pools_reg_lock);
        mutex_lock(&pools_lock);
        empty = list_empty(&dev->dma_pools);
        list_add(&retval->pools, &dev->dma_pools);
        mutex_unlock(&pools_lock);
        if (empty) {
                int err;

                err = device_create_file(dev, &dev_attr_pools);
                if (err) {
                        mutex_lock(&pools_lock);
                        list_del(&retval->pools);
                        mutex_unlock(&pools_lock);
                        mutex_unlock(&pools_reg_lock);
                        kfree(retval);
                        return NULL;
                }
        }
        mutex_unlock(&pools_reg_lock);
        return retval;
}
EXPORT_SYMBOL(dma_pool_create);
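
/*
 * Example call (hypothetical driver, names ours): a pool of 32-byte
 * blocks, 16-byte aligned, that never cross a 4 KiB boundary:
 *
 *        struct dma_pool *desc_pool;
 *
 *        desc_pool = dma_pool_create("mydev-desc", &pdev->dev, 32, 16, 4096);
 *        if (!desc_pool)
 *                return -ENOMEM;
 */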

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
        unsigned int next_boundary = pool->boundary, offset = 0;
        struct dma_block *block, *first = NULL, *last = NULL;

        pool_init_page(pool, page);
        while (offset + pool->size <= pool->allocation) {
                if (offset + pool->size > next_boundary) {
                        offset = next_boundary;
                        next_boundary += pool->boundary;
                        continue;
                }

                block = page->vaddr + offset;
                block->dma = page->dma + offset;
                block->next_block = NULL;

                if (last)
                        last->next_block = block;
                else
                        first = block;
                last = block;

                offset += pool->size;
                pool->nr_blocks++;
        }

        last->next_block = pool->next_block;
        pool->next_block = first;

        list_add(&page->page_list, &pool->page_list);
        pool->nr_pages++;
}
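
/*
 * Worked example for the carving loop above: with allocation = 4096,
 * size = 96 and boundary = 1024, blocks start at offsets 0, 96, ...,
 * 864 within each 1024-byte region (864 + 96 = 960 still fits, while
 * 960 + 96 = 1056 would cross the boundary), so each region holds 10
 * blocks and the page yields 4 * 10 = 40 blocks, wasting 64 bytes per
 * region.
 */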
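/*
 * The page header is kmalloc'd separately from the coherent buffer it
 * describes, keeping the bookkeeping in ordinary cacheable memory.
 */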
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
        struct dma_page *page;

        page = kmalloc(sizeof(*page), mem_flags);
        if (!page)
                return NULL;

        page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
                                         &page->dma, mem_flags);
        if (!page->vaddr) {
                kfree(page);
                return NULL;
        }

        return page;
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: not in_interrupt()
 *
 * The caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
        struct dma_page *page, *tmp;
        bool empty, busy = false;

        if (unlikely(!pool))
                return;

        mutex_lock(&pools_reg_lock);
        mutex_lock(&pools_lock);
        list_del(&pool->pools);
        empty = list_empty(&pool->dev->dma_pools);
        mutex_unlock(&pools_lock);
        if (empty)
                device_remove_file(pool->dev, &dev_attr_pools);
        mutex_unlock(&pools_reg_lock);

        if (pool->nr_active) {
                dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);
                busy = true;
        }

        list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
                if (!busy)
                        dma_free_coherent(pool->dev, pool->allocation,
                                          page->vaddr, page->dma);
                list_del(&page->page_list);
                kfree(page);
        }

        kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
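
/*
 * Note on the "busy" path above: if blocks are still outstanding, the
 * coherent memory is deliberately leaked rather than freed, since the
 * device may still be using it; only the bookkeeping is torn down.
 */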

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
                     dma_addr_t *handle)
{
        struct dma_block *block;
        struct dma_page *page;
        unsigned long flags;

        might_alloc(mem_flags);

        spin_lock_irqsave(&pool->lock, flags);
        block = pool_block_pop(pool);
        if (!block) {
                /*
                 * pool_alloc_page() might sleep, so temporarily drop
                 * &pool->lock.
                 */
                spin_unlock_irqrestore(&pool->lock, flags);

                page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
                if (!page)
                        return NULL;

                spin_lock_irqsave(&pool->lock, flags);
                pool_initialise_page(pool, page);
                block = pool_block_pop(pool);
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        *handle = block->dma;
        pool_check_block(pool, block, mem_flags);
        if (want_init_on_alloc(mem_flags))
                memset(block, 0, pool->size);

        return block;
}
EXPORT_SYMBOL(dma_pool_alloc);
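
/*
 * Zeroing note: __GFP_ZERO is masked off for the page allocation above
 * and honoured per-block via want_init_on_alloc(), so freshly reused
 * blocks get zeroed too.  Callers who always want zeroed memory can use
 * the dma_pool_zalloc() wrapper from <linux/dmapool.h>:
 *
 *        buf = dma_pool_zalloc(pool, GFP_KERNEL, &dma);
 */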

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
        struct dma_block *block = vaddr;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        if (!pool_block_err(pool, vaddr, dma)) {
                pool_block_push(pool, block, dma);
                pool->nr_active--;
        }
        spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
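
/*
 * Example pairing (hypothetical, error handling trimmed): the dma handle
 * returned by dma_pool_alloc() must be passed back to dma_pool_free():
 *
 *        dma_addr_t dma;
 *        void *buf = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
 *
 *        if (buf) {
 *                ... program the device with 'dma' ...
 *                dma_pool_free(pool, buf, dma);
 *        }
 */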

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
        struct dma_pool *pool = *(struct dma_pool **)res;

        dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
        return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
                                  size_t size, size_t align, size_t allocation)
{
        struct dma_pool **ptr, *pool;

        ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
        if (pool)
                devres_add(dev, ptr);
        else
                devres_free(ptr);

        return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
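
/*
 * Example (hypothetical probe function): with the managed variant there
 * is no need to call dma_pool_destroy() on the error or remove paths;
 * devres tears the pool down when the driver detaches:
 *
 *        static int mydev_probe(struct platform_device *pdev)
 *        {
 *                struct dma_pool *pool;
 *
 *                pool = dmam_pool_create("mydev-buf", &pdev->dev, 256, 8, 0);
 *                if (!pool)
 *                        return -ENOMEM;
 *                ...
 *        }
 */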

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
        struct device *dev = pool->dev;

        WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);