 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.  Many
 * older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
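
/*
 * A typical call sequence, shown here only as an illustration (the device,
 * pool and variable names are hypothetical, not part of this file):
 *
 *	pool = dma_pool_create("descs", &pdev->dev, 64, 8, 0);
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma_handle);
 *	... hand 'dma_handle' to the device, use 'cpu_addr' from the CPU ...
 *	dma_pool_free(pool, cpu_addr, dma_handle);
 *	dma_pool_destroy(pool);
 */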

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
				       "already free\n", pool->name,
				       (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
	dma_pool_destroy(pool);
}
EXPORT_SYMBOL(dmam_pool_destroy);