/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *  Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.
 */
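/*
 * Typical driver usage, shown only as an illustrative sketch: the device
 * pointer, pool name and sizes below are hypothetical and not taken from
 * any real driver.
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	pool = dma_pool_create("hypothetical-pool", dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (buf) {
 *		(program the device with 'dma', touch 'buf' from the CPU)
 *		dma_pool_free(pool, buf, dma);
 *	}
 *	dma_pool_destroy(pool);		(once no blocks remain in use)
 */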
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t blocks_per_page;
	size_t size;
	struct device *dev;
	size_t allocation;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};
struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned in_use;
	unsigned long bitmap[0];
};
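/*
 * Bookkeeping arithmetic, worked through for one hypothetical pool (the
 * numbers are illustrative only): with allocation = 4096 and size = 64,
 * blocks_per_page = 4096 / 64 = 64, so on a 64-bit machine the bitmap[]
 * trailing the header needs (64 + 63) / 64 = 1 long.  Block n of a page
 * lives at vaddr + n * size and is reachable by the device at
 * dma + n * size; bit n of bitmap[] is set while block n is free.
 */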
#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);
static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * pool->blocks_per_page,
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
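/*
 * Reading the "pools" attribute thus yields one header line followed by one
 * line per pool on the device; the columns are the pool name, blocks in use,
 * total blocks, block size and page count.  A hypothetical pool named
 * "buffer-64" with two pages of 64-byte blocks might show up roughly as:
 *
 *	poolinfo - 0.1
 *	buffer-64          12  128   64  2
 *
 * (illustrative output only; exact spacing follows the scnprintf format
 * above).
 */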
/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t allocation)
{
	struct dma_pool *retval;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0)
		return NULL;

	if ((size % align) != 0)
		size = ALIGN(size, align);

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		/* FIXME: round up for less fragmentation */
	} else if (allocation < size)
		return NULL;

	retval = kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
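/*
 * For instance, a driver whose hardware cannot let a transfer cross a 4 KB
 * boundary might create its pool like this (names and sizes are a
 * hypothetical sketch, not taken from a real driver):
 *
 *	struct dma_pool *buf_pool;
 *
 *	buf_pool = dma_pool_create("hypothetical-buf", dev, 512, 32, 4096);
 *	if (!buf_pool)
 *		return -ENOMEM;
 *
 * Every block from this pool is 512 bytes, aligned to 32 bytes, and carved
 * out of a single 4096-byte allocation, so it never straddles a 4 KB
 * boundary.
 */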
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;
	int mapsize;

	/* the bitmap needs one bit per block, rounded up to whole longs */
	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof(long);

	page = kmalloc(mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
		memset(page->bitmap, 0xff, mapsize);	/* bit set == free */
#ifdef	CONFIG_DEBUG_SLAB
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}
/* returns nonzero if any block in the page is still allocated */
static inline int is_page_busy(int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}
static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}
/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
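/*
 * A teardown sketch matching the guarantee above (hypothetical driver state;
 * only the call ordering matters): every block must go back through
 * dma_pool_free() before the pool itself is destroyed, otherwise the loop
 * above logs the busy page and leaks its memory rather than handing it back
 * while the device might still touch it.
 *
 *	dma_pool_free(buf_pool, buf, buf_dma);	(for each outstanding block)
 *	dma_pool_destroy(buf_pool);
 */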
/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	int map, block;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		int i;
		/* only cachable accesses here ... */
		for (map = 0, i = 0;
		     i < pool->blocks_per_page; i += BITS_PER_LONG, map++) {
			if (page->bitmap[map] == 0)
				continue;
			block = ffz(~page->bitmap[map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit(block, &page->bitmap[map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}

	/* no free block found: try to grow the pool, waiting if we may sleep */
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit(0, &page->bitmap[0]);
	offset = 0;
 ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
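/*
 * A minimal allocation sketch (hypothetical names; error handling trimmed):
 * the returned pointer is for CPU use, while *handle is the bus address to
 * program into the device.  GFP_KERNEL lets the __GFP_WAIT path above sleep
 * for a free block; use GFP_ATOMIC from contexts that cannot sleep.
 *
 *	dma_addr_t td_dma;
 *	struct my_td *td;
 *
 *	td = dma_pool_alloc(td_pool, GFP_KERNEL, &td_dma);
 *	if (!td)
 *		return -ENOMEM;
 *	td->next = cpu_to_le32(td_dma);		(the device sees td_dma)
 *	dma_pool_free(td_pool, td, td_dma);	(when the block is retired)
 */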
/* find the page whose DMA range contains the given bus address */
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}
/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	int map, block;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	if (page->bitmap[map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, dma %Lx already free\n",
			       pool->name, (unsigned long long)dma);
		return;
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	set_bit(block, &page->bitmap[map]);
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}
/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
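/*
 * In a probe() routine the managed variant removes the need for an explicit
 * dma_pool_destroy() on the error and detach paths (sketch only, with a
 * hypothetical driver structure and descriptor type):
 *
 *	priv->td_pool = dmam_pool_create("hypothetical-td", &pdev->dev,
 *					 sizeof(struct my_td), 16, 0);
 *	if (!priv->td_pool)
 *		return -ENOMEM;
 *
 * The pool is then torn down automatically by devres when the driver
 * detaches.
 */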
/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);