/*
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
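
/*
 * Illustrative sketch (not part of the original comment): each free block
 * stores, in its first bytes, the offset of the next free block in the same
 * page.  For a pool of 64-byte blocks, a freshly split page has
 * page->offset == 0 and the chain reads 0 -> 64 -> 128 -> ... up to
 * 'allocation'.  Allocating pops the head of that chain; freeing pushes the
 * block back onto the front.
 */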

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
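
/*
 * Usage sketch (illustrative only; 'mydev', the pool name and the block
 * size are made up): a driver typically creates one pool per descriptor
 * type and pairs every dma_pool_alloc() with a dma_pool_free().
 *
 *	struct dma_pool *pool;
 *	void *cpu_addr;
 *	dma_addr_t dma_handle;
 *
 *	pool = dma_pool_create("mydev-desc", &mydev->dev, 64, 16, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma_handle);
 *	...
 *	dma_pool_free(pool, cpu_addr, dma_handle);
 *	dma_pool_destroy(pool);
 */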

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
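
/*
 * Worked example (illustrative; the numbers are made up): with size = 96,
 * boundary = 1024 and allocation = 4096, the chain written above reads
 * 0 -> 96 -> 192 -> ... -> 864 -> 1024 -> 1120 -> ..., jumping straight to
 * the next 1024-byte boundary whenever the following block would otherwise
 * reach or cross it, so no returned block straddles a boundary.
 */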

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
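
/*
 * Usage sketch (illustrative; 'pool' is assumed to have been created with
 * dma_pool_create()): callers that may sleep pass GFP_KERNEL and benefit
 * from the wait-queue retry above, while interrupt-context callers must
 * pass GFP_ATOMIC and handle a NULL return.
 *
 *	dma_addr_t dma;
 *	void *vaddr = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	... hand 'dma' to the device, access the block via 'vaddr' ...
 *	dma_pool_free(pool, vaddr, dma);
 */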

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
				       "already free\n", pool->name,
				       (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
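
/*
 * Usage sketch (illustrative; 'foo_probe', the pool name and the sizes are
 * made up): a driver that creates its pool with dmam_pool_create() in its
 * probe routine needs no matching destroy call in its remove path, because
 * devres tears the pool down automatically on driver detach.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("foo-desc", dev, 128, 8, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		...
 *		return 0;
 *	}
 */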

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
	dma_pool_destroy(pool);
}
EXPORT_SYMBOL(dmam_pool_destroy);