dmapool: Tidy up includes and add comments
mm/dmapool.c
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.
 */
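/*
 * A minimal usage sketch (not part of this file): a hypothetical driver
 * holding a struct device *dev might drive a pool through its lifecycle
 * roughly like this; names such as "my_descs" and "desc" are illustrative
 * only.
 *
 *	struct dma_pool *pool;
 *	dma_addr_t desc_dma;
 *	void *desc;
 *
 *	pool = dma_pool_create("my_descs", dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &desc_dma);
 *	if (desc) {
 *		// hand desc_dma to the hardware, touch desc from the CPU ...
 *		dma_pool_free(pool, desc, desc_dma);
 *	}
 *	dma_pool_destroy(pool);
 */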
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t blocks_per_page;
	size_t size;
	struct device *dev;
	size_t allocation;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};
struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned in_use;
	unsigned long bitmap[0];
};
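/*
 * Illustrative arithmetic (assumed values, not a requirement): with
 * allocation = PAGE_SIZE = 4096 and size = 64, blocks_per_page is
 * 4096 / 64 = 64, so the free-block bitmap needs 64 bits -- one
 * unsigned long on a 64-bit machine, two on a 32-bit one.  A set bit
 * marks a free block (see pool_alloc_page()).
 */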
#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
static DEFINE_MUTEX(pools_lock);
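/*
 * show_pools() backs the per-device "pools" sysfs attribute created in
 * dma_pool_create(); each line reports a pool's name, blocks in use,
 * total blocks, block size and page count.
 */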
static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name,
				 blocks, pages * pool->blocks_per_page,
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t allocation)
{
	struct dma_pool *retval;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0)
		return NULL;

	if ((size % align) != 0)
		size = ALIGN(size, align);

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		/* FIXME: round up for less fragmentation */
	} else if (allocation < size)
		return NULL;

	retval = kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note: not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
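/*
 * pool_alloc_page() grabs one 'allocation'-sized chunk of coherent memory
 * from dma_alloc_coherent(), marks every block free in the bitmap, and
 * links the new page into the pool's page_list.
 */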
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;
	int mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof(long);

	page = kmalloc(mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev,
					 pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
		memset(page->bitmap, 0xff, mapsize);	/* bit set == free */
#ifdef CONFIG_DEBUG_SLAB
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}
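/*
 * Since a set bit marks a free block, a page is busy whenever any word of
 * its bitmap has a clear bit, i.e. at least one block is still allocated.
 */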
static inline int is_page_busy(int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}
static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}
/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	int map, block;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		int i;
		/* only cachable accesses here ... */
		for (map = 0, i = 0;
		     i < pool->blocks_per_page; i += BITS_PER_LONG, map++) {
			if (page->bitmap[map] == 0)
				continue;
			block = ffz(~page->bitmap[map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit(block, &page->bitmap[map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit(0, &page->bitmap[0]);
	offset = 0;
 ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef CONFIG_DEBUG_SLAB
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
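/*
 * pool_find_page() maps a dma address back to the dma_page whose
 * [dma, dma + allocation) range contains it, or returns NULL if the
 * address does not belong to this pool.
 */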
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}
/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	int map, block;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	if (page->bitmap[map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, dma %Lx already free\n",
			       pool->name, (unsigned long long)dma);
		return;
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	set_bit(block, &page->bitmap[map]);
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
/*
 * Managed DMA pool
 */
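/*
 * dmam_pool_release() and dmam_pool_match() are the devres release and
 * match callbacks backing the managed API below.
 */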
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}
static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}
/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
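/*
 * A minimal sketch of the managed variant (hypothetical probe routine and
 * names): the pool is released automatically on driver detach, so the
 * normal unwind path needs no explicit dma_pool_destroy() call.
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("my_descs", dev, 64, 8, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		// use dma_pool_alloc()/dma_pool_free() as usual ...
 *		return 0;
 *	}
 */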
/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);