/* Modified by Broadcom Corp. Portions Copyright (c) Broadcom Corp, 2012. */
/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */
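
/*
 * Typical use (an illustrative sketch only; the buffer sizes and error
 * handling below are assumptions, not taken from this file): a bus or
 * platform layer registers each DMA-limited device once, after which
 * ordinary streaming mappings are bounced transparently.
 *
 *	// at device setup time, e.g. in bus/probe code
 *	if (dmabounce_register_dev(dev, 512, 4096))
 *		dev_err(dev, "failed to register with dmabounce\n");
 *
 *	// later, a driver maps a buffer as usual; if it lies outside the
 *	// device's DMA window, map_single() silently substitutes a safe
 *	// bounce buffer and returns that buffer's DMA address instead
 *	dma_addr_t handle = dma_map_single(dev, ptr, len, DMA_TO_DEVICE);
 *
 *	// at teardown
 *	dmabounce_unregister_dev(dev);
 */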

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;

	rwlock_t lock;
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif

/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		if (dev)
			dev_err(dev, "Trying to %s invalid mapping\n", where);
		else
			pr_err("unknown device: Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == NULL) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return ~0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		__dma_single_cpu_to_dev(ptr, size, dir);
	}

	return dma_addr;
}

static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * Since we may have written to a page cache page,
			 * we need to ensure that the data will be coherent
			 * with user mappings.
			 */
			__cpuc_flush_dcache_area(ptr, size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
	}
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t BCMFASTPATH dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(dma_map_single);
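
/*
 * A minimal driver-side sketch of the map/unmap pair above (the 'data',
 * 'payload' and 'len' names below are hypothetical; only the dma_* calls
 * themselves are part of the real API):
 *
 *	void *data = kmalloc(len, GFP_KERNEL);
 *	dma_addr_t handle;
 *
 *	memcpy(data, payload, len);
 *	handle = dma_map_single(dev, data, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		goto fail;
 *	// program the device with 'handle'; if 'data' sits outside the DMA
 *	// window, 'handle' points at a bounce copy made by map_single()
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * For DMA_TO_DEVICE the copy into the safe buffer happens at map time;
 * for DMA_FROM_DEVICE the copy back happens at unmap time.
 */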

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void BCMFASTPATH dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_single);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
			     "is not supported\n");
		return ~0;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(dma_map_page);

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page);

int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);

int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);
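
/*
 * The two sync helpers above are what the arch-level dma_sync_single_*
 * wrappers call into, so a driver that peeks at part of a still-mapped
 * buffer would do something like the following (the 64-byte length is a
 * made-up value for illustration):
 *
 *	// hand ownership of the first 64 bytes back to the CPU; for a
 *	// bounced buffer this copies those bytes out of the safe buffer
 *	dma_sync_single_for_cpu(dev, handle, 64, DMA_FROM_DEVICE);
 *	// ... inspect the first 64 bytes of the buffer on the CPU ...
 *	// then give the range back to the device before DMA resumes
 *	dma_sync_single_for_device(dev, handle, 64, DMA_FROM_DEVICE);
 */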

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);

void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");