/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/module.h>
/* Dummy device used for NULL arguments (normally ISA). A smaller DMA
   mask would probably be better, but this is bug-for-bug compatible
   with i386. */
struct device fallback_dev = {
        .bus_id                 = "fallback device",
        .coherent_dma_mask      = DMA_32BIT_MASK,
        .dma_mask               = &fallback_dev.coherent_dma_mask,
};
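/*
 * dma_alloc_coherent() below substitutes &fallback_dev when it is
 * called with dev == NULL, so legacy ISA-style callers are handled
 * with a 32-bit coherent mask.
 */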
/*
 * Try to satisfy the allocation from the device's private coherent
 * pool, if one has been declared. Returns nonzero when such a pool
 * exists; in that case *ret is the allocation, or NULL on failure.
 */
static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                   order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        *ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(*ret, 0, size);
                } else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
                        *ret = NULL;
        }
        return (mem != NULL);
}
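/*
 * The dma_mem pool consulted above is attached to a device with
 * dma_declare_coherent_memory() (see Documentation/DMA-API.txt). A
 * sketch, with made-up addresses:
 *
 *      if (!dma_declare_coherent_memory(dev, 0xf8000000, 0xf8000000,
 *                                       0x100000, DMA_MEMORY_MAP))
 *              dev_err(dev, "no coherent pool\n");
 *
 * Allocations for that device are then carved out of the pool by the
 * bitmap allocator above before the page allocator is tried.
 */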
/*
 * Return the allocation at vaddr to the device's coherent pool.
 * Returns 1 if the address belonged to the pool and was released,
 * 0 if the caller should free it through the normal page allocator.
 */
static int dma_release_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
                return 1;
        }
        return 0;
}
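/*
 * dma_free_coherent() below relies on this return value: a nonzero
 * result means the memory came from the per-device pool and must not
 * be handed back to the page allocator.
 */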
/* Allocate DMA memory on the node nearest to the device */
noinline struct page *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
        int node;

        node = dev_to_node(dev);

        return alloc_pages_node(node, gfp, order);
}
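/*
 * dev_to_node() returns -1 for devices with no NUMA affinity; in that
 * case alloc_pages_node() falls back to the current node, so this is
 * safe for devices that have never been assigned a node.
 */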
/*
 * Allocate memory for a coherent mapping.
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret = NULL;
        struct page *page;
        dma_addr_t bus;
        int order = get_order(size);
        unsigned long dma_mask = 0;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        /* A per-device coherent pool, if declared, takes priority */
        if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &ret))
                return ret;

        if (!dev)
                dev = &fallback_dev;
        dma_mask = dev->coherent_dma_mask;
        if (dma_mask == 0)
                dma_mask = DMA_32BIT_MASK;

        /* Device not DMA capable */
        if (dev->dma_mask == NULL)
                return NULL;

        /* Don't invoke the OOM killer */
        gfp |= __GFP_NORETRY;

again:
        page = dma_alloc_pages(dev, gfp, order);
        if (page == NULL)
                return NULL;

        {
                int high, mmu;
                bus = page_to_phys(page);
                ret = page_address(page);
                high = (bus + size) >= dma_mask;
                mmu = high;
                if (force_iommu && !(gfp & GFP_DMA))
                        mmu = 1;
                else if (high) {
                        /* The pages landed above the device's mask */
                        free_pages((unsigned long)ret,
                                   get_order(size));

                        /* Don't use the 16MB ZONE_DMA unless absolutely
                           needed. It's better to use remapping first. */
                        if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                                gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                                goto again;
                        }

                        /* Let the low level make its own zone decisions */
                        gfp &= ~(GFP_DMA32 | GFP_DMA);

                        if (dma_ops->alloc_coherent)
                                return dma_ops->alloc_coherent(dev, size,
                                                               dma_handle, gfp);
                        return NULL;
                }

                memset(ret, 0, size);
                if (!mmu) {
                        *dma_handle = bus;
                        return ret;
                }
        }

        if (dma_ops->alloc_coherent) {
                free_pages((unsigned long)ret, get_order(size));
                gfp &= ~(GFP_DMA | GFP_DMA32);
                return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
        }

        if (dma_ops->map_simple) {
                *dma_handle = dma_ops->map_simple(dev, virt_to_phys(ret),
                                                  size,
                                                  PCI_DMA_BIDIRECTIONAL);
                if (*dma_handle != bad_dma_address)
                        return ret;
        }

        if (panic_on_overflow)
                panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
                      (unsigned long)size);
        free_pages((unsigned long)ret, get_order(size));
        return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
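/*
 * Summary of the paths above: the per-device coherent pool is tried
 * first, then a direct page allocation near the device; if the pages
 * land above the coherent mask, the allocation is retried in a lower
 * zone and, as a last resort, handed to the IOMMU code
 * (dma_ops->alloc_coherent or dma_ops->map_simple) for remapping.
 */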
/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the
 * mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle)
{
        int order = get_order(size);

        WARN_ON(irqs_disabled());       /* for portability */
        if (dma_release_coherent(dev, order, vaddr))
                return;
        if (dma_ops->unmap_single)
                dma_ops->unmap_single(dev, dma_handle, size, 0);
        free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
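/*
 * Usage sketch (illustrative only; the function and variable names
 * below are hypothetical): a driver pairs dma_alloc_coherent() with
 * dma_free_coherent(), programming the returned bus address into the
 * hardware and keeping the CPU pointer for its own accesses.
 */
#if 0
static int example_setup_ring(struct device *dev, void **ring,
                              dma_addr_t *ring_bus)
{
        /* One page of zeroed, coherent memory for a descriptor ring */
        *ring = dma_alloc_coherent(dev, PAGE_SIZE, ring_bus, GFP_KERNEL);
        if (*ring == NULL)
                return -ENOMEM;
        /* *ring_bus is what the device sees; *ring is what the CPU uses */
        return 0;
}

static void example_teardown_ring(struct device *dev, void *ring,
                                  dma_addr_t ring_bus)
{
        /* The device must be quiesced before the mapping is released */
        dma_free_coherent(dev, PAGE_SIZE, ring, ring_bus);
}
#endif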