x86: don't do dma if mask is NULL.
[linux-2.6/mini2440.git] arch/x86/kernel/pci-dma_32.c
blob d2f70744a93a42f985089d8eae699e57f650d115
/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>

/* Dummy device used for NULL arguments (normally ISA). A smaller
   DMA mask would probably be better, but this is bug-for-bug
   compatible with i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};
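
/*
 * Try to satisfy the allocation from the device's private coherent
 * memory pool, if it has one (set up with dma_declare_coherent_memory()).
 * Returns non-zero when such a pool exists; *ret then holds the
 * allocated virtual address, or NULL if the pool could not satisfy
 * the request.
 */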
static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			*ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(*ret, 0, size);
		} else if (mem->flags & DMA_MEMORY_EXCLUSIVE) {
			/* Exclusive pool: the caller must not fall back
			   to the normal page allocator. */
			*ret = NULL;
		}
	}
	return (mem != NULL);
}
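
/*
 * Give an allocation back to the device's coherent pool if the address
 * falls inside it.  Returns non-zero when the buffer was released here,
 * zero when the caller must free it through the normal path.
 */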
static int dma_release_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}

/* Allocate DMA memory on node near device */
noinline struct page *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	int node;

	node = dev_to_node(dev);

	return alloc_pages_node(node, gfp, order);
}
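
/*
 * Allocate memory for coherent DMA.  The strategy, in order:
 *  1. satisfy the request from the device's private coherent pool;
 *  2. allocate pages near the device's node, retrying in ZONE_DMA
 *     if the pages land above the device's coherent DMA mask;
 *  3. fall back to the dma_ops (IOMMU) alloc/map hooks.
 * Returns NULL if the device has no DMA mask at all.
 */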
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret = NULL;
	struct page *page;
	dma_addr_t bus;
	int order = get_order(size);
	unsigned long dma_mask = 0;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &ret))
		return ret;

	if (!dev)
		dev = &fallback_dev;

	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = DMA_32BIT_MASK;
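
	/* A device with no DMA mask cannot do DMA at all; give up early. */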
	if (dev->dma_mask == NULL)
		return NULL;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;
again:
	page = dma_alloc_pages(dev, gfp, order);
	if (page == NULL)
		return NULL;
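
	/* Check whether the pages are usable as-is or need the IOMMU. */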
	{
		int high, mmu;
		bus = page_to_phys(page);
		ret = page_address(page);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)ret,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let the low-level code make its own zone decisions */
			gfp &= ~(GFP_DMA32 | GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							       dma_handle, gfp);
			return NULL;
		}

		memset(ret, 0, size);
		if (!mmu) {
			*dma_handle = bus;
			return ret;
		}
	}
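
	/* The pages need remapping: defer to the IOMMU layer via dma_ops. */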
	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)ret, get_order(size));
		gfp &= ~(GFP_DMA | GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(ret),
						  size,
						  PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return ret;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
		      (unsigned long)size);
	free_pages((unsigned long)ret, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
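
/*
 * Free a buffer obtained from dma_alloc_coherent(): return it to the
 * device's coherent pool if it came from there, otherwise unmap it
 * from the IOMMU (when dma_ops provides unmap_single) and hand the
 * pages back to the page allocator.
 */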
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	int order = get_order(size);

	WARN_ON(irqs_disabled());	/* for portability */
	if (dma_release_coherent(dev, order, vaddr))
		return;
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, dma_handle, size, 0);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
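
/*
 * Illustrative usage sketch (not part of the original file): a driver
 * would typically pair the two exports above like this, where 'pdev'
 * stands for some PCI device the driver owns:
 *
 *	dma_addr_t bus;
 *	void *buf = dma_alloc_coherent(&pdev->dev, 4096, &bus, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... program the device with 'bus', access the buffer via 'buf' ...
 *	dma_free_coherent(&pdev->dev, 4096, buf, bus);
 */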