/*
 * arch/powerpc/kernel/dma_64.c
 *
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses and busses using the iommu infrastructure
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/bug.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
/*
 * Generic iommu implementation
 */
static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;

	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}
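
/*
 * Illustrative note (not from the original source): a driver that has
 * called dma_set_mask(dev, DMA_64BIT_MASK) gets that full mask back here,
 * while a device that never set a mask is conservatively assumed to be
 * limited to 32-bit bus addresses (0xfffffffful).
 */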
/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag)
{
	return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
				    dma_handle, device_to_mask(dev), flag,
				    dev->archdata.numa_node);
}
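
/*
 * A minimal usage sketch (assumed driver-side code, not part of this
 * file): once dma_iommu_ops is installed for a device, the generic DMA
 * API dispatches into the callback above:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(&pdev->dev, 4096, &handle, GFP_KERNEL);
 *	if (buf) {
 *		// ... program the device with "handle" ...
 *		dma_free_coherent(&pdev->dev, 4096, buf, handle);
 *	}
 */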
static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle)
{
	iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
}
/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address of the buffer
 * passed here is the kernel (virtual) address of the buffer.  The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
				       size_t size,
				       enum dma_data_direction direction)
{
	return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
				device_to_mask(dev), direction);
}
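
/*
 * Illustrative streaming-mapping sketch (assumed driver-side code); per
 * the comment above, "buf" must be real kernel storage such as a kmalloc
 * buffer, never vmalloc memory:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	// ... device DMA runs against "dma" ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */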
static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size,
				   enum dma_data_direction direction)
{
	iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction);
}
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	return iommu_map_sg(dev, sglist, nelems,
			    device_to_mask(dev), direction);
}
static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       int nelems, enum dma_data_direction direction)
{
	iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction);
}
/* We support DMA to/from any memory page via the iommu */
static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = dev->archdata.dma_data;

	if (!tbl || tbl->it_offset > mask) {
		printk(KERN_INFO
		       "Warning: IOMMU offset too big for device mask\n");
		if (tbl)
			printk(KERN_INFO
			       "mask: 0x%08lx, table offset: 0x%08lx\n",
			       mask, tbl->it_offset);
		else
			printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
			       mask);
		return 0;
	} else
		return 1;
}
struct dma_mapping_ops dma_iommu_ops = {
	.alloc_coherent	= dma_iommu_alloc_coherent,
	.free_coherent	= dma_iommu_free_coherent,
	.map_single	= dma_iommu_map_single,
	.unmap_single	= dma_iommu_unmap_single,
	.map_sg		= dma_iommu_map_sg,
	.unmap_sg	= dma_iommu_unmap_sg,
	.dma_supported	= dma_iommu_dma_supported,
};
EXPORT_SYMBOL(dma_iommu_ops);
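
/*
 * A sketch of how platform/bus setup code is assumed to wire a device to
 * these ops (illustrative; the actual wiring lives elsewhere, in the
 * platform code that owns the iommu tables):
 *
 *	dev->archdata.dma_data = tbl;		// the device's iommu_table
 *	dev->archdata.dma_ops  = &dma_iommu_ops;
 */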
/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is zero.
 */
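
/*
 * Hypothetical example: on a board where devices see system RAM starting
 * at bus address 0x80000000, platform setup could stash that offset:
 *
 *	dev->archdata.dma_data = (void *)0x80000000ul;
 *
 * after which every dma_addr_t produced below is the absolute address of
 * the buffer plus 0x80000000.
 */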
static unsigned long get_dma_direct_offset(struct device *dev)
{
	return (unsigned long)dev->archdata.dma_data;
}
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct page *page;
	void *ret;
	int node = dev->archdata.numa_node;

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);

	return ret;
}
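
/*
 * Worked example (added for illustration): allocations are rounded up to a
 * power-of-two block of pages, so with 4KiB pages a 12KiB request gives
 * get_order(0x3000) == 2 and alloc_pages_node() returns a 16KiB (4-page)
 * allocation, zeroed by the memset above.
 */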
static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
					size_t size,
					enum dma_data_direction direction)
{
	return virt_to_abs(ptr) + get_dma_direct_offset(dev);
}
static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	/* Nothing to do: mapping was pure address arithmetic, so there is
	 * no translation state to tear down */
}
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
	}

	return nents;
}
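
/*
 * Note (added): sg_phys(sg) is the physical address of the segment
 * (page_to_phys(sg_page(sg)) + sg->offset), so each dma_address above is
 * simply the physical address plus the per-device offset; no segments are
 * merged and nents is returned unchanged.
 */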
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction)
{
}
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	/* Could be improved to check for memory though it better be
	 * done via some global so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= DMA_32BIT_MASK;
}
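
/*
 * Illustrative check (added): DMA_32BIT_MASK is 0x00000000ffffffffULL, so
 * a device advertising a 64-bit mask passes, while a 24-bit ISA-style
 * mask (0x00ffffff) is rejected, since direct DMA may hand out any RAM
 * address up to 32 bits (plus the per-device offset).
 */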
struct dma_mapping_ops dma_direct_ops = {
	.alloc_coherent	= dma_direct_alloc_coherent,
	.free_coherent	= dma_direct_free_coherent,
	.map_single	= dma_direct_map_single,
	.unmap_single	= dma_direct_unmap_single,
	.map_sg		= dma_direct_map_sg,
	.unmap_sg	= dma_direct_unmap_sg,
	.dma_supported	= dma_direct_dma_supported,
};
EXPORT_SYMBOL(dma_direct_ops);