drivers/ieee1394/dma.c
/*
 * DMA region bookkeeping routines
 *
 * Copyright (C) 2002 Maas Digital LLC
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/scatterlist.h>

#include "dma.h"
/* dma_prog_region */

void dma_prog_region_init(struct dma_prog_region *prog)
{
        prog->kvirt = NULL;
        prog->dev = NULL;
        prog->n_pages = 0;
        prog->bus_addr = 0;
}
int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
                          struct pci_dev *dev)
{
        /* round up to page size */
        n_bytes = PAGE_ALIGN(n_bytes);

        prog->n_pages = n_bytes >> PAGE_SHIFT;

        prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
        if (!prog->kvirt) {
                printk(KERN_ERR
                       "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
                dma_prog_region_free(prog);
                return -ENOMEM;
        }

        prog->dev = dev;

        return 0;
}
void dma_prog_region_free(struct dma_prog_region *prog)
{
        if (prog->kvirt) {
                pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
                                    prog->kvirt, prog->bus_addr);
        }

        prog->kvirt = NULL;
        prog->dev = NULL;
        prog->n_pages = 0;
        prog->bus_addr = 0;
}
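
/*
 * Example: typical dma_prog_region life cycle.  A minimal, hypothetical
 * sketch (the "ohci" device pointer is illustrative, not part of this
 * file); the region holds a physically contiguous DMA program that the
 * controller fetches via prog.bus_addr:
 *
 *	struct dma_prog_region prog;
 *
 *	dma_prog_region_init(&prog);
 *	if (dma_prog_region_alloc(&prog, 16 * 1024, ohci->dev))
 *		return -ENOMEM;
 *	... build the DMA program through prog.kvirt, point the
 *	    controller at prog.bus_addr ...
 *	dma_prog_region_free(&prog);
 */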
/* dma_region */

/**
 * dma_region_init - clear out all fields but do not allocate anything
 */
void dma_region_init(struct dma_region *dma)
{
        dma->kvirt = NULL;
        dma->dev = NULL;
        dma->n_pages = 0;
        dma->n_dma_pages = 0;
        dma->sglist = NULL;
}
/**
 * dma_region_alloc - allocate the buffer and map it to the IOMMU
 */
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
                     struct pci_dev *dev, int direction)
{
        unsigned int i;

        /* round up to page size */
        n_bytes = PAGE_ALIGN(n_bytes);

        dma->n_pages = n_bytes >> PAGE_SHIFT;

        dma->kvirt = vmalloc_32(n_bytes);
        if (!dma->kvirt) {
                printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
                goto err;
        }

        /* Clear the ram out, no junk to the user */
        memset(dma->kvirt, 0, n_bytes);

        /* allocate scatter/gather list */
        dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
        if (!dma->sglist) {
                printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
                goto err;
        }

        sg_init_table(dma->sglist, dma->n_pages);

        /* fill scatter/gather list with pages */
        for (i = 0; i < dma->n_pages; i++) {
                unsigned long va =
                    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);

                sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va),
                            PAGE_SIZE, 0);
        }

        /* map sglist to the IOMMU */
        dma->n_dma_pages =
            pci_map_sg(dev, dma->sglist, dma->n_pages, direction);

        if (dma->n_dma_pages == 0) {
                printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
                goto err;
        }

        dma->dev = dev;
        dma->direction = direction;

        return 0;

err:
        dma_region_free(dma);
        return -ENOMEM;
}
/**
 * dma_region_free - unmap and free the buffer
 */
void dma_region_free(struct dma_region *dma)
{
        if (dma->n_dma_pages) {
                pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
                             dma->direction);
                dma->n_dma_pages = 0;
                dma->dev = NULL;
        }

        vfree(dma->sglist);
        dma->sglist = NULL;

        vfree(dma->kvirt);
        dma->kvirt = NULL;
        dma->n_pages = 0;
}
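
/*
 * Example: typical dma_region life cycle.  A hedged sketch; the buffer
 * size and the "ohci" pci_dev are made-up stand-ins.  Unlike a
 * dma_prog_region, this buffer is vmalloc'd (not physically contiguous)
 * and reaches the device through the scatter/gather mapping:
 *
 *	struct dma_region dma;
 *
 *	dma_region_init(&dma);
 *	if (dma_region_alloc(&dma, 256 * 1024, ohci->dev,
 *			     PCI_DMA_FROMDEVICE))
 *		return -ENOMEM;
 *	... device fills the buffer via the mapped sglist entries ...
 *	dma_region_free(&dma);
 */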
/* find the scatterlist index and remaining offset corresponding to a
   given offset from the beginning of the buffer */
static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
                                  unsigned int start, unsigned long *rem)
{
        int i;
        unsigned long off = offset;

        for (i = start; i < dma->n_dma_pages; i++) {
                if (off < sg_dma_len(&dma->sglist[i])) {
                        *rem = off;
                        break;
                }

                off -= sg_dma_len(&dma->sglist[i]);
        }

        BUG_ON(i >= dma->n_dma_pages);

        return i;
}
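
/*
 * Worked example (assuming no IOMMU coalescing, so each sg entry maps
 * exactly PAGE_SIZE = 4096 bytes): for offset = 10000 and start = 0,
 * the walk subtracts 4096 twice (entries 0 and 1), leaving
 * off = 1808 < 4096 at entry 2, so it returns i = 2 with *rem = 1808.
 */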
/**
 * dma_region_offset_to_bus - get bus address of an offset within a DMA region
 *
 * Returns the DMA bus address of the byte with the given @offset relative to
 * the beginning of the @dma.
 */
dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
                                    unsigned long offset)
{
        unsigned long rem = 0;

        struct scatterlist *sg =
            &dma->sglist[dma_region_find(dma, offset, 0, &rem)];
        return sg_dma_address(sg) + rem;
}
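
/*
 * Example: a driver that must hand the controller the bus address of a
 * buffer inside the region could do something like the following (the
 * "desc" descriptor layout is invented for illustration):
 *
 *	desc->branch_address =
 *		cpu_to_le32(dma_region_offset_to_bus(&dma, buf_offset));
 */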
/**
 * dma_region_sync_for_cpu - sync the CPU's view of the buffer
 */
void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
                             unsigned long len)
{
        int first, last;
        unsigned long rem = 0;

        if (!len)
                len = 1;

        first = dma_region_find(dma, offset, 0, &rem);
        last = dma_region_find(dma, rem + len - 1, first, &rem);

        pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
                                dma->direction);
}
/**
 * dma_region_sync_for_device - sync the IO bus' view of the buffer
 */
void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
                                unsigned long len)
{
        int first, last;
        unsigned long rem = 0;

        if (!len)
                len = 1;

        first = dma_region_find(dma, offset, 0, &rem);
        last = dma_region_find(dma, rem + len - 1, first, &rem);

        pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
                                   last - first + 1, dma->direction);
}
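
/*
 * Example: the sync pair brackets CPU access on architectures where DMA
 * is not cache-coherent.  A sketch for a receive path (the offset and
 * length values are illustrative):
 *
 *	dma_region_sync_for_cpu(&dma, pkt_offset, pkt_len);
 *	... CPU parses the packet at dma.kvirt + pkt_offset ...
 *	dma_region_sync_for_device(&dma, pkt_offset, pkt_len);
 */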
#ifdef CONFIG_MMU

static int dma_region_pagefault(struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        struct dma_region *dma = (struct dma_region *)vma->vm_private_data;

        if (!dma->kvirt)
                return VM_FAULT_SIGBUS;

        if (vmf->pgoff >= dma->n_pages)
                return VM_FAULT_SIGBUS;

        vmf->page = vmalloc_to_page(dma->kvirt + (vmf->pgoff << PAGE_SHIFT));
        get_page(vmf->page);
        return 0;
}

static const struct vm_operations_struct dma_region_vm_ops = {
        .fault = dma_region_pagefault,
};
/**
 * dma_region_mmap - map the buffer into a user space process
 */
int dma_region_mmap(struct dma_region *dma, struct file *file,
                    struct vm_area_struct *vma)
{
        unsigned long size;

        if (!dma->kvirt)
                return -EINVAL;

        /* must be page-aligned (XXX: comment is wrong, we could allow pgoff) */
        if (vma->vm_pgoff != 0)
                return -EINVAL;

        /* check the length */
        size = vma->vm_end - vma->vm_start;
        if (size > (dma->n_pages << PAGE_SHIFT))
                return -EINVAL;

        vma->vm_ops = &dma_region_vm_ops;
        vma->vm_private_data = dma;
        vma->vm_file = file;
        vma->vm_flags |= VM_RESERVED | VM_ALWAYSDUMP;

        return 0;
}
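
/*
 * Example: a character device would typically call this from its
 * file_operations ->mmap() handler.  A hypothetical sketch (struct
 * my_ctx and its dma member are invented for illustration):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_ctx *ctx = file->private_data;
 *
 *		return dma_region_mmap(&ctx->dma, file, vma);
 *	}
 */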
#else /* CONFIG_MMU */

int dma_region_mmap(struct dma_region *dma, struct file *file,
                    struct vm_area_struct *vma)
{
        return -EINVAL;
}

#endif /* CONFIG_MMU */