/*
 * DMA region bookkeeping routines
 *
 * Copyright (C) 2002 Maas Digital LLC
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 */

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include "dma.h"

/* dma_prog_region */

void dma_prog_region_init(struct dma_prog_region *prog)
{
	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}

int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
			  struct pci_dev *dev)
{
	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	prog->n_pages = n_bytes >> PAGE_SHIFT;

	/* coherent allocation: no explicit syncing is needed for this region */
	prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
	if (!prog->kvirt) {
		printk(KERN_ERR
		       "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
		dma_prog_region_free(prog);
		return -ENOMEM;
	}

	prog->dev = dev;

	return 0;
}

void dma_prog_region_free(struct dma_prog_region *prog)
{
	if (prog->kvirt) {
		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
				    prog->kvirt, prog->bus_addr);
	}

	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}
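
/*
 * Example: typical dma_prog_region lifecycle.  An illustrative sketch, not
 * code from any particular driver; "pdev" stands in for whatever struct
 * pci_dev the caller owns.
 *
 *	struct dma_prog_region prog;
 *
 *	dma_prog_region_init(&prog);
 *	if (dma_prog_region_alloc(&prog, 4096, pdev))
 *		return -ENOMEM;
 *	... build DMA programs in prog.kvirt, point the controller
 *	    at prog.bus_addr ...
 *	dma_prog_region_free(&prog);
 */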

/* dma_region */

void dma_region_init(struct dma_region *dma)
{
	dma->kvirt = NULL;
	dma->dev = NULL;
	dma->n_pages = 0;
	dma->n_dma_pages = 0;
	dma->sglist = NULL;
}

int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
		     struct pci_dev *dev, int direction)
{
	unsigned int i;

	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	dma->n_pages = n_bytes >> PAGE_SHIFT;

	/* vmalloc_32() keeps the pages within 32-bit addressable memory */
	dma->kvirt = vmalloc_32(n_bytes);
	if (!dma->kvirt) {
		printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
		goto err;
	}

	/* Clear the ram out, no junk to the user */
	memset(dma->kvirt, 0, n_bytes);

	/* allocate scatter/gather list */
	dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
	if (!dma->sglist) {
		printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
		goto err;
	}

	/* just to be safe - this will become unnecessary once
	   sglist->address goes away */
	memset(dma->sglist, 0, dma->n_pages * sizeof(*dma->sglist));

	/* fill scatter/gather list with pages */
	for (i = 0; i < dma->n_pages; i++) {
		unsigned long va =
		    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);

		dma->sglist[i].page = vmalloc_to_page((void *)va);
		dma->sglist[i].length = PAGE_SIZE;
	}

	/* map sglist to the IOMMU */
	dma->n_dma_pages =
	    pci_map_sg(dev, dma->sglist, dma->n_pages, direction);

	if (dma->n_dma_pages == 0) {
		printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
		goto err;
	}

	dma->dev = dev;
	dma->direction = direction;

	return 0;

      err:
	dma_region_free(dma);
	return -ENOMEM;
}

void dma_region_free(struct dma_region *dma)
{
	if (dma->n_dma_pages) {
		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
			     dma->direction);
		dma->n_dma_pages = 0;
		dma->dev = NULL;
	}

	vfree(dma->sglist);
	dma->sglist = NULL;

	vfree(dma->kvirt);
	dma->kvirt = NULL;
	dma->n_pages = 0;
}
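
/*
 * Example: typical dma_region lifecycle, assuming a hypothetical driver
 * with a struct pci_dev *pdev that receives data from the device.
 * Illustrative only.
 *
 *	struct dma_region dma;
 *
 *	dma_region_init(&dma);
 *	if (dma_region_alloc(&dma, 256 * 1024, pdev, PCI_DMA_FROMDEVICE))
 *		return -ENOMEM;
 *	... program the device with bus addresses from
 *	    dma_region_offset_to_bus(), read results through dma.kvirt ...
 *	dma_region_free(&dma);
 */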

/* find the scatterlist index and remaining offset corresponding to a
   given offset from the beginning of the buffer */
static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
				  unsigned int start, unsigned long *rem)
{
	int i;
	unsigned long off = offset;

	for (i = start; i < dma->n_dma_pages; i++) {
		if (off < sg_dma_len(&dma->sglist[i])) {
			*rem = off;
			break;
		}

		off -= sg_dma_len(&dma->sglist[i]);
	}

	BUG_ON(i >= dma->n_dma_pages);

	return i;
}
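
/*
 * Worked example (hypothetical numbers): with two DMA segments of
 * sg_dma_len() 8192 and 4096, dma_region_find(dma, 9000, 0, &rem) skips
 * the first segment (9000 - 8192 = 808), stops at index 1 because
 * 808 < 4096, and returns 1 with rem = 808.
 */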

dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
				    unsigned long offset)
{
	unsigned long rem = 0;

	struct scatterlist *sg =
	    &dma->sglist[dma_region_find(dma, offset, 0, &rem)];
	return sg_dma_address(sg) + rem;
}
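
/*
 * Example: converting a buffer offset into a bus address for the device,
 * e.g. when filling in a DMA descriptor.  "desc" and its data_ptr field
 * are hypothetical.
 *
 *	desc->data_ptr = cpu_to_le32(dma_region_offset_to_bus(&dma, off));
 */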

void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
			     unsigned long len)
{
	int first, last;
	unsigned long rem = 0;

	if (!len)
		len = 1;

	first = dma_region_find(dma, offset, 0, &rem);
	last = dma_region_find(dma, rem + len - 1, first, &rem);

	pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
				dma->direction);
}

void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
				unsigned long len)
{
	int first, last;
	unsigned long rem = 0;

	if (!len)
		len = 1;

	first = dma_region_find(dma, offset, 0, &rem);
	last = dma_region_find(dma, rem + len - 1, first, &rem);

	pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
				   last - first + 1, dma->direction);
}
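
/*
 * Example: a driver receiving into the region syncs before the CPU looks
 * at freshly DMA'd data, and syncs back before handing the buffer to the
 * device again.  Sketch with hypothetical offset/length:
 *
 *	dma_region_sync_for_cpu(&dma, buf_offset, buf_len);
 *	... CPU reads the buffer ...
 *	dma_region_sync_for_device(&dma, buf_offset, buf_len);
 */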

#ifdef CONFIG_MMU

/* nopage() handler for mmap access */

static struct page *dma_region_pagefault(struct vm_area_struct *area,
					 unsigned long address, int *type)
{
	unsigned long offset;
	unsigned long kernel_virt_addr;
	struct page *ret = NOPAGE_SIGBUS;

	struct dma_region *dma = (struct dma_region *)area->vm_private_data;

	if (!dma->kvirt)
		goto out;

	if ((address < (unsigned long)area->vm_start) ||
	    (address >
	     (unsigned long)area->vm_start + (dma->n_pages << PAGE_SHIFT)))
		goto out;

	if (type)
		*type = VM_FAULT_MINOR;
	offset = address - area->vm_start;
	kernel_virt_addr = (unsigned long)dma->kvirt + offset;
	ret = vmalloc_to_page((void *)kernel_virt_addr);
      out:
	return ret;
}

static struct vm_operations_struct dma_region_vm_ops = {
	.nopage = dma_region_pagefault,
};

int dma_region_mmap(struct dma_region *dma, struct file *file,
		    struct vm_area_struct *vma)
{
	unsigned long size;

	if (!dma->kvirt)
		return -EINVAL;

	/* must be page-aligned */
	if (vma->vm_pgoff != 0)
		return -EINVAL;

	/* check the length */
	size = vma->vm_end - vma->vm_start;
	if (size > (dma->n_pages << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_ops = &dma_region_vm_ops;
	vma->vm_private_data = dma;
	vma->vm_file = file;
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
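
/*
 * Example: hooking this up from a character device's mmap() method.  An
 * illustrative sketch; "my_file_to_dma_region()" is a hypothetical helper
 * that looks up the region associated with the open file.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct dma_region *dma = my_file_to_dma_region(file);
 *
 *		return dma_region_mmap(dma, file, vma);
 *	}
 */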

#else				/* CONFIG_MMU */

int dma_region_mmap(struct dma_region *dma, struct file *file,
		    struct vm_area_struct *vma)
{
	return -EINVAL;
}

#endif				/* CONFIG_MMU */