/*
 * DMA region bookkeeping routines
 *
 * Copyright (C) 2002 Maas Digital LLC
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/scatterlist.h>

#include "dma.h"
void dma_prog_region_init(struct dma_prog_region *prog)
{
	/* start from a known-empty state so a later free() is always safe */
	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}
int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
			  struct pci_dev *dev)
{
	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	prog->n_pages = n_bytes >> PAGE_SHIFT;

	prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
	if (!prog->kvirt) {
		printk(KERN_ERR
		       "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
		dma_prog_region_free(prog);
		return -ENOMEM;
	}

	prog->dev = dev;

	return 0;
}
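/*
 * Typical usage (an illustrative sketch, not part of the original file;
 * "pdev" is assumed to be the caller's struct pci_dev):
 *
 *	struct dma_prog_region prog;
 *
 *	dma_prog_region_init(&prog);
 *	if (dma_prog_region_alloc(&prog, PAGE_SIZE, pdev))
 *		return -ENOMEM;
 *	...
 *	dma_prog_region_free(&prog);
 */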
void dma_prog_region_free(struct dma_prog_region *prog)
{
	if (prog->kvirt) {
		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
				    prog->kvirt, prog->bus_addr);
	}

	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}
void dma_region_init(struct dma_region *dma)
{
	dma->kvirt = NULL;
	dma->dev = NULL;
	dma->n_pages = 0;
	dma->n_dma_pages = 0;
	dma->sglist = NULL;
}
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
		     struct pci_dev *dev, int direction)
{
	unsigned int i;

	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	dma->n_pages = n_bytes >> PAGE_SHIFT;

	dma->kvirt = vmalloc_32(n_bytes);
	if (!dma->kvirt) {
		printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
		goto err;
	}

	/* Clear the ram out, no junk to the user */
	memset(dma->kvirt, 0, n_bytes);

	/* allocate scatter/gather list */
	dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
	if (!dma->sglist) {
		printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
		goto err;
	}

	/* just to be safe - this will become unnecessary once
	   sglist->address goes away */
	memset(dma->sglist, 0, dma->n_pages * sizeof(*dma->sglist));

	/* fill scatter/gather list with pages */
	for (i = 0; i < dma->n_pages; i++) {
		unsigned long va =
		    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);

		dma->sglist[i].page = vmalloc_to_page((void *)va);
		dma->sglist[i].length = PAGE_SIZE;
	}

	/* map sglist to the IOMMU */
	dma->n_dma_pages =
	    pci_map_sg(dev, dma->sglist, dma->n_pages, direction);

	if (dma->n_dma_pages == 0) {
		printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
		goto err;
	}

	dma->dev = dev;
	dma->direction = direction;

	return 0;

      err:
	dma_region_free(dma);
	return -ENOMEM;
}
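/*
 * Illustrative usage (a sketch, not from the original file): allocate a
 * 64 KiB streaming buffer for device-to-memory DMA, then release it when
 * done. "pdev" is assumed to be the caller's struct pci_dev.
 *
 *	struct dma_region region;
 *
 *	dma_region_init(&region);
 *	if (dma_region_alloc(&region, 64 * 1024, pdev, PCI_DMA_FROMDEVICE))
 *		return -ENOMEM;
 *	...
 *	dma_region_free(&region);
 */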
void dma_region_free(struct dma_region *dma)
{
	if (dma->n_dma_pages) {
		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
			     dma->direction);
		dma->n_dma_pages = 0;
		dma->dev = NULL;
	}

	vfree(dma->sglist);
	dma->sglist = NULL;

	vfree(dma->kvirt);
	dma->kvirt = NULL;
	dma->n_pages = 0;
}
/* find the scatterlist index and remaining offset corresponding to a
   given offset from the beginning of the buffer */
static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
				  unsigned int start, unsigned long *rem)
{
	int i;
	unsigned long off = offset;

	for (i = start; i < dma->n_dma_pages; i++) {
		if (off < sg_dma_len(&dma->sglist[i])) {
			*rem = off;
			break;
		}

		off -= sg_dma_len(&dma->sglist[i]);
	}

	BUG_ON(i >= dma->n_dma_pages);

	return i;
}
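/*
 * Worked example (illustrative): with two 4096-byte s/g entries, an
 * offset of 5000 walks past entry 0 (5000 - 4096 = 904) and returns
 * i = 1 with *rem = 904.
 */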
dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
				    unsigned long offset)
{
	unsigned long rem = 0;

	struct scatterlist *sg =
	    &dma->sglist[dma_region_find(dma, offset, 0, &rem)];
	return sg_dma_address(sg) + rem;
}
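/*
 * A caller would typically use this to fill the data-address field of a
 * device descriptor, e.g. (hypothetical descriptor layout):
 *
 *	desc->data_addr = cpu_to_le32(dma_region_offset_to_bus(dma, off));
 */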
void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
			     unsigned long len)
{
	int first, last;
	unsigned long rem = 0;

	if (!len)
		len = 1;

	first = dma_region_find(dma, offset, 0, &rem);
	last = dma_region_find(dma, rem + len - 1, first, &rem);

	pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
				dma->direction);
}
void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
				unsigned long len)
{
	int first, last;
	unsigned long rem = 0;

	if (!len)
		len = 1;

	first = dma_region_find(dma, offset, 0, &rem);
	last = dma_region_find(dma, rem + len - 1, first, &rem);

	pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
				   last - first + 1, dma->direction);
}
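/*
 * Note (general DMA-API convention, not specific to this file): call
 * dma_region_sync_for_cpu() before the CPU reads data the device wrote,
 * and dma_region_sync_for_device() after the CPU writes data the device
 * will read.
 */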
#ifdef CONFIG_MMU

/* nopage() handler for mmap access */

static struct page *dma_region_pagefault(struct vm_area_struct *area,
					 unsigned long address, int *type)
{
	unsigned long offset;
	unsigned long kernel_virt_addr;
	struct page *ret = NOPAGE_SIGBUS;

	struct dma_region *dma = (struct dma_region *)area->vm_private_data;

	if (!dma->kvirt)
		goto out;

	if ((address < (unsigned long)area->vm_start) ||
	    (address >
	     (unsigned long)area->vm_start + (dma->n_pages << PAGE_SHIFT)))
		goto out;

	if (type)
		*type = VM_FAULT_MINOR;
	offset = address - area->vm_start;
	kernel_virt_addr = (unsigned long)dma->kvirt + offset;
	ret = vmalloc_to_page((void *)kernel_virt_addr);
      out:
	return ret;
}
static struct vm_operations_struct dma_region_vm_ops = {
	.nopage = dma_region_pagefault,
};
int dma_region_mmap(struct dma_region *dma, struct file *file,
		    struct vm_area_struct *vma)
{
	unsigned long size;

	if (!dma->kvirt)
		return -EINVAL;

	/* must be page-aligned */
	if (vma->vm_pgoff != 0)
		return -EINVAL;

	/* check the length */
	size = vma->vm_end - vma->vm_start;
	if (size > (dma->n_pages << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_ops = &dma_region_vm_ops;
	vma->vm_private_data = dma;
	vma->vm_file = file;
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
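/*
 * Illustrative hookup (a sketch; the "mydrv" names are hypothetical): a
 * character driver would forward its file_operations .mmap to this
 * helper:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *drv = file->private_data;
 *
 *		return dma_region_mmap(&drv->region, file, vma);
 *	}
 */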
#else				/* CONFIG_MMU */

int dma_region_mmap(struct dma_region *dma, struct file *file,
		    struct vm_area_struct *vma)
{
	/* without an MMU there is nothing sensible to map */
	return -EINVAL;
}

#endif				/* CONFIG_MMU */