Linux-2.6.12-rc2
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / arch/parisc/kernel/pci-dma.c
blob 368cc095c99fd4a0611708709c2c1f018aead9ec

/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-mapping.txt for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
** (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>    /* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>   /* get_order */
#include <asm/pgalloc.h>
#include <asm/uaccess.h>

static struct proc_dir_entry * proc_gsc_root = NULL;
static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length);
static unsigned long pcxl_used_bytes = 0;
static unsigned long pcxl_used_pages = 0;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static spinlock_t pcxl_res_lock;
static char *pcxl_res_map;
static int pcxl_res_hint;
static int pcxl_res_size;

#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif
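
/*
** The pcxl resource map tracks which pages of the uncached PCXL DMA
** window are in use: one bit per page, scanned in 8-, 16- or 32-bit
** chunks by the search macros below.  pcxl_res_hint remembers where the
** last allocation ended so the next search can start there instead of
** rescanning from the front of the map.
*/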

/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif
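
/*
** On PA 1.1 there is no DMA address translation hardware in the path,
** so devices DMA directly to physical memory and any mask a driver
** asks for is acceptable here.
*/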

static int pa11_dma_supported( struct device *dev, u64 mask)
{
	return 1;
}
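
/*
** map_pte_uncached/map_pmd_uncached/map_uncached_pages walk the kernel
** page tables and install PAGE_KERNEL_UNC (uncached) translations for a
** virtual range in the PCXL DMA window, purging the kernel TLB entry
** for each page as they go.  They mirror the usual pgd/pmd/pte
** three-level walk.
*/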

static inline int map_pte_uncached(pte_t * pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		purge_tlb_start();
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end();
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}

static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}

static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(NULL, dir, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}
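
/*
** The unmap_* walkers below undo the mappings created above: clear each
** PTE, purge the corresponding kernel TLB entry, and complain if a
** kernel PTE was somehow neither empty nor present.
*/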

static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t page = *pte;
		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start();
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end();
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}

static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}

static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}
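
/*
** PCXL_SEARCH_LOOP scans the resource map in chunks of 8, 16 or 32 bits
** looking for a chunk in which every bit covered by "mask" is clear; on
** success it claims those bits and jumps to resource_found.
** PCXL_FIND_FREE_MAPPING tries from the hint first, then wraps around
** and retries from the start of the map.
*/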

#define PCXL_SEARCH_LOOP(idx, mask, size)  \
	for(; res_ptr < res_end; ++res_ptr) \
	{ \
		if(0 == ((*res_ptr) & mask)) { \
			*res_ptr |= mask; \
			idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
			pcxl_res_hint = idx + (size >> 3); \
			goto resource_found; \
		} \
	}

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
	res_ptr = (u##size *)&pcxl_res_map[0]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
}
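
/*
** Worked example (illustrative only): a request for 3 pages builds
** mask = (u_long)-1 >> (BITS_PER_LONG - 3) = 0b111 and scans the map a
** byte (u8) at a time.  A byte such as 0xf8 has its low three bits
** free, so it becomes 0xff, res_idx is that byte's offset into the map,
** and the returned vaddr is pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)),
** since each map byte covers eight pages.
*/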

unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}

#define PCXL_FREE_MAPPINGS(idx, m, size) \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
	/* BUG_ON((*res_ptr & m) != m); */ \
	*res_ptr &= ~m;

/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}

static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	spin_lock_init(&pcxl_res_lock);
	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("gsc", 0);
	create_proc_info_entry("dino", 0, proc_gsc_root, pcxl_proc_info);
	return 0;
}

__initcall(pcxl_dma_init);
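
/*
** Allocate a DMA-consistent buffer: grab physical pages, flush any
** cached copies, then alias them uncached inside the PCXL DMA window so
** CPU and device always see the same data.  The dma_handle handed back
** is the physical address; the CPU uses the uncached virtual alias.
*/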

static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flag)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(flag, order);
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

#if 0
	/* This probably isn't needed to support EISA cards.
	** ISA cards will certainly only support 24-bit DMA addressing.
	** Not clear if we can, want, or need to support ISA.
	*/
	if (!dev || *dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;
#endif
	return (void *)vaddr;
}

static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);
	free_pages((unsigned long)__va(dma_handle), order);
}
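
/*
** Streaming mappings: with no IOMMU, the dma address is simply the
** physical address.  The cache is flushed before the device sees the
** buffer; on unmap, a DMA_FROM_DEVICE buffer is flushed again in case
** a dma_sync_* call let the CPU pull stale lines back into the cache.
*/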

static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE) {
		printk(KERN_ERR "pa11_dma_map_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
		BUG();
	}

	flush_kernel_dcache_range((unsigned long) addr, size);
	return virt_to_phys(addr);
}

static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE) {
		printk(KERN_ERR "pa11_dma_unmap_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
		BUG();
	}

	if (direction == DMA_TO_DEVICE)
		return;

	/*
	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
	 * simple map/unmap case. However, it IS necessary if
	 * pci_dma_sync_single_* has been called and the buffer reused.
	 */

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
	return;
}

static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	if (direction == DMA_NONE)
		BUG();

	for (i = 0; i < nents; i++, sglist++ ) {
		unsigned long vaddr = sg_virt_addr(sglist);
		sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(vaddr);
		sg_dma_len(sglist) = sglist->length;
		flush_kernel_dcache_range(vaddr, sglist->length);
	}
	return nents;
}

static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	if (direction == DMA_NONE)
		BUG();

	if (direction == DMA_TO_DEVICE)
		return;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
	return;
}
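
/*
** The sync operations all reduce to the same dcache flush:
** flush_kernel_dcache_range() writes dirty lines back and invalidates
** them, which on this hardware serves both the for_cpu and for_device
** directions.
*/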

static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		BUG();

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}

static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		BUG();

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}

static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}

static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}
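
/*
** pcxl_dma_ops serves PA7100LC (PCX-L) and PA7300LC (PCX-L2) machines,
** whose PCXL DMA window lets us hand out genuinely consistent
** (uncached) buffers.
*/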

struct hppa_dma_ops pcxl_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc_consistent =	pa11_dma_alloc_consistent,
	.alloc_noncoherent =	pa11_dma_alloc_consistent,
	.free_consistent =	pa11_dma_free_consistent,
	.map_single =		pa11_dma_map_single,
	.unmap_single =		pa11_dma_unmap_single,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device =	pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu =		pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};

static void *fail_alloc_consistent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, int flag)
{
	return NULL;
}

static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, int flag)
{
	void *addr = NULL;

	/* rely on kmalloc to be cacheline aligned */
	addr = kmalloc(size, flag);
	if(addr)
		*dma_handle = (dma_addr_t)virt_to_phys(addr);

	return addr;
}

static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
					void *vaddr, dma_addr_t iova)
{
	kfree(vaddr);
	return;
}
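
/*
** pcx_dma_ops serves PA 1.1 CPUs without the PCXL uncached window:
** consistent allocations simply fail, so drivers must fall back to
** noncoherent buffers plus explicit sync operations.
*/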

struct hppa_dma_ops pcx_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc_consistent =	fail_alloc_consistent,
	.alloc_noncoherent =	pa11_dma_alloc_noncoherent,
	.free_consistent =	pa11_dma_free_noncoherent,
	.map_single =		pa11_dma_map_single,
	.unmap_single =		pa11_dma_unmap_single,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device =	pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu =		pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};
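
/*
** Illustrative usage (hypothetical driver snippet, not part of this
** file): the platform setup code elsewhere in the port installs one of
** the two ops tables above as the global hppa_dma_ops pointer, and
** drivers reach it through the generic DMA API, e.g.:
**
**	dma_addr_t handle;
**	void *buf = dma_alloc_coherent(&pdev->dev, 4096, &handle, GFP_KERNEL);
**	if (buf) {
**		... program the device with "handle" ...
**		dma_free_coherent(&pdev->dev, 4096, buf, handle);
**	}
**
** On a PCX machine dma_alloc_coherent() returns NULL (via
** fail_alloc_consistent), so a driver would use dma_alloc_noncoherent()
** plus the dma_sync_* calls instead.
*/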

static int pcxl_proc_info(char *buf, char **start, off_t offset, int len)
{
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
	unsigned long total_pages = pcxl_res_size << 3;	/* 8 bits per byte */

	sprintf(buf, "\nDMA Mapping Area size : %d bytes (%d pages)\n",
		PCXL_DMA_MAP_SIZE,
		(pcxl_res_size << 3) ); /* 1 bit per page */

	sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n",
		buf, pcxl_res_size, pcxl_res_size << 3); /* 8 bits per byte */

	strcat(buf,  "        total:    free:    used:   % used:\n");
	sprintf(buf, "%sblocks  %8d %8ld %8ld %8ld%%\n", buf, pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	sprintf(buf, "%spages   %8ld %8ld %8ld %8ld%%\n", buf, total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

	strcat(buf, "\nResource bitmap:");

	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			strcat(buf,"\n       ");
		sprintf(buf, "%s %08lx", buf, *res_ptr);
	}

	strcat(buf, "\n");
	return strlen(buf);
}