/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/PCI/PCI-DMA-mapping.txt for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
** (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
*/
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */
#include <asm/io.h>		/* virt_to_phys() */
#include <asm/page.h>		/* get_order */
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */
static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static unsigned long pcxl_used_bytes __read_mostly = 0;
static unsigned long pcxl_used_pages __read_mostly = 0;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static spinlock_t   pcxl_res_lock;
static char    *pcxl_res_map;
static int     pcxl_res_hint;
static int     pcxl_res_size;
#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif
/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif
static int pa11_dma_supported( struct device *dev, u64 mask)
{
	return 1;
}
static inline int map_pte_uncached(pte_t * pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;

		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		purge_tlb_start(flags);
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}
static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}
static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(NULL, dir, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}
static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;
		pte_t page = *pte;

		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start(flags);
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}
static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}
static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}
#define PCXL_SEARCH_LOOP(idx, mask, size)  \
	for(; res_ptr < res_end; ++res_ptr) \
	{ \
		if(0 == ((*res_ptr) & mask)) { \
			*res_ptr |= mask; \
			idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
			pcxl_res_hint = idx + (size >> 3); \
			goto resource_found; \
		} \
	}

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
	res_ptr = (u##size *)&pcxl_res_map[0]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
}
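/*
 * Worked example (illustrative, not from the original source): a
 * request for 3 pages gives mask = ~0UL >> (BITS_PER_LONG - 3) = 0x7.
 * PCXL_FIND_FREE_MAPPING(idx, 0x7, 8) then walks the resource map one
 * byte at a time, starting at the hint, looking for a byte whose low
 * three bits are clear; on a hit it sets those bits (*res_ptr |= mask)
 * and jumps to resource_found with idx holding the byte offset into
 * pcxl_res_map.  Since the mask always occupies the low bits of the
 * u8/u16/u32 search chunk, allocations are chunk-aligned.
 */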
unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}
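/*
 * Worked example of that return value (illustrative, not from the
 * original source): with 4k pages, res_idx = 2 yields
 * pcxl_dma_start + (2 << (12 + 3)), i.e. 64k into the mapping area;
 * byte 2 of the resource map covers pages 16-23.
 */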
#define PCXL_FREE_MAPPINGS(idx, m, size) \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
	/* BUG_ON((*res_ptr & m) != m); */ \
	*res_ptr &= ~m;
/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}
static int proc_pcxl_dma_show(struct seq_file *m, void *v)
{
	unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */

	seq_printf(m, "\nDMA Mapping Area size    : %d bytes (%ld pages)\n",
		PCXL_DMA_MAP_SIZE, total_pages);

	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);

	seq_puts(m,  "            total:    free:    used:   % used:\n");
	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

	return 0;
}
static int proc_pcxl_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_pcxl_dma_show, NULL);
}
static const struct file_operations proc_pcxl_dma_ops = {
	.owner		= THIS_MODULE,
	.open		= proc_pcxl_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	spin_lock_init(&pcxl_res_lock);
	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("gsc", NULL);
	if (!proc_gsc_root)
		printk(KERN_WARNING
			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
	else {
		struct proc_dir_entry* ent;
		ent = proc_create("pcxl_dma", 0, proc_gsc_root,
				  &proc_pcxl_dma_ops);
		if (!ent)
			printk(KERN_WARNING
				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
	}
	return 0;
}

__initcall(pcxl_dma_init);
static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(flag, order);
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

	return (void *)vaddr;
}
static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);
	free_pages((unsigned long)__va(dma_handle), order);
}
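#if 0
/*
 * Illustrative sketch, not part of the original file: how a driver
 * reaches the two routines above through the generic DMA API on a
 * PCXL machine.  The "foo" names are hypothetical and this assumes
 * <linux/dma-mapping.h>.
 */
static int foo_setup_ring(struct device *dev)
{
	dma_addr_t ring_handle;
	void *ring;

	/* ends up in pa11_dma_alloc_consistent(): uncached vaddr for the
	 * CPU, physical address in ring_handle for the device */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_handle, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_handle into the device, touch "ring" freely ... */

	/* ends up in pa11_dma_free_consistent() */
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_handle);
	return 0;
}
#endif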
static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) addr, size);
	return virt_to_phys(addr);
}
static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	/*
	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
	 * simple map/unmap case. However, it IS necessary if
	 * pci_dma_sync_single_* has been called and the buffer reused.
	 */

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
	return;
}
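#if 0
/*
 * Illustrative sketch, not part of the original file: the streaming
 * map/unmap pair above used for a single receive buffer.  If the CPU
 * inspects the buffer while it stays mapped, the sync hook supplies
 * the extra flush the comment above refers to.  "foo" names are
 * hypothetical; assumes <linux/dma-mapping.h>.
 */
static void foo_rx_once(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	/* ... device DMAs into buf ... */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... CPU may now read buf ... */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}
#endif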
static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sglist++ ) {
		unsigned long vaddr = sg_virt_addr(sglist);
		sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(vaddr);
		sg_dma_len(sglist) = sglist->length;
		flush_kernel_dcache_range(vaddr, sglist->length);
	}
	return nents;
}
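#if 0
/*
 * Illustrative sketch, not part of the original file: feeding a
 * two-fragment buffer through pa11_dma_map_sg() via the generic API.
 * "foo" names and sizes are hypothetical; assumes
 * <linux/dma-mapping.h>.
 */
static void foo_tx_fragments(struct device *dev, void *hdr, void *payload)
{
	struct scatterlist sg[2];

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], hdr, 64);
	sg_set_buf(&sg[1], payload, 1500);

	dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
	/* ... hand sg_dma_address()/sg_dma_len() of each entry to the device ... */
	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
}
#endif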
static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
	return;
}
static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}
static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}
static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}
static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}
struct hppa_dma_ops pcxl_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc_consistent =	pa11_dma_alloc_consistent,
	.alloc_noncoherent =	pa11_dma_alloc_consistent,
	.free_consistent =	pa11_dma_free_consistent,
	.map_single =		pa11_dma_map_single,
	.unmap_single =		pa11_dma_unmap_single,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device =	pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu =		pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};
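/*
 * A sketch of how these tables are consumed, assuming the usual
 * PA-RISC boot path (the actual call site lives outside this file):
 * platform setup points the global hppa_dma_ops at pcxl_dma_ops on
 * PA7100LC/PA7300LC, which can map pages uncached, and at pcx_dma_ops
 * (below) on CPUs that cannot, e.g.
 *
 *	hppa_dma_ops = &pcxl_dma_ops;
 */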
static void *fail_alloc_consistent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	return NULL;
}
static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	void *addr;

	addr = (void *)__get_free_pages(flag, get_order(size));
	if (addr)
		*dma_handle = (dma_addr_t)virt_to_phys(addr);

	return addr;
}
static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t iova)
{
	free_pages((unsigned long)vaddr, get_order(size));
	return;
}
struct hppa_dma_ops pcx_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc_consistent =	fail_alloc_consistent,
	.alloc_noncoherent =	pa11_dma_alloc_noncoherent,
	.free_consistent =	pa11_dma_free_noncoherent,
	.map_single =		pa11_dma_map_single,
	.unmap_single =		pa11_dma_unmap_single,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device =	pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu =		pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};