/*
 * ioport.c:  Simple io mapping allocator.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *
 * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev.
 *
 * 2000/01/29
 * <rth> zait: as long as pci_alloc_consistent produces something addressable,
 *	things are ok.
 * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a
 *	pointer into the big page mapping
 * <rth> zait: so what?
 * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page()))
 * <zaitcev> Hmm
 * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())).
 *	So far so good.
 * <zaitcev> Now, driver calls pci_free_consistent(with result of
 *	remap_it_my_way()).
 * <zaitcev> How do you find the address to pass to free_pages()?
 * <rth> zait: walk the page tables?  It's only two or three level after all.
 * <rth> zait: you have to walk them anyway to remove the mapping.
 * <zaitcev> Hmm
 * <zaitcev> Sounds reasonable
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pci.h>		/* struct pci_dev */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/io-unit.h>
#include <asm/leon.h>

const struct sparc32_dma_ops *sparc32_dma_ops;

/* This function must make sure that caches and memory are coherent after DMA
 * On LEON systems without cache snooping it flushes the entire D-CACHE.
 */
#ifndef CONFIG_SPARC_LEON
static inline void dma_make_coherent(unsigned long pa, unsigned long len)
{
}
#else
static inline void dma_make_coherent(unsigned long pa, unsigned long len)
{
	if (!sparc_leon3_snooping_enabled())
		leon_flush_dcache_all();
}
#endif
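
/*
 * Note: dma_make_coherent() is called from the pci32_* unmap/sync routines
 * below whenever the device may have written to memory (any direction other
 * than PCI_DMA_TODEVICE). On a LEON without cache snooping this degenerates
 * to a full D-cache flush, so streaming syncs are expensive there.
 */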

static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name);
static void _sparc_free_io(struct resource *res);

static void register_proc_sparc_ioport(void);

/* This points to the next to use virtual memory for DVMA mappings */
static struct resource _sparc_dvma = {
	.name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1
};
/* This points to the start of I/O mappings, usable from outside. */
/*ext*/ struct resource sparc_iomap = {
	.name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1
};

/*
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before the kmalloc is available.
 */

#define XNMLN  15
#define XNRES  10	/* SS-10 uses 8 */

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];
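
/*
 * Entries handed out of xresv[] are recognized on free by an address range
 * check in iounmap() below; any resource outside the array must have come
 * from kmalloc() and is kfree()d instead.
 */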

static struct xresource *xres_alloc(void) {
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}

static void xres_free(struct xresource *xrp) {
	xrp->xflag = 0;
}

/*
 * These are typically used in PCI drivers
 * which are trying to be cross-platform.
 *
 * Bus type is always zero on IIep.
 */
void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	char name[14];

	sprintf(name, "phys_%08x", (u32)offset);
	return _sparc_alloc_io(0, offset, size, name);
}
EXPORT_SYMBOL(ioremap);

/*
 * Complementary to ioremap().
 */
void iounmap(volatile void __iomem *virtual)
{
	unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
	struct resource *res;

	/*
	 * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case.
	 * This probably warrants some sort of hashing.
	 */
	if ((res = lookup_resource(&sparc_iomap, vaddr)) == NULL) {
		printk("free_io/iounmap: cannot free %lx\n", vaddr);
		return;
	}
	_sparc_free_io(res);

	if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}
EXPORT_SYMBOL(iounmap);

void __iomem *of_ioremap(struct resource *res, unsigned long offset,
			 unsigned long size, char *name)
{
	return _sparc_alloc_io(res->flags & 0xF,
			       res->start + offset,
			       size, name);
}
EXPORT_SYMBOL(of_ioremap);

void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
	iounmap(base);
}
EXPORT_SYMBOL(of_iounmap);

/*
 * Meat of mapping
 */
static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
    unsigned long size, char *name)
{
	static int printed_full;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;
	void __iomem *va;	/* P3 diag */

	if (name == NULL) name = "???";

	if ((xres = xres_alloc()) != NULL) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("ioremap: done with statics, switching to malloc\n");
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
		if (tack == NULL) return NULL;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof (struct resource);
	}

	strlcpy(tack, name, XNMLN+1);
	res->name = tack;

	va = _sparc_ioremap(res, busno, phys, size);
	/* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */
	return va;
}

/*
 * Does the mapping proper: accepts a page-unaligned physical address and
 * carries the intra-page offset over to the returned virtual address.
 */
static void __iomem *
_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);

	if (allocate_resource(&sparc_iomap, res,
	    (offset + sz + PAGE_SIZE-1) & PAGE_MASK,
	    sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) {
		/* Usually we cannot see printks in this case. */
		prom_printf("alloc_io_res(%s): cannot occupy\n",
		    (res->name != NULL) ? res->name : "???");
		prom_halt();
	}

	pa &= PAGE_MASK;
	srmmu_mapiorange(bus, pa, res->start, resource_size(res));

	return (void __iomem *)(unsigned long)(res->start + offset);
}

/*
 * Complementary to _sparc_ioremap().
 */
static void _sparc_free_io(struct resource *res)
{
	unsigned long plen;

	plen = resource_size(res);
	BUG_ON((plen & (PAGE_SIZE-1)) != 0);
	srmmu_unmapiorange(res->start, plen);
	release_resource(res);
}

#ifdef CONFIG_SBUS

void sbus_set_sbus64(struct device *dev, int x)
{
	printk("sbus_set_sbus64: unsupported\n");
}
EXPORT_SYMBOL(sbus_set_sbus64);

/*
 * Allocate a chunk of memory suitable for DMA.
 * Typically devices use them for control blocks.
 * CPU may access them without any explicit flushing.
 */
static void *sbus_alloc_coherent(struct device *dev, size_t len,
				 dma_addr_t *dma_addrp, gfp_t gfp,
				 struct dma_attrs *attrs)
{
	struct platform_device *op = to_platform_device(dev);
	unsigned long len_total = PAGE_ALIGN(len);
	unsigned long va;
	struct resource *res;
	int order;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return NULL;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0)
		goto err_nopages;

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL)
		goto err_nomem;

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
		goto err_nova;
	}

	// XXX The sbus_map_dma_area does this for us below, see comments.
	// srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
	/*
	 * XXX That's where sdev would be used. Currently we load
	 * all iommu tables with the same translations.
	 */
	if (sbus_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)
		goto err_noiommu;

	res->name = op->dev.of_node->name;

	return (void *)(unsigned long)res->start;

err_noiommu:
	release_resource(res);
err_nova:
	kfree(res);
err_nomem:
	free_pages(va, order);
err_nopages:
	return NULL;
}

static void sbus_free_coherent(struct device *dev, size_t n, void *p,
			       dma_addr_t ba, struct dma_attrs *attrs)
{
	struct resource *res;
	struct page *pgv;

	if ((res = lookup_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("sbus_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("sbus_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = PAGE_ALIGN(n);
	if (resource_size(res) != n) {
		printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
		    (long)resource_size(res), n);
		return;
	}

	release_resource(res);
	kfree(res);

	pgv = virt_to_page(p);
	sbus_unmap_dma_area(dev, ba, n);

	__free_pages(pgv, get_order(n));
}

/*
 * Map a chunk of memory so that devices can see it.
 * CPU view of this memory may be inconsistent with
 * a device view and explicit flushing is necessary.
 */
static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t len,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	void *va = page_address(page) + offset;

	/* XXX why are some lengths signed, others unsigned? */
	if (len <= 0) {
		return 0;
	}
	/* XXX So what is maxphys for us and how do drivers know it? */
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return 0;
	}
	return mmu_get_scsi_one(dev, va, len);
}

static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
			    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_one(dev, ba, n);
}

static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_get_scsi_sgl(dev, sg, n);
	return n;
}

static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	mmu_release_scsi_sgl(dev, sg, n);
}

static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
				 int n, enum dma_data_direction dir)
{
	BUG();
}

static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				    int n, enum dma_data_direction dir)
{
	BUG();
}

struct dma_map_ops sbus_dma_ops = {
	.alloc			= sbus_alloc_coherent,
	.free			= sbus_free_coherent,
	.map_page		= sbus_map_page,
	.unmap_page		= sbus_unmap_page,
	.map_sg			= sbus_map_sg,
	.unmap_sg		= sbus_unmap_sg,
	.sync_sg_for_cpu	= sbus_sync_sg_for_cpu,
	.sync_sg_for_device	= sbus_sync_sg_for_device,
};

static int __init sparc_register_ioport(void)
{
	register_proc_sparc_ioport();

	return 0;
}

arch_initcall(sparc_register_ioport);

#endif /* CONFIG_SBUS */

/* LEON reuses PCI DMA ops */
#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be valid struct pci_dev pointer for PCI devices.
 */
static void *pci32_alloc_coherent(struct device *dev, size_t len,
				  dma_addr_t *pba, gfp_t gfp,
				  struct dma_attrs *attrs)
{
	unsigned long len_total = PAGE_ALIGN(len);
	void *va;
	struct resource *res;
	int order;

	if (len == 0) {
		return NULL;
	}
	if (len > 256*1024) {			/* __get_free_pages() limit */
		return NULL;
	}

	order = get_order(len_total);
	va = (void *) __get_free_pages(GFP_KERNEL, order);
	if (va == NULL) {
		printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
		goto err_nopages;
	}

	if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
		printk("pci_alloc_consistent: no core\n");
		goto err_nomem;
	}

	if (allocate_resource(&_sparc_dvma, res, len_total,
	    _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
		printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
		goto err_nova;
	}
	srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);

	*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
	return (void *) res->start;

err_nova:
	kfree(res);
err_nomem:
	free_pages((unsigned long)va, order);
err_nopages:
	return NULL;
}

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
static void pci32_free_coherent(struct device *dev, size_t n, void *p,
				dma_addr_t ba, struct dma_attrs *attrs)
{
	struct resource *res;

	if ((res = lookup_resource(&_sparc_dvma,
	    (unsigned long)p)) == NULL) {
		printk("pci_free_consistent: cannot free %p\n", p);
		return;
	}

	if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
		printk("pci_free_consistent: unaligned va %p\n", p);
		return;
	}

	n = PAGE_ALIGN(n);
	if (resource_size(res) != n) {
		printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
		    (long)resource_size(res), (long)n);
		return;
	}

	dma_make_coherent(ba, n);
	srmmu_unmapiorange((unsigned long)p, n);

	release_resource(res);
	kfree(res);
	free_pages((unsigned long)phys_to_virt(ba), get_order(n));
}

/*
 * Same as pci_map_single, but with pages.
 */
static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	/* IIep is write-through, not flushing. */
	return page_to_phys(page) + offset;
}

static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
			     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	if (dir != PCI_DMA_TODEVICE)
		dma_make_coherent(ba, PAGE_ALIGN(size));
}

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	/* IIep is write-through, not flushing. */
	for_each_sg(sgl, sg, nents, n) {
		sg->dma_address = sg_phys(sg);
		sg->dma_length = sg->length;
	}
	return nents;
}

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
		}
	}
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation before or after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a pci_dma_sync_for_device, and then the
 * device again owns the buffer.
 */
static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
				      size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
		dma_make_coherent(ba, PAGE_ALIGN(size));
	}
}

static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
					 size_t size, enum dma_data_direction dir)
{
	if (dir != PCI_DMA_TODEVICE) {
		dma_make_coherent(ba, PAGE_ALIGN(size));
	}
}

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
				  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
		}
	}
}

static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
				     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int n;

	if (dir != PCI_DMA_TODEVICE) {
		for_each_sg(sgl, sg, nents, n) {
			dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
		}
	}
}

struct dma_map_ops pci32_dma_ops = {
	.alloc			= pci32_alloc_coherent,
	.free			= pci32_free_coherent,
	.map_page		= pci32_map_page,
	.unmap_page		= pci32_unmap_page,
	.map_sg			= pci32_map_sg,
	.unmap_sg		= pci32_unmap_sg,
	.sync_single_for_cpu	= pci32_sync_single_for_cpu,
	.sync_single_for_device	= pci32_sync_single_for_device,
	.sync_sg_for_cpu	= pci32_sync_sg_for_cpu,
	.sync_sg_for_device	= pci32_sync_sg_for_device,
};
EXPORT_SYMBOL(pci32_dma_ops);
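
/*
 * Drivers never call these ops directly; they go through the generic DMA
 * API, which dispatches via dma_ops. A minimal sketch (hypothetical PCI
 * device, error handling elided):
 *
 *	dma_addr_t ba;
 *	void *cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ba, GFP_KERNEL);
 *	// ... program the device with "ba", access the buffer via "cpu" ...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, cpu, ba);
 */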

#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */

#ifdef CONFIG_SPARC_LEON
struct dma_map_ops *dma_ops = &pci32_dma_ops;
#elif defined(CONFIG_SBUS)
struct dma_map_ops *dma_ops = &sbus_dma_ops;
#endif

EXPORT_SYMBOL(dma_ops);

/*
 * Return whether the given PCI device DMA address mask can be
 * supported properly.  For example, if your device can only drive the
 * low 24-bits during PCI bus mastering, then you would pass
 * 0x00ffffff as the mask to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return 1;
#endif
	return 0;
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_PROC_FS

static int sparc_io_proc_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private, *r;
	const char *nm;

	for (r = root->child; r != NULL; r = r->sibling) {
		if ((nm = r->name) == NULL)
			nm = "???";
		seq_printf(m, "%016llx-%016llx: %s\n",
				(unsigned long long)r->start,
				(unsigned long long)r->end, nm);
	}

	return 0;
}

static int sparc_io_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sparc_io_proc_show, PDE(inode)->data);
}

static const struct file_operations sparc_io_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sparc_io_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS */

static void register_proc_sparc_ioport(void)
{
#ifdef CONFIG_PROC_FS
	proc_create_data("io_map", 0, NULL, &sparc_io_proc_fops, &sparc_iomap);
	proc_create_data("dvma_map", 0, NULL, &sparc_io_proc_fops, &_sparc_dvma);
#endif
}