/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>
/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
/* srmmu.c */
extern int viking_mxcc_present;
extern int flush_page_for_dma_global;
static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */
#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
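/*
 * MKIOPTE() shifts the pfn into the IOPTE page-number field, ORs in
 * the permission bits, and clears the "write as zero" bits that the
 * hardware requires to read back as zero.  E.g. MKIOPTE(pfn, IOPERM)
 * builds a valid, writable, cacheable entry for one page.
 */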
static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
66 prom_printf("Unable to allocate iommu structure\n");
	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}
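	/*
	 * Read back the implementation/version fields, then program the
	 * control register: select the 256MB DVMA range, set the enable
	 * bit, and flush the hardware IOTLB.
	 */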
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;
	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need 256K or 512K or 1M or 2M area aligned to
	   its size and current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;
	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES * sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();
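	/* The hardware base register takes the table's physical address >> 4. */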
	iommu->regs->base = __pa((unsigned long)iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);
	bitmap = kmalloc(IOMMU_NPTES >> 3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES >> 3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;
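	/*
	 * For example, a 256KB virtually indexed cache with 4KB pages
	 * gives 64 colors; the bitmap allocator will then only hand out
	 * DVMA ranges whose color matches the physical pages mapped.
	 */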
	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES * sizeof(iopte_t)), (int)IOMMU_NPTES);
	op->dev.archdata.iommu = iommu;
}
static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte * sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}
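/*
 * Map npages contiguous struct pages into the DVMA window and return
 * the bus address of the first one.  The pfn is passed as the desired
 * color so that, on virtually indexed caches, the DVMA alias gets the
 * same cache color as the physical page.
 */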
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}
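/*
 * Map a possibly misaligned kernel buffer: whole pages are entered in
 * the IOMMU and the intra-page offset is added back to the returned
 * bus address.
 */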
static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
{
	unsigned long off;
	int npages;
	struct page *page;
	u32 busa;

	off = (unsigned long)vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
	busa = iommu_get_one(dev, page, npages);
	return busa + off;
}
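/*
 * Two flush strategies: where flush_page_for_dma() flushes the whole
 * cache no matter which page it is given, a single call suffices (the
 * "gflush" variants); otherwise every page of the buffer has to be
 * flushed individually (the "pflush" variants).
 */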
static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
{
	flush_page_for_dma(0);
	return iommu_get_scsi_one(dev, vaddr, len);
}
static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
{
	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;

	while (page < ((unsigned long)(vaddr + len))) {
		flush_page_for_dma(page);
		page += PAGE_SIZE;
	}
	return iommu_get_scsi_one(dev, vaddr, len);
}
static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	flush_page_for_dma(0);
	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}
static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages to be not in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}
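/*
 * Teardown: clear the IOPTEs, invalidate the matching hardware IOTLB
 * entries page by page, then give the range back to the bitmap.
 */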
static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}
static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	unsigned long off;
	int npages;

	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
}
static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
		sg->dma_address = 0x21212121;	/* poison the stale handle */
		sg = sg_next(sg);
	}
}
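/*
 * Consistent ("DVMA") mappings: the pages are remapped in the kernel
 * page tables with dvma_prot (non-cacheable on non-coherent cpus) and
 * entered into the IOMMU page table, so cpu and device see the same
 * data without per-transfer flushing.
 */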
#ifdef CONFIG_SBUS
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
			      unsigned long addr, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long page, end;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}
static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	unsigned long end;
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}
#endif
static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
	.get_scsi_one		= iommu_get_scsi_one_gflush,
	.get_scsi_sgl		= iommu_get_scsi_sgl_gflush,
	.release_scsi_one	= iommu_release_scsi_one,
	.release_scsi_sgl	= iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iommu_map_dma_area,
	.unmap_dma_area		= iommu_unmap_dma_area,
#endif
};
static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
	.get_scsi_one		= iommu_get_scsi_one_pflush,
	.get_scsi_sgl		= iommu_get_scsi_sgl_pflush,
	.release_scsi_one	= iommu_release_scsi_one,
	.release_scsi_sgl	= iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iommu_map_dma_area,
	.unmap_dma_area		= iommu_unmap_dma_area,
#endif
};
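/*
 * Called once during MMU setup: pick the op table matching how
 * flush_page_for_dma() behaves on this cpu, and precompute the
 * consistent-mapping protections for the cpu type.
 */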
void __init ld_mmu_iommu(void)
{
	if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, regardless of which page it is */
		sparc32_dma_ops = &iommu_dma_gflush_ops;
	} else {
		sparc32_dma_ops = &iommu_dma_pflush_ops;
	}

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}