/* $Id: io-unit.c,v 1.22 2000/08/09 00:00:15 davem Exp $
 * io-unit.c: IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
7 #include <linux/config.h>
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/malloc.h>
11 #include <linux/spinlock.h>
12 #include <asm/scatterlist.h>
13 #include <asm/pgalloc.h>
14 #include <asm/pgtable.h>
17 #include <asm/io-unit.h>
19 #include <asm/bitops.h>
/* #define IOUNIT_DEBUG */
/*
 * IOD(): debug trace macro.  The paste had lost the preprocessor guards,
 * leaving IOD defined twice; restore the conditional so the printk variant
 * is only compiled when IOUNIT_DEBUG is defined.
 */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

/* Permission bits every IO-UNIT pte we create gets: cacheable + writable + valid. */
#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
/* Build an IO-UNIT pte from a physical address (phys >> 4 is the page field). */
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
32 iounit_init(int sbi_node
, int io_node
, struct sbus_bus
*sbus
)
34 iopte_t
*xpt
, *xptend
;
35 struct iounit_struct
*iounit
;
36 struct linux_prom_registers iommu_promregs
[PROMREG_MAX
];
39 iounit
= kmalloc(sizeof(struct iounit_struct
), GFP_ATOMIC
);
41 memset(iounit
, 0, sizeof(*iounit
));
42 iounit
->limit
[0] = IOUNIT_BMAP1_START
;
43 iounit
->limit
[1] = IOUNIT_BMAP2_START
;
44 iounit
->limit
[2] = IOUNIT_BMAPM_START
;
45 iounit
->limit
[3] = IOUNIT_BMAPM_END
;
46 iounit
->rotor
[1] = IOUNIT_BMAP2_START
;
47 iounit
->rotor
[2] = IOUNIT_BMAPM_START
;
49 prom_getproperty(sbi_node
, "reg", (void *) iommu_promregs
,
50 sizeof(iommu_promregs
));
51 prom_apply_generic_ranges(io_node
, 0, iommu_promregs
, 3);
52 memset(&r
, 0, sizeof(r
));
53 r
.flags
= iommu_promregs
[2].which_io
;
54 r
.start
= iommu_promregs
[2].phys_addr
;
55 xpt
= (iopte_t
*) sbus_ioremap(&r
, 0, PAGE_SIZE
* 16, "XPT");
56 if(!xpt
) panic("Cannot map External Page Table.");
58 sbus
->iommu
= (struct iommu_struct
*)iounit
;
59 iounit
->page_table
= xpt
;
61 for (xptend
= iounit
->page_table
+ (16 * PAGE_SIZE
) / sizeof(iopte_t
);
66 /* One has to hold iounit->lock to call this */
67 static unsigned long iounit_get_area(struct iounit_struct
*iounit
, unsigned long vaddr
, int size
)
70 unsigned long rotor
, scan
, limit
;
73 npages
= ((vaddr
& ~PAGE_MASK
) + size
+ (PAGE_SIZE
-1)) >> PAGE_SHIFT
;
75 /* A tiny bit of magic ingredience :) */
77 case 1: i
= 0x0231; break;
78 case 2: i
= 0x0132; break;
79 default: i
= 0x0213; break;
82 IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr
, size
, npages
));
85 rotor
= iounit
->rotor
[j
- 1];
86 limit
= iounit
->limit
[j
];
88 nexti
: scan
= find_next_zero_bit(iounit
->bmap
, limit
, scan
);
89 if (scan
+ npages
> limit
) {
92 scan
= iounit
->limit
[j
- 1];
97 panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr
, size
);
100 for (k
= 1, scan
++; k
< npages
; k
++)
101 if (test_bit(scan
++, iounit
->bmap
))
103 iounit
->rotor
[j
- 1] = (scan
< limit
) ? scan
: iounit
->limit
[j
- 1];
105 iopte
= MKIOPTE(__pa(vaddr
& PAGE_MASK
));
106 vaddr
= IOUNIT_DMA_BASE
+ (scan
<< PAGE_SHIFT
) + (vaddr
& ~PAGE_MASK
);
107 for (k
= 0; k
< npages
; k
++, iopte
= __iopte(iopte_val(iopte
) + 0x100), scan
++) {
108 set_bit(scan
, iounit
->bmap
);
109 iounit
->page_table
[scan
] = iopte
;
111 IOD(("%08lx\n", vaddr
));
115 static __u32
iounit_get_scsi_one(char *vaddr
, unsigned long len
, struct sbus_bus
*sbus
)
117 unsigned long ret
, flags
;
118 struct iounit_struct
*iounit
= (struct iounit_struct
*)sbus
->iommu
;
120 spin_lock_irqsave(&iounit
->lock
, flags
);
121 ret
= iounit_get_area(iounit
, (unsigned long)vaddr
, len
);
122 spin_unlock_irqrestore(&iounit
->lock
, flags
);
126 static void iounit_get_scsi_sgl(struct scatterlist
*sg
, int sz
, struct sbus_bus
*sbus
)
129 struct iounit_struct
*iounit
= (struct iounit_struct
*)sbus
->iommu
;
131 /* FIXME: Cache some resolved pages - often several sg entries are to the same page */
132 spin_lock_irqsave(&iounit
->lock
, flags
);
133 for (; sz
>= 0; sz
--) {
134 sg
[sz
].dvma_address
= iounit_get_area(iounit
, (unsigned long)sg
[sz
].address
, sg
[sz
].length
);
135 sg
[sz
].dvma_length
= sg
[sz
].length
;
137 spin_unlock_irqrestore(&iounit
->lock
, flags
);
140 static void iounit_release_scsi_one(__u32 vaddr
, unsigned long len
, struct sbus_bus
*sbus
)
143 struct iounit_struct
*iounit
= (struct iounit_struct
*)sbus
->iommu
;
145 spin_lock_irqsave(&iounit
->lock
, flags
);
146 len
= ((vaddr
& ~PAGE_MASK
) + len
+ (PAGE_SIZE
-1)) >> PAGE_SHIFT
;
147 vaddr
= (vaddr
- IOUNIT_DMA_BASE
) >> PAGE_SHIFT
;
148 IOD(("iounit_release %08lx-%08lx\n", (long)vaddr
, (long)len
+vaddr
));
149 for (len
+= vaddr
; vaddr
< len
; vaddr
++)
150 clear_bit(vaddr
, iounit
->bmap
);
151 spin_unlock_irqrestore(&iounit
->lock
, flags
);
154 static void iounit_release_scsi_sgl(struct scatterlist
*sg
, int sz
, struct sbus_bus
*sbus
)
157 unsigned long vaddr
, len
;
158 struct iounit_struct
*iounit
= (struct iounit_struct
*)sbus
->iommu
;
160 spin_lock_irqsave(&iounit
->lock
, flags
);
161 for (; sz
>= 0; sz
--) {
162 len
= ((sg
[sz
].dvma_address
& ~PAGE_MASK
) + sg
[sz
].length
+ (PAGE_SIZE
-1)) >> PAGE_SHIFT
;
163 vaddr
= (sg
[sz
].dvma_address
- IOUNIT_DMA_BASE
) >> PAGE_SHIFT
;
164 IOD(("iounit_release %08lx-%08lx\n", (long)vaddr
, (long)len
+vaddr
));
165 for (len
+= vaddr
; vaddr
< len
; vaddr
++)
166 clear_bit(vaddr
, iounit
->bmap
);
168 spin_unlock_irqrestore(&iounit
->lock
, flags
);
172 static void iounit_map_dma_area(unsigned long va
, __u32 addr
, int len
)
174 unsigned long page
, end
;
177 struct sbus_bus
*sbus
;
179 dvma_prot
= __pgprot(SRMMU_CACHE
| SRMMU_ET_PTE
| SRMMU_PRIV
);
180 end
= PAGE_ALIGN((addr
+ len
));
189 pgdp
= pgd_offset(init_task
.mm
, addr
);
190 pmdp
= pmd_offset(pgdp
, addr
);
191 ptep
= pte_offset(pmdp
, addr
);
193 set_pte(ptep
, pte_val(mk_pte(virt_to_page(page
), dvma_prot
)));
195 i
= ((addr
- IOUNIT_DMA_BASE
) >> PAGE_SHIFT
);
197 for_each_sbus(sbus
) {
198 struct iounit_struct
*iounit
= (struct iounit_struct
*)sbus
->iommu
;
200 iopte
= (iopte_t
*)(iounit
->page_table
+ i
);
201 *iopte
= __iopte(MKIOPTE(__pa(page
)));
/*
 * Tear down a permanent DVMA mapping.  Intentionally unimplemented in this
 * kernel version (see the XXX below); restored braces only.
 */
static void iounit_unmap_dma_area(unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}
216 /* XXX We do not pass sbus device here, bad. */
217 static unsigned long iounit_translate_dvma(unsigned long addr
)
219 struct sbus_bus
*sbus
= sbus_root
; /* They are all the same */
220 struct iounit_struct
*iounit
= (struct iounit_struct
*)sbus
->iommu
;
224 i
= ((addr
- IOUNIT_DMA_BASE
) >> PAGE_SHIFT
);
225 iopte
= (iopte_t
*)(iounit
->page_table
+ i
);
226 return (iopte_val(*iopte
) & 0xFFFFFFF0) << 4; /* XXX sun4d guru, help */
/*
 * Pin a buffer for DMA.  A no-op on IO-UNIT hardware: the address is
 * returned unchanged (and ld_mmu_iounit wires this slot to
 * BTFIXUPCALL_RETO0 anyway).  The non-void return was missing from the
 * damaged source; restored.
 */
static char *iounit_lockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
	return vaddr;
}
/*
 * Unpin a buffer after DMA.  No-op counterpart of iounit_lockarea
 * (patched to BTFIXUPCALL_NOP in ld_mmu_iounit); restored braces only.
 */
static void iounit_unlockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
}
241 void __init
ld_mmu_iounit(void)
243 BTFIXUPSET_CALL(mmu_lockarea
, iounit_lockarea
, BTFIXUPCALL_RETO0
);
244 BTFIXUPSET_CALL(mmu_unlockarea
, iounit_unlockarea
, BTFIXUPCALL_NOP
);
246 BTFIXUPSET_CALL(mmu_get_scsi_one
, iounit_get_scsi_one
, BTFIXUPCALL_NORM
);
247 BTFIXUPSET_CALL(mmu_get_scsi_sgl
, iounit_get_scsi_sgl
, BTFIXUPCALL_NORM
);
248 BTFIXUPSET_CALL(mmu_release_scsi_one
, iounit_release_scsi_one
, BTFIXUPCALL_NORM
);
249 BTFIXUPSET_CALL(mmu_release_scsi_sgl
, iounit_release_scsi_sgl
, BTFIXUPCALL_NORM
);
252 BTFIXUPSET_CALL(mmu_map_dma_area
, iounit_map_dma_area
, BTFIXUPCALL_NORM
);
253 BTFIXUPSET_CALL(mmu_unmap_dma_area
, iounit_unmap_dma_area
, BTFIXUPCALL_NORM
);
254 BTFIXUPSET_CALL(mmu_translate_dvma
, iounit_translate_dvma
, BTFIXUPCALL_NORM
);
258 __u32
iounit_map_dma_init(struct sbus_bus
*sbus
, int size
)
261 unsigned long rotor
, scan
, limit
;
264 struct iounit_struct
*iounit
= (struct iounit_struct
*)sbus
->iommu
;
266 npages
= (size
+ (PAGE_SIZE
-1)) >> PAGE_SHIFT
;
268 spin_lock_irqsave(&iounit
->lock
, flags
);
270 rotor
= iounit
->rotor
[j
- 1];
271 limit
= iounit
->limit
[j
];
273 nexti
: scan
= find_next_zero_bit(iounit
->bmap
, limit
, scan
);
274 if (scan
+ npages
> limit
) {
275 if (limit
!= rotor
) {
277 scan
= iounit
->limit
[j
- 1];
282 panic("iounit_map_dma_init: Couldn't find free iopte slots for %d bytes\n", size
);
285 for (k
= 1, scan
++; k
< npages
; k
++)
286 if (test_bit(scan
++, iounit
->bmap
))
288 iounit
->rotor
[j
- 1] = (scan
< limit
) ? scan
: iounit
->limit
[j
- 1];
290 ret
= IOUNIT_DMA_BASE
+ (scan
<< PAGE_SHIFT
);
291 for (k
= 0; k
< npages
; k
++, scan
++)
292 set_bit(scan
, iounit
->bmap
);
293 spin_unlock_irqrestore(&iounit
->lock
, flags
);
297 __u32
iounit_map_dma_page(__u32 vaddr
, void *addr
, struct sbus_bus
*sbus
)
299 int scan
= (vaddr
- IOUNIT_DMA_BASE
) >> PAGE_SHIFT
;
300 struct iounit_struct
*iounit
= (struct iounit_struct
*)sbus
->iommu
;
302 iounit
->page_table
[scan
] = MKIOPTE(__pa(((unsigned long)addr
) & PAGE_MASK
));
303 return vaddr
+ (((unsigned long)addr
) & ~PAGE_MASK
);