/* $Id: io-unit.c,v 1.14 1999/08/31 06:54:33 davem Exp $
 * io-unit.c: IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
7 #include <linux/config.h>
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/malloc.h>
11 #include <linux/spinlock.h>
12 #include <asm/pgtable.h>
15 #include <asm/io-unit.h>
17 #include <asm/bitops.h>
/* #define IOUNIT_DEBUG */
/*
 * IOD(): debug trace macro.  Guarded so that only ONE definition is
 * active: printk-backed tracing when IOUNIT_DEBUG is defined, a no-op
 * otherwise.  (The two unguarded, conflicting definitions were a macro
 * redefinition; the commented-out IOUNIT_DEBUG toggle above shows the
 * intended conditional.)
 */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

/* Default protection bits for an IO-UNIT page table entry. */
#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
/* Build an iopte from a physical address: phys>>4 masked into the
 * IOUPTE_PAGE field, with the default permissions OR-ed in. */
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
/*
 * iounit_init(): set up the IO-UNIT translation state for one SBUS.
 * Reads the "reg" PROM property, maps the External Page Table (XPT),
 * and initializes the per-IO-UNIT allocation bitmap regions.
 *
 * NOTE(review): this extraction is truncated — the function braces,
 * the assignment of sparc_alloc_io()'s result to xpt, and the tail of
 * the final for loop (which presumably clears the XPT entries — TODO
 * confirm against the original source) are missing.  Comments describe
 * only what the visible code shows.
 */
30 iounit_init(int sbi_node
, int io_node
, struct linux_sbus
*sbus
)
/* xpt/xptend: cursor pair over the External Page Table entries. */
32 iopte_t
*xpt
, *xptend
;
33 struct iounit_struct
*iounit
;
/* Scratch buffer for register ranges read from the PROM device tree. */
34 struct linux_prom_registers iommu_promregs
[PROMREG_MAX
];
/* GFP_ATOMIC: presumably called too early in boot to sleep — TODO
 * confirm.  NOTE(review): the result is not NULL-checked before the
 * memset below. */
36 iounit
= kmalloc(sizeof(struct iounit_struct
), GFP_ATOMIC
);
38 memset(iounit
, 0, sizeof(*iounit
));
/* Partition the DVMA bitmap into regions: limit[] holds the region
 * boundaries, rotor[] the per-region next-fit starting points. */
39 iounit
->limit
[0] = IOUNIT_BMAP1_START
;
40 iounit
->limit
[1] = IOUNIT_BMAP2_START
;
41 iounit
->limit
[2] = IOUNIT_BMAPM_START
;
42 iounit
->limit
[3] = IOUNIT_BMAPM_END
;
43 iounit
->rotor
[1] = IOUNIT_BMAP2_START
;
44 iounit
->rotor
[2] = IOUNIT_BMAPM_START
;
/* Fetch the SBI node's "reg" property and translate it through the
 * parent io_node's ranges to get usable physical addresses. */
46 prom_getproperty(sbi_node
, "reg", (void *) iommu_promregs
,
47 sizeof(iommu_promregs
));
48 prom_apply_generic_ranges(io_node
, 0, iommu_promregs
, 3);
/* Map 16 pages of External Page Table at the third register bank.
 * NOTE(review): the "xpt = (iopte_t *)" assignment was lost in this
 * extraction — the panic below tests xpt, so the result must have been
 * assigned in the original. */
50 sparc_alloc_io(iommu_promregs
[2].phys_addr
, 0, (PAGE_SIZE
* 16),
51 "XPT", iommu_promregs
[2].which_io
, 0x0);
52 if(!xpt
) panic("Cannot map External Page Table.");
/* Publish this IO-UNIT through the generic per-SBUS iommu pointer;
 * other routines in this file cast it back to iounit_struct. */
54 sbus
->iommu
= (struct iommu_struct
*)iounit
;
55 iounit
->page_table
= xpt
;
/* Walk all (16 * PAGE_SIZE) / sizeof(iopte_t) XPT entries.
 * NOTE(review): loop condition and body missing from this extraction. */
57 for (xptend
= iounit
->page_table
+ (16 * PAGE_SIZE
) / sizeof(iopte_t
);
62 /* One has to hold iounit->lock to call this */
/*
 * iounit_get_area(): allocate DVMA space for [vaddr, vaddr+size) using
 * a next-fit search over the IO-UNIT bitmap, fill in the matching
 * iopte entries, and (in the original) return the DVMA address.
 *
 * NOTE(review): extraction is truncated — the switch header for the
 * npages-based region selection, the region-iteration loop header, the
 * goto targets around "nexti", and the final "return vaddr;" are not
 * visible here.
 */
63 static unsigned long iounit_get_area(struct iounit_struct
*iounit
, unsigned long vaddr
, int size
)
66 unsigned long rotor
, scan
, limit
;
/* Number of bitmap pages needed, counting the sub-page offset of
 * vaddr and rounding the total up to whole pages. */
69 npages
= ((vaddr
& ~PAGE_MASK
) + size
+ (PAGE_SIZE
-1)) >> PAGE_SHIFT
;
71 /* A tiny bit of magic ingredience :) */
/* Each nibble of i appears to encode a bitmap-region index, giving
 * the preferred search order for this allocation size (dispatched on
 * npages; switch header lost in extraction) — TODO confirm. */
73 case 1: i
= 0x0231; break;
74 case 2: i
= 0x0132; break;
75 default: i
= 0x0213; break;
78 IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr
, size
, npages
));
/* Resume the next-fit search at this region's rotor; never scan past
 * the region's limit. */
81 rotor
= iounit
->rotor
[j
- 1];
82 limit
= iounit
->limit
[j
];
84 nexti
: scan
= find_next_zero_bit(iounit
->bmap
, limit
, scan
);
/* Not enough room before the region boundary: wrap to the region
 * start (or give up — the intervening lines are missing here). */
85 if (scan
+ npages
> limit
) {
88 scan
= iounit
->limit
[j
- 1];
/* All regions exhausted: this allocator cannot fail gracefully. */
93 panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr
, size
);
/* Verify the next npages-1 bits are also free; a set bit presumably
 * retries via "goto nexti" (the goto line is missing here). */
96 for (k
= 1, scan
++; k
< npages
; k
++)
97 if (test_bit(scan
++, iounit
->bmap
))
/* Remember where to resume next time (next-fit rotor update). */
99 iounit
->rotor
[j
- 1] = (scan
< limit
) ? scan
: iounit
->limit
[j
- 1];
/* Build the first iopte from the page-aligned physical address, then
 * compute the DVMA address the caller/device will use (same sub-page
 * offset as the original vaddr). */
101 iopte
= MKIOPTE(mmu_v2p(vaddr
& PAGE_MASK
));
102 vaddr
= IOUNIT_DMA_BASE
+ (scan
<< PAGE_SHIFT
) + (vaddr
& ~PAGE_MASK
);
/* Claim each bitmap slot and install its iopte; +0x100 advances the
 * encoded physical page by one page per iteration (phys>>4 encoding,
 * so 0x100 == PAGE_SIZE>>4 for 4K pages — TODO confirm). */
103 for (k
= 0; k
< npages
; k
++, iopte
= __iopte(iopte_val(iopte
) + 0x100), scan
++) {
104 set_bit(scan
, iounit
->bmap
);
105 iounit
->page_table
[scan
] = iopte
;
107 IOD(("%08lx\n", vaddr
));
/*
 * iounit_get_scsi_one(): map one kernel buffer for SBUS DVMA and (in
 * the original) return the 32-bit DVMA address for the device.  Takes
 * the per-IO-UNIT lock because iounit_get_area() requires it held.
 *
 * NOTE(review): extraction truncated — braces and the final
 * "return ret;" are not visible here.
 */
111 static __u32
iounit_get_scsi_one(char *vaddr
, unsigned long len
, struct linux_sbus
*sbus
)
113 unsigned long ret
, flags
;
/* The SBUS carries its IO-UNIT state in the generic iommu pointer
 * (stored by iounit_init). */
114 struct iounit_struct
*iounit
= (struct iounit_struct
*)sbus
->iommu
;
116 spin_lock_irqsave(&iounit
->lock
, flags
);
117 ret
= iounit_get_area(iounit
, (unsigned long)vaddr
, len
);
118 spin_unlock_irqrestore(&iounit
->lock
, flags
);
/*
 * iounit_get_scsi_sgl(): map a scatter-gather list for SBUS DVMA.
 * Walks sg[] from index sz down to 0 inclusive (sz is the LAST index,
 * not a count) and stores each entry's DVMA address in dvma_addr.
 * The IO-UNIT lock is held across the whole list so the bitmap search
 * in iounit_get_area() is serialized.
 *
 * NOTE(review): extraction truncated — braces and the declaration of
 * "flags" are not visible here.
 */
122 static void iounit_get_scsi_sgl(struct mmu_sglist
*sg
, int sz
, struct linux_sbus
*sbus
)
125 struct iounit_struct
*iounit
= (struct iounit_struct
*)sbus
->iommu
;
127 /* FIXME: Cache some resolved pages - often several sg entries are to the same page */
128 spin_lock_irqsave(&iounit
->lock
, flags
);
129 for (; sz
>= 0; sz
--) {
130 sg
[sz
].dvma_addr
= iounit_get_area(iounit
, (unsigned long)sg
[sz
].addr
, sg
[sz
].len
);
132 spin_unlock_irqrestore(&iounit
->lock
, flags
);
/*
 * iounit_release_scsi_one(): release the DVMA mapping previously set
 * up by iounit_get_scsi_one().  Converts the DVMA range back to
 * bitmap page indices and clears the corresponding bits; the iopte
 * entries themselves are left in place (only the bitmap is freed).
 *
 * NOTE(review): extraction truncated — function braces and the
 * declaration of "flags" are not visible here.
 */
135 static void iounit_release_scsi_one(__u32 vaddr
, unsigned long len
, struct linux_sbus
*sbus
)
138 struct iounit_struct
*iounit
= (struct iounit_struct
*)sbus
->iommu
;
140 spin_lock_irqsave(&iounit
->lock
, flags
);
/* len becomes the page COUNT (offset-adjusted, rounded up), and vaddr
 * becomes the first bitmap index relative to IOUNIT_DMA_BASE —
 * mirroring the rounding done at allocation time. */
141 len
= ((vaddr
& ~PAGE_MASK
) + len
+ (PAGE_SIZE
-1)) >> PAGE_SHIFT
;
142 vaddr
= (vaddr
- IOUNIT_DMA_BASE
) >> PAGE_SHIFT
;
143 IOD(("iounit_release %08lx-%08lx\n", (long)vaddr
, (long)len
+vaddr
));
/* Clear every bitmap bit in [vaddr, vaddr+len). */
144 for (len
+= vaddr
; vaddr
< len
; vaddr
++)
145 clear_bit(vaddr
, iounit
->bmap
);
146 spin_unlock_irqrestore(&iounit
->lock
, flags
);
/*
 * iounit_release_scsi_sgl(): release the DVMA mappings of a whole
 * scatter-gather list (counterpart of iounit_get_scsi_sgl).  Walks
 * sg[] from index sz down to 0 inclusive, applying the same
 * range-to-bitmap conversion as iounit_release_scsi_one() per entry.
 *
 * NOTE(review): extraction truncated — braces and the declaration of
 * "flags" are not visible here.
 */
149 static void iounit_release_scsi_sgl(struct mmu_sglist
*sg
, int sz
, struct linux_sbus
*sbus
)
152 unsigned long vaddr
, len
;
153 struct iounit_struct
*iounit
= (struct iounit_struct
*)sbus
->iommu
;
155 spin_lock_irqsave(&iounit
->lock
, flags
);
156 for (; sz
>= 0; sz
--) {
/* Page count and first bitmap index for this sg entry, using the same
 * offset-adjusted round-up as the allocation path. */
157 len
= ((sg
[sz
].dvma_addr
& ~PAGE_MASK
) + sg
[sz
].len
+ (PAGE_SIZE
-1)) >> PAGE_SHIFT
;
158 vaddr
= (sg
[sz
].dvma_addr
- IOUNIT_DMA_BASE
) >> PAGE_SHIFT
;
159 IOD(("iounit_release %08lx-%08lx\n", (long)vaddr
, (long)len
+vaddr
));
160 for (len
+= vaddr
; vaddr
< len
; vaddr
++)
161 clear_bit(vaddr
, iounit
->bmap
);
163 spin_unlock_irqrestore(&iounit
->lock
, flags
);
/*
 * iounit_map_dma_area(): back the kernel DVMA region [addr, addr+len)
 * with freshly allocated pages — install a kernel PTE for each page
 * and mirror the mapping into EVERY SBUS IO-UNIT's external page
 * table so all devices see the same DVMA view.
 *
 * NOTE(review): extraction heavily truncated — the per-page loop
 * header, declarations of pgdp/pmdp/ptep/iopte/i/dvma_prot, the
 * error-handling branch after get_free_page, and the closing braces
 * are not visible here.  Comments describe only visible lines.
 */
167 static void iounit_map_dma_area(unsigned long addr
, int len
)
169 unsigned long page
, end
;
172 struct linux_sbus
*sbus
;
/* Protection for the kernel-side DVMA PTEs: cached, valid, privileged
 * SRMMU page table entry. */
174 dvma_prot
= __pgprot(SRMMU_CACHE
| SRMMU_ET_PTE
| SRMMU_PRIV
);
175 end
= PAGE_ALIGN((addr
+ len
));
/* One fresh zeroed-or-not page per DVMA page (GFP_KERNEL: may sleep). */
177 page
= get_free_page(GFP_KERNEL
);
179 prom_printf("alloc_dvma: Cannot get a dvma page\n");
/* Walk the kernel page tables down to the PTE for addr (init_task's
 * mm, i.e. the kernel address space). */
187 pgdp
= pgd_offset(init_task
.mm
, addr
);
188 pmdp
= pmd_offset(pgdp
, addr
);
189 ptep
= pte_offset(pmdp
, addr
);
191 set_pte(ptep
, pte_val(mk_pte(page
, dvma_prot
)));
/* XPT slot index for this DVMA address. */
193 i
= ((addr
- IOUNIT_DMA_BASE
) >> PAGE_SHIFT
);
/* Replicate the iopte into every IO-UNIT on the machine. */
195 for_each_sbus(sbus
) {
196 struct iounit_struct
*iounit
= (struct iounit_struct
*)sbus
->iommu
;
198 iopte
= (iopte_t
*)(iounit
->page_table
+ i
);
199 *iopte
= __iopte(MKIOPTE(mmu_v2p(page
)));
/*
 * iounit_lockarea(): stub — locking a DVMA area is not implemented
 * for the IO-UNIT (see the FIXME).  NOTE(review): extraction
 * truncated — braces and the return statement are not visible;
 * registered below with BTFIXUPCALL_RETO0, i.e. callers presumably get
 * their argument/0 back — TODO confirm.
 */
209 static char *iounit_lockarea(char *vaddr
, unsigned long len
)
211 /* FIXME: Write this */
/*
 * iounit_unlockarea(): stub counterpart of iounit_lockarea() — a
 * deliberate no-op (registered with BTFIXUPCALL_NOP below).
 * NOTE(review): extraction truncated — braces are not visible.
 */
215 static void iounit_unlockarea(char *vaddr
, unsigned long len
)
217 /* FIXME: Write this */
/*
 * ld_mmu_iounit(): boot-time hookup.  Patches the generic mmu_*
 * operation slots (BTFIXUP run-time fixup mechanism) to point at the
 * IO-UNIT implementations in this file.  RETO0/NOP optimize the stub
 * lockarea/unlockarea calls away; NORM is a normal call.
 *
 * NOTE(review): extraction truncated — function braces are missing.
 */
220 void __init
ld_mmu_iounit(void)
222 BTFIXUPSET_CALL(mmu_lockarea
, iounit_lockarea
, BTFIXUPCALL_RETO0
);
223 BTFIXUPSET_CALL(mmu_unlockarea
, iounit_unlockarea
, BTFIXUPCALL_NOP
);
225 BTFIXUPSET_CALL(mmu_get_scsi_one
, iounit_get_scsi_one
, BTFIXUPCALL_NORM
);
226 BTFIXUPSET_CALL(mmu_get_scsi_sgl
, iounit_get_scsi_sgl
, BTFIXUPCALL_NORM
);
227 BTFIXUPSET_CALL(mmu_release_scsi_one
, iounit_release_scsi_one
, BTFIXUPCALL_NORM
);
228 BTFIXUPSET_CALL(mmu_release_scsi_sgl
, iounit_release_scsi_sgl
, BTFIXUPCALL_NORM
);
231 BTFIXUPSET_CALL(mmu_map_dma_area
, iounit_map_dma_area
, BTFIXUPCALL_NORM
);
/*
 * iounit_map_dma_init(): reserve a contiguous run of DVMA bitmap
 * slots for "size" bytes and (in the original) return its DVMA base
 * address.  Same next-fit bitmap search as iounit_get_area(), but it
 * only CLAIMS the slots — the ioptes are filled in later, one page at
 * a time, by iounit_map_dma_page().  Takes the IO-UNIT lock itself.
 *
 * NOTE(review): extraction truncated — declarations of
 * i/j/k/npages/ret/flags, the region-selection switch, the region
 * loop header, the "goto nexti" retry lines, and the final
 * "return ret;" are not visible here.
 */
235 __u32
iounit_map_dma_init(struct linux_sbus
*sbus
, int size
)
238 unsigned long rotor
, scan
, limit
;
241 struct iounit_struct
*iounit
= (struct iounit_struct
*)sbus
->iommu
;
/* Whole pages needed (no sub-page offset here, unlike get_area). */
243 npages
= (size
+ (PAGE_SIZE
-1)) >> PAGE_SHIFT
;
245 spin_lock_irqsave(&iounit
->lock
, flags
);
/* Next-fit: resume at this region's rotor, bounded by its limit. */
247 rotor
= iounit
->rotor
[j
- 1];
248 limit
= iounit
->limit
[j
];
250 nexti
: scan
= find_next_zero_bit(iounit
->bmap
, limit
, scan
);
/* Hit the region boundary: wrap to the region start once before
 * moving on (intervening control flow lost in extraction). */
251 if (scan
+ npages
> limit
) {
252 if (limit
!= rotor
) {
254 scan
= iounit
->limit
[j
- 1];
/* All regions exhausted — unrecoverable at this boot stage. */
259 panic("iounit_map_dma_init: Couldn't find free iopte slots for %d bytes\n", size
);
/* Confirm the following npages-1 bits are free; a set bit presumably
 * retries via "goto nexti" (goto line missing here). */
262 for (k
= 1, scan
++; k
< npages
; k
++)
263 if (test_bit(scan
++, iounit
->bmap
))
/* Update the next-fit rotor for subsequent allocations. */
265 iounit
->rotor
[j
- 1] = (scan
< limit
) ? scan
: iounit
->limit
[j
- 1];
/* DVMA base of the claimed run; then mark every slot as in use. */
267 ret
= IOUNIT_DMA_BASE
+ (scan
<< PAGE_SHIFT
);
268 for (k
= 0; k
< npages
; k
++, scan
++)
269 set_bit(scan
, iounit
->bmap
);
270 spin_unlock_irqrestore(&iounit
->lock
, flags
);
/*
 * iounit_map_dma_page(): fill in ONE iopte inside a region previously
 * reserved by iounit_map_dma_init().  "vaddr" is the DVMA address of
 * the target slot; "addr" is the kernel virtual page to map.  Returns
 * vaddr plus addr's sub-page offset.  NOTE(review): no locking here —
 * presumably safe because the slots were already claimed under the
 * lock by iounit_map_dma_init() — TODO confirm.
 */
274 __u32
iounit_map_dma_page(__u32 vaddr
, void *addr
, struct linux_sbus
*sbus
)
/* XPT slot index corresponding to this DVMA address. */
276 int scan
= (vaddr
- IOUNIT_DMA_BASE
) >> PAGE_SHIFT
;
277 struct iounit_struct
*iounit
= (struct iounit_struct
*)sbus
->iommu
;
/* Install the iopte for the page-aligned physical address of addr. */
279 iounit
->page_table
[scan
] = MKIOPTE(mmu_v2p(((unsigned long)addr
) & PAGE_MASK
));
280 return vaddr
+ (((unsigned long)addr
) & ~PAGE_MASK
);