/* $Id: sbus.c,v 1.19 2002/01/23 11:27:32 davem Exp $
 * sbus.c: UltraSparc SBUS controller support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>

#include <asm/page.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/upa.h>
#include <asm/cache.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/starfire.h>

#include "iommu_common.h"
/* These should be allocated on an SMP_CACHE_BYTES
 * aligned boundary for optimal performance.
 *
 * On SYSIO, using an 8K page size we have 1GB of SBUS
 * DMA space mapped.  We divide this space into equally
 * sized clusters.  We allocate a DMA mapping from the
 * cluster that matches the order of the allocation, or
 * if the order is greater than the number of clusters,
 * we try to allocate from the last cluster.
 */

#define NCLUSTERS	8UL
#define ONE_GIG		(1UL * 1024UL * 1024UL * 1024UL)
#define CLUSTER_SIZE	(ONE_GIG / NCLUSTERS)
#define CLUSTER_MASK	(CLUSTER_SIZE - 1)
#define CLUSTER_NPAGES	(CLUSTER_SIZE >> IO_PAGE_SHIFT)
#define MAP_BASE	((u32)0xc0000000)
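/* Illustrative arithmetic (an editorial note, not in the original
 * source): with NCLUSTERS == 8 and the 8K IO pages mentioned above
 * (IO_PAGE_SHIFT == 13), CLUSTER_SIZE is 1GB / 8 == 128MB, so
 * CLUSTER_NPAGES is 128MB >> 13 == 16384 IOPTEs per cluster, and
 * the whole 1GB DVMA window begins at MAP_BASE == 0xc0000000.
 */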
struct sbus_iommu {
/*0x00*/spinlock_t		lock;

/*0x08*/iopte_t			*page_table;
/*0x10*/unsigned long		strbuf_regs;
/*0x18*/unsigned long		iommu_regs;
/*0x20*/unsigned long		sbus_control_reg;

/*0x28*/volatile unsigned long	strbuf_flushflag;

	/* If NCLUSTERS is ever decreased to 4 or lower,
	 * you must increase the size of the type of
	 * these counters.  You have been duly warned. -DaveM
	 */
/*0x30*/struct {
		u16	next;
		u16	flush;
	} alloc_info[NCLUSTERS];

	/* The lowest used consistent mapping entry.  Since
	 * we allocate consistent maps out of cluster 0 this
	 * is relative to the beginning of cluster 0.
	 */
/*0x50*/u32		lowest_consistent_map;
};
/* Offsets from iommu_regs */
#define SYSIO_IOMMUREG_BASE	0x2400UL
#define IOMMU_CONTROL	(0x2400UL - 0x2400UL)	/* IOMMU control register */
#define IOMMU_TSBBASE	(0x2408UL - 0x2400UL)	/* TSB base address register */
#define IOMMU_FLUSH	(0x2410UL - 0x2400UL)	/* IOMMU flush register */
#define IOMMU_VADIAG	(0x4400UL - 0x2400UL)	/* SBUS virtual address diagnostic */
#define IOMMU_TAGCMP	(0x4408UL - 0x2400UL)	/* TLB tag compare diagnostics */
#define IOMMU_LRUDIAG	(0x4500UL - 0x2400UL)	/* IOMMU LRU queue diagnostics */
#define IOMMU_TAGDIAG	(0x4580UL - 0x2400UL)	/* TLB tag diagnostics */
#define IOMMU_DRAMDIAG	(0x4600UL - 0x2400UL)	/* TLB data RAM diagnostics */

#define IOMMU_DRAM_VALID	(1UL << 30UL)
static void __iommu_flushall(struct sbus_iommu *iommu)
{
	unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
	int entry;

	for (entry = 0; entry < 16; entry++) {
		upa_writeq(0, tag);
		tag += 8UL;
	}
	upa_readq(iommu->sbus_control_reg);

	for (entry = 0; entry < NCLUSTERS; entry++) {
		iommu->alloc_info[entry].flush =
			iommu->alloc_info[entry].next;
	}
}
static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	while (npages--)
		upa_writeq(base + (npages << IO_PAGE_SHIFT),
			   iommu->iommu_regs + IOMMU_FLUSH);
	upa_readq(iommu->sbus_control_reg);
}
/* Offsets from strbuf_regs */
#define SYSIO_STRBUFREG_BASE	0x2800UL
#define STRBUF_CONTROL	(0x2800UL - 0x2800UL)	/* Control */
#define STRBUF_PFLUSH	(0x2808UL - 0x2800UL)	/* Page flush/invalidate */
#define STRBUF_FSYNC	(0x2810UL - 0x2800UL)	/* Flush synchronization */
#define STRBUF_DRAMDIAG	(0x5000UL - 0x2800UL)	/* data RAM diagnostic */
#define STRBUF_ERRDIAG	(0x5400UL - 0x2800UL)	/* error status diagnostics */
#define STRBUF_PTAGDIAG	(0x5800UL - 0x2800UL)	/* Page tag diagnostics */
#define STRBUF_LTAGDIAG	(0x5900UL - 0x2800UL)	/* Line tag diagnostics */

#define STRBUF_TAG_VALID	0x02UL
static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
{
	unsigned long n;
	int limit;

	n = npages;
	while (n--)
		upa_writeq(base + (n << IO_PAGE_SHIFT),
			   iommu->strbuf_regs + STRBUF_PFLUSH);

	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == SBUS_DMA_TODEVICE)
		return;

	iommu->strbuf_flushflag = 0UL;

	/* Whoopee cushion! */
	upa_writeq(__pa(&iommu->strbuf_flushflag),
		   iommu->strbuf_regs + STRBUF_FSYNC);
	upa_readq(iommu->sbus_control_reg);

	limit = 100000;
	while (iommu->strbuf_flushflag == 0UL) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
		       "vaddr[%08x] npages[%ld]\n",
		       base, npages);
}
static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte, *limit, *first, *cluster;
	unsigned long cnum, ent, nent, flush_point, found;

	cnum = 0;
	nent = 1;
	while ((1UL << cnum) < npages)
		cnum++;
	if (cnum >= NCLUSTERS) {
		nent = 1UL << (cnum - NCLUSTERS);
		cnum = NCLUSTERS - 1;
	}
	iopte = iommu->page_table + (cnum * CLUSTER_NPAGES);

	if (cnum == 0)
		limit = (iommu->page_table +
			 iommu->lowest_consistent_map);
	else
		limit = (iopte + CLUSTER_NPAGES);

	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
	flush_point = iommu->alloc_info[cnum].flush;

	first = iopte;
	cluster = NULL;
	found = 0;
	for (;;) {
		if (iopte_val(*iopte) == 0UL) {
			found++;
			if (!cluster)
				cluster = iopte;
		} else {
			/* Used cluster in the way */
			cluster = NULL;
			found = 0;
		}

		if (found == nent)
			break;

		iopte += (1 << cnum);
		ent++;
		if (iopte >= limit) {
			iopte = (iommu->page_table + (cnum * CLUSTER_NPAGES));
			ent = 0;

			/* Multiple cluster allocations must not wrap */
			cluster = NULL;
			found = 0;
		}
		if (ent == flush_point)
			__iommu_flushall(iommu);
		if (iopte == first)
			goto bad;
	}

	/* ent/iopte points to the last cluster entry we're going to use,
	 * so save our place for the next allocation.
	 */
	if ((iopte + (1 << cnum)) >= limit)
		ent = 0;
	else
		ent = ent + 1;
	iommu->alloc_info[cnum].next = ent;
	if (ent == flush_point)
		__iommu_flushall(iommu);

	/* I've got your streaming cluster right here buddy boy... */
	return cluster;

bad:
	printk(KERN_EMERG "sbus: alloc_streaming_cluster of npages(%ld) failed!\n",
	       npages);
	return NULL;
}
static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	unsigned long cnum, ent, nent;
	iopte_t *iopte;

	cnum = 0;
	nent = 1;
	while ((1UL << cnum) < npages)
		cnum++;
	if (cnum >= NCLUSTERS) {
		nent = 1UL << (cnum - NCLUSTERS);
		cnum = NCLUSTERS - 1;
	}
	ent = (base & CLUSTER_MASK) >> (IO_PAGE_SHIFT + cnum);
	iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);
	do {
		iopte_val(*iopte) = 0UL;
		iopte += 1 << cnum;
	} while (--nent);

	/* If the global flush might not have caught this entry,
	 * adjust the flush point such that we will flush before
	 * ever trying to reuse it.
	 */
#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
	if (between(ent, iommu->alloc_info[cnum].next,
		    iommu->alloc_info[cnum].flush))
		iommu->alloc_info[cnum].flush = ent;
#undef between
}
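/* Illustrative note (editorial, not in the original source): the
 * between() test above works modulo the cluster size because the
 * subtractions are unsigned.  For example, with next == 10 and
 * flush == 3 (a wrapped window), freeing ent == 12 evaluates
 * (3 - 10) >= (12 - 10): the left side underflows to a huge
 * unsigned value, so the test is true and the flush point is
 * pulled back to entry 12.
 */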
/* We allocate consistent mappings from the end of cluster zero. */
static iopte_t *alloc_consistent_cluster(struct sbus_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte;

	iopte = iommu->page_table + (1 * CLUSTER_NPAGES);
	while (iopte > iommu->page_table) {
		iopte--;
		if (!(iopte_val(*iopte) & IOPTE_VALID)) {
			unsigned long tmp = npages;

			while (--tmp) {
				iopte--;
				if (iopte_val(*iopte) & IOPTE_VALID)
					break;
			}
			if (tmp == 0) {
				u32 entry = (iopte - iommu->page_table);

				if (entry < iommu->lowest_consistent_map)
					iommu->lowest_consistent_map = entry;
				return iopte;
			}
		}
	}
	return NULL;
}
static void free_consistent_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
{
	iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> IO_PAGE_SHIFT);

	if ((iopte - iommu->page_table) == iommu->lowest_consistent_map) {
		iopte_t *walk = iopte + npages;
		iopte_t *limit;

		limit = iommu->page_table + CLUSTER_NPAGES;
		while (walk < limit) {
			if (iopte_val(*walk) != 0UL)
				break;
			walk++;
		}
		iommu->lowest_consistent_map =
			(walk - iommu->page_table);
	}

	while (npages--)
		*iopte++ = __iopte(0UL);
}
void *sbus_alloc_consistent(struct sbus_dev *sdev, size_t size, dma_addr_t *dvma_addr)
{
	unsigned long order, first_page, flags;
	struct sbus_iommu *iommu;
	iopte_t *iopte;
	void *ret;
	int npages;

	if (size <= 0 || sdev == NULL || dvma_addr == NULL)
		return NULL;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;
	first_page = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = sdev->bus->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
	if (iopte == NULL) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_pages(first_page, order);
		return NULL;
	}

	/* Ok, we're committed at this point. */
	*dvma_addr = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	while (npages--) {
		*iopte++ = __iopte(IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
				   (__pa(first_page) & IOPTE_PAGE));
		first_page += IO_PAGE_SIZE;
	}
	iommu_flush(iommu, *dvma_addr, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}
void sbus_free_consistent(struct sbus_dev *sdev, size_t size, void *cpu, dma_addr_t dvma)
{
	unsigned long order, npages;
	struct sbus_iommu *iommu;

	if (size <= 0 || sdev == NULL || cpu == NULL)
		return;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = sdev->bus->iommu;

	spin_lock_irq(&iommu->lock);
	free_consistent_cluster(iommu, dvma, npages);
	iommu_flush(iommu, dvma, npages);
	spin_unlock_irq(&iommu->lock);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
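/* A minimal usage sketch (editorial, not part of the original
 * driver); "mydev" and the descriptor-ring idea are hypothetical.
 * Consistent memory is coherent between CPU and device, so no
 * explicit sync calls are needed around accesses.
 */
#if 0
static void example_consistent(struct sbus_dev *mydev)
{
	dma_addr_t dvma;
	void *ring;

	ring = sbus_alloc_consistent(mydev, 8192, &dvma);
	if (ring == NULL)
		return;
	/* ... give 'dvma' to the device, use 'ring' from the CPU ... */
	sbus_free_consistent(mydev, 8192, ring, dvma);
}
#endif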
dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *ptr, size_t size, int dir)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	unsigned long npages, pbase, flags;
	iopte_t *iopte;
	u32 dma_base, offset;
	unsigned long iopte_bits;

	if (dir == SBUS_DMA_NONE)
		BUG();

	pbase = (unsigned long) ptr;
	offset = (u32) (pbase & ~IO_PAGE_MASK);
	size = (IO_PAGE_ALIGN(pbase + size) - (pbase & IO_PAGE_MASK));
	pbase = (unsigned long) __pa(pbase & IO_PAGE_MASK);

	spin_lock_irqsave(&iommu->lock, flags);
	npages = size >> IO_PAGE_SHIFT;
	iopte = alloc_streaming_cluster(iommu, npages);
	if (iopte == NULL)
		goto bad;
	dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);
	npages = size >> IO_PAGE_SHIFT;
	iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
	if (dir != SBUS_DMA_TODEVICE)
		iopte_bits |= IOPTE_WRITE;
	while (npages--) {
		*iopte++ = __iopte(iopte_bits | (pbase & IOPTE_PAGE));
		pbase += IO_PAGE_SIZE;
	}
	npages = size >> IO_PAGE_SHIFT;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return (dma_base | offset);

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	BUG();
	return 0;
}
void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size, int direction)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	u32 dma_base = dma_addr & IO_PAGE_MASK;
	unsigned long flags;

	size = (IO_PAGE_ALIGN(dma_addr + size) - dma_base);

	spin_lock_irqsave(&iommu->lock, flags);
	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT, direction);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
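/* A minimal usage sketch (editorial, not part of the original
 * driver); "mydev", "buf" and "len" are hypothetical.  A streaming
 * mapping is set up for one transfer and torn down afterwards; the
 * unmap path performs the streaming-buffer flush seen above.
 */
#if 0
static void example_streaming(struct sbus_dev *mydev, void *buf, size_t len)
{
	dma_addr_t dvma;

	dvma = sbus_map_single(mydev, buf, len, SBUS_DMA_FROMDEVICE);
	/* ... point the device at 'dvma' and wait for the transfer ... */
	sbus_unmap_single(mydev, dvma, len, SBUS_DMA_FROMDEVICE);
}
#endif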
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, int nelems, unsigned long iopte_bits)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = (unsigned long) SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = ((pteval & IOPTE_PAGE) | iopte_bits);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0UL;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}
int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int dir)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	unsigned long flags, npages;
	iopte_t *iopte;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;
	unsigned long iopte_bits;

	if (dir == SBUS_DMA_NONE)
		BUG();

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg->dma_address =
			sbus_map_single(sdev,
					(page_address(sg->page) + sg->offset),
					sg->length, dir);
		sg->dma_length = sg->length;
		return 1;
	}

	npages = prepare_sg(sg, nents);

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_streaming_cluster(iommu, npages);
	if (iopte == NULL)
		goto bad;
	dma_base = MAP_BASE + ((iopte - iommu->page_table) << IO_PAGE_SHIFT);

	/* Normalize DVMA addresses. */
	sgtmp = sg;
	used = nents;

	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nents - used;

	iopte_bits = IOPTE_VALID | IOPTE_STBUF | IOPTE_CACHE;
	if (dir != SBUS_DMA_TODEVICE)
		iopte_bits |= IOPTE_WRITE;

	fill_sg(iopte, sg, used, nents, iopte_bits);
#ifdef VERIFY_SG
	verify_sglist(sg, nents, iopte, npages);
#endif
	spin_unlock_irqrestore(&iommu->lock, flags);

	return used;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	BUG();
	return 0;
}
void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
	unsigned long size, flags;
	struct sbus_iommu *iommu;
	u32 dvma_base;
	int i;

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sbus_unmap_single(sdev, sg->dma_address, sg->dma_length, direction);
		return;
	}

	dvma_base = sg[0].dma_address & IO_PAGE_MASK;
	for (i = 0; i < nents; i++) {
		if (sg[i].dma_length == 0)
			break;
	}
	i--;
	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - dvma_base;

	iommu = sdev->bus->iommu;
	spin_lock_irqsave(&iommu->lock, flags);
	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT, direction);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
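/* A minimal scatter-gather sketch (editorial, not part of the
 * original driver); "mydev", "sglist" and "nents" are hypothetical.
 * sbus_map_sg() may coalesce entries, so the device should be
 * programmed with the returned count, not the original nents.
 */
#if 0
static int example_sg(struct sbus_dev *mydev, struct scatterlist *sglist, int nents)
{
	int used;

	used = sbus_map_sg(mydev, sglist, nents, SBUS_DMA_BIDIRECTIONAL);
	/* ... program 'used' DVMA segments into the device ... */
	sbus_unmap_sg(mydev, sglist, nents, SBUS_DMA_BIDIRECTIONAL);
	return used;
}
#endif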
void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	unsigned long flags;

	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));

	spin_lock_irqsave(&iommu->lock, flags);
	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT, direction);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
{
}
void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	unsigned long flags, size;
	u32 base;
	int i;

	base = sg[0].dma_address & IO_PAGE_MASK;
	for (i = 0; i < nents; i++) {
		if (sg[i].dma_length == 0)
			break;
	}
	i--;
	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;

	spin_lock_irqsave(&iommu->lock, flags);
	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT, direction);
	spin_unlock_irqrestore(&iommu->lock, flags);
}
void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
{
}
/* Enable 64-bit DVMA mode for the given device. */
void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
{
	struct sbus_iommu *iommu = sdev->bus->iommu;
	int slot = sdev->slot;
	unsigned long cfg_reg;
	u64 val;

	cfg_reg = iommu->sbus_control_reg;
	switch (slot) {
	case 0:
		cfg_reg += 0x20UL;
		break;
	case 1:
		cfg_reg += 0x28UL;
		break;
	case 2:
		cfg_reg += 0x30UL;
		break;
	case 3:
		cfg_reg += 0x38UL;
		break;
	case 13:
		cfg_reg += 0x40UL;
		break;
	case 14:
		cfg_reg += 0x48UL;
		break;
	case 15:
		cfg_reg += 0x50UL;
		break;

	default:
		return;
	};

	val = upa_readq(cfg_reg);
	if (val & (1UL << 14UL)) {
		/* Extended transfer mode already enabled. */
		return;
	}

	val |= (1UL << 14UL);

	if (bursts & DMA_BURST8)
		val |= (1UL << 1UL);
	if (bursts & DMA_BURST16)
		val |= (1UL << 2UL);
	if (bursts & DMA_BURST32)
		val |= (1UL << 3UL);
	if (bursts & DMA_BURST64)
		val |= (1UL << 4UL);
	upa_writeq(val, cfg_reg);
}
/* INO number to IMAP register offset for SYSIO external IRQ's.
 * This should conform to both Sunfire/Wildfire server and Fusion
 * desktop designs.
 */
#define SYSIO_IMAP_SLOT0	0x2c04UL
#define SYSIO_IMAP_SLOT1	0x2c0cUL
#define SYSIO_IMAP_SLOT2	0x2c14UL
#define SYSIO_IMAP_SLOT3	0x2c1cUL
#define SYSIO_IMAP_SCSI		0x3004UL
#define SYSIO_IMAP_ETH		0x300cUL
#define SYSIO_IMAP_BPP		0x3014UL
#define SYSIO_IMAP_AUDIO	0x301cUL
#define SYSIO_IMAP_PFAIL	0x3024UL
#define SYSIO_IMAP_KMS		0x302cUL
#define SYSIO_IMAP_FLPY		0x3034UL
#define SYSIO_IMAP_SHW		0x303cUL
#define SYSIO_IMAP_KBD		0x3044UL
#define SYSIO_IMAP_MS		0x304cUL
#define SYSIO_IMAP_SER		0x3054UL
#define SYSIO_IMAP_TIM0		0x3064UL
#define SYSIO_IMAP_TIM1		0x306cUL
#define SYSIO_IMAP_UE		0x3074UL
#define SYSIO_IMAP_CE		0x307cUL
#define SYSIO_IMAP_SBERR	0x3084UL
#define SYSIO_IMAP_PMGMT	0x308cUL
#define SYSIO_IMAP_GFX		0x3094UL
#define SYSIO_IMAP_EUPA		0x309cUL
#define bogon     ((unsigned long) -1)
static unsigned long sysio_irq_offsets[] = {
	/* SBUS Slot 0 --> 3, level 1 --> 7 */
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,

	/* Onboard devices (not relevant/used on SunFire). */
	SYSIO_IMAP_SCSI,
	SYSIO_IMAP_ETH,
	SYSIO_IMAP_BPP,
	bogon,
	SYSIO_IMAP_AUDIO,
	SYSIO_IMAP_PFAIL,
	bogon,
	bogon,
	SYSIO_IMAP_KMS,
	SYSIO_IMAP_FLPY,
	SYSIO_IMAP_SHW,
	SYSIO_IMAP_KBD,
	SYSIO_IMAP_MS,
	SYSIO_IMAP_SER,
	bogon,
	bogon,
	SYSIO_IMAP_TIM0,
	SYSIO_IMAP_TIM1,
	bogon,
	bogon,
	SYSIO_IMAP_UE,
	SYSIO_IMAP_CE,
	SYSIO_IMAP_SBERR,
	SYSIO_IMAP_PMGMT,
	SYSIO_IMAP_GFX,
	SYSIO_IMAP_EUPA,
};

#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)
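/* Worked example (editorial, not in the original source): the table
 * is indexed directly by INO.  INO 0x0b falls in the SBUS slot 1
 * rows (INOs 0x08-0x0f), so sysio_irq_offsets[0x0b] yields
 * SYSIO_IMAP_SLOT1 == 0x2c0c; onboard INOs start at 0x20, e.g.
 * sysio_irq_offsets[0x20] is SYSIO_IMAP_SCSI.
 */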
/* Convert Interrupt Mapping register pointer to associated
 * Interrupt Clear register pointer, SYSIO specific version.
 */
#define SYSIO_ICLR_UNUSED0	0x3400UL
#define SYSIO_ICLR_SLOT0	0x340cUL
#define SYSIO_ICLR_SLOT1	0x344cUL
#define SYSIO_ICLR_SLOT2	0x348cUL
#define SYSIO_ICLR_SLOT3	0x34ccUL
static unsigned long sysio_imap_to_iclr(unsigned long imap)
{
	unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
	return imap + diff;
}
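/* Worked example (editorial, not in the original source): diff is
 * SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0 == 0x3400 - 0x2c04 == 0x7fc,
 * so an onboard IMAP such as SYSIO_IMAP_SCSI (0x3004) maps to the
 * ICLR offset 0x3004 + 0x7fc == 0x3800.
 */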
unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
{
	struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
	struct sbus_iommu *iommu = sbus->iommu;
	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
	unsigned long imap, iclr;
	int sbus_level = 0;

	imap = sysio_irq_offsets[ino];
	if (imap == ((unsigned long)-1)) {
		prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n",
			    ino);
		prom_halt();
	}
	imap += reg_base;

	/* SYSIO inconsistency.  For external SLOTS, we have to select
	 * the right ICLR register based upon the lower SBUS irq level
	 * bits.
	 */
	if (ino >= 0x20) {
		iclr = sysio_imap_to_iclr(imap);
	} else {
		int sbus_slot = (ino & 0x18)>>3;

		sbus_level = ino & 0x7;

		switch (sbus_slot) {
		case 0:
			iclr = reg_base + SYSIO_ICLR_SLOT0;
			break;
		case 1:
			iclr = reg_base + SYSIO_ICLR_SLOT1;
			break;
		case 2:
			iclr = reg_base + SYSIO_ICLR_SLOT2;
			break;
		default:
		case 3:
			iclr = reg_base + SYSIO_ICLR_SLOT3;
			break;
		};

		iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
	}
	return build_irq(sbus_level, iclr, imap);
}
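/* Worked example (editorial, not in the original source): for an
 * external interrupt with ino == 0x0b, (ino & 0x18) >> 3 == 1
 * selects SBUS slot 1 and ino & 0x7 == 3 is the level, so
 * iclr == reg_base + SYSIO_ICLR_SLOT1 + (3 - 1) * 8.
 */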
/* Error interrupt handling. */
#define SYSIO_UE_AFSR	0x0030UL
#define SYSIO_UE_AFAR	0x0038UL
#define SYSIO_UEAFSR_PPIO	0x8000000000000000UL /* Primary PIO cause */
#define SYSIO_UEAFSR_PDRD	0x4000000000000000UL /* Primary DVMA read cause */
#define SYSIO_UEAFSR_PDWR	0x2000000000000000UL /* Primary DVMA write cause */
#define SYSIO_UEAFSR_SPIO	0x1000000000000000UL /* Secondary PIO is cause */
#define SYSIO_UEAFSR_SDRD	0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_UEAFSR_SDWR	0x0400000000000000UL /* Secondary DVMA write cause */
#define SYSIO_UEAFSR_RESV1	0x03ff000000000000UL /* Reserved */
#define SYSIO_UEAFSR_DOFF	0x0000e00000000000UL /* Doubleword Offset */
#define SYSIO_UEAFSR_SIZE	0x00001c0000000000UL /* Bad transfer size 2^SIZE */
#define SYSIO_UEAFSR_MID	0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_UEAFSR_RESV2	0x0000001fffffffffUL /* Reserved */
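/* Field-decoding example (editorial, not in the original source):
 * for afsr == 0x40000ca000000000UL, SYSIO_UEAFSR_PDRD is set
 * (primary DVMA read), (afsr & SYSIO_UEAFSR_SIZE) >> 42 == 3
 * (a 2^3 == 8 byte transfer) and (afsr & SYSIO_UEAFSR_MID) >> 37
 * == 5, the UPA module ID of the requester.
 */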
static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
{
	struct sbus_bus *sbus = dev_id;
	struct sbus_iommu *iommu = sbus->iommu;
	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int reported;

	afsr_reg = reg_base + SYSIO_UE_AFSR;
	afar_reg = reg_base + SYSIO_UE_AFAR;

	/* Latch error status. */
	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
		 SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
	upa_writeq(error_bits, afsr_reg);

	/* Log the error. */
	printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
	       sbus->portid,
	       (((error_bits & SYSIO_UEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & SYSIO_UEAFSR_PDRD) ?
		  "DVMA Read" :
		  ((error_bits & SYSIO_UEAFSR_PDWR) ?
		   "DVMA Write" : "???")))));
	printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
	       sbus->portid,
	       (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
	       (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_UEAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
	printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
	reported = 0;
	if (afsr & SYSIO_UEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & SYSIO_UEAFSR_SDRD) {
		reported++;
		printk("(DVMA Read)");
	}
	if (afsr & SYSIO_UEAFSR_SDWR) {
		reported++;
		printk("(DVMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
#define SYSIO_CE_AFSR	0x0040UL
#define SYSIO_CE_AFAR	0x0048UL
#define SYSIO_CEAFSR_PPIO	0x8000000000000000UL /* Primary PIO cause */
#define SYSIO_CEAFSR_PDRD	0x4000000000000000UL /* Primary DVMA read cause */
#define SYSIO_CEAFSR_PDWR	0x2000000000000000UL /* Primary DVMA write cause */
#define SYSIO_CEAFSR_SPIO	0x1000000000000000UL /* Secondary PIO cause */
#define SYSIO_CEAFSR_SDRD	0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_CEAFSR_SDWR	0x0400000000000000UL /* Secondary DVMA write cause */
#define SYSIO_CEAFSR_RESV1	0x0300000000000000UL /* Reserved */
#define SYSIO_CEAFSR_ESYND	0x00ff000000000000UL /* Syndrome Bits */
#define SYSIO_CEAFSR_DOFF	0x0000e00000000000UL /* Double Offset */
#define SYSIO_CEAFSR_SIZE	0x00001c0000000000UL /* Bad transfer size 2^SIZE */
#define SYSIO_CEAFSR_MID	0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_CEAFSR_RESV2	0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
{
	struct sbus_bus *sbus = dev_id;
	struct sbus_iommu *iommu = sbus->iommu;
	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
	unsigned long afsr_reg, afar_reg;
	unsigned long afsr, afar, error_bits;
	int reported;

	afsr_reg = reg_base + SYSIO_CE_AFSR;
	afar_reg = reg_base + SYSIO_CE_AFAR;

	/* Latch error status. */
	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
		 SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
	upa_writeq(error_bits, afsr_reg);

	/* Log the error. */
	printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
	       sbus->portid,
	       (((error_bits & SYSIO_CEAFSR_PPIO) ?
		 "PIO" :
		 ((error_bits & SYSIO_CEAFSR_PDRD) ?
		  "DVMA Read" :
		  ((error_bits & SYSIO_CEAFSR_PDWR) ?
		   "DVMA Write" : "???")))));

	/* XXX Use syndrome and afar to print out module string just like
	 * XXX UDB CE trap handler does... -DaveM
	 */
	printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
	       sbus->portid,
	       (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
	       (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
	       (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_CEAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);

	printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
	reported = 0;
	if (afsr & SYSIO_CEAFSR_SPIO) {
		reported++;
		printk("(PIO)");
	}
	if (afsr & SYSIO_CEAFSR_SDRD) {
		reported++;
		printk("(DVMA Read)");
	}
	if (afsr & SYSIO_CEAFSR_SDWR) {
		reported++;
		printk("(DVMA Write)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	return IRQ_HANDLED;
}
#define SYSIO_SBUS_AFSR	0x2010UL
#define SYSIO_SBUS_AFAR	0x2018UL
#define SYSIO_SBAFSR_PLE	0x8000000000000000UL /* Primary Late PIO Error */
#define SYSIO_SBAFSR_PTO	0x4000000000000000UL /* Primary SBUS Timeout */
#define SYSIO_SBAFSR_PBERR	0x2000000000000000UL /* Primary SBUS Error ACK */
#define SYSIO_SBAFSR_SLE	0x1000000000000000UL /* Secondary Late PIO Error */
#define SYSIO_SBAFSR_STO	0x0800000000000000UL /* Secondary SBUS Timeout */
#define SYSIO_SBAFSR_SBERR	0x0400000000000000UL /* Secondary SBUS Error ACK */
#define SYSIO_SBAFSR_RESV1	0x03ff000000000000UL /* Reserved */
#define SYSIO_SBAFSR_RD		0x0000800000000000UL /* Primary was late PIO read */
#define SYSIO_SBAFSR_RESV2	0x0000600000000000UL /* Reserved */
#define SYSIO_SBAFSR_SIZE	0x00001c0000000000UL /* Size of transfer */
#define SYSIO_SBAFSR_MID	0x000003e000000000UL /* MID causing the error */
#define SYSIO_SBAFSR_RESV3	0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
{
	struct sbus_bus *sbus = dev_id;
	struct sbus_iommu *iommu = sbus->iommu;
	unsigned long afsr_reg, afar_reg, reg_base;
	unsigned long afsr, afar, error_bits;
	int reported;

	reg_base = iommu->sbus_control_reg - 0x2000UL;
	afsr_reg = reg_base + SYSIO_SBUS_AFSR;
	afar_reg = reg_base + SYSIO_SBUS_AFAR;

	afsr = upa_readq(afsr_reg);
	afar = upa_readq(afar_reg);

	/* Clear primary/secondary error status bits. */
	error_bits = afsr &
		(SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
		 SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
	upa_writeq(error_bits, afsr_reg);

	/* Log the error. */
	printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
	       sbus->portid,
	       (((error_bits & SYSIO_SBAFSR_PLE) ?
		 "Late PIO Error" :
		 ((error_bits & SYSIO_SBAFSR_PTO) ?
		  "Time Out" :
		  ((error_bits & SYSIO_SBAFSR_PBERR) ?
		   "Error Ack" : "???")))),
	       (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
	printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
	       sbus->portid,
	       (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
	       (afsr & SYSIO_SBAFSR_MID) >> 37UL);
	printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
	printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
	reported = 0;
	if (afsr & SYSIO_SBAFSR_SLE) {
		reported++;
		printk("(Late PIO Error)");
	}
	if (afsr & SYSIO_SBAFSR_STO) {
		reported++;
		printk("(Time Out)");
	}
	if (afsr & SYSIO_SBAFSR_SBERR) {
		reported++;
		printk("(Error Ack)");
	}
	if (!reported)
		printk("(none)");
	printk("]\n");

	/* XXX check iommu/strbuf for further error status XXX */

	return IRQ_HANDLED;
}
#define ECC_CONTROL	0x0020UL
#define SYSIO_ECNTRL_ECCEN	0x8000000000000000UL /* Enable ECC Checking */
#define SYSIO_ECNTRL_UEEN	0x4000000000000000UL /* Enable UE Interrupts */
#define SYSIO_ECNTRL_CEEN	0x2000000000000000UL /* Enable CE Interrupts */

#define SYSIO_UE_INO		0x34
#define SYSIO_CE_INO		0x35
#define SYSIO_SBUSERR_INO	0x36
static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
{
	struct sbus_iommu *iommu = sbus->iommu;
	unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
	unsigned int irq;
	u64 control;

	irq = sbus_build_irq(sbus, SYSIO_UE_INO);
	if (request_irq(irq, sysio_ue_handler,
			IRQF_SHARED, "SYSIO UE", sbus) < 0) {
		prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
			    sbus->portid);
		prom_halt();
	}

	irq = sbus_build_irq(sbus, SYSIO_CE_INO);
	if (request_irq(irq, sysio_ce_handler,
			IRQF_SHARED, "SYSIO CE", sbus) < 0) {
		prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
			    sbus->portid);
		prom_halt();
	}

	irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
	if (request_irq(irq, sysio_sbus_error_handler,
			IRQF_SHARED, "SYSIO SBUS Error", sbus) < 0) {
		prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
			    sbus->portid);
		prom_halt();
	}

	/* Now turn the error interrupts on and also enable ECC checking. */
	upa_writeq((SYSIO_ECNTRL_ECCEN |
		    SYSIO_ECNTRL_UEEN  |
		    SYSIO_ECNTRL_CEEN),
		   reg_base + ECC_CONTROL);

	control = upa_readq(iommu->sbus_control_reg);
	control |= 0x100UL; /* SBUS Error Interrupt Enable */
	upa_writeq(control, iommu->sbus_control_reg);
}
/* Boot time initialization. */
static void __init sbus_iommu_init(int __node, struct sbus_bus *sbus)
{
	struct linux_prom64_registers *pr;
	struct device_node *dp;
	struct sbus_iommu *iommu;
	unsigned long regs, tsb_base;
	u64 control;
	int i;

	dp = of_find_node_by_phandle(__node);

	sbus->portid = of_getintprop_default(dp, "upa-portid", -1);

	pr = of_get_property(dp, "reg", NULL);
	if (!pr) {
		prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
		prom_halt();
	}
	regs = pr->phys_addr;

	iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
	if (iommu == NULL) {
		prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
		prom_halt();
	}

	/* Align on E$ line boundary. */
	iommu = (struct sbus_iommu *)
		(((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
		 ~(SMP_CACHE_BYTES - 1UL));

	memset(iommu, 0, sizeof(*iommu));

	/* We start with no consistent mappings. */
	iommu->lowest_consistent_map = CLUSTER_NPAGES;

	for (i = 0; i < NCLUSTERS; i++) {
		iommu->alloc_info[i].flush = 0;
		iommu->alloc_info[i].next = 0;
	}

	/* Setup spinlock. */
	spin_lock_init(&iommu->lock);

	/* Init register offsets. */
	iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
	iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;

	/* The SYSIO SBUS control register is used for dummy reads
	 * in order to ensure write completion.
	 */
	iommu->sbus_control_reg = regs + 0x2000UL;

	/* Link into SYSIO software state. */
	sbus->iommu = iommu;

	printk("SYSIO: UPA portID %x, at %016lx\n",
	       sbus->portid, regs);

	/* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
	control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
	control = ((7UL << 16UL)	|
		   (0UL << 2UL)		|
		   (1UL << 1UL)		|
		   (1UL << 0UL));

	/* Using the above configuration we need 1MB iommu page
	 * table (128K ioptes * 8 bytes per iopte).  This is
	 * page order 7 on UltraSparc.
	 */
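	/* Sizing arithmetic (editorial, not in the original source):
	 * TSB_SIZE == 7 selects 1K << 7 == 128K TSB entries; at 8
	 * bytes per iopte that is the 1MB table mentioned above, and
	 * with 8K kernel pages get_order(1MB) == log2(1MB / 8K) == 7.
	 */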
	tsb_base = __get_free_pages(GFP_ATOMIC, get_order(IO_TSB_SIZE));
	if (tsb_base == 0UL) {
		prom_printf("sbus_iommu_init: Fatal error, cannot alloc TSB table.\n");
		prom_halt();
	}

	iommu->page_table = (iopte_t *) tsb_base;
	memset(iommu->page_table, 0, IO_TSB_SIZE);

	upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);

	/* Clean out any cruft in the IOMMU using
	 * diagnostic accesses.
	 */
	for (i = 0; i < 16; i++) {
		unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
		unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;

		dram += (unsigned long)i * 8UL;
		tag += (unsigned long)i * 8UL;

		upa_writeq(0, dram);
		upa_writeq(0, tag);
	}
	upa_readq(iommu->sbus_control_reg);

	/* Give the TSB to SYSIO. */
	upa_writeq(__pa(tsb_base), iommu->iommu_regs + IOMMU_TSBBASE);

	/* Setup streaming buffer, DE=1 SB_EN=1 */
	control = (1UL << 1UL) | (1UL << 0UL);
	upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);

	/* Clear out the tags using diagnostics. */
	for (i = 0; i < 16; i++) {
		unsigned long ptag, ltag;

		ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
		ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
		ptag += (unsigned long)i * 8UL;
		ltag += (unsigned long)i * 8UL;

		upa_writeq(0UL, ptag);
		upa_writeq(0UL, ltag);
	}

	/* Enable DVMA arbitration for all devices/slots. */
	control = upa_readq(iommu->sbus_control_reg);
	control |= 0x3fUL;
	upa_writeq(control, iommu->sbus_control_reg);

	/* Now some Xfire specific grot... */
	if (this_is_starfire)
		starfire_hookup(sbus->portid);

	sysio_register_error_handlers(sbus);
}
void sbus_fill_device_irq(struct sbus_dev *sdev)
{
	struct device_node *dp = of_find_node_by_phandle(sdev->prom_node);
	struct linux_prom_irqs *irqs;

	irqs = of_get_property(dp, "interrupts", NULL);
	if (!irqs) {
		sdev->irqs[0] = 0;
		sdev->num_irqs = 0;
	} else {
		unsigned int pri = irqs[0].pri;

		sdev->num_irqs = 1;
		if (pri < 0x20)
			pri += sdev->slot * 8;

		sdev->irqs[0] = sbus_build_irq(sdev->bus, pri);
	}
}
1249 void __init
sbus_arch_bus_ranges_init(struct device_node
*pn
, struct sbus_bus
*sbus
)
1253 void __init
sbus_setup_iommu(struct sbus_bus
*sbus
, struct device_node
*dp
)
1255 sbus_iommu_init(dp
->node
, sbus
);
1258 void __init
sbus_setup_arch_props(struct sbus_bus
*sbus
, struct device_node
*dp
)
1262 int __init
sbus_arch_preinit(void)
1267 void __init
sbus_arch_postinit(void)
1269 extern void firetruck_init(void);