2 * linux/arch/alpha/kernel/core_cia.c
4 * Written by David A Rusling (david.rusling@reo.mts.dec.com).
7 * Copyright (C) 1995 David A Rusling
8 * Copyright (C) 1997, 1998 Jay Estabrook
9 * Copyright (C) 1998, 1999, 2000 Richard Henderson
11 * Code common to all CIA core logic chips.
14 #include <linux/kernel.h>
15 #include <linux/types.h>
16 #include <linux/pci.h>
17 #include <linux/sched.h>
18 #include <linux/init.h>
20 #include <asm/system.h>
21 #include <asm/ptrace.h>
22 #include <asm/hwrpb.h>
24 #define __EXTERN_INLINE inline
26 #include <asm/core_cia.h>
27 #undef __EXTERN_INLINE
29 #include <linux/bootmem.h>
36 * NOTE: Herein lie back-to-back mb instructions. They are magic.
37 * One plausible explanation is that the i/o controller does not properly
38 * handle the system transaction. Another involves timing. Ho hum.
41 #define DEBUG_CONFIG 0
43 # define DBGC(args) printk args
48 #define vip volatile int *
51 * Given a bus, device, and function number, compute resulting
52 * configuration space address. It is therefore not safe to have
53 * concurrent invocations to configuration space access routines, but
54 * there really shouldn't be any need for this.
58 * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
59 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
60 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
61 * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
62 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
64 * 31:11 Device select bit.
65 * 10:8 Function number
70 * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
71 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
72 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
73 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
74 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
77 * 23:16 bus number (8 bits = 128 possible buses)
78 * 15:11 Device number (5 bits)
79 * 10:8 function number
83 * The function number selects which function of a multi-function device
84 * (e.g., SCSI and Ethernet).
86 * The register selects a DWORD (32 bit) register offset. Hence it
87 * doesn't get shifted by 2 bits as we want to "drop" the bottom two
92 mk_conf_addr(struct pci_dev
*dev
, int where
, unsigned long *pci_addr
,
95 u8 bus
= dev
->bus
->number
;
96 u8 device_fn
= dev
->devfn
;
99 *pci_addr
= (bus
<< 16) | (device_fn
<< 8) | where
;
101 DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
102 " returning address 0x%p\n"
103 bus
, device_fn
, where
, *pci_addr
));
109 conf_read(unsigned long addr
, unsigned char type1
)
115 DBGC(("conf_read(addr=0x%lx, type1=%d) ", addr
, type1
));
116 __save_and_cli(flags
);
118 /* Reset status register to avoid losing errors. */
119 stat0
= *(vip
)CIA_IOC_CIA_ERR
;
120 *(vip
)CIA_IOC_CIA_ERR
= stat0
;
122 *(vip
)CIA_IOC_CIA_ERR
; /* re-read to force write */
124 /* If Type1 access, must set CIA CFG. */
126 cia_cfg
= *(vip
)CIA_IOC_CFG
;
127 *(vip
)CIA_IOC_CFG
= (cia_cfg
& ~3) | 1;
134 mcheck_expected(0) = 1;
138 /* Access configuration space. */
142 if (mcheck_taken(0)) {
147 mcheck_expected(0) = 0;
150 /* If Type1 access, must reset IOC CFG so normal IO space ops work. */
152 *(vip
)CIA_IOC_CFG
= cia_cfg
;
157 __restore_flags(flags
);
164 conf_write(unsigned long addr
, unsigned int value
, unsigned char type1
)
167 int stat0
, cia_cfg
= 0;
169 DBGC(("conf_write(addr=0x%lx, type1=%d) ", addr
, type1
));
170 __save_and_cli(flags
);
172 /* Reset status register to avoid losing errors. */
173 stat0
= *(vip
)CIA_IOC_CIA_ERR
;
174 *(vip
)CIA_IOC_CIA_ERR
= stat0
;
176 *(vip
)CIA_IOC_CIA_ERR
; /* re-read to force write */
178 /* If Type1 access, must set CIA CFG. */
180 cia_cfg
= *(vip
)CIA_IOC_CFG
;
181 *(vip
)CIA_IOC_CFG
= (cia_cfg
& ~3) | 1;
188 mcheck_expected(0) = 1;
192 /* Access configuration space. */
195 *(vip
)addr
; /* read back to force the write */
197 mcheck_expected(0) = 0;
200 /* If Type1 access, must reset IOC CFG so normal IO space ops work. */
202 *(vip
)CIA_IOC_CFG
= cia_cfg
;
207 __restore_flags(flags
);
212 cia_read_config_byte(struct pci_dev
*dev
, int where
, u8
*value
)
214 unsigned long addr
, pci_addr
;
217 if (mk_conf_addr(dev
, where
, &pci_addr
, &type1
))
218 return PCIBIOS_DEVICE_NOT_FOUND
;
220 addr
= (pci_addr
<< 5) + 0x00 + CIA_CONF
;
221 *value
= conf_read(addr
, type1
) >> ((where
& 3) * 8);
222 return PCIBIOS_SUCCESSFUL
;
226 cia_read_config_word(struct pci_dev
*dev
, int where
, u16
*value
)
228 unsigned long addr
, pci_addr
;
231 if (mk_conf_addr(dev
, where
, &pci_addr
, &type1
))
232 return PCIBIOS_DEVICE_NOT_FOUND
;
234 addr
= (pci_addr
<< 5) + 0x08 + CIA_CONF
;
235 *value
= conf_read(addr
, type1
) >> ((where
& 3) * 8);
236 return PCIBIOS_SUCCESSFUL
;
240 cia_read_config_dword(struct pci_dev
*dev
, int where
, u32
*value
)
242 unsigned long addr
, pci_addr
;
245 if (mk_conf_addr(dev
, where
, &pci_addr
, &type1
))
246 return PCIBIOS_DEVICE_NOT_FOUND
;
248 addr
= (pci_addr
<< 5) + 0x18 + CIA_CONF
;
249 *value
= conf_read(addr
, type1
);
250 return PCIBIOS_SUCCESSFUL
;
254 cia_write_config(struct pci_dev
*dev
, int where
, u32 value
, long mask
)
256 unsigned long addr
, pci_addr
;
259 if (mk_conf_addr(dev
, where
, &pci_addr
, &type1
))
260 return PCIBIOS_DEVICE_NOT_FOUND
;
262 addr
= (pci_addr
<< 5) + mask
+ CIA_CONF
;
263 conf_write(addr
, value
<< ((where
& 3) * 8), type1
);
264 return PCIBIOS_SUCCESSFUL
;
268 cia_write_config_byte(struct pci_dev
*dev
, int where
, u8 value
)
270 return cia_write_config(dev
, where
, value
, 0x00);
274 cia_write_config_word(struct pci_dev
*dev
, int where
, u16 value
)
276 return cia_write_config(dev
, where
, value
, 0x08);
280 cia_write_config_dword(struct pci_dev
*dev
, int where
, u32 value
)
282 return cia_write_config(dev
, where
, value
, 0x18);
285 struct pci_ops cia_pci_ops
=
287 read_byte
: cia_read_config_byte
,
288 read_word
: cia_read_config_word
,
289 read_dword
: cia_read_config_dword
,
290 write_byte
: cia_write_config_byte
,
291 write_word
: cia_write_config_word
,
292 write_dword
: cia_write_config_dword
296 * CIA Pass 1 and PYXIS Pass 1 and 2 have a broken scatter-gather tlb.
297 * It cannot be invalidated. Rather than hard code the pass numbers,
298 * actually try the tbia to see if it works.
302 cia_pci_tbi(struct pci_controler
*hose
, dma_addr_t start
, dma_addr_t end
)
305 *(vip
)CIA_IOC_PCI_TBIA
= 3; /* Flush all locked and unlocked. */
307 *(vip
)CIA_IOC_PCI_TBIA
;
311 * Fixup attempt number 1.
313 * Write zeros directly into the tag registers.
317 cia_pci_tbi_try1(struct pci_controler
*hose
,
318 dma_addr_t start
, dma_addr_t end
)
321 *(vip
)CIA_IOC_TB_TAGn(0) = 0;
322 *(vip
)CIA_IOC_TB_TAGn(1) = 0;
323 *(vip
)CIA_IOC_TB_TAGn(2) = 0;
324 *(vip
)CIA_IOC_TB_TAGn(3) = 0;
325 *(vip
)CIA_IOC_TB_TAGn(4) = 0;
326 *(vip
)CIA_IOC_TB_TAGn(5) = 0;
327 *(vip
)CIA_IOC_TB_TAGn(6) = 0;
328 *(vip
)CIA_IOC_TB_TAGn(7) = 0;
330 *(vip
)CIA_IOC_TB_TAGn(0);
335 * Fixup attempt number 2. This is the method NT and NetBSD use.
337 * Allocate mappings, and put the chip into DMA loopback mode to read a
338 * garbage page. This works by causing TLB misses, causing old entries to
339 * be purged to make room for the new entries coming in for the garbage page.
342 #define CIA_BROKEN_TBI_TRY2_BASE 0xE0000000
345 cia_enable_broken_tbi_try2(void)
347 unsigned long *ppte
, pte
;
350 ppte
= __alloc_bootmem(PAGE_SIZE
, 32768, 0);
351 pte
= (virt_to_phys(ppte
) >> (PAGE_SHIFT
- 1)) | 1;
353 for (i
= 0; i
< PAGE_SIZE
/ sizeof(unsigned long); ++i
)
356 *(vip
)CIA_IOC_PCI_W3_BASE
= CIA_BROKEN_TBI_TRY2_BASE
| 3;
357 *(vip
)CIA_IOC_PCI_W3_MASK
= (PAGE_SIZE
- 1) & 0xfff00000;
358 *(vip
)CIA_IOC_PCI_T3_BASE
= virt_to_phys(ppte
) >> 2;
362 cia_pci_tbi_try2(struct pci_controler
*hose
,
363 dma_addr_t start
, dma_addr_t end
)
366 unsigned long bus_addr
;
370 __save_and_cli(flags
);
372 /* Put the chip into PCI loopback mode. */
374 ctrl
= *(vip
)CIA_IOC_CIA_CTRL
;
375 *(vip
)CIA_IOC_CIA_CTRL
= ctrl
| CIA_CTRL_PCI_LOOP_EN
;
377 *(vip
)CIA_IOC_CIA_CTRL
;
380 /* Read from PCI dense memory space at TBI_ADDR, skipping 32k on
381 each read. This forces SG TLB misses. NetBSD claims that the
382 TLB entries are not quite LRU, meaning that we need to read more
383 times than there are actual tags. The 2117x docs claim strict
384 round-robin. Oh well, we've come this far... */
386 bus_addr
= cia_ioremap(CIA_BROKEN_TBI_TRY2_BASE
);
387 for (i
= 0; i
< 12; ++i
, bus_addr
+= 32768)
390 /* Restore normal PCI operation. */
392 *(vip
)CIA_IOC_CIA_CTRL
= ctrl
;
394 *(vip
)CIA_IOC_CIA_CTRL
;
397 __restore_flags(flags
);
402 verify_tb_operation(void)
404 static int page
[PAGE_SIZE
/4]
405 __attribute__((aligned(PAGE_SIZE
)))
408 struct pci_iommu_arena
*arena
= pci_isa_hose
->sg_isa
;
409 int ctrl
, addr0
, tag0
, pte0
, data0
;
412 /* Put the chip into PCI loopback mode. */
414 ctrl
= *(vip
)CIA_IOC_CIA_CTRL
;
415 *(vip
)CIA_IOC_CIA_CTRL
= ctrl
| CIA_CTRL_PCI_LOOP_EN
;
417 *(vip
)CIA_IOC_CIA_CTRL
;
420 /* Write a valid entry directly into the TLB registers. */
422 addr0
= arena
->dma_base
;
424 pte0
= (virt_to_phys(page
) >> (PAGE_SHIFT
- 1)) | 1;
426 *(vip
)CIA_IOC_TB_TAGn(0) = tag0
;
427 *(vip
)CIA_IOC_TB_TAGn(1) = 0;
428 *(vip
)CIA_IOC_TB_TAGn(2) = 0;
429 *(vip
)CIA_IOC_TB_TAGn(3) = 0;
430 *(vip
)CIA_IOC_TB_TAGn(4) = 0;
431 *(vip
)CIA_IOC_TB_TAGn(5) = 0;
432 *(vip
)CIA_IOC_TB_TAGn(6) = 0;
433 *(vip
)CIA_IOC_TB_TAGn(7) = 0;
434 *(vip
)CIA_IOC_TBn_PAGEm(0,0) = pte0
;
435 *(vip
)CIA_IOC_TBn_PAGEm(0,1) = 0;
436 *(vip
)CIA_IOC_TBn_PAGEm(0,2) = 0;
437 *(vip
)CIA_IOC_TBn_PAGEm(0,3) = 0;
440 /* First, verify we can read back what we've written. If
441 this fails, we can't be sure of any of the other testing
442 we're going to do, so bail. */
443 /* ??? Actually, we could do the work with machine checks.
444 By passing this register update test, we pretty much
445 guarantee that cia_pci_tbi_try1 works. If this test
446 fails, cia_pci_tbi_try2 might still work. */
448 temp
= *(vip
)CIA_IOC_TB_TAGn(0);
450 printk("pci: failed tb register update test "
451 "(tag0 %#x != %#x)\n", temp
, tag0
);
454 temp
= *(vip
)CIA_IOC_TB_TAGn(1);
456 printk("pci: failed tb register update test "
457 "(tag1 %#x != 0)\n", temp
);
460 temp
= *(vip
)CIA_IOC_TBn_PAGEm(0,0);
462 printk("pci: failed tb register update test "
463 "(pte0 %#x != %#x)\n", temp
, pte0
);
466 printk("pci: passed tb register update test\n");
468 /* Second, verify we can actually do I/O through this entry. */
472 mcheck_expected(0) = 1;
475 temp
= cia_readl(cia_ioremap(addr0
));
477 mcheck_expected(0) = 0;
479 if (mcheck_taken(0)) {
480 printk("pci: failed sg loopback i/o read test (mcheck)\n");
484 printk("pci: failed sg loopback i/o read test "
485 "(%#x != %#x)\n", temp
, data0
);
488 printk("pci: passed sg loopback i/o read test\n");
490 /* Third, try to invalidate the TLB. */
492 cia_pci_tbi(arena
->hose
, 0, -1);
493 temp
= *(vip
)CIA_IOC_TB_TAGn(0);
495 cia_pci_tbi_try1(arena
->hose
, 0, -1);
497 temp
= *(vip
)CIA_IOC_TB_TAGn(0);
499 printk("pci: failed tbia test; "
500 "no usable workaround\n");
504 alpha_mv
.mv_pci_tbi
= cia_pci_tbi_try1
;
505 printk("pci: failed tbia test; workaround 1 succeeded\n");
507 printk("pci: passed tbia test\n");
510 /* Fourth, verify the TLB snoops the EV5's caches when
515 arena
->ptes
[4] = pte0
;
516 mcheck_expected(0) = 1;
519 temp
= cia_readl(cia_ioremap(addr0
+ 4*PAGE_SIZE
));
521 mcheck_expected(0) = 0;
523 if (mcheck_taken(0)) {
524 printk("pci: failed pte write cache snoop test (mcheck)\n");
528 printk("pci: failed pte write cache snoop test "
529 "(%#x != %#x)\n", temp
, data0
);
532 printk("pci: passed pte write cache snoop test\n");
534 /* Fifth, verify that a previously invalid PTE entry gets
535 filled from the page table. */
539 arena
->ptes
[5] = pte0
;
540 mcheck_expected(0) = 1;
543 temp
= cia_readl(cia_ioremap(addr0
+ 5*PAGE_SIZE
));
545 mcheck_expected(0) = 0;
547 if (mcheck_taken(0)) {
548 printk("pci: failed valid tag invalid pte reload test "
549 "(mcheck; workaround available)\n");
550 /* Work around this bug by aligning new allocations
551 on 4 page boundaries. */
552 arena
->align_entry
= 4;
553 } else if (temp
!= data0
) {
554 printk("pci: failed valid tag invalid pte reload test "
555 "(%#x != %#x)\n", temp
, data0
);
558 printk("pci: passed valid tag invalid pte reload test\n");
561 /* Sixth, verify machine checks are working. Test invalid
562 pte under the same valid tag as we used above. */
564 mcheck_expected(0) = 1;
567 temp
= cia_readl(cia_ioremap(addr0
+ 6*PAGE_SIZE
));
569 mcheck_expected(0) = 0;
571 printk("pci: %s pci machine check test\n",
572 mcheck_taken(0) ? "passed" : "failed");
574 /* Clean up after the tests. */
577 alpha_mv
.mv_pci_tbi(arena
->hose
, 0, -1);
580 /* Restore normal PCI operation. */
582 *(vip
)CIA_IOC_CIA_CTRL
= ctrl
;
584 *(vip
)CIA_IOC_CIA_CTRL
;
589 printk("pci: disabling sg translation window\n");
590 *(vip
)CIA_IOC_PCI_W0_BASE
= 0;
591 alpha_mv
.mv_pci_tbi
= NULL
;
596 do_init_arch(int is_pyxis
)
598 struct pci_controler
*hose
;
602 cia_rev
= *(vip
)CIA_IOC_CIA_REV
& CIA_REV_MASK
;
603 printk("pci: cia revision %d%s\n",
604 cia_rev
, is_pyxis
? " (pyxis)" : "");
606 /* Set up error reporting. */
607 temp
= *(vip
)CIA_IOC_ERR_MASK
;
608 temp
&= ~(CIA_ERR_CPU_PE
| CIA_ERR_MEM_NEM
| CIA_ERR_PA_PTE_INV
609 | CIA_ERR_RCVD_MAS_ABT
| CIA_ERR_RCVD_TAR_ABT
);
610 *(vip
)CIA_IOC_ERR_MASK
= temp
;
612 /* Clear all currently pending errors. */
613 temp
= *(vip
)CIA_IOC_CIA_ERR
;
614 *(vip
)CIA_IOC_CIA_ERR
= temp
;
616 /* Turn on mchecks. */
617 temp
= *(vip
)CIA_IOC_CIA_CTRL
;
618 temp
|= CIA_CTRL_FILL_ERR_EN
| CIA_CTRL_MCHK_ERR_EN
;
619 *(vip
)CIA_IOC_CIA_CTRL
= temp
;
621 /* Clear the CFG register, which gets used for PCI config space
622 accesses. That is the way we want to use it, and we do not
623 want to depend on what ARC or SRM might have left behind. */
624 *(vip
)CIA_IOC_CFG
= 0;
627 *(vip
)CIA_IOC_HAE_MEM
= 0;
628 *(vip
)CIA_IOC_HAE_IO
= 0;
630 /* For PYXIS, we always use BWX bus and i/o accesses. To that end,
631 make sure they're enabled on the controler. */
633 temp
= *(vip
)CIA_IOC_CIA_CNFG
;
634 temp
|= CIA_CNFG_IOA_BWEN
;
635 *(vip
)CIA_IOC_CIA_CNFG
= temp
;
638 /* Syncronize with all previous changes. */
640 *(vip
)CIA_IOC_CIA_REV
;
643 * Create our single hose.
646 pci_isa_hose
= hose
= alloc_pci_controler();
647 hose
->io_space
= &ioport_resource
;
648 hose
->mem_space
= &iomem_resource
;
652 struct resource
*hae_mem
= alloc_resource();
653 hose
->mem_space
= hae_mem
;
656 hae_mem
->end
= CIA_MEM_R1_MASK
;
657 hae_mem
->name
= pci_hae0_name
;
658 hae_mem
->flags
= IORESOURCE_MEM
;
660 if (request_resource(&iomem_resource
, hae_mem
) < 0)
661 printk(KERN_ERR
"Failed to request HAE_MEM\n");
663 hose
->sparse_mem_base
= CIA_SPARSE_MEM
- IDENT_ADDR
;
664 hose
->dense_mem_base
= CIA_DENSE_MEM
- IDENT_ADDR
;
665 hose
->sparse_io_base
= CIA_IO
- IDENT_ADDR
;
666 hose
->dense_io_base
= 0;
668 hose
->sparse_mem_base
= 0;
669 hose
->dense_mem_base
= CIA_BW_MEM
- IDENT_ADDR
;
670 hose
->sparse_io_base
= 0;
671 hose
->dense_io_base
= CIA_BW_IO
- IDENT_ADDR
;
675 * Set up the PCI to main memory translation windows.
677 * Window 0 is scatter-gather 8MB at 8MB (for isa)
678 * Window 1 is direct access 1GB at 1GB
679 * Window 2 is direct access 1GB at 2GB
681 * We must actually use 2 windows to direct-map the 2GB space,
682 * because of an idiot-syncrasy of the CYPRESS chip used on
683 * many PYXIS systems. It may respond to a PCI bus address in
684 * the last 1MB of the 4GB address range.
686 * ??? NetBSD hints that page tables must be aligned to 32K,
687 * possibly due to a hardware bug. This is over-aligned
688 * from the 8K alignment one would expect for an 8MB window.
689 * No description of what revisions affected.
693 hose
->sg_isa
= iommu_arena_new(hose
, 0x00800000, 0x00800000, 32768);
694 __direct_map_base
= 0x40000000;
695 __direct_map_size
= 0x80000000;
697 *(vip
)CIA_IOC_PCI_W0_BASE
= hose
->sg_isa
->dma_base
| 3;
698 *(vip
)CIA_IOC_PCI_W0_MASK
= (hose
->sg_isa
->size
- 1) & 0xfff00000;
699 *(vip
)CIA_IOC_PCI_T0_BASE
= virt_to_phys(hose
->sg_isa
->ptes
) >> 2;
701 *(vip
)CIA_IOC_PCI_W1_BASE
= 0x40000000 | 1;
702 *(vip
)CIA_IOC_PCI_W1_MASK
= (0x40000000 - 1) & 0xfff00000;
703 *(vip
)CIA_IOC_PCI_T1_BASE
= 0 >> 2;
705 *(vip
)CIA_IOC_PCI_W2_BASE
= 0x80000000 | 1;
706 *(vip
)CIA_IOC_PCI_W2_MASK
= (0x40000000 - 1) & 0xfff00000;
707 *(vip
)CIA_IOC_PCI_T2_BASE
= 0x40000000 >> 2;
709 *(vip
)CIA_IOC_PCI_W3_BASE
= 0;
719 pyxis_init_arch(void)
721 /* On pyxis machines we can precisely calculate the
722 CPU clock frequency using pyxis real time counter.
723 It's especially useful for SX164 with broken RTC.
725 Both CPU and chipset are driven by the single 16.666M
726 or 16.667M crystal oscillator. PYXIS_RT_COUNT clock is
729 unsigned int cc0
, cc1
;
730 unsigned long pyxis_cc
;
732 __asm__
__volatile__ ("rpcc %0" : "=r"(cc0
));
733 pyxis_cc
= *(vulp
)PYXIS_RT_COUNT
;
734 do { } while(*(vulp
)PYXIS_RT_COUNT
- pyxis_cc
< 4096);
735 __asm__
__volatile__ ("rpcc %0" : "=r"(cc1
));
737 hwrpb
->cycle_freq
= ((cc1
>> 11) * 100000000UL) / 3;
738 hwrpb_update_checksum(hwrpb
);
746 /* Must delay this from init_arch, as we need machine checks. */
747 verify_tb_operation();
752 cia_pci_clr_err(void)
756 jd
= *(vip
)CIA_IOC_CIA_ERR
;
757 *(vip
)CIA_IOC_CIA_ERR
= jd
;
759 *(vip
)CIA_IOC_CIA_ERR
; /* re-read to force write. */
763 cia_decode_pci_error(struct el_CIA_sysdata_mcheck
*cia
, const char *msg
)
765 static const char * const pci_cmd_desc
[16] = {
766 "Interrupt Acknowledge", "Special Cycle", "I/O Read",
767 "I/O Write", "Reserved 0x4", "Reserved 0x5", "Memory Read",
768 "Memory Write", "Reserved 0x8", "Reserved 0x9",
769 "Configuration Read", "Configuration Write",
770 "Memory Read Multiple", "Dual Address Cycle",
771 "Memory Read Line", "Memory Write and Invalidate"
774 if (cia
->cia_err
& (CIA_ERR_COR_ERR
777 | CIA_ERR_PA_PTE_INV
)) {
778 static const char * const window_desc
[6] = {
779 "No window active", "Window 0 hit", "Window 1 hit",
780 "Window 2 hit", "Window 3 hit", "Monster window hit"
785 unsigned long addr
, tmp
;
788 cmd
= pci_cmd_desc
[cia
->pci_err0
& 0x7];
789 lock
= (cia
->pci_err0
>> 4) & 1;
790 dac
= (cia
->pci_err0
>> 5) & 1;
792 tmp
= (cia
->pci_err0
>> 8) & 0x1F;
794 window
= window_desc
[tmp
];
796 addr
= cia
->pci_err1
;
798 tmp
= *(vip
)CIA_IOC_PCI_W_DAC
& 0xFFUL
;
802 printk(KERN_CRIT
"CIA machine check: %s\n", msg
);
803 printk(KERN_CRIT
" DMA command: %s\n", cmd
);
804 printk(KERN_CRIT
" PCI address: %#010lx\n", addr
);
805 printk(KERN_CRIT
" %s, Lock: %d, DAC: %d\n",
807 } else if (cia
->cia_err
& (CIA_ERR_PERR
808 | CIA_ERR_PCI_ADDR_PE
809 | CIA_ERR_RCVD_MAS_ABT
810 | CIA_ERR_RCVD_TAR_ABT
811 | CIA_ERR_IOA_TIMEOUT
)) {
812 static const char * const master_st_desc
[16] = {
813 "Idle", "Drive bus", "Address step cycle",
814 "Address cycle", "Data cycle", "Last read data cycle",
815 "Last write data cycle", "Read stop cycle",
816 "Write stop cycle", "Read turnaround cycle",
817 "Write turnaround cycle", "Reserved 0xB",
818 "Reserved 0xC", "Reserved 0xD", "Reserved 0xE",
821 static const char * const target_st_desc
[16] = {
822 "Idle", "Busy", "Read data cycle", "Write data cycle",
823 "Read stop cycle", "Write stop cycle",
824 "Read turnaround cycle", "Write turnaround cycle",
825 "Read wait cycle", "Write wait cycle",
826 "Reserved 0xA", "Reserved 0xB", "Reserved 0xC",
827 "Reserved 0xD", "Reserved 0xE", "Unknown state"
831 const char *master
, *target
;
832 unsigned long addr
, tmp
;
835 master
= master_st_desc
[(cia
->pci_err0
>> 16) & 0xF];
836 target
= target_st_desc
[(cia
->pci_err0
>> 20) & 0xF];
837 cmd
= pci_cmd_desc
[(cia
->pci_err0
>> 24) & 0xF];
838 dac
= (cia
->pci_err0
>> 28) & 1;
840 addr
= cia
->pci_err2
;
842 tmp
= *(volatile int *)CIA_IOC_PCI_W_DAC
& 0xFFUL
;
846 printk(KERN_CRIT
"CIA machine check: %s\n", msg
);
847 printk(KERN_CRIT
" PCI command: %s\n", cmd
);
848 printk(KERN_CRIT
" Master state: %s, Target state: %s\n",
850 printk(KERN_CRIT
" PCI address: %#010lx, DAC: %d\n",
853 printk(KERN_CRIT
"CIA machine check: %s\n", msg
);
854 printk(KERN_CRIT
" Unknown PCI error\n");
855 printk(KERN_CRIT
" PCI_ERR0 = %#08lx", cia
->pci_err0
);
856 printk(KERN_CRIT
" PCI_ERR1 = %#08lx", cia
->pci_err1
);
857 printk(KERN_CRIT
" PCI_ERR2 = %#08lx", cia
->pci_err2
);
862 cia_decode_mem_error(struct el_CIA_sysdata_mcheck
*cia
, const char *msg
)
864 unsigned long mem_port_addr
;
865 unsigned long mem_port_mask
;
866 const char *mem_port_cmd
;
867 const char *seq_state
;
868 const char *set_select
;
871 /* If this is a DMA command, also decode the PCI bits. */
872 if ((cia
->mem_err1
>> 20) & 1)
873 cia_decode_pci_error(cia
, msg
);
875 printk(KERN_CRIT
"CIA machine check: %s\n", msg
);
877 mem_port_addr
= cia
->mem_err0
& 0xfffffff0;
878 mem_port_addr
|= (cia
->mem_err1
& 0x83UL
) << 32;
880 mem_port_mask
= (cia
->mem_err1
>> 12) & 0xF;
882 tmp
= (cia
->mem_err1
>> 8) & 0xF;
883 tmp
|= ((cia
->mem_err1
>> 20) & 1) << 4;
884 if ((tmp
& 0x1E) == 0x06)
885 mem_port_cmd
= "WRITE BLOCK or WRITE BLOCK LOCK";
886 else if ((tmp
& 0x1C) == 0x08)
887 mem_port_cmd
= "READ MISS or READ MISS MODIFY";
888 else if (tmp
== 0x1C)
889 mem_port_cmd
= "BC VICTIM";
890 else if ((tmp
& 0x1E) == 0x0E)
891 mem_port_cmd
= "READ MISS MODIFY";
892 else if ((tmp
& 0x1C) == 0x18)
893 mem_port_cmd
= "DMA READ or DMA READ MODIFY";
894 else if ((tmp
& 0x1E) == 0x12)
895 mem_port_cmd
= "DMA WRITE";
897 mem_port_cmd
= "Unknown";
899 tmp
= (cia
->mem_err1
>> 16) & 0xF;
905 seq_state
= "DMA READ or DMA WRITE";
908 seq_state
= "READ MISS (or READ MISS MODIFY) with victim";
910 case 0x4: case 0x5: case 0x6:
911 seq_state
= "READ MISS (or READ MISS MODIFY) with no victim";
913 case 0x8: case 0x9: case 0xB:
914 seq_state
= "Refresh";
917 seq_state
= "Idle, waiting for DMA pending read";
920 seq_state
= "Idle, ras precharge";
923 seq_state
= "Unknown";
927 tmp
= (cia
->mem_err1
>> 24) & 0x1F;
929 case 0x00: set_select
= "Set 0 selected"; break;
930 case 0x01: set_select
= "Set 1 selected"; break;
931 case 0x02: set_select
= "Set 2 selected"; break;
932 case 0x03: set_select
= "Set 3 selected"; break;
933 case 0x04: set_select
= "Set 4 selected"; break;
934 case 0x05: set_select
= "Set 5 selected"; break;
935 case 0x06: set_select
= "Set 6 selected"; break;
936 case 0x07: set_select
= "Set 7 selected"; break;
937 case 0x08: set_select
= "Set 8 selected"; break;
938 case 0x09: set_select
= "Set 9 selected"; break;
939 case 0x0A: set_select
= "Set A selected"; break;
940 case 0x0B: set_select
= "Set B selected"; break;
941 case 0x0C: set_select
= "Set C selected"; break;
942 case 0x0D: set_select
= "Set D selected"; break;
943 case 0x0E: set_select
= "Set E selected"; break;
944 case 0x0F: set_select
= "Set F selected"; break;
945 case 0x10: set_select
= "No set selected"; break;
946 case 0x1F: set_select
= "Refresh cycle"; break;
947 default: set_select
= "Unknown"; break;
950 printk(KERN_CRIT
" Memory port command: %s\n", mem_port_cmd
);
951 printk(KERN_CRIT
" Memory port address: %#010lx, mask: %#lx\n",
952 mem_port_addr
, mem_port_mask
);
953 printk(KERN_CRIT
" Memory sequencer state: %s\n", seq_state
);
954 printk(KERN_CRIT
" Memory set: %s\n", set_select
);
958 cia_decode_ecc_error(struct el_CIA_sysdata_mcheck
*cia
, const char *msg
)
964 cia_decode_mem_error(cia
, msg
);
966 syn
= cia
->cia_syn
& 0xff;
967 if (syn
== (syn
& -syn
)) {
968 fmt
= KERN_CRIT
" ECC syndrome %#x -- check bit %d\n";
971 static unsigned char const data_bit
[64] = {
972 0xCE, 0xCB, 0xD3, 0xD5,
973 0xD6, 0xD9, 0xDA, 0xDC,
974 0x23, 0x25, 0x26, 0x29,
975 0x2A, 0x2C, 0x31, 0x34,
976 0x0E, 0x0B, 0x13, 0x15,
977 0x16, 0x19, 0x1A, 0x1C,
978 0xE3, 0xE5, 0xE6, 0xE9,
979 0xEA, 0xEC, 0xF1, 0xF4,
980 0x4F, 0x4A, 0x52, 0x54,
981 0x57, 0x58, 0x5B, 0x5D,
982 0xA2, 0xA4, 0xA7, 0xA8,
983 0xAB, 0xAD, 0xB0, 0xB5,
984 0x8F, 0x8A, 0x92, 0x94,
985 0x97, 0x98, 0x9B, 0x9D,
986 0x62, 0x64, 0x67, 0x68,
987 0x6B, 0x6D, 0x70, 0x75
990 for (i
= 0; i
< 64; ++i
)
991 if (data_bit
[i
] == syn
)
995 fmt
= KERN_CRIT
" ECC syndrome %#x -- data bit %d\n";
997 fmt
= KERN_CRIT
" ECC syndrome %#x -- unknown bit\n";
1000 printk (fmt
, syn
, i
);
1004 cia_decode_parity_error(struct el_CIA_sysdata_mcheck
*cia
)
1006 static const char * const cmd_desc
[16] = {
1007 "NOP", "LOCK", "FETCH", "FETCH_M", "MEMORY BARRIER",
1008 "SET DIRTY", "WRITE BLOCK", "WRITE BLOCK LOCK",
1009 "READ MISS0", "READ MISS1", "READ MISS MOD0",
1010 "READ MISS MOD1", "BCACHE VICTIM", "Spare",
1011 "READ MISS MOD STC0", "READ MISS MOD STC1"
1019 addr
= cia
->cpu_err0
& 0xfffffff0;
1020 addr
|= (cia
->cpu_err1
& 0x83UL
) << 32;
1021 cmd
= cmd_desc
[(cia
->cpu_err1
>> 8) & 0xF];
1022 mask
= (cia
->cpu_err1
>> 12) & 0xF;
1023 par
= (cia
->cpu_err1
>> 21) & 1;
1025 printk(KERN_CRIT
"CIA machine check: System bus parity error\n");
1026 printk(KERN_CRIT
" Command: %s, Parity bit: %d\n", cmd
, par
);
1027 printk(KERN_CRIT
" Address: %#010lx, Mask: %#lx\n", addr
, mask
);
1031 cia_decode_mchk(unsigned long la_ptr
)
1033 struct el_common
*com
;
1034 struct el_CIA_sysdata_mcheck
*cia
;
1037 com
= (void *)la_ptr
;
1038 cia
= (void *)(la_ptr
+ com
->sys_offset
);
1040 if ((cia
->cia_err
& CIA_ERR_VALID
) == 0)
1043 which
= cia
->cia_err
& 0xfff;
1044 switch (ffs(which
) - 1) {
1045 case 0: /* CIA_ERR_COR_ERR */
1046 cia_decode_ecc_error(cia
, "Corrected ECC error");
1048 case 1: /* CIA_ERR_UN_COR_ERR */
1049 cia_decode_ecc_error(cia
, "Uncorrected ECC error");
1051 case 2: /* CIA_ERR_CPU_PE */
1052 cia_decode_parity_error(cia
);
1054 case 3: /* CIA_ERR_MEM_NEM */
1055 cia_decode_mem_error(cia
, "Access to nonexistent memory");
1057 case 4: /* CIA_ERR_PCI_SERR */
1058 cia_decode_pci_error(cia
, "PCI bus system error");
1060 case 5: /* CIA_ERR_PERR */
1061 cia_decode_pci_error(cia
, "PCI data parity error");
1063 case 6: /* CIA_ERR_PCI_ADDR_PE */
1064 cia_decode_pci_error(cia
, "PCI address parity error");
1066 case 7: /* CIA_ERR_RCVD_MAS_ABT */
1067 cia_decode_pci_error(cia
, "PCI master abort");
1069 case 8: /* CIA_ERR_RCVD_TAR_ABT */
1070 cia_decode_pci_error(cia
, "PCI target abort");
1072 case 9: /* CIA_ERR_PA_PTE_INV */
1073 cia_decode_pci_error(cia
, "PCI invalid PTE");
1075 case 10: /* CIA_ERR_FROM_WRT_ERR */
1076 cia_decode_mem_error(cia
, "Write to flash ROM attempted");
1078 case 11: /* CIA_ERR_IOA_TIMEOUT */
1079 cia_decode_pci_error(cia
, "I/O timeout");
1083 if (cia
->cia_err
& CIA_ERR_LOST_CORR_ERR
)
1084 printk(KERN_CRIT
"CIA lost machine check: "
1085 "Correctable ECC error\n");
1086 if (cia
->cia_err
& CIA_ERR_LOST_UN_CORR_ERR
)
1087 printk(KERN_CRIT
"CIA lost machine check: "
1088 "Uncorrectable ECC error\n");
1089 if (cia
->cia_err
& CIA_ERR_LOST_CPU_PE
)
1090 printk(KERN_CRIT
"CIA lost machine check: "
1091 "System bus parity error\n");
1092 if (cia
->cia_err
& CIA_ERR_LOST_MEM_NEM
)
1093 printk(KERN_CRIT
"CIA lost machine check: "
1094 "Access to nonexistent memory\n");
1095 if (cia
->cia_err
& CIA_ERR_LOST_PERR
)
1096 printk(KERN_CRIT
"CIA lost machine check: "
1097 "PCI data parity error\n");
1098 if (cia
->cia_err
& CIA_ERR_LOST_PCI_ADDR_PE
)
1099 printk(KERN_CRIT
"CIA lost machine check: "
1100 "PCI address parity error\n");
1101 if (cia
->cia_err
& CIA_ERR_LOST_RCVD_MAS_ABT
)
1102 printk(KERN_CRIT
"CIA lost machine check: "
1103 "PCI master abort\n");
1104 if (cia
->cia_err
& CIA_ERR_LOST_RCVD_TAR_ABT
)
1105 printk(KERN_CRIT
"CIA lost machine check: "
1106 "PCI target abort\n");
1107 if (cia
->cia_err
& CIA_ERR_LOST_PA_PTE_INV
)
1108 printk(KERN_CRIT
"CIA lost machine check: "
1109 "PCI invalid PTE\n");
1110 if (cia
->cia_err
& CIA_ERR_LOST_FROM_WRT_ERR
)
1111 printk(KERN_CRIT
"CIA lost machine check: "
1112 "Write to flash ROM attempted\n");
1113 if (cia
->cia_err
& CIA_ERR_LOST_IOA_TIMEOUT
)
1114 printk(KERN_CRIT
"CIA lost machine check: "
/*
 * Top-level CIA machine-check handler.  Clears the latched error
 * state first so further errors are not lost, then decodes the logout
 * frame unless the mcheck was one we provoked ourselves (config-space
 * probes and boot-time TLB tests set mcheck_expected).
 */
void
cia_machine_check(unsigned long vector, unsigned long la_ptr,
		  struct pt_regs * regs)
{
	int expected;

	/* Clear the error before any reporting. */
	mb();
	mb();  /* magic */
	draina();
	cia_pci_clr_err();
	wrmces(rdmces());	/* reset machine check pending flag. */
	mb();

	expected = mcheck_expected(0);
	if (!expected && vector == 0x660)
		expected = cia_decode_mchk(la_ptr);
	process_mcheck_info(vector, la_ptr, regs, "CIA", expected);
}