/*
 *	linux/arch/alpha/kernel/core_cia.c
 *
 * Written by David A Rusling (david.rusling@reo.mts.dec.com).
 *
 * Copyright (C) 1995  David A Rusling
 * Copyright (C) 1997, 1998  Jay Estabrook
 * Copyright (C) 1998, 1999, 2000  Richard Henderson
 *
 * Code common to all CIA core logic chips.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/hwrpb.h>

#define __EXTERN_INLINE inline
#include <asm/core_cia.h>
#undef __EXTERN_INLINE

#include <linux/bootmem.h>
/*
 * NOTE: Herein lie back-to-back mb instructions.  They are magic.
 * One plausible explanation is that the i/o controller does not properly
 * handle the system transaction.  Another involves timing.  Ho hum.
 */
#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBGC(args)	printk args
#else
# define DBGC(args)
#endif

#define vip	volatile int *
/*
 * Given a bus, device, and function number, compute resulting
 * configuration space address.  It is therefore not safe to have
 * concurrent invocations to configuration space access routines, but
 * there really shouldn't be any need for this.
 *
 * Type 0:
 *
 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	31:11	Device select bit.
 *	10:8	Function number
 *	 7:2	Register number
 *
 * Type 1:
 *
 *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
 *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 *	23:16	bus number (8 bits = 256 possible buses)
 *	15:11	Device number (5 bits)
 *	10:8	function number
 *	 7:2	register number
 *
 * Notes:
 *	The function number selects which function of a multi-function device
 *	(e.g., SCSI and Ethernet).
 *
 *	The register selects a DWORD (32 bit) register offset.  Hence it
 *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
 *	bits.
 */
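
/*
 * Worked example (added commentary, assuming the standard PCI_DEVFN
 * encoding from <linux/pci.h>): for bus 1, device 2, function 0,
 * register 0x10, mk_conf_addr() below packs
 *
 *	devfn = PCI_DEVFN(2, 0) = (2 << 3) | 0 = 0x10
 *	pci_addr = (1 << 16) | (0x10 << 8) | 0x10 = 0x11010
 *
 * so the device number lands in bits 15:11 and the function number in
 * bits 10:8, matching the Type 1 layout above.
 */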
static int
mk_conf_addr(struct pci_dev *dev, int where, unsigned long *pci_addr,
	     unsigned char *type1)
{
	u8 bus = dev->bus->number;
	u8 device_fn = dev->devfn;

	*type1 = (bus != 0);
	*pci_addr = (bus << 16) | (device_fn << 8) | where;

	DBGC(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x,"
	      " returning address 0x%p\n",
	      bus, device_fn, where, *pci_addr));

	return 0;
}
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
	unsigned long flags;
	int stat0, value;
	int cia_cfg = 0;

	DBGC(("conf_read(addr=0x%lx, type1=%d) ", addr, type1));
	__save_and_cli(flags);

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = stat0;
	mb();

	/* If Type1 access, must set CIA CFG.  */
	if (type1) {
		cia_cfg = *(vip)CIA_IOC_CFG;
		*(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
		mb();
	}

	mb();
	draina();
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();

	/* Access configuration space.  */
	value = *(vip)addr;
	mb();
	mb();  /* magic */
	if (mcheck_taken(0)) {
		mcheck_taken(0) = 0;
		value = 0xffffffff;
		mb();
	}
	mcheck_expected(0) = 0;
	mb();

	/* If Type1 access, must reset IOC CFG so normal IO space ops work.  */
	if (type1) {
		*(vip)CIA_IOC_CFG = cia_cfg;
		mb();
	}

	__restore_flags(flags);
	return value;
}
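
/*
 * Note (added commentary): the mcheck_expected/mcheck_taken dance above
 * is what makes probing absent devices possible at all.  A config read
 * to a nonexistent device triggers a machine check; cia_machine_check()
 * at the bottom of this file sees mcheck_expected(0) set, swallows the
 * exception, and conf_read() reports 0xffffffff -- the conventional
 * "no device here" answer.
 */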
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
	unsigned long flags;
	int stat0, cia_cfg = 0;

	DBGC(("conf_write(addr=0x%lx, type1=%d) ", addr, type1));
	__save_and_cli(flags);

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = stat0;
	mb();

	/* If Type1 access, must set CIA CFG.  */
	if (type1) {
		cia_cfg = *(vip)CIA_IOC_CFG;
		*(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
		mb();
	}

	mb();
	draina();
	mcheck_expected(0) = 1;
	mb();

	/* Access configuration space.  */
	*(vip)addr = value;
	mb();
	mb();  /* magic */
	mcheck_expected(0) = 0;
	mb();

	/* If Type1 access, must reset IOC CFG so normal IO space ops work.  */
	if (type1) {
		*(vip)CIA_IOC_CFG = cia_cfg;
		mb();
	}

	__restore_flags(flags);
}
static int
cia_read_config_byte(struct pci_dev *dev, int where, u8 *value)
{
	unsigned long addr, pci_addr;
	unsigned char type1;

	if (mk_conf_addr(dev, where, &pci_addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = (pci_addr << 5) + 0x00 + CIA_CONF;
	*value = conf_read(addr, type1) >> ((where & 3) * 8);
	return PCIBIOS_SUCCESSFUL;
}
static int
cia_read_config_word(struct pci_dev *dev, int where, u16 *value)
{
	unsigned long addr, pci_addr;
	unsigned char type1;

	if (mk_conf_addr(dev, where, &pci_addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = (pci_addr << 5) + 0x08 + CIA_CONF;
	*value = conf_read(addr, type1) >> ((where & 3) * 8);
	return PCIBIOS_SUCCESSFUL;
}
static int
cia_read_config_dword(struct pci_dev *dev, int where, u32 *value)
{
	unsigned long addr, pci_addr;
	unsigned char type1;

	if (mk_conf_addr(dev, where, &pci_addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = (pci_addr << 5) + 0x18 + CIA_CONF;
	*value = conf_read(addr, type1);
	return PCIBIOS_SUCCESSFUL;
}
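
/*
 * Note (added commentary): the 0x00/0x08/0x18 offsets above are the
 * Alpha sparse-space transfer-length encoding.  pci_addr is shifted up
 * 5 bits and the low address bits tell the chip whether to run a byte,
 * word, or longword cycle on the bus.  cia_write_config() below takes
 * the same constant as its 'mask' argument for the write path.
 */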
static int
cia_write_config(struct pci_dev *dev, int where, u32 value, long mask)
{
	unsigned long addr, pci_addr;
	unsigned char type1;

	if (mk_conf_addr(dev, where, &pci_addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	addr = (pci_addr << 5) + mask + CIA_CONF;
	conf_write(addr, value << ((where & 3) * 8), type1);
	return PCIBIOS_SUCCESSFUL;
}
static int
cia_write_config_byte(struct pci_dev *dev, int where, u8 value)
{
	return cia_write_config(dev, where, value, 0x00);
}
static int
cia_write_config_word(struct pci_dev *dev, int where, u16 value)
{
	return cia_write_config(dev, where, value, 0x08);
}
static int
cia_write_config_dword(struct pci_dev *dev, int where, u32 value)
{
	return cia_write_config(dev, where, value, 0x18);
}
struct pci_ops cia_pci_ops =
{
	read_byte:	cia_read_config_byte,
	read_word:	cia_read_config_word,
	read_dword:	cia_read_config_dword,
	write_byte:	cia_write_config_byte,
	write_word:	cia_write_config_word,
	write_dword:	cia_write_config_dword
};
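
/*
 * Sketch (illustrative, not part of the original file): the generic PCI
 * layer dispatches through these hooks, so a driver doing e.g.
 *
 *	u8 rev;
 *	pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
 *
 * ends up in cia_read_config_byte() above on CIA-based machines.
 */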
/*
 * CIA Pass 1 and PYXIS Pass 1 and 2 have a broken scatter-gather tlb.
 * It cannot be invalidated.  Rather than hard code the pass numbers,
 * actually try the tbia to see if it works.
 */

static void
cia_pci_tbi(struct pci_controler *hose, dma_addr_t start, dma_addr_t end)
{
	wmb();
	*(vip)CIA_IOC_PCI_TBIA = 3;	/* Flush all locked and unlocked.  */
	mb();
	*(vip)CIA_IOC_PCI_TBIA;
}
/*
 * Fixup attempt number 1.
 *
 * Write zeros directly into the tag registers.
 */

static void
cia_pci_tbi_try1(struct pci_controler *hose,
		 dma_addr_t start, dma_addr_t end)
{
	wmb();
	*(vip)CIA_IOC_TB_TAGn(0) = 0;
	*(vip)CIA_IOC_TB_TAGn(1) = 0;
	*(vip)CIA_IOC_TB_TAGn(2) = 0;
	*(vip)CIA_IOC_TB_TAGn(3) = 0;
	*(vip)CIA_IOC_TB_TAGn(4) = 0;
	*(vip)CIA_IOC_TB_TAGn(5) = 0;
	*(vip)CIA_IOC_TB_TAGn(6) = 0;
	*(vip)CIA_IOC_TB_TAGn(7) = 0;
	mb();
	*(vip)CIA_IOC_TB_TAGn(0);
}
/*
 * Fixup attempt number 2.  This is the method NT and NetBSD use.
 *
 * Allocate mappings, and put the chip into DMA loopback mode to read a
 * garbage page.  This works by causing TLB misses, causing old entries to
 * be purged to make room for the new entries coming in for the garbage page.
 */

#define CIA_BROKEN_TBI_TRY2_BASE	0xE0000000
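
/*
 * Note (added commentary, assuming the 8KB Alpha page size): each of
 * the 8 SG TLB entries caches 4 PTEs, i.e. covers 4 * 8KB = 32KB of
 * bus address space, which is why the reads in cia_pci_tbi_try2()
 * below step by 32768 -- one TLB miss per entry.
 */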
static void __init
cia_enable_broken_tbi_try2(void)
{
	unsigned long *ppte, pte;
	long i;

	ppte = __alloc_bootmem(PAGE_SIZE, 32768, 0);
	pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); ++i)
		ppte[i] = pte;

	*(vip)CIA_IOC_PCI_W3_BASE = CIA_BROKEN_TBI_TRY2_BASE | 3;
	*(vip)CIA_IOC_PCI_W3_MASK = (PAGE_SIZE - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T3_BASE = virt_to_phys(ppte) >> 2;
}
static void
cia_pci_tbi_try2(struct pci_controler *hose,
		 dma_addr_t start, dma_addr_t end)
{
	unsigned long flags;
	unsigned long bus_addr;
	int ctrl;
	long i;

	__save_and_cli(flags);

	/* Put the chip into PCI loopback mode.  */
	mb();
	ctrl = *(vip)CIA_IOC_CIA_CTRL;
	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();

	/* Read from PCI dense memory space at TBI_ADDR, skipping 32k on
	   each read.  This forces SG TLB misses.  NetBSD claims that the
	   TLB entries are not quite LRU, meaning that we need to read more
	   times than there are actual tags.  The 2117x docs claim strict
	   round-robin.  Oh well, we've come this far...  */

	bus_addr = cia_ioremap(CIA_BROKEN_TBI_TRY2_BASE);
	for (i = 0; i < 12; ++i, bus_addr += 32768)
		cia_readl(bus_addr);

	/* Restore normal PCI operation.  */
	mb();
	*(vip)CIA_IOC_CIA_CTRL = ctrl;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();

	__restore_flags(flags);
}
static void __init
verify_tb_operation(void)
{
	static int page[PAGE_SIZE/4]
		__attribute__((aligned(PAGE_SIZE)))
		__initlocaldata = { 0 };

	struct pci_iommu_arena *arena = pci_isa_hose->sg_isa;
	int ctrl, addr0, tag0, pte0, data0;
	int temp;
	/* Put the chip into PCI loopback mode.  */
	mb();
	ctrl = *(vip)CIA_IOC_CIA_CTRL;
	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();
	/* Write a valid entry directly into the TLB registers.  */

	addr0 = arena->dma_base;
	tag0 = addr0 | 1;
	pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1;

	*(vip)CIA_IOC_TB_TAGn(0) = tag0;
	*(vip)CIA_IOC_TB_TAGn(1) = 0;
	*(vip)CIA_IOC_TB_TAGn(2) = 0;
	*(vip)CIA_IOC_TB_TAGn(3) = 0;
	*(vip)CIA_IOC_TB_TAGn(4) = 0;
	*(vip)CIA_IOC_TB_TAGn(5) = 0;
	*(vip)CIA_IOC_TB_TAGn(6) = 0;
	*(vip)CIA_IOC_TB_TAGn(7) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,0) = pte0;
	*(vip)CIA_IOC_TBn_PAGEm(0,1) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,2) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,3) = 0;
	mb();
	/* First, verify we can read back what we've written.  If
	   this fails, we can't be sure of any of the other testing
	   we're going to do, so bail.  */
	/* ??? Actually, we could do the work with machine checks.
	   By passing this register update test, we pretty much
	   guarantee that cia_pci_tbi_try1 works.  If this test
	   fails, cia_pci_tbi_try2 might still work.  */

	temp = *(vip)CIA_IOC_TB_TAGn(0);
	if (temp != tag0) {
		printk("pci: failed tb register update test "
		       "(tag0 %#x != %#x)\n", temp, tag0);
		goto failed;
	}
	temp = *(vip)CIA_IOC_TB_TAGn(1);
	if (temp != 0) {
		printk("pci: failed tb register update test "
		       "(tag1 %#x != 0)\n", temp);
		goto failed;
	}
	temp = *(vip)CIA_IOC_TBn_PAGEm(0,0);
	if (temp != pte0) {
		printk("pci: failed tb register update test "
		       "(pte0 %#x != %#x)\n", temp, pte0);
		goto failed;
	}
	printk("pci: passed tb register update test\n");
	/* Second, verify we can actually do I/O through this entry.  */

	data0 = 0xdeadbeef;
	page[0] = data0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(cia_ioremap(addr0));
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed sg loopback i/o read test (mcheck)\n");
		goto failed;
	}
	if (temp != data0) {
		printk("pci: failed sg loopback i/o read test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	}
	printk("pci: passed sg loopback i/o read test\n");
	/* Third, try to invalidate the TLB.  */

	cia_pci_tbi(arena->hose, 0, -1);
	temp = *(vip)CIA_IOC_TB_TAGn(0);
	if (temp & 1) {
		cia_pci_tbi_try1(arena->hose, 0, -1);

		temp = *(vip)CIA_IOC_TB_TAGn(0);
		if (temp & 1) {
			printk("pci: failed tbia test; "
			       "no usable workaround\n");
			goto failed;
		}

		alpha_mv.mv_pci_tbi = cia_pci_tbi_try1;
		printk("pci: failed tbia test; workaround 1 succeeded\n");
	} else
		printk("pci: passed tbia test\n");
	/* Fourth, verify the TLB snoops the EV5's caches when
	   doing a TLB fill.  */

	arena->ptes[4] = pte0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(cia_ioremap(addr0 + 4*PAGE_SIZE));
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed pte write cache snoop test (mcheck)\n");
		goto failed;
	}
	if (temp != data0) {
		printk("pci: failed pte write cache snoop test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	}
	printk("pci: passed pte write cache snoop test\n");
	/* Fifth, verify that a previously invalid PTE entry gets
	   filled from the page table.  */

	arena->ptes[5] = pte0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(cia_ioremap(addr0 + 5*PAGE_SIZE));
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed valid tag invalid pte reload test "
		       "(mcheck; workaround available)\n");
		/* Work around this bug by aligning new allocations
		   on 4 page boundaries.  */
		arena->align_entry = 4;
	} else if (temp != data0) {
		printk("pci: failed valid tag invalid pte reload test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	} else
		printk("pci: passed valid tag invalid pte reload test\n");
	/* Sixth, verify machine checks are working.  Test invalid
	   pte under the same valid tag as we used above.  */

	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(cia_ioremap(addr0 + 6*PAGE_SIZE));
	mb();
	mcheck_expected(0) = 0;
	mb();

	printk("pci: %s pci machine check test\n",
	       mcheck_taken(0) ? "passed" : "failed");
	/* Clean up after the tests.  */

	arena->ptes[4] = 0;
	arena->ptes[5] = 0;
	alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

exit:
	/* Restore normal PCI operation.  */
	mb();
	*(vip)CIA_IOC_CIA_CTRL = ctrl;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();
	return;
failed:
	printk("pci: disabling sg translation window\n");
	*(vip)CIA_IOC_PCI_W0_BASE = 0;
	alpha_mv.mv_pci_tbi = NULL;
	goto exit;
}
static void __init
do_init_arch(int is_pyxis)
{
	struct pci_controler *hose;
	int temp, cia_rev;

	cia_rev = *(vip)CIA_IOC_CIA_REV & CIA_REV_MASK;
	printk("pci: cia revision %d%s\n",
	       cia_rev, is_pyxis ? " (pyxis)" : "");
	/* Set up error reporting.  */
	temp = *(vip)CIA_IOC_ERR_MASK;
	temp &= ~(CIA_ERR_CPU_PE | CIA_ERR_MEM_NEM | CIA_ERR_PA_PTE_INV
		  | CIA_ERR_RCVD_MAS_ABT | CIA_ERR_RCVD_TAR_ABT);
	*(vip)CIA_IOC_ERR_MASK = temp;

	/* Clear all currently pending errors.  */
	*(vip)CIA_IOC_CIA_ERR = 0;

	/* Turn on mchecks.  */
	temp = *(vip)CIA_IOC_CIA_CTRL;
	temp |= CIA_CTRL_FILL_ERR_EN | CIA_CTRL_MCHK_ERR_EN;
	*(vip)CIA_IOC_CIA_CTRL = temp;
	/* Clear the CFG register, which gets used for PCI config space
	   accesses.  That is the way we want to use it, and we do not
	   want to depend on what ARC or SRM might have left behind.  */
	*(vip)CIA_IOC_CFG = 0;

	/* Zero the HAEs.  */
	*(vip)CIA_IOC_HAE_MEM = 0;
	*(vip)CIA_IOC_HAE_IO = 0;
	/* For PYXIS, we always use BWX bus and i/o accesses.  To that end,
	   make sure they're enabled on the controller.  */
	if (is_pyxis) {
		temp = *(vip)CIA_IOC_CIA_CNFG;
		temp |= CIA_CNFG_IOA_BWEN;
		*(vip)CIA_IOC_CIA_CNFG = temp;
	}

	/* Synchronize with all previous changes.  */
	mb();
	*(vip)CIA_IOC_CIA_REV;
	/*
	 * Create our single hose.
	 */

	pci_isa_hose = hose = alloc_pci_controler();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	if (! is_pyxis) {
		struct resource *hae_mem = alloc_resource();
		hose->mem_space = hae_mem;

		hae_mem->start = 0;
		hae_mem->end = CIA_MEM_R1_MASK;
		hae_mem->name = pci_hae0_name;
		hae_mem->flags = IORESOURCE_MEM;

		if (request_resource(&iomem_resource, hae_mem) < 0)
			printk(KERN_ERR "Failed to request HAE_MEM\n");

		hose->sparse_mem_base = CIA_SPARSE_MEM - IDENT_ADDR;
		hose->dense_mem_base = CIA_DENSE_MEM - IDENT_ADDR;
		hose->sparse_io_base = CIA_IO - IDENT_ADDR;
		hose->dense_io_base = 0;
	} else {
		hose->sparse_mem_base = 0;
		hose->dense_mem_base = CIA_BW_MEM - IDENT_ADDR;
		hose->sparse_io_base = 0;
		hose->dense_io_base = CIA_BW_IO - IDENT_ADDR;
	}
	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Window 0 is scatter-gather 8MB at 8MB (for isa).
	 * Window 1 is direct access 1GB at 1GB.
	 * Window 2 is direct access 1GB at 2GB.
	 * Window 3 is DISABLED.
	 *
	 * We must actually use 2 windows to direct-map the 2GB space,
	 * because of an idiot-syncrasy of the CYPRESS chip used on
	 * many PYXIS systems.  It may respond to a PCI bus address in
	 * the last 1MB of the 4GB address range.
	 *
	 * ??? NetBSD hints that page tables must be aligned to 32K,
	 * possibly due to a hardware bug.  This is over-aligned
	 * from the 8K alignment one would expect for an 8MB window.
	 * No description of what revisions affected.
	 */
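
	/*
	 * Worked example (added commentary): with window 1 as programmed
	 * below (W1_BASE = 0x40000000, 1GB, T1_BASE = 0), a device DMA to
	 * bus address 0x50000000 hits the window at offset 0x10000000 and
	 * is forwarded to main memory at 0x10000000.  That is exactly the
	 * __direct_map_base = 0x40000000 offset that the DMA mapping code
	 * adds to a physical address.
	 */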
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 32768);
	__direct_map_base = 0x40000000;
	__direct_map_size = 0x80000000;

	*(vip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3;
	*(vip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;

	*(vip)CIA_IOC_PCI_W1_BASE = 0x40000000 | 1;
	*(vip)CIA_IOC_PCI_W1_MASK = (0x40000000 - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T1_BASE = 0;

	*(vip)CIA_IOC_PCI_W2_BASE = 0x80000000 | 1;
	*(vip)CIA_IOC_PCI_W2_MASK = (0x40000000 - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T2_BASE = 0x40000000;

	*(vip)CIA_IOC_PCI_W3_BASE = 0;
}
void __init
pyxis_init_arch(void)
{
	/* On pyxis machines we can precisely calculate the
	   CPU clock frequency using pyxis real time counter.
	   It's especially useful for SX164 with broken RTC.

	   Both CPU and chipset are driven by the single 16.666M
	   or 16.667M crystal oscillator.  PYXIS_RT_COUNT clock is
	   66.66 MHz.  */

	unsigned int cc0, cc1;
	unsigned long pyxis_cc;

	__asm__ __volatile__ ("rpcc %0" : "=r"(cc0));
	pyxis_cc = *(vulp)PYXIS_RT_COUNT;
	do { } while(*(vulp)PYXIS_RT_COUNT - pyxis_cc < 4096);
	__asm__ __volatile__ ("rpcc %0" : "=r"(cc1));
	cc1 -= cc0;
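	/* Derivation (added commentary): the RT counter ticks at
	   66.66 MHz = 2e8/3 Hz, so 4096 counter ticks take
	   4096*3/2e8 seconds, and

		cycle_freq = delta_rpcc * (2e8/3) / 4096
			   = (delta_rpcc >> 11) * 100000000 / 3,

	   which is the expression below.  */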
	hwrpb->cycle_freq = ((cc1 >> 11) * 100000000UL) / 3;
	hwrpb_update_checksum(hwrpb);

	do_init_arch(1);
}
	/* Must delay this from init_arch, as we need machine checks.  */
	verify_tb_operation();
static inline void
cia_pci_clr_err(void)
{
	int jd;

	jd = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = jd;
	mb();
	*(vip)CIA_IOC_CIA_ERR;		/* re-read to force write.  */
}
void
cia_machine_check(unsigned long vector, unsigned long la_ptr,
		  struct pt_regs * regs)
{
	int expected;

	/* Clear the error before any reporting.  */
	mb();
	mb();  /* magic */
	draina();
	cia_pci_clr_err();
	wrmces(rdmces());	/* reset machine check pending flag.  */
	mb();

	expected = mcheck_expected(0);
	if (!expected && vector == 0x660) {
		struct el_common *com;
		struct el_common_EV5_uncorrectable_mcheck *ev5;
		struct el_CIA_sysdata_mcheck *cia;

		com = (void *)la_ptr;
		ev5 = (void *)(la_ptr + com->proc_offset);
		cia = (void *)(la_ptr + com->sys_offset);

		if (com->code == 0x202) {
			printk(KERN_CRIT "CIA PCI machine check: err0=%08x "
			       "err1=%08x err2=%08x\n",
			       (int) cia->pci_err0, (int) cia->pci_err1,
			       (int) cia->pci_err2);
			expected = 1;
		}
	}
	process_mcheck_info(vector, la_ptr, regs, "CIA", expected);
}