Ok. I didn't make 2.4.0 in 2000. Tough. I tried, but we had some
[davej-history.git] / arch / alpha / kernel / core_cia.c
blob6203b02476803664d1fa676a9355aa1268e23928
1 /*
2 * linux/arch/alpha/kernel/core_cia.c
4 * Written by David A Rusling (david.rusling@reo.mts.dec.com).
5 * December 1995.
7 * Copyright (C) 1995 David A Rusling
8 * Copyright (C) 1997, 1998 Jay Estabrook
9 * Copyright (C) 1998, 1999, 2000 Richard Henderson
11 * Code common to all CIA core logic chips.
14 #include <linux/kernel.h>
15 #include <linux/types.h>
16 #include <linux/pci.h>
17 #include <linux/sched.h>
18 #include <linux/init.h>
20 #include <asm/system.h>
21 #include <asm/ptrace.h>
22 #include <asm/hwrpb.h>
24 #define __EXTERN_INLINE inline
25 #include <asm/io.h>
26 #include <asm/core_cia.h>
27 #undef __EXTERN_INLINE
29 #include <linux/bootmem.h>
31 #include "proto.h"
32 #include "pci_impl.h"
36 * NOTE: Herein lie back-to-back mb instructions. They are magic.
37 * One plausible explanation is that the i/o controller does not properly
38 * handle the system transaction. Another involves timing. Ho hum.
/* Set DEBUG_CONFIG to 1 to trace PCI configuration-space accesses.
   DBGC takes a double-parenthesized argument list so the varargs
   collapse to nothing when debugging is disabled.  */
#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBGC(args)	printk args
#else
# define DBGC(args)
#endif

/* Shorthand for the volatile 32-bit MMIO accesses used throughout.  */
#define vip	volatile int *
51 * Given a bus, device, and function number, compute resulting
52 * configuration space address. It is therefore not safe to have
53 * concurrent invocations to configuration space access routines, but
54 * there really shouldn't be any need for this.
56 * Type 0:
58 * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
59 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
60 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
61 * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
62 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
64 * 31:11 Device select bit.
65 * 10:8 Function number
66 * 7:2 Register number
68 * Type 1:
70 * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
71 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
72 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
73 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
74 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
76 * 31:24 reserved
 77 * 23:16 bus number (8 bits = 256 possible buses)
78 * 15:11 Device number (5 bits)
79 * 10:8 function number
80 * 7:2 register number
82 * Notes:
83 * The function number selects which function of a multi-function device
84 * (e.g., SCSI and Ethernet).
86 * The register selects a DWORD (32 bit) register offset. Hence it
87 * doesn't get shifted by 2 bits as we want to "drop" the bottom two
88 * bits.
91 static int
92 mk_conf_addr(struct pci_dev *dev, int where, unsigned long *pci_addr,
93 unsigned char *type1)
95 u8 bus = dev->bus->number;
96 u8 device_fn = dev->devfn;
98 *type1 = (bus != 0);
99 *pci_addr = (bus << 16) | (device_fn << 8) | where;
101 DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
102 " returning address 0x%p\n"
103 bus, device_fn, where, *pci_addr));
105 return 0;
108 static unsigned int
109 conf_read(unsigned long addr, unsigned char type1)
111 unsigned long flags;
112 int stat0, value;
113 int cia_cfg = 0;
115 DBGC(("conf_read(addr=0x%lx, type1=%d) ", addr, type1));
116 __save_and_cli(flags);
118 /* Reset status register to avoid losing errors. */
119 stat0 = *(vip)CIA_IOC_CIA_ERR;
120 *(vip)CIA_IOC_CIA_ERR = stat0;
121 mb();
122 *(vip)CIA_IOC_CIA_ERR; /* re-read to force write */
124 /* If Type1 access, must set CIA CFG. */
125 if (type1) {
126 cia_cfg = *(vip)CIA_IOC_CFG;
127 *(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
128 mb();
129 *(vip)CIA_IOC_CFG;
132 mb();
133 draina();
134 mcheck_expected(0) = 1;
135 mcheck_taken(0) = 0;
136 mb();
138 /* Access configuration space. */
139 value = *(vip)addr;
140 mb();
141 mb(); /* magic */
142 if (mcheck_taken(0)) {
143 mcheck_taken(0) = 0;
144 value = 0xffffffff;
145 mb();
147 mcheck_expected(0) = 0;
148 mb();
150 /* If Type1 access, must reset IOC CFG so normal IO space ops work. */
151 if (type1) {
152 *(vip)CIA_IOC_CFG = cia_cfg;
153 mb();
154 *(vip)CIA_IOC_CFG;
157 __restore_flags(flags);
158 DBGC(("done\n"));
160 return value;
163 static void
164 conf_write(unsigned long addr, unsigned int value, unsigned char type1)
166 unsigned long flags;
167 int stat0, cia_cfg = 0;
169 DBGC(("conf_write(addr=0x%lx, type1=%d) ", addr, type1));
170 __save_and_cli(flags);
172 /* Reset status register to avoid losing errors. */
173 stat0 = *(vip)CIA_IOC_CIA_ERR;
174 *(vip)CIA_IOC_CIA_ERR = stat0;
175 mb();
176 *(vip)CIA_IOC_CIA_ERR; /* re-read to force write */
178 /* If Type1 access, must set CIA CFG. */
179 if (type1) {
180 cia_cfg = *(vip)CIA_IOC_CFG;
181 *(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
182 mb();
183 *(vip)CIA_IOC_CFG;
186 mb();
187 draina();
188 mcheck_expected(0) = 1;
189 mcheck_taken(0) = 0;
190 mb();
192 /* Access configuration space. */
193 *(vip)addr = value;
194 mb();
195 *(vip)addr; /* read back to force the write */
197 mcheck_expected(0) = 0;
198 mb();
200 /* If Type1 access, must reset IOC CFG so normal IO space ops work. */
201 if (type1) {
202 *(vip)CIA_IOC_CFG = cia_cfg;
203 mb();
204 *(vip)CIA_IOC_CFG;
207 __restore_flags(flags);
208 DBGC(("done\n"));
211 static int
212 cia_read_config_byte(struct pci_dev *dev, int where, u8 *value)
214 unsigned long addr, pci_addr;
215 unsigned char type1;
217 if (mk_conf_addr(dev, where, &pci_addr, &type1))
218 return PCIBIOS_DEVICE_NOT_FOUND;
220 addr = (pci_addr << 5) + 0x00 + CIA_CONF;
221 *value = conf_read(addr, type1) >> ((where & 3) * 8);
222 return PCIBIOS_SUCCESSFUL;
225 static int
226 cia_read_config_word(struct pci_dev *dev, int where, u16 *value)
228 unsigned long addr, pci_addr;
229 unsigned char type1;
231 if (mk_conf_addr(dev, where, &pci_addr, &type1))
232 return PCIBIOS_DEVICE_NOT_FOUND;
234 addr = (pci_addr << 5) + 0x08 + CIA_CONF;
235 *value = conf_read(addr, type1) >> ((where & 3) * 8);
236 return PCIBIOS_SUCCESSFUL;
239 static int
240 cia_read_config_dword(struct pci_dev *dev, int where, u32 *value)
242 unsigned long addr, pci_addr;
243 unsigned char type1;
245 if (mk_conf_addr(dev, where, &pci_addr, &type1))
246 return PCIBIOS_DEVICE_NOT_FOUND;
248 addr = (pci_addr << 5) + 0x18 + CIA_CONF;
249 *value = conf_read(addr, type1);
250 return PCIBIOS_SUCCESSFUL;
253 static int
254 cia_write_config(struct pci_dev *dev, int where, u32 value, long mask)
256 unsigned long addr, pci_addr;
257 unsigned char type1;
259 if (mk_conf_addr(dev, where, &pci_addr, &type1))
260 return PCIBIOS_DEVICE_NOT_FOUND;
262 addr = (pci_addr << 5) + mask + CIA_CONF;
263 conf_write(addr, value << ((where & 3) * 8), type1);
264 return PCIBIOS_SUCCESSFUL;
267 static int
268 cia_write_config_byte(struct pci_dev *dev, int where, u8 value)
270 return cia_write_config(dev, where, value, 0x00);
273 static int
274 cia_write_config_word(struct pci_dev *dev, int where, u16 value)
276 return cia_write_config(dev, where, value, 0x08);
279 static int
280 cia_write_config_dword(struct pci_dev *dev, int where, u32 value)
282 return cia_write_config(dev, where, value, 0x18);
285 struct pci_ops cia_pci_ops =
287 read_byte: cia_read_config_byte,
288 read_word: cia_read_config_word,
289 read_dword: cia_read_config_dword,
290 write_byte: cia_write_config_byte,
291 write_word: cia_write_config_word,
292 write_dword: cia_write_config_dword
296 * CIA Pass 1 and PYXIS Pass 1 and 2 have a broken scatter-gather tlb.
297 * It cannot be invalidated. Rather than hard code the pass numbers,
298 * actually try the tbia to see if it works.
301 void
302 cia_pci_tbi(struct pci_controler *hose, dma_addr_t start, dma_addr_t end)
304 wmb();
305 *(vip)CIA_IOC_PCI_TBIA = 3; /* Flush all locked and unlocked. */
306 mb();
307 *(vip)CIA_IOC_PCI_TBIA;
311 * Fixup attempt number 1.
313 * Write zeros directly into the tag registers.
316 static void
317 cia_pci_tbi_try1(struct pci_controler *hose,
318 dma_addr_t start, dma_addr_t end)
320 wmb();
321 *(vip)CIA_IOC_TB_TAGn(0) = 0;
322 *(vip)CIA_IOC_TB_TAGn(1) = 0;
323 *(vip)CIA_IOC_TB_TAGn(2) = 0;
324 *(vip)CIA_IOC_TB_TAGn(3) = 0;
325 *(vip)CIA_IOC_TB_TAGn(4) = 0;
326 *(vip)CIA_IOC_TB_TAGn(5) = 0;
327 *(vip)CIA_IOC_TB_TAGn(6) = 0;
328 *(vip)CIA_IOC_TB_TAGn(7) = 0;
329 mb();
330 *(vip)CIA_IOC_TB_TAGn(0);
#if 0
/*
 * TLB-invalidate workaround #2 (currently compiled out).  This is the
 * method NT and NetBSD use: map a garbage page through window 3, put
 * the chip into PCI loopback mode, and read through the window so TLB
 * misses evict the stale entries.
 */

#define CIA_BROKEN_TBI_TRY2_BASE	0xE0000000

/* One-time setup: build a page table whose every PTE points at one
   garbage page, and point DMA window 3 at it.  */
static void __init
cia_enable_broken_tbi_try2(void)
{
	unsigned long *ppte, pte;
	long i;

	/* NetBSD hints the page table must be 32K aligned.  */
	ppte = __alloc_bootmem(PAGE_SIZE, 32768, 0);
	pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); ++i)
		ppte[i] = pte;

	*(vip)CIA_IOC_PCI_W3_BASE = CIA_BROKEN_TBI_TRY2_BASE | 3;
	*(vip)CIA_IOC_PCI_W3_MASK = (PAGE_SIZE - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T3_BASE = virt_to_phys(ppte) >> 2;
}

static void
cia_pci_tbi_try2(struct pci_controler *hose,
		 dma_addr_t start, dma_addr_t end)
{
	unsigned long flags;
	unsigned long bus_addr;
	int ctrl;
	long i;

	__save_and_cli(flags);

	/* Put the chip into PCI loopback mode. */
	mb();
	ctrl = *(vip)CIA_IOC_CIA_CTRL;
	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();

	/* Read from PCI dense memory space at TBI_ADDR, skipping 32k on
	   each read.  This forces SG TLB misses.  NetBSD claims that the
	   TLB entries are not quite LRU, meaning that we need to read more
	   times than there are actual tags.  The 2117x docs claim strict
	   round-robin.  Oh well, we've come this far...  */

	bus_addr = cia_ioremap(CIA_BROKEN_TBI_TRY2_BASE);
	for (i = 0; i < 12; ++i, bus_addr += 32768)
		cia_readl(bus_addr);

	/* Restore normal PCI operation. */
	mb();
	*(vip)CIA_IOC_CIA_CTRL = ctrl;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();

	__restore_flags(flags);
}
#endif
401 static void __init
402 verify_tb_operation(void)
404 static int page[PAGE_SIZE/4]
405 __attribute__((aligned(PAGE_SIZE)))
406 __initdata = { 0 };
408 struct pci_iommu_arena *arena = pci_isa_hose->sg_isa;
409 int ctrl, addr0, tag0, pte0, data0;
410 int temp;
412 /* Put the chip into PCI loopback mode. */
413 mb();
414 ctrl = *(vip)CIA_IOC_CIA_CTRL;
415 *(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
416 mb();
417 *(vip)CIA_IOC_CIA_CTRL;
418 mb();
420 /* Write a valid entry directly into the TLB registers. */
422 addr0 = arena->dma_base;
423 tag0 = addr0 | 1;
424 pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1;
426 *(vip)CIA_IOC_TB_TAGn(0) = tag0;
427 *(vip)CIA_IOC_TB_TAGn(1) = 0;
428 *(vip)CIA_IOC_TB_TAGn(2) = 0;
429 *(vip)CIA_IOC_TB_TAGn(3) = 0;
430 *(vip)CIA_IOC_TB_TAGn(4) = 0;
431 *(vip)CIA_IOC_TB_TAGn(5) = 0;
432 *(vip)CIA_IOC_TB_TAGn(6) = 0;
433 *(vip)CIA_IOC_TB_TAGn(7) = 0;
434 *(vip)CIA_IOC_TBn_PAGEm(0,0) = pte0;
435 *(vip)CIA_IOC_TBn_PAGEm(0,1) = 0;
436 *(vip)CIA_IOC_TBn_PAGEm(0,2) = 0;
437 *(vip)CIA_IOC_TBn_PAGEm(0,3) = 0;
438 mb();
440 /* First, verify we can read back what we've written. If
441 this fails, we can't be sure of any of the other testing
442 we're going to do, so bail. */
443 /* ??? Actually, we could do the work with machine checks.
444 By passing this register update test, we pretty much
445 guarantee that cia_pci_tbi_try1 works. If this test
446 fails, cia_pci_tbi_try2 might still work. */
448 temp = *(vip)CIA_IOC_TB_TAGn(0);
449 if (temp != tag0) {
450 printk("pci: failed tb register update test "
451 "(tag0 %#x != %#x)\n", temp, tag0);
452 goto failed;
454 temp = *(vip)CIA_IOC_TB_TAGn(1);
455 if (temp != 0) {
456 printk("pci: failed tb register update test "
457 "(tag1 %#x != 0)\n", temp);
458 goto failed;
460 temp = *(vip)CIA_IOC_TBn_PAGEm(0,0);
461 if (temp != pte0) {
462 printk("pci: failed tb register update test "
463 "(pte0 %#x != %#x)\n", temp, pte0);
464 goto failed;
466 printk("pci: passed tb register update test\n");
468 /* Second, verify we can actually do I/O through this entry. */
470 data0 = 0xdeadbeef;
471 page[0] = data0;
472 mcheck_expected(0) = 1;
473 mcheck_taken(0) = 0;
474 mb();
475 temp = cia_readl(cia_ioremap(addr0));
476 mb();
477 mcheck_expected(0) = 0;
478 mb();
479 if (mcheck_taken(0)) {
480 printk("pci: failed sg loopback i/o read test (mcheck)\n");
481 goto failed;
483 if (temp != data0) {
484 printk("pci: failed sg loopback i/o read test "
485 "(%#x != %#x)\n", temp, data0);
486 goto failed;
488 printk("pci: passed sg loopback i/o read test\n");
490 /* Third, try to invalidate the TLB. */
492 cia_pci_tbi(arena->hose, 0, -1);
493 temp = *(vip)CIA_IOC_TB_TAGn(0);
494 if (temp & 1) {
495 cia_pci_tbi_try1(arena->hose, 0, -1);
497 temp = *(vip)CIA_IOC_TB_TAGn(0);
498 if (temp & 1) {
499 printk("pci: failed tbia test; "
500 "no usable workaround\n");
501 goto failed;
504 alpha_mv.mv_pci_tbi = cia_pci_tbi_try1;
505 printk("pci: failed tbia test; workaround 1 succeeded\n");
506 } else {
507 printk("pci: passed tbia test\n");
510 /* Fourth, verify the TLB snoops the EV5's caches when
511 doing a tlb fill. */
513 data0 = 0x5adda15e;
514 page[0] = data0;
515 arena->ptes[4] = pte0;
516 mcheck_expected(0) = 1;
517 mcheck_taken(0) = 0;
518 mb();
519 temp = cia_readl(cia_ioremap(addr0 + 4*PAGE_SIZE));
520 mb();
521 mcheck_expected(0) = 0;
522 mb();
523 if (mcheck_taken(0)) {
524 printk("pci: failed pte write cache snoop test (mcheck)\n");
525 goto failed;
527 if (temp != data0) {
528 printk("pci: failed pte write cache snoop test "
529 "(%#x != %#x)\n", temp, data0);
530 goto failed;
532 printk("pci: passed pte write cache snoop test\n");
534 /* Fifth, verify that a previously invalid PTE entry gets
535 filled from the page table. */
537 data0 = 0xabcdef12;
538 page[0] = data0;
539 arena->ptes[5] = pte0;
540 mcheck_expected(0) = 1;
541 mcheck_taken(0) = 0;
542 mb();
543 temp = cia_readl(cia_ioremap(addr0 + 5*PAGE_SIZE));
544 mb();
545 mcheck_expected(0) = 0;
546 mb();
547 if (mcheck_taken(0)) {
548 printk("pci: failed valid tag invalid pte reload test "
549 "(mcheck; workaround available)\n");
550 /* Work around this bug by aligning new allocations
551 on 4 page boundaries. */
552 arena->align_entry = 4;
553 } else if (temp != data0) {
554 printk("pci: failed valid tag invalid pte reload test "
555 "(%#x != %#x)\n", temp, data0);
556 goto failed;
557 } else {
558 printk("pci: passed valid tag invalid pte reload test\n");
561 /* Sixth, verify machine checks are working. Test invalid
562 pte under the same valid tag as we used above. */
564 mcheck_expected(0) = 1;
565 mcheck_taken(0) = 0;
566 mb();
567 temp = cia_readl(cia_ioremap(addr0 + 6*PAGE_SIZE));
568 mb();
569 mcheck_expected(0) = 0;
570 mb();
571 printk("pci: %s pci machine check test\n",
572 mcheck_taken(0) ? "passed" : "failed");
574 /* Clean up after the tests. */
575 arena->ptes[4] = 0;
576 arena->ptes[5] = 0;
577 alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
579 exit:
580 /* Restore normal PCI operation. */
581 mb();
582 *(vip)CIA_IOC_CIA_CTRL = ctrl;
583 mb();
584 *(vip)CIA_IOC_CIA_CTRL;
585 mb();
586 return;
588 failed:
589 printk("pci: disabling sg translation window\n");
590 *(vip)CIA_IOC_PCI_W0_BASE = 0;
591 alpha_mv.mv_pci_tbi = NULL;
592 goto exit;
595 static void __init
596 do_init_arch(int is_pyxis)
598 struct pci_controler *hose;
599 int temp;
600 int cia_rev;
602 cia_rev = *(vip)CIA_IOC_CIA_REV & CIA_REV_MASK;
603 printk("pci: cia revision %d%s\n",
604 cia_rev, is_pyxis ? " (pyxis)" : "");
606 /* Set up error reporting. */
607 temp = *(vip)CIA_IOC_ERR_MASK;
608 temp &= ~(CIA_ERR_CPU_PE | CIA_ERR_MEM_NEM | CIA_ERR_PA_PTE_INV
609 | CIA_ERR_RCVD_MAS_ABT | CIA_ERR_RCVD_TAR_ABT);
610 *(vip)CIA_IOC_ERR_MASK = temp;
612 /* Clear all currently pending errors. */
613 temp = *(vip)CIA_IOC_CIA_ERR;
614 *(vip)CIA_IOC_CIA_ERR = temp;
616 /* Turn on mchecks. */
617 temp = *(vip)CIA_IOC_CIA_CTRL;
618 temp |= CIA_CTRL_FILL_ERR_EN | CIA_CTRL_MCHK_ERR_EN;
619 *(vip)CIA_IOC_CIA_CTRL = temp;
621 /* Clear the CFG register, which gets used for PCI config space
622 accesses. That is the way we want to use it, and we do not
623 want to depend on what ARC or SRM might have left behind. */
624 *(vip)CIA_IOC_CFG = 0;
626 /* Zero the HAEs. */
627 *(vip)CIA_IOC_HAE_MEM = 0;
628 *(vip)CIA_IOC_HAE_IO = 0;
630 /* For PYXIS, we always use BWX bus and i/o accesses. To that end,
631 make sure they're enabled on the controler. */
632 if (is_pyxis) {
633 temp = *(vip)CIA_IOC_CIA_CNFG;
634 temp |= CIA_CNFG_IOA_BWEN;
635 *(vip)CIA_IOC_CIA_CNFG = temp;
638 /* Syncronize with all previous changes. */
639 mb();
640 *(vip)CIA_IOC_CIA_REV;
643 * Create our single hose.
646 pci_isa_hose = hose = alloc_pci_controler();
647 hose->io_space = &ioport_resource;
648 hose->mem_space = &iomem_resource;
649 hose->index = 0;
651 if (! is_pyxis) {
652 struct resource *hae_mem = alloc_resource();
653 hose->mem_space = hae_mem;
655 hae_mem->start = 0;
656 hae_mem->end = CIA_MEM_R1_MASK;
657 hae_mem->name = pci_hae0_name;
658 hae_mem->flags = IORESOURCE_MEM;
660 if (request_resource(&iomem_resource, hae_mem) < 0)
661 printk(KERN_ERR "Failed to request HAE_MEM\n");
663 hose->sparse_mem_base = CIA_SPARSE_MEM - IDENT_ADDR;
664 hose->dense_mem_base = CIA_DENSE_MEM - IDENT_ADDR;
665 hose->sparse_io_base = CIA_IO - IDENT_ADDR;
666 hose->dense_io_base = 0;
667 } else {
668 hose->sparse_mem_base = 0;
669 hose->dense_mem_base = CIA_BW_MEM - IDENT_ADDR;
670 hose->sparse_io_base = 0;
671 hose->dense_io_base = CIA_BW_IO - IDENT_ADDR;
675 * Set up the PCI to main memory translation windows.
677 * Window 0 is scatter-gather 8MB at 8MB (for isa)
678 * Window 1 is direct access 1GB at 1GB
679 * Window 2 is direct access 1GB at 2GB
681 * We must actually use 2 windows to direct-map the 2GB space,
682 * because of an idiot-syncrasy of the CYPRESS chip used on
683 * many PYXIS systems. It may respond to a PCI bus address in
684 * the last 1MB of the 4GB address range.
686 * ??? NetBSD hints that page tables must be aligned to 32K,
687 * possibly due to a hardware bug. This is over-aligned
688 * from the 8K alignment one would expect for an 8MB window.
689 * No description of what revisions affected.
692 hose->sg_pci = NULL;
693 hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 32768);
694 __direct_map_base = 0x40000000;
695 __direct_map_size = 0x80000000;
697 *(vip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3;
698 *(vip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000;
699 *(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;
701 *(vip)CIA_IOC_PCI_W1_BASE = 0x40000000 | 1;
702 *(vip)CIA_IOC_PCI_W1_MASK = (0x40000000 - 1) & 0xfff00000;
703 *(vip)CIA_IOC_PCI_T1_BASE = 0 >> 2;
705 *(vip)CIA_IOC_PCI_W2_BASE = 0x80000000 | 1;
706 *(vip)CIA_IOC_PCI_W2_MASK = (0x40000000 - 1) & 0xfff00000;
707 *(vip)CIA_IOC_PCI_T2_BASE = 0x40000000 >> 2;
709 *(vip)CIA_IOC_PCI_W3_BASE = 0;
712 void __init
713 cia_init_arch(void)
715 do_init_arch(0);
718 void __init
719 pyxis_init_arch(void)
721 /* On pyxis machines we can precisely calculate the
722 CPU clock frequency using pyxis real time counter.
723 It's especially useful for SX164 with broken RTC.
725 Both CPU and chipset are driven by the single 16.666M
726 or 16.667M crystal oscillator. PYXIS_RT_COUNT clock is
727 66.66 MHz. -ink */
729 unsigned int cc0, cc1;
730 unsigned long pyxis_cc;
732 __asm__ __volatile__ ("rpcc %0" : "=r"(cc0));
733 pyxis_cc = *(vulp)PYXIS_RT_COUNT;
734 do { } while(*(vulp)PYXIS_RT_COUNT - pyxis_cc < 4096);
735 __asm__ __volatile__ ("rpcc %0" : "=r"(cc1));
736 cc1 -= cc0;
737 hwrpb->cycle_freq = ((cc1 >> 11) * 100000000UL) / 3;
738 hwrpb_update_checksum(hwrpb);
740 do_init_arch(1);
743 void __init
744 cia_init_pci(void)
746 /* Must delay this from init_arch, as we need machine checks. */
747 verify_tb_operation();
748 common_init_pci();
751 static inline void
752 cia_pci_clr_err(void)
754 int jd;
756 jd = *(vip)CIA_IOC_CIA_ERR;
757 *(vip)CIA_IOC_CIA_ERR = jd;
758 mb();
759 *(vip)CIA_IOC_CIA_ERR; /* re-read to force write. */
762 static void
763 cia_decode_pci_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
765 static const char * const pci_cmd_desc[16] = {
766 "Interrupt Acknowledge", "Special Cycle", "I/O Read",
767 "I/O Write", "Reserved 0x4", "Reserved 0x5", "Memory Read",
768 "Memory Write", "Reserved 0x8", "Reserved 0x9",
769 "Configuration Read", "Configuration Write",
770 "Memory Read Multiple", "Dual Address Cycle",
771 "Memory Read Line", "Memory Write and Invalidate"
774 if (cia->cia_err & (CIA_ERR_COR_ERR
775 | CIA_ERR_UN_COR_ERR
776 | CIA_ERR_MEM_NEM
777 | CIA_ERR_PA_PTE_INV)) {
778 static const char * const window_desc[6] = {
779 "No window active", "Window 0 hit", "Window 1 hit",
780 "Window 2 hit", "Window 3 hit", "Monster window hit"
783 const char *window;
784 const char *cmd;
785 unsigned long addr, tmp;
786 int lock, dac;
788 cmd = pci_cmd_desc[cia->pci_err0 & 0x7];
789 lock = (cia->pci_err0 >> 4) & 1;
790 dac = (cia->pci_err0 >> 5) & 1;
792 tmp = (cia->pci_err0 >> 8) & 0x1F;
793 tmp = ffs(tmp);
794 window = window_desc[tmp];
796 addr = cia->pci_err1;
797 if (dac) {
798 tmp = *(vip)CIA_IOC_PCI_W_DAC & 0xFFUL;
799 addr |= tmp << 32;
802 printk(KERN_CRIT "CIA machine check: %s\n", msg);
803 printk(KERN_CRIT " DMA command: %s\n", cmd);
804 printk(KERN_CRIT " PCI address: %#010lx\n", addr);
805 printk(KERN_CRIT " %s, Lock: %d, DAC: %d\n",
806 window, lock, dac);
807 } else if (cia->cia_err & (CIA_ERR_PERR
808 | CIA_ERR_PCI_ADDR_PE
809 | CIA_ERR_RCVD_MAS_ABT
810 | CIA_ERR_RCVD_TAR_ABT
811 | CIA_ERR_IOA_TIMEOUT)) {
812 static const char * const master_st_desc[16] = {
813 "Idle", "Drive bus", "Address step cycle",
814 "Address cycle", "Data cycle", "Last read data cycle",
815 "Last write data cycle", "Read stop cycle",
816 "Write stop cycle", "Read turnaround cycle",
817 "Write turnaround cycle", "Reserved 0xB",
818 "Reserved 0xC", "Reserved 0xD", "Reserved 0xE",
819 "Unknown state"
821 static const char * const target_st_desc[16] = {
822 "Idle", "Busy", "Read data cycle", "Write data cycle",
823 "Read stop cycle", "Write stop cycle",
824 "Read turnaround cycle", "Write turnaround cycle",
825 "Read wait cycle", "Write wait cycle",
826 "Reserved 0xA", "Reserved 0xB", "Reserved 0xC",
827 "Reserved 0xD", "Reserved 0xE", "Unknown state"
830 const char *cmd;
831 const char *master, *target;
832 unsigned long addr, tmp;
833 int dac;
835 master = master_st_desc[(cia->pci_err0 >> 16) & 0xF];
836 target = target_st_desc[(cia->pci_err0 >> 20) & 0xF];
837 cmd = pci_cmd_desc[(cia->pci_err0 >> 24) & 0xF];
838 dac = (cia->pci_err0 >> 28) & 1;
840 addr = cia->pci_err2;
841 if (dac) {
842 tmp = *(volatile int *)CIA_IOC_PCI_W_DAC & 0xFFUL;
843 addr |= tmp << 32;
846 printk(KERN_CRIT "CIA machine check: %s\n", msg);
847 printk(KERN_CRIT " PCI command: %s\n", cmd);
848 printk(KERN_CRIT " Master state: %s, Target state: %s\n",
849 master, target);
850 printk(KERN_CRIT " PCI address: %#010lx, DAC: %d\n",
851 addr, dac);
852 } else {
853 printk(KERN_CRIT "CIA machine check: %s\n", msg);
854 printk(KERN_CRIT " Unknown PCI error\n");
855 printk(KERN_CRIT " PCI_ERR0 = %#08lx", cia->pci_err0);
856 printk(KERN_CRIT " PCI_ERR1 = %#08lx", cia->pci_err1);
857 printk(KERN_CRIT " PCI_ERR2 = %#08lx", cia->pci_err2);
861 static void
862 cia_decode_mem_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
864 unsigned long mem_port_addr;
865 unsigned long mem_port_mask;
866 const char *mem_port_cmd;
867 const char *seq_state;
868 const char *set_select;
869 unsigned long tmp;
871 /* If this is a DMA command, also decode the PCI bits. */
872 if ((cia->mem_err1 >> 20) & 1)
873 cia_decode_pci_error(cia, msg);
874 else
875 printk(KERN_CRIT "CIA machine check: %s\n", msg);
877 mem_port_addr = cia->mem_err0 & 0xfffffff0;
878 mem_port_addr |= (cia->mem_err1 & 0x83UL) << 32;
880 mem_port_mask = (cia->mem_err1 >> 12) & 0xF;
882 tmp = (cia->mem_err1 >> 8) & 0xF;
883 tmp |= ((cia->mem_err1 >> 20) & 1) << 4;
884 if ((tmp & 0x1E) == 0x06)
885 mem_port_cmd = "WRITE BLOCK or WRITE BLOCK LOCK";
886 else if ((tmp & 0x1C) == 0x08)
887 mem_port_cmd = "READ MISS or READ MISS MODIFY";
888 else if (tmp == 0x1C)
889 mem_port_cmd = "BC VICTIM";
890 else if ((tmp & 0x1E) == 0x0E)
891 mem_port_cmd = "READ MISS MODIFY";
892 else if ((tmp & 0x1C) == 0x18)
893 mem_port_cmd = "DMA READ or DMA READ MODIFY";
894 else if ((tmp & 0x1E) == 0x12)
895 mem_port_cmd = "DMA WRITE";
896 else
897 mem_port_cmd = "Unknown";
899 tmp = (cia->mem_err1 >> 16) & 0xF;
900 switch (tmp) {
901 case 0x0:
902 seq_state = "Idle";
903 break;
904 case 0x1:
905 seq_state = "DMA READ or DMA WRITE";
906 break;
907 case 0x2: case 0x3:
908 seq_state = "READ MISS (or READ MISS MODIFY) with victim";
909 break;
910 case 0x4: case 0x5: case 0x6:
911 seq_state = "READ MISS (or READ MISS MODIFY) with no victim";
912 break;
913 case 0x8: case 0x9: case 0xB:
914 seq_state = "Refresh";
915 break;
916 case 0xC:
917 seq_state = "Idle, waiting for DMA pending read";
918 break;
919 case 0xE: case 0xF:
920 seq_state = "Idle, ras precharge";
921 break;
922 default:
923 seq_state = "Unknown";
924 break;
927 tmp = (cia->mem_err1 >> 24) & 0x1F;
928 switch (tmp) {
929 case 0x00: set_select = "Set 0 selected"; break;
930 case 0x01: set_select = "Set 1 selected"; break;
931 case 0x02: set_select = "Set 2 selected"; break;
932 case 0x03: set_select = "Set 3 selected"; break;
933 case 0x04: set_select = "Set 4 selected"; break;
934 case 0x05: set_select = "Set 5 selected"; break;
935 case 0x06: set_select = "Set 6 selected"; break;
936 case 0x07: set_select = "Set 7 selected"; break;
937 case 0x08: set_select = "Set 8 selected"; break;
938 case 0x09: set_select = "Set 9 selected"; break;
939 case 0x0A: set_select = "Set A selected"; break;
940 case 0x0B: set_select = "Set B selected"; break;
941 case 0x0C: set_select = "Set C selected"; break;
942 case 0x0D: set_select = "Set D selected"; break;
943 case 0x0E: set_select = "Set E selected"; break;
944 case 0x0F: set_select = "Set F selected"; break;
945 case 0x10: set_select = "No set selected"; break;
946 case 0x1F: set_select = "Refresh cycle"; break;
947 default: set_select = "Unknown"; break;
950 printk(KERN_CRIT " Memory port command: %s\n", mem_port_cmd);
951 printk(KERN_CRIT " Memory port address: %#010lx, mask: %#lx\n",
952 mem_port_addr, mem_port_mask);
953 printk(KERN_CRIT " Memory sequencer state: %s\n", seq_state);
954 printk(KERN_CRIT " Memory set: %s\n", set_select);
957 static void
958 cia_decode_ecc_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
960 long syn;
961 long i;
962 const char *fmt;
964 cia_decode_mem_error(cia, msg);
966 syn = cia->cia_syn & 0xff;
967 if (syn == (syn & -syn)) {
968 fmt = KERN_CRIT " ECC syndrome %#x -- check bit %d\n";
969 i = ffs(syn) - 1;
970 } else {
971 static unsigned char const data_bit[64] = {
972 0xCE, 0xCB, 0xD3, 0xD5,
973 0xD6, 0xD9, 0xDA, 0xDC,
974 0x23, 0x25, 0x26, 0x29,
975 0x2A, 0x2C, 0x31, 0x34,
976 0x0E, 0x0B, 0x13, 0x15,
977 0x16, 0x19, 0x1A, 0x1C,
978 0xE3, 0xE5, 0xE6, 0xE9,
979 0xEA, 0xEC, 0xF1, 0xF4,
980 0x4F, 0x4A, 0x52, 0x54,
981 0x57, 0x58, 0x5B, 0x5D,
982 0xA2, 0xA4, 0xA7, 0xA8,
983 0xAB, 0xAD, 0xB0, 0xB5,
984 0x8F, 0x8A, 0x92, 0x94,
985 0x97, 0x98, 0x9B, 0x9D,
986 0x62, 0x64, 0x67, 0x68,
987 0x6B, 0x6D, 0x70, 0x75
990 for (i = 0; i < 64; ++i)
991 if (data_bit[i] == syn)
992 break;
994 if (i < 64)
995 fmt = KERN_CRIT " ECC syndrome %#x -- data bit %d\n";
996 else
997 fmt = KERN_CRIT " ECC syndrome %#x -- unknown bit\n";
1000 printk (fmt, syn, i);
1003 static void
1004 cia_decode_parity_error(struct el_CIA_sysdata_mcheck *cia)
1006 static const char * const cmd_desc[16] = {
1007 "NOP", "LOCK", "FETCH", "FETCH_M", "MEMORY BARRIER",
1008 "SET DIRTY", "WRITE BLOCK", "WRITE BLOCK LOCK",
1009 "READ MISS0", "READ MISS1", "READ MISS MOD0",
1010 "READ MISS MOD1", "BCACHE VICTIM", "Spare",
1011 "READ MISS MOD STC0", "READ MISS MOD STC1"
1014 unsigned long addr;
1015 unsigned long mask;
1016 const char *cmd;
1017 int par;
1019 addr = cia->cpu_err0 & 0xfffffff0;
1020 addr |= (cia->cpu_err1 & 0x83UL) << 32;
1021 cmd = cmd_desc[(cia->cpu_err1 >> 8) & 0xF];
1022 mask = (cia->cpu_err1 >> 12) & 0xF;
1023 par = (cia->cpu_err1 >> 21) & 1;
1025 printk(KERN_CRIT "CIA machine check: System bus parity error\n");
1026 printk(KERN_CRIT " Command: %s, Parity bit: %d\n", cmd, par);
1027 printk(KERN_CRIT " Address: %#010lx, Mask: %#lx\n", addr, mask);
1030 static int
1031 cia_decode_mchk(unsigned long la_ptr)
1033 struct el_common *com;
1034 struct el_CIA_sysdata_mcheck *cia;
1035 int which;
1037 com = (void *)la_ptr;
1038 cia = (void *)(la_ptr + com->sys_offset);
1040 if ((cia->cia_err & CIA_ERR_VALID) == 0)
1041 return 0;
1043 which = cia->cia_err & 0xfff;
1044 switch (ffs(which) - 1) {
1045 case 0: /* CIA_ERR_COR_ERR */
1046 cia_decode_ecc_error(cia, "Corrected ECC error");
1047 break;
1048 case 1: /* CIA_ERR_UN_COR_ERR */
1049 cia_decode_ecc_error(cia, "Uncorrected ECC error");
1050 break;
1051 case 2: /* CIA_ERR_CPU_PE */
1052 cia_decode_parity_error(cia);
1053 break;
1054 case 3: /* CIA_ERR_MEM_NEM */
1055 cia_decode_mem_error(cia, "Access to nonexistent memory");
1056 break;
1057 case 4: /* CIA_ERR_PCI_SERR */
1058 cia_decode_pci_error(cia, "PCI bus system error");
1059 break;
1060 case 5: /* CIA_ERR_PERR */
1061 cia_decode_pci_error(cia, "PCI data parity error");
1062 break;
1063 case 6: /* CIA_ERR_PCI_ADDR_PE */
1064 cia_decode_pci_error(cia, "PCI address parity error");
1065 break;
1066 case 7: /* CIA_ERR_RCVD_MAS_ABT */
1067 cia_decode_pci_error(cia, "PCI master abort");
1068 break;
1069 case 8: /* CIA_ERR_RCVD_TAR_ABT */
1070 cia_decode_pci_error(cia, "PCI target abort");
1071 break;
1072 case 9: /* CIA_ERR_PA_PTE_INV */
1073 cia_decode_pci_error(cia, "PCI invalid PTE");
1074 break;
1075 case 10: /* CIA_ERR_FROM_WRT_ERR */
1076 cia_decode_mem_error(cia, "Write to flash ROM attempted");
1077 break;
1078 case 11: /* CIA_ERR_IOA_TIMEOUT */
1079 cia_decode_pci_error(cia, "I/O timeout");
1080 break;
1083 if (cia->cia_err & CIA_ERR_LOST_CORR_ERR)
1084 printk(KERN_CRIT "CIA lost machine check: "
1085 "Correctable ECC error\n");
1086 if (cia->cia_err & CIA_ERR_LOST_UN_CORR_ERR)
1087 printk(KERN_CRIT "CIA lost machine check: "
1088 "Uncorrectable ECC error\n");
1089 if (cia->cia_err & CIA_ERR_LOST_CPU_PE)
1090 printk(KERN_CRIT "CIA lost machine check: "
1091 "System bus parity error\n");
1092 if (cia->cia_err & CIA_ERR_LOST_MEM_NEM)
1093 printk(KERN_CRIT "CIA lost machine check: "
1094 "Access to nonexistent memory\n");
1095 if (cia->cia_err & CIA_ERR_LOST_PERR)
1096 printk(KERN_CRIT "CIA lost machine check: "
1097 "PCI data parity error\n");
1098 if (cia->cia_err & CIA_ERR_LOST_PCI_ADDR_PE)
1099 printk(KERN_CRIT "CIA lost machine check: "
1100 "PCI address parity error\n");
1101 if (cia->cia_err & CIA_ERR_LOST_RCVD_MAS_ABT)
1102 printk(KERN_CRIT "CIA lost machine check: "
1103 "PCI master abort\n");
1104 if (cia->cia_err & CIA_ERR_LOST_RCVD_TAR_ABT)
1105 printk(KERN_CRIT "CIA lost machine check: "
1106 "PCI target abort\n");
1107 if (cia->cia_err & CIA_ERR_LOST_PA_PTE_INV)
1108 printk(KERN_CRIT "CIA lost machine check: "
1109 "PCI invalid PTE\n");
1110 if (cia->cia_err & CIA_ERR_LOST_FROM_WRT_ERR)
1111 printk(KERN_CRIT "CIA lost machine check: "
1112 "Write to flash ROM attempted\n");
1113 if (cia->cia_err & CIA_ERR_LOST_IOA_TIMEOUT)
1114 printk(KERN_CRIT "CIA lost machine check: "
1115 "I/O timeout\n");
1117 return 1;
/*
 * Machine-check handler entry point.  Clears the chipset error state
 * and the pending-mcheck flag before reporting, then forwards to the
 * generic reporting code with an "expected" flag that is true either
 * because a probe routine announced it (mcheck_expected) or because
 * the logout frame decoded as a known CIA error (vector 0x660).
 */
void
cia_machine_check(unsigned long vector, unsigned long la_ptr,
		  struct pt_regs * regs)
{
	int expected;

	/* Clear the error before any reporting. */
	mb();
	mb();			/* magic */
	draina();
	cia_pci_clr_err();
	wrmces(rdmces());	/* reset machine check pending flag. */
	mb();

	expected = mcheck_expected(0);
	if (!expected && vector == 0x660)
		expected = cia_decode_mchk(la_ptr);
	process_mcheck_info(vector, la_ptr, regs, "CIA", expected);
}