Import 2.4.0-test3pre1
[davej-history.git] / arch / alpha / kernel / core_cia.c
blob348bd843f2d449667162785d32fede685b673796
1 /*
2 * linux/arch/alpha/kernel/core_cia.c
4 * Written by David A Rusling (david.rusling@reo.mts.dec.com).
5 * December 1995.
7 * Copyright (C) 1995 David A Rusling
8 * Copyright (C) 1997, 1998 Jay Estabrook
9 * Copyright (C) 1998, 1999, 2000 Richard Henderson
11 * Code common to all CIA core logic chips.
14 #include <linux/kernel.h>
15 #include <linux/types.h>
16 #include <linux/pci.h>
17 #include <linux/sched.h>
18 #include <linux/init.h>
20 #include <asm/system.h>
21 #include <asm/ptrace.h>
22 #include <asm/hwrpb.h>
24 #define __EXTERN_INLINE inline
25 #include <asm/io.h>
26 #include <asm/core_cia.h>
27 #undef __EXTERN_INLINE
29 #include <linux/bootmem.h>
31 #include "proto.h"
32 #include "pci_impl.h"
36 * NOTE: Herein lie back-to-back mb instructions. They are magic.
37 * One plausible explanation is that the i/o controller does not properly
38 * handle the system transaction. Another involves timing. Ho hum.
/* Set DEBUG_CONFIG non-zero to trace PCI configuration-space accesses.  */
#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBGC(args)	printk args
#else
# define DBGC(args)
#endif

/* Shorthand: pointer to a volatile 32-bit chipset register.  */
#define vip	volatile int *
51 * Given a bus, device, and function number, compute resulting
52 * configuration space address. It is therefore not safe to have
53 * concurrent invocations to configuration space access routines, but
54 * there really shouldn't be any need for this.
56 * Type 0:
58 * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
59 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
60 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
61 * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
62 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
64 * 31:11 Device select bit.
65 * 10:8 Function number
66 * 7:2 Register number
68 * Type 1:
70 * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
71 * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
72 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
73 * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
74 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
76 * 31:24 reserved
77 * 23:16 bus number (8 bits = 256 possible buses)
78 * 15:11 Device number (5 bits)
79 * 10:8 function number
80 * 7:2 register number
82 * Notes:
83 * The function number selects which function of a multi-function device
84 * (e.g., SCSI and Ethernet).
86 * The register selects a DWORD (32 bit) register offset. Hence it
87 * doesn't get shifted by 2 bits as we want to "drop" the bottom two
88 * bits.
91 static int
92 mk_conf_addr(struct pci_dev *dev, int where, unsigned long *pci_addr,
93 unsigned char *type1)
95 u8 bus = dev->bus->number;
96 u8 device_fn = dev->devfn;
98 *type1 = (bus != 0);
99 *pci_addr = (bus << 16) | (device_fn << 8) | where;
101 DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
102 " returning address 0x%p\n"
103 bus, device_fn, where, *pci_addr));
105 return 0;
108 static unsigned int
109 conf_read(unsigned long addr, unsigned char type1)
111 unsigned long flags;
112 int stat0, value;
113 int cia_cfg = 0;
115 DBGC(("conf_read(addr=0x%lx, type1=%d) ", addr, type1));
116 __save_and_cli(flags);
118 /* Reset status register to avoid losing errors. */
119 stat0 = *(vip)CIA_IOC_CIA_ERR;
120 *(vip)CIA_IOC_CIA_ERR = stat0;
121 mb();
123 /* If Type1 access, must set CIA CFG. */
124 if (type1) {
125 cia_cfg = *(vip)CIA_IOC_CFG;
126 *(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
127 mb();
128 *(vip)CIA_IOC_CFG;
131 draina();
132 mcheck_expected(0) = 1;
133 mcheck_taken(0) = 0;
134 mb();
136 /* Access configuration space. */
137 value = *(vip)addr;
138 mb();
139 mb(); /* magic */
140 if (mcheck_taken(0)) {
141 mcheck_taken(0) = 0;
142 value = 0xffffffff;
143 mb();
145 mcheck_expected(0) = 0;
146 mb();
148 /* If Type1 access, must reset IOC CFG so normal IO space ops work. */
149 if (type1) {
150 *(vip)CIA_IOC_CFG = cia_cfg;
151 mb();
152 *(vip)CIA_IOC_CFG;
155 __restore_flags(flags);
156 DBGC(("done\n"));
158 return value;
161 static void
162 conf_write(unsigned long addr, unsigned int value, unsigned char type1)
164 unsigned long flags;
165 int stat0, cia_cfg = 0;
167 DBGC(("conf_write(addr=0x%lx, type1=%d) ", addr, type1));
168 __save_and_cli(flags);
170 /* Reset status register to avoid losing errors. */
171 stat0 = *(vip)CIA_IOC_CIA_ERR;
172 *(vip)CIA_IOC_CIA_ERR = stat0;
173 mb();
175 /* If Type1 access, must set CIA CFG. */
176 if (type1) {
177 cia_cfg = *(vip)CIA_IOC_CFG;
178 *(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
179 mb();
180 *(vip)CIA_IOC_CFG;
183 draina();
184 mcheck_expected(0) = 1;
185 mcheck_taken(0) = 0;
186 mb();
188 /* Access configuration space. */
189 *(vip)addr = value;
190 mb();
191 mb(); /* magic */
193 mcheck_expected(0) = 0;
194 mb();
196 /* If Type1 access, must reset IOC CFG so normal IO space ops work. */
197 if (type1) {
198 *(vip)CIA_IOC_CFG = cia_cfg;
199 mb();
200 *(vip)CIA_IOC_CFG;
203 __restore_flags(flags);
204 DBGC(("done\n"));
207 static int
208 cia_read_config_byte(struct pci_dev *dev, int where, u8 *value)
210 unsigned long addr, pci_addr;
211 unsigned char type1;
213 if (mk_conf_addr(dev, where, &pci_addr, &type1))
214 return PCIBIOS_DEVICE_NOT_FOUND;
216 addr = (pci_addr << 5) + 0x00 + CIA_CONF;
217 *value = conf_read(addr, type1) >> ((where & 3) * 8);
218 return PCIBIOS_SUCCESSFUL;
221 static int
222 cia_read_config_word(struct pci_dev *dev, int where, u16 *value)
224 unsigned long addr, pci_addr;
225 unsigned char type1;
227 if (mk_conf_addr(dev, where, &pci_addr, &type1))
228 return PCIBIOS_DEVICE_NOT_FOUND;
230 addr = (pci_addr << 5) + 0x08 + CIA_CONF;
231 *value = conf_read(addr, type1) >> ((where & 3) * 8);
232 return PCIBIOS_SUCCESSFUL;
235 static int
236 cia_read_config_dword(struct pci_dev *dev, int where, u32 *value)
238 unsigned long addr, pci_addr;
239 unsigned char type1;
241 if (mk_conf_addr(dev, where, &pci_addr, &type1))
242 return PCIBIOS_DEVICE_NOT_FOUND;
244 addr = (pci_addr << 5) + 0x18 + CIA_CONF;
245 *value = conf_read(addr, type1);
246 return PCIBIOS_SUCCESSFUL;
249 static int
250 cia_write_config(struct pci_dev *dev, int where, u32 value, long mask)
252 unsigned long addr, pci_addr;
253 unsigned char type1;
255 if (mk_conf_addr(dev, where, &pci_addr, &type1))
256 return PCIBIOS_DEVICE_NOT_FOUND;
258 addr = (pci_addr << 5) + mask + CIA_CONF;
259 conf_write(addr, value << ((where & 3) * 8), type1);
260 return PCIBIOS_SUCCESSFUL;
263 static int
264 cia_write_config_byte(struct pci_dev *dev, int where, u8 value)
266 return cia_write_config(dev, where, value, 0x00);
269 static int
270 cia_write_config_word(struct pci_dev *dev, int where, u16 value)
272 return cia_write_config(dev, where, value, 0x08);
275 static int
276 cia_write_config_dword(struct pci_dev *dev, int where, u32 value)
278 return cia_write_config(dev, where, value, 0x18);
281 struct pci_ops cia_pci_ops =
283 read_byte: cia_read_config_byte,
284 read_word: cia_read_config_word,
285 read_dword: cia_read_config_dword,
286 write_byte: cia_write_config_byte,
287 write_word: cia_write_config_word,
288 write_dword: cia_write_config_dword
292 * CIA Pass 1 and PYXIS Pass 1 and 2 have a broken scatter-gather tlb.
293 * It cannot be invalidated. Rather than hard code the pass numbers,
294 * actually try the tbia to see if it works.
297 void
298 cia_pci_tbi(struct pci_controler *hose, dma_addr_t start, dma_addr_t end)
300 wmb();
301 *(vip)CIA_IOC_PCI_TBIA = 3; /* Flush all locked and unlocked. */
302 mb();
303 *(vip)CIA_IOC_PCI_TBIA;
307 * Fixup attempt number 1.
309 * Write zeros directly into the tag registers.
312 static void
313 cia_pci_tbi_try1(struct pci_controler *hose,
314 dma_addr_t start, dma_addr_t end)
316 wmb();
317 *(vip)CIA_IOC_TB_TAGn(0) = 0;
318 *(vip)CIA_IOC_TB_TAGn(1) = 0;
319 *(vip)CIA_IOC_TB_TAGn(2) = 0;
320 *(vip)CIA_IOC_TB_TAGn(3) = 0;
321 *(vip)CIA_IOC_TB_TAGn(4) = 0;
322 *(vip)CIA_IOC_TB_TAGn(5) = 0;
323 *(vip)CIA_IOC_TB_TAGn(6) = 0;
324 *(vip)CIA_IOC_TB_TAGn(7) = 0;
325 mb();
326 *(vip)CIA_IOC_TB_TAGn(0);
#if 0
/*
 * Fixup attempt number 2.  This is the method NT and NetBSD use.
 *
 * Allocate mappings, and put the chip into DMA loopback mode to read a
 * garbage page.  This works by causing TLB misses, causing old entries to
 * be purged to make room for the new entries coming in for the garbage page.
 *
 * Currently compiled out: attempt number 1 suffices on known hardware.
 */

#define CIA_BROKEN_TBI_TRY2_BASE	0xE0000000

/* Point window 3 at a page of identical PTEs all mapping one
   garbage page, so loopback reads below always hit the SG path.  */
static void __init
cia_enable_broken_tbi_try2(void)
{
	unsigned long *ppte, pte;
	long i;

	ppte = __alloc_bootmem(PAGE_SIZE, 32768, 0);
	pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); ++i)
		ppte[i] = pte;

	*(vip)CIA_IOC_PCI_W3_BASE = CIA_BROKEN_TBI_TRY2_BASE | 3;
	*(vip)CIA_IOC_PCI_W3_MASK = (PAGE_SIZE - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T3_BASE = virt_to_phys(ppte) >> 2;
}

static void
cia_pci_tbi_try2(struct pci_controler *hose,
		 dma_addr_t start, dma_addr_t end)
{
	unsigned long flags;
	unsigned long bus_addr;
	int ctrl;
	long i;

	__save_and_cli(flags);

	/* Put the chip into PCI loopback mode.  */
	mb();
	ctrl = *(vip)CIA_IOC_CIA_CTRL;
	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();

	/* Read from PCI dense memory space at TBI_ADDR, skipping 32k on
	   each read.  This forces SG TLB misses.  NetBSD claims that the
	   TLB entries are not quite LRU, meaning that we need to read more
	   times than there are actual tags.  The 2117x docs claim strict
	   round-robin.  Oh well, we've come this far...  */

	bus_addr = cia_ioremap(CIA_BROKEN_TBI_TRY2_BASE);
	for (i = 0; i < 12; ++i, bus_addr += 32768)
		cia_readl(bus_addr);

	/* Restore normal PCI operation.  */
	mb();
	*(vip)CIA_IOC_CIA_CTRL = ctrl;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();

	__restore_flags(flags);
}
#endif
397 static void __init
398 verify_tb_operation(void)
400 static int page[PAGE_SIZE/4]
401 __attribute__((aligned(PAGE_SIZE)))
402 __initlocaldata = { 0 };
404 struct pci_iommu_arena *arena = pci_isa_hose->sg_isa;
405 int ctrl, addr0, tag0, pte0, data0;
406 int temp;
408 /* Put the chip into PCI loopback mode. */
409 mb();
410 ctrl = *(vip)CIA_IOC_CIA_CTRL;
411 *(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
412 mb();
413 *(vip)CIA_IOC_CIA_CTRL;
414 mb();
416 /* Write a valid entry directly into the TLB registers. */
418 addr0 = arena->dma_base;
419 tag0 = addr0 | 1;
420 pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1;
422 *(vip)CIA_IOC_TB_TAGn(0) = tag0;
423 *(vip)CIA_IOC_TB_TAGn(1) = 0;
424 *(vip)CIA_IOC_TB_TAGn(2) = 0;
425 *(vip)CIA_IOC_TB_TAGn(3) = 0;
426 *(vip)CIA_IOC_TB_TAGn(4) = 0;
427 *(vip)CIA_IOC_TB_TAGn(5) = 0;
428 *(vip)CIA_IOC_TB_TAGn(6) = 0;
429 *(vip)CIA_IOC_TB_TAGn(7) = 0;
430 *(vip)CIA_IOC_TBn_PAGEm(0,0) = pte0;
431 *(vip)CIA_IOC_TBn_PAGEm(0,1) = 0;
432 *(vip)CIA_IOC_TBn_PAGEm(0,2) = 0;
433 *(vip)CIA_IOC_TBn_PAGEm(0,3) = 0;
434 mb();
436 /* First, verify we can read back what we've written. If
437 this fails, we can't be sure of any of the other testing
438 we're going to do, so bail. */
439 /* ??? Actually, we could do the work with machine checks.
440 By passing this register update test, we pretty much
441 guarantee that cia_pci_tbi_try1 works. If this test
442 fails, cia_pci_tbi_try2 might still work. */
444 temp = *(vip)CIA_IOC_TB_TAGn(0);
445 if (temp != tag0) {
446 printk("pci: failed tb register update test "
447 "(tag0 %#x != %#x)\n", temp, tag0);
448 goto failed;
450 temp = *(vip)CIA_IOC_TB_TAGn(1);
451 if (temp != 0) {
452 printk("pci: failed tb register update test "
453 "(tag1 %#x != 0)\n", temp);
454 goto failed;
456 temp = *(vip)CIA_IOC_TBn_PAGEm(0,0);
457 if (temp != pte0) {
458 printk("pci: failed tb register update test "
459 "(pte0 %#x != %#x)\n", temp, pte0);
460 goto failed;
462 printk("pci: passed tb register update test\n");
464 /* Second, verify we can actually do I/O through this entry. */
466 data0 = 0xdeadbeef;
467 page[0] = data0;
468 mcheck_expected(0) = 1;
469 mcheck_taken(0) = 0;
470 mb();
471 temp = cia_readl(cia_ioremap(addr0));
472 mb();
473 mcheck_expected(0) = 0;
474 mb();
475 if (mcheck_taken(0)) {
476 printk("pci: failed sg loopback i/o read test (mcheck)\n");
477 goto failed;
479 if (temp != data0) {
480 printk("pci: failed sg loopback i/o read test "
481 "(%#x != %#x)\n", temp, data0);
482 goto failed;
484 printk("pci: passed sg loopback i/o read test\n");
486 /* Third, try to invalidate the TLB. */
488 cia_pci_tbi(arena->hose, 0, -1);
489 temp = *(vip)CIA_IOC_TB_TAGn(0);
490 if (temp & 1) {
491 cia_pci_tbi_try1(arena->hose, 0, -1);
493 temp = *(vip)CIA_IOC_TB_TAGn(0);
494 if (temp & 1) {
495 printk("pci: failed tbia test; "
496 "no usable workaround\n");
497 goto failed;
500 alpha_mv.mv_pci_tbi = cia_pci_tbi_try1;
501 printk("pci: failed tbia test; workaround 1 succeeded\n");
502 } else {
503 printk("pci: passed tbia test\n");
506 /* Fourth, verify the TLB snoops the EV5's caches when
507 doing a tlb fill. */
509 data0 = 0x5adda15e;
510 page[0] = data0;
511 arena->ptes[4] = pte0;
512 mcheck_expected(0) = 1;
513 mcheck_taken(0) = 0;
514 mb();
515 temp = cia_readl(cia_ioremap(addr0 + 4*PAGE_SIZE));
516 mb();
517 mcheck_expected(0) = 0;
518 mb();
519 if (mcheck_taken(0)) {
520 printk("pci: failed pte write cache snoop test (mcheck)\n");
521 goto failed;
523 if (temp != data0) {
524 printk("pci: failed pte write cache snoop test "
525 "(%#x != %#x)\n", temp, data0);
526 goto failed;
528 printk("pci: passed pte write cache snoop test\n");
530 /* Fifth, verify that a previously invalid PTE entry gets
531 filled from the page table. */
533 data0 = 0xabcdef12;
534 page[0] = data0;
535 arena->ptes[5] = pte0;
536 mcheck_expected(0) = 1;
537 mcheck_taken(0) = 0;
538 mb();
539 temp = cia_readl(cia_ioremap(addr0 + 5*PAGE_SIZE));
540 mb();
541 mcheck_expected(0) = 0;
542 mb();
543 if (mcheck_taken(0)) {
544 printk("pci: failed valid tag invalid pte reload test "
545 "(mcheck; workaround available)\n");
546 /* Work around this bug by aligning new allocations
547 on 4 page boundaries. */
548 arena->align_entry = 4;
549 } else if (temp != data0) {
550 printk("pci: failed valid tag invalid pte reload test "
551 "(%#x != %#x)\n", temp, data0);
552 goto failed;
553 } else {
554 printk("pci: passed valid tag invalid pte reload test\n");
557 /* Sixth, verify machine checks are working. Test invalid
558 pte under the same valid tag as we used above. */
560 mcheck_expected(0) = 1;
561 mcheck_taken(0) = 0;
562 mb();
563 temp = cia_readl(cia_ioremap(addr0 + 6*PAGE_SIZE));
564 mb();
565 mcheck_expected(0) = 0;
566 mb();
567 printk("pci: %s pci machine check test\n",
568 mcheck_taken(0) ? "passed" : "failed");
570 /* Clean up after the tests. */
571 arena->ptes[4] = 0;
572 arena->ptes[5] = 0;
573 alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
575 exit:
576 /* Restore normal PCI operation. */
577 mb();
578 *(vip)CIA_IOC_CIA_CTRL = ctrl;
579 mb();
580 *(vip)CIA_IOC_CIA_CTRL;
581 mb();
582 return;
584 failed:
585 printk("pci: disabling sg translation window\n");
586 *(vip)CIA_IOC_PCI_W0_BASE = 0;
587 alpha_mv.mv_pci_tbi = NULL;
588 goto exit;
591 static void __init
592 do_init_arch(int is_pyxis)
594 struct pci_controler *hose;
595 int temp;
596 int cia_rev;
598 cia_rev = *(vip)CIA_IOC_CIA_REV & CIA_REV_MASK;
599 printk("pci: cia revision %d%s\n",
600 cia_rev, is_pyxis ? " (pyxis)" : "");
602 /* Set up error reporting. */
603 temp = *(vip)CIA_IOC_ERR_MASK;
604 temp &= ~(CIA_ERR_CPU_PE | CIA_ERR_MEM_NEM | CIA_ERR_PA_PTE_INV
605 | CIA_ERR_RCVD_MAS_ABT | CIA_ERR_RCVD_TAR_ABT);
606 *(vip)CIA_IOC_ERR_MASK = temp;
608 /* Clear all currently pending errors. */
609 *(vip)CIA_IOC_CIA_ERR = 0;
611 /* Turn on mchecks. */
612 temp = *(vip)CIA_IOC_CIA_CTRL;
613 temp |= CIA_CTRL_FILL_ERR_EN | CIA_CTRL_MCHK_ERR_EN;
614 *(vip)CIA_IOC_CIA_CTRL = temp;
616 /* Clear the CFG register, which gets used for PCI config space
617 accesses. That is the way we want to use it, and we do not
618 want to depend on what ARC or SRM might have left behind. */
619 *(vip)CIA_IOC_CFG = 0;
621 /* Zero the HAEs. */
622 *(vip)CIA_IOC_HAE_MEM = 0;
623 *(vip)CIA_IOC_HAE_IO = 0;
625 /* For PYXIS, we always use BWX bus and i/o accesses. To that end,
626 make sure they're enabled on the controler. */
627 if (is_pyxis) {
628 temp = *(vip)CIA_IOC_CIA_CNFG;
629 temp |= CIA_CNFG_IOA_BWEN;
630 *(vip)CIA_IOC_CIA_CNFG = temp;
633 /* Syncronize with all previous changes. */
634 mb();
635 *(vip)CIA_IOC_CIA_REV;
638 * Create our single hose.
641 pci_isa_hose = hose = alloc_pci_controler();
642 hose->io_space = &ioport_resource;
643 hose->mem_space = &iomem_resource;
644 hose->index = 0;
646 if (! is_pyxis) {
647 struct resource *hae_mem = alloc_resource();
648 hose->mem_space = hae_mem;
650 hae_mem->start = 0;
651 hae_mem->end = CIA_MEM_R1_MASK;
652 hae_mem->name = pci_hae0_name;
653 hae_mem->flags = IORESOURCE_MEM;
655 if (request_resource(&iomem_resource, hae_mem) < 0)
656 printk(KERN_ERR "Failed to request HAE_MEM\n");
658 hose->sparse_mem_base = CIA_SPARSE_MEM - IDENT_ADDR;
659 hose->dense_mem_base = CIA_DENSE_MEM - IDENT_ADDR;
660 hose->sparse_io_base = CIA_IO - IDENT_ADDR;
661 hose->dense_io_base = 0;
662 } else {
663 hose->sparse_mem_base = 0;
664 hose->dense_mem_base = CIA_BW_MEM - IDENT_ADDR;
665 hose->sparse_io_base = 0;
666 hose->dense_io_base = CIA_BW_IO - IDENT_ADDR;
670 * Set up the PCI to main memory translation windows.
672 * Window 0 is scatter-gather 8MB at 8MB (for isa)
673 * Window 1 is direct access 1GB at 1GB
674 * Window 2 is direct access 1GB at 2GB
676 * We must actually use 2 windows to direct-map the 2GB space,
677 * because of an idiot-syncrasy of the CYPRESS chip used on
678 * many PYXIS systems. It may respond to a PCI bus address in
679 * the last 1MB of the 4GB address range.
681 * ??? NetBSD hints that page tables must be aligned to 32K,
682 * possibly due to a hardware bug. This is over-aligned
683 * from the 8K alignment one would expect for an 8MB window.
684 * No description of what revisions affected.
687 hose->sg_pci = NULL;
688 hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 32768);
689 __direct_map_base = 0x40000000;
690 __direct_map_size = 0x80000000;
692 *(vip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3;
693 *(vip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000;
694 *(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;
696 *(vip)CIA_IOC_PCI_W1_BASE = 0x40000000 | 1;
697 *(vip)CIA_IOC_PCI_W1_MASK = (0x40000000 - 1) & 0xfff00000;
698 *(vip)CIA_IOC_PCI_T1_BASE = 0;
700 *(vip)CIA_IOC_PCI_W2_BASE = 0x80000000 | 1;
701 *(vip)CIA_IOC_PCI_W2_MASK = (0x40000000 - 1) & 0xfff00000;
702 *(vip)CIA_IOC_PCI_T2_BASE = 0x40000000;
704 *(vip)CIA_IOC_PCI_W3_BASE = 0;
707 void __init
708 cia_init_arch(void)
710 do_init_arch(0);
713 void __init
714 pyxis_init_arch(void)
716 /* On pyxis machines we can precisely calculate the
717 CPU clock frequency using pyxis real time counter.
718 It's especially useful for SX164 with broken RTC.
720 Both CPU and chipset are driven by the single 16.666M
721 or 16.667M crystal oscillator. PYXIS_RT_COUNT clock is
722 66.66 MHz. -ink */
724 unsigned int cc0, cc1;
725 unsigned long pyxis_cc;
727 __asm__ __volatile__ ("rpcc %0" : "=r"(cc0));
728 pyxis_cc = *(vulp)PYXIS_RT_COUNT;
729 do { } while(*(vulp)PYXIS_RT_COUNT - pyxis_cc < 4096);
730 __asm__ __volatile__ ("rpcc %0" : "=r"(cc1));
731 cc1 -= cc0;
732 hwrpb->cycle_freq = ((cc1 >> 11) * 100000000UL) / 3;
733 hwrpb_update_checksum(hwrpb);
735 do_init_arch(1);
738 void __init
739 cia_init_pci(void)
741 /* Must delay this from init_arch, as we need machine checks. */
742 verify_tb_operation();
743 common_init_pci();
746 static inline void
747 cia_pci_clr_err(void)
749 int jd;
751 jd = *(vip)CIA_IOC_CIA_ERR;
752 *(vip)CIA_IOC_CIA_ERR = jd;
753 mb();
754 *(vip)CIA_IOC_CIA_ERR; /* re-read to force write. */
757 void
758 cia_machine_check(unsigned long vector, unsigned long la_ptr,
759 struct pt_regs * regs)
761 int expected;
763 /* Clear the error before any reporting. */
764 mb();
765 mb(); /* magic */
766 draina();
767 cia_pci_clr_err();
768 wrmces(rdmces()); /* reset machine check pending flag. */
769 mb();
771 expected = mcheck_expected(0);
772 if (!expected && vector == 0x660) {
773 struct el_common *com;
774 struct el_common_EV5_uncorrectable_mcheck *ev5;
775 struct el_CIA_sysdata_mcheck *cia;
777 com = (void *)la_ptr;
778 ev5 = (void *)(la_ptr + com->proc_offset);
779 cia = (void *)(la_ptr + com->sys_offset);
781 if (com->code == 0x202) {
782 printk(KERN_CRIT "CIA PCI machine check: err0=%08x "
783 "err1=%08x err2=%08x\n",
784 (int) cia->pci_err0, (int) cia->pci_err1,
785 (int) cia->pci_err2);
786 expected = 1;
789 process_mcheck_info(vector, la_ptr, regs, "CIA", expected);