/* $Id: indy_sc.c,v 1.9 1999/05/12 21:57:49 ulfc Exp $
 *
 * indy_sc.c: Indy cache management functions.
 *
 * Copyright (C) 1997 Ralf Baechle (ralf@gnu.org),
 * derived from r4xx0.c by David S. Miller (dm@engr.sgi.com).
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/bcache.h>
#include <asm/sgi.h>
#include <asm/sgimc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/bootinfo.h>
#include <asm/sgialib.h>
#include <asm/mmu_context.h>

/* Secondary cache size in bytes, if present.  */
static unsigned long scache_size;

#undef DEBUG_CACHE

#define SC_SIZE 0x00080000
#define SC_LINE 32
#define CI_MASK (SC_SIZE - SC_LINE)
#define SC_ROUND(n) ((n) + SC_LINE - 1)
#define SC_INDEX(n) ((n) & CI_MASK)
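
/*
 * Illustration (not in the original source): with SC_SIZE = 0x80000 and
 * SC_LINE = 32, CI_MASK is 0x7ffe0, so SC_INDEX() keeps the low 19 bits
 * of an address and clears the five line-offset bits; e.g.
 * SC_INDEX(0x12345678) == 0x45660, the 32-byte aligned index of that
 * address within the 512 KB secondary cache.
 */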

static inline void indy_sc_wipe(unsigned long first, unsigned long last)
{
	/* The loop below advances %0 and uses $1/$2 as scratch, so the
	   address operands are declared read-write and both temporaries
	   are listed as clobbered.  */
	__asm__ __volatile__("
		.set noreorder
		.set mips3
		.set noat
		mfc0	$2, $12
		li	$1, 0x80		# Go 64 bit
		mtc0	$1, $12

		dli	$1, 0x9000000080000000
		or	%0, $1			# first line to flush
		or	%1, $1			# last line to flush
		.set at

1:		sw	$0, 0(%0)
		bne	%0, %1, 1b
		daddu	%0, 32

		mtc0	$2, $12			# Back to 32 bit
		nop; nop; nop; nop;
		.set mips0
		.set reorder"
		: "=r" (first), "=r" (last)
		: "0" (first), "1" (last)
		: "$1", "$2");
}
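
/*
 * Illustration (not in the original source): indy_sc_wipe() takes cache
 * indices rather than virtual addresses, so wiping the whole 512 KB
 * cache is indy_sc_wipe(0, SC_SIZE - SC_LINE).
 */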

static void indy_sc_wback_invalidate(unsigned long addr, unsigned long size)
{
	unsigned long first_line, last_line;
	unsigned int flags;

#ifdef DEBUG_CACHE
	printk("indy_sc_wback_invalidate[%08lx,%08lx]", addr, size);
#endif
	/* Which lines to flush?  */
	first_line = SC_INDEX(addr);
	if (size <= SC_LINE)
		last_line = SC_INDEX(addr);
	else
		last_line = SC_INDEX(addr + size - 1);

	__save_and_cli(flags);
	if (first_line <= last_line) {
		indy_sc_wipe(first_line, last_line);
		goto out;
	}

	/* Cache index wrap around.  Due to the way the buddy system works
	   this case should not happen.  We're prepared to handle it,
	   though. */
	indy_sc_wipe(first_line, SC_SIZE - SC_LINE);
	indy_sc_wipe(0, last_line);
out:
	__restore_flags(flags);
}
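
/*
 * Worked example of the wrap-around path above (illustration, not from
 * the original source): for addr = 0x7ffe0 and size = 0x40 the range
 * crosses the top of the index space: first_line = 0x7ffe0, but
 * last_line = SC_INDEX(0x8001f) = 0, so first_line > last_line and the
 * flush is done as two wipes, indy_sc_wipe(0x7ffe0, SC_SIZE - SC_LINE)
 * and indy_sc_wipe(0, 0).
 */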

static void indy_sc_enable(void)
{
	unsigned long addr, tmp1, tmp2;

	/* This is really cool... */
#ifdef DEBUG_CACHE
	printk("Enabling R4600 SCACHE\n");
#endif
	__asm__ __volatile__("
		.set noreorder
		.set mips3
		mfc0	%2, $12			# save Status
		nop; nop; nop; nop;
		li	%1, 0x80		# Go 64 bit
		mtc0	%1, $12
		nop; nop; nop; nop;
		li	%0, 0x1			# build 0x9000000080000000,
		dsll	%0, 31			# the uncached (XKPHYS)
		lui	%1, 0x9000		# alias of physical
		dsll32	%1, 0			# address 0x80000000
		or	%0, %1, %0
		sb	$0, 0(%0)		# byte store enables the scache
		mtc0	$0, $12
		nop; nop; nop; nop;
		mtc0	%2, $12			# restore Status
		nop; nop; nop; nop;
		.set mips0
		.set reorder"
		: "=r" (tmp1), "=r" (tmp2), "=r" (addr));
}

static void indy_sc_disable(void)
{
	unsigned long tmp1, tmp2, tmp3;

#ifdef DEBUG_CACHE
	printk("Disabling R4600 SCACHE\n");
#endif
	__asm__ __volatile__("
		.set noreorder
		.set mips3
		li	%0, 0x1			# build 0x9000000080000000
		dsll	%0, 31			# as in indy_sc_enable()
		lui	%1, 0x9000
		dsll32	%1, 0
		or	%0, %1, %0
		mfc0	%2, $12			# save Status
		nop; nop; nop; nop;
		li	%1, 0x80		# Go 64 bit
		mtc0	%1, $12
		nop; nop; nop; nop;
		sh	$0, 0(%0)		# halfword store disables the scache
		mtc0	$0, $12
		nop; nop; nop; nop;
		mtc0	%2, $12			# restore Status
		nop; nop; nop; nop;
		.set mips2
		.set reorder
	" : "=r" (tmp1), "=r" (tmp2), "=r" (tmp3));
}
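
/*
 * Editor's note (an assumption, not from the original source): the probe
 * below appears to bit-bang a Microwire-style serial EEPROM through the
 * IP22 memory controller's cpu_control register.  The top 11 bits of
 * cmd = 0xc220 shift out as a start bit (1), a READ opcode (10) and the
 * 8-bit word address 0x11; the second loop then clocks in the 16-bit
 * reply, which encodes the secondary cache size in pages (hence the
 * data <<= PAGE_SHIFT below).
 */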
__initfunc(static inline int indy_sc_probe(void))
{
	volatile unsigned int *cpu_control;
	unsigned short cmd = 0xc220;
	unsigned long data = 0;
	int i, n;

#ifdef __MIPSEB__
	cpu_control = (volatile unsigned int *) KSEG1ADDR(0x1fa00034);
#else
	cpu_control = (volatile unsigned int *) KSEG1ADDR(0x1fa00030);
#endif
#define DEASSERT(bit) (*(cpu_control) &= (~(bit)))
#define ASSERT(bit) (*(cpu_control) |= (bit))
#define DELAY for(n = 0; n < 100000; n++) __asm__ __volatile__("")

	DEASSERT(SGIMC_EEPROM_PRE);
	DEASSERT(SGIMC_EEPROM_SDATAO);
	DEASSERT(SGIMC_EEPROM_SECLOCK);
	DEASSERT(SGIMC_EEPROM_PRE);
	DELAY;
	ASSERT(SGIMC_EEPROM_CSEL); ASSERT(SGIMC_EEPROM_SECLOCK);

	/* Shift out the 11-bit command, MSB first.  */
	for(i = 0; i < 11; i++) {
		if(cmd & (1<<15))
			ASSERT(SGIMC_EEPROM_SDATAO);
		else
			DEASSERT(SGIMC_EEPROM_SDATAO);
		DEASSERT(SGIMC_EEPROM_SECLOCK);
		ASSERT(SGIMC_EEPROM_SECLOCK);
		cmd <<= 1;
	}
	DEASSERT(SGIMC_EEPROM_SDATAO);

	/* Clock in the 16 data bits, MSB first.  */
	for(i = 0; i < (sizeof(unsigned short) * 8); i++) {
		unsigned int tmp;

		DEASSERT(SGIMC_EEPROM_SECLOCK);
		DELAY;
		ASSERT(SGIMC_EEPROM_SECLOCK);
		DELAY;
		data <<= 1;
		tmp = *cpu_control;
		if(tmp & SGIMC_EEPROM_SDATAI)
			data |= 1;
	}
	DEASSERT(SGIMC_EEPROM_SECLOCK);
	DEASSERT(SGIMC_EEPROM_CSEL);
	ASSERT(SGIMC_EEPROM_PRE);
	ASSERT(SGIMC_EEPROM_SECLOCK);

	/* The EEPROM holds the cache size in pages.  */
	data <<= PAGE_SHIFT;
	if (data == 0)
		return 0;

	scache_size = data;

	printk("R4600/R5000 SCACHE size %ldK, linesize 32 bytes.\n",
	       scache_size >> 10);

	return 1;
}

/* XXX Check with wje if the Indy caches can differentiate between
   writeback + invalidate and just invalidate.  */
struct bcache_ops indy_sc_ops = {
	indy_sc_enable,
	indy_sc_disable,
	indy_sc_wback_invalidate,
	indy_sc_wback_invalidate
};
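
/*
 * Usage note (hook names are an assumption based on asm/bcache.h of this
 * era): generic MIPS cache code invokes these operations through the
 * global bcops pointer set up in indy_sc_init() below, e.g.
 * bcops->bc_wback_inv(page, size).  Since the Indy (so far) cannot
 * separate writeback + invalidate from plain invalidate, both flush
 * slots point at indy_sc_wback_invalidate.
 */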

__initfunc(void indy_sc_init(void))
{
	if (indy_sc_probe()) {
		indy_sc_enable();
		bcops = &indy_sc_ops;
	}
}