1 /* $Id: indy_sc.c,v 1.9 1999/05/12 21:57:49 ulfc Exp $
3 * indy_sc.c: Indy cache management functions.
5 * Copyright (C) 1997 Ralf Baechle (ralf@gnu.org),
6 * derived from r4xx0.c by David S. Miller (dm@engr.sgi.com).
8 #include <linux/init.h>
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
13 #include <asm/bcache.h>
15 #include <asm/sgimc.h>
17 #include <asm/pgtable.h>
18 #include <asm/system.h>
19 #include <asm/bootinfo.h>
20 #include <asm/sgialib.h>
21 #include <asm/mmu_context.h>
23 /* Secondary cache size in bytes, if present. */
24 static unsigned long scache_size
;
/* Hard-wired 512 KB secondary-cache size used by the index math below. */
28 #define SC_SIZE 0x00080000
/* NOTE(review): SC_LINE (the cache line size) is defined on a source line
   not visible in this chunk; CI_MASK and SC_ROUND both depend on it. */
/* Mask an address down to its line-aligned index within the scache. */
30 #define CI_MASK (SC_SIZE - SC_LINE)
/* Bump a byte count so truncation to a line boundary rounds up. */
31 #define SC_ROUND(n) ((n) + SC_LINE - 1)
/* Cache index (line-aligned offset inside the scache) of an address. */
32 #define SC_INDEX(n) ((n) & CI_MASK)
/*
 * indy_sc_wipe() - flush the secondary cache between two line indices
 * ('first' and 'last', inclusive).
 *
 * The inline asm temporarily switches the CPU into 64-bit addressing
 * via the Status register (CP0 $12), forms XKPHYS-style addresses by
 * OR-ing 0x9000000080000000 onto both indices, flushes the range, and
 * then drops back to 32-bit mode.
 *
 * NOTE(review): most of the asm body (original lines 37-60) is missing
 * from this chunk, so the exact cache instructions used cannot be
 * confirmed here — consult the complete source.
 */
34 static inline void indy_sc_wipe(unsigned long first
, unsigned long last
)
36 __asm__
__volatile__("
41 li $1, 0x80 # Go 64 bit
44 dli $1, 0x9000000080000000
45 or %0, $1 # first line to flush
46 or %1, $1 # last line to flush
53 mtc0 $2, $12 # Back to 32 bit
58 : "r" (first
), "r" (last
)
/*
 * indy_sc_wback_invalidate() - write back and invalidate the secondary
 * cache lines covering [addr, addr + size).  Interrupts are disabled
 * around the wipe (__save_and_cli / __restore_flags).
 */
62 static void indy_sc_wback_invalidate(unsigned long addr
, unsigned long size
)
64 unsigned long first_line
, last_line
;
/* NOTE(review): 'flags' is used below but its declaration sits on a
   line missing from this chunk. */
68 printk("indy_sc_wback_invalidate[%08lx,%08lx]", addr
, size
);
70 /* Which lines to flush? */
71 first_line
= SC_INDEX(addr
);
/* NOTE(review): the next assignment is immediately overwritten by the
   one after it (dead store) — looks like a leftover, but intervening
   original lines (72, 74) are missing from this chunk, so confirm
   against the complete source before removing it. */
73 last_line
= SC_INDEX(addr
);
75 last_line
= SC_INDEX(addr
+ size
- 1);
77 __save_and_cli(flags
);
/* No index wrap: one contiguous index range covers the region. */
78 if (first_line
<= last_line
) {
79 indy_sc_wipe(first_line
, last_line
);
83 /* Cache index wrap around. Due to the way the buddy system works
84 this case should not happen. We're prepared to handle it,
86 indy_sc_wipe(first_line
, SC_SIZE
- SC_LINE
);
87 indy_sc_wipe(0, last_line
);
89 __restore_flags(flags
);
/*
 * indy_sc_enable() - turn on the R4600 secondary cache.
 *
 * The enable sequence is inline asm; its body (original lines 101-119)
 * is missing from this chunk, so only the scratch outputs (tmp1, tmp2,
 * addr) are visible here.
 */
92 static void indy_sc_enable(void)
94 unsigned long addr
, tmp1
, tmp2
;
96 /* This is really cool... */
98 printk("Enabling R4600 SCACHE\n");
100 __asm__
__volatile__("
120 : "=r" (tmp1
), "=r" (tmp2
), "=r" (addr
));
/*
 * indy_sc_disable() - turn off the R4600 secondary cache.
 *
 * Mirror of indy_sc_enable(); the asm body (original lines 131-149) is
 * missing from this chunk, so only the scratch outputs (tmp1..tmp3)
 * are visible here.
 */
123 static void indy_sc_disable(void)
125 unsigned long tmp1
, tmp2
, tmp3
;
128 printk("Disabling R4600 SCACHE\n");
130 __asm__
__volatile__("
150 " : "=r" (tmp1
), "=r" (tmp2
), "=r" (tmp3
));
/*
 * indy_sc_probe() - detect the secondary cache by bit-banging a serial
 * EEPROM hanging off the SGIMC CPU-control register.  Presumably
 * returns nonzero when a scache is present (indy_sc_init() treats a
 * true result as "install the ops") — confirm against the full source.
 * Marked __initfunc: discarded after boot-time initialisation.
 *
 * NOTE(review): loop counters 'i'/'n' and the sampled value 'tmp' are
 * used below but declared on lines missing from this chunk.
 */
153 __initfunc(static inline int indy_sc_probe(void))
155 volatile unsigned int *cpu_control
;
/* Serial EEPROM read command, shifted out bit by bit below. */
156 unsigned short cmd
= 0xc220;
157 unsigned long data
= 0;
/* NOTE(review): the two assignments below target cpu_control with
   different MC register addresses; the machine-variant test selecting
   between them is on lines missing from this chunk — confirm. */
161 cpu_control
= (volatile unsigned int *) KSEG1ADDR(0x1fa00034);
163 cpu_control
= (volatile unsigned int *) KSEG1ADDR(0x1fa00030);
/* Bit-bang helpers over the memory-mapped control register. */
165 #define DEASSERT(bit) (*(cpu_control) &= (~(bit)))
166 #define ASSERT(bit) (*(cpu_control) |= (bit))
167 #define DELAY for(n = 0; n < 100000; n++) __asm__ __volatile__("")
/* Drive the EEPROM interface to a known idle state. */
168 DEASSERT(SGIMC_EEPROM_PRE
);
169 DEASSERT(SGIMC_EEPROM_SDATAO
);
170 DEASSERT(SGIMC_EEPROM_SECLOCK
);
171 DEASSERT(SGIMC_EEPROM_PRE
);
/* Select the chip and clock out the 11 command bits of 'cmd'. */
173 ASSERT(SGIMC_EEPROM_CSEL
); ASSERT(SGIMC_EEPROM_SECLOCK
);
174 for(i
= 0; i
< 11; i
++) {
/* NOTE(review): the per-bit test choosing assert vs. deassert of
   SDATAO from 'cmd' sits on lines missing from this chunk. */
176 ASSERT(SGIMC_EEPROM_SDATAO
);
178 DEASSERT(SGIMC_EEPROM_SDATAO
);
179 DEASSERT(SGIMC_EEPROM_SECLOCK
);
180 ASSERT(SGIMC_EEPROM_SECLOCK
);
/* Command sent; release the data line, then clock in 16 data bits. */
183 DEASSERT(SGIMC_EEPROM_SDATAO
);
184 for(i
= 0; i
< (sizeof(unsigned short) * 8); i
++) {
187 DEASSERT(SGIMC_EEPROM_SECLOCK
);
189 ASSERT(SGIMC_EEPROM_SECLOCK
);
/* Sample SDATAI into the result ('tmp' is read from the control
   register on a line missing from this chunk). */
193 if(tmp
& SGIMC_EEPROM_SDATAI
)
/* Deselect the chip and park the interface. */
196 DEASSERT(SGIMC_EEPROM_SECLOCK
);
197 DEASSERT(SGIMC_EEPROM_CSEL
);
198 ASSERT(SGIMC_EEPROM_PRE
);
199 ASSERT(SGIMC_EEPROM_SECLOCK
);
207 printk("R4600/R5000 SCACHE size %ldK, linesize 32 bytes.\n",
213 /* XXX Check with wje if the Indy caches can differentiate between
214 writeback + invalidate and just invalidate. */
/*
 * Secondary-cache operations table.  Two consecutive slots both point
 * at indy_sc_wback_invalidate — consistent with the XXX note in the
 * original source that the hardware may not distinguish plain
 * invalidate from writeback+invalidate.
 * NOTE(review): the remaining initialisers and the closing brace are on
 * lines missing from this chunk.
 */
215 struct bcache_ops indy_sc_ops
= {
218 indy_sc_wback_invalidate
,
219 indy_sc_wback_invalidate
/*
 * indy_sc_init() - install the secondary-cache ops when the probe
 * finds a scache.  Marked __initfunc: discarded after boot.
 * NOTE(review): the body is truncated in this chunk (closing braces and
 * any further setup on later lines are not visible).
 */
222 __initfunc(void indy_sc_init(void))
224 if (indy_sc_probe()) {
226 bcops
= &indy_sc_ops
;