arch/mips/mm/sc-rm7k.c
/*
 * sc-rm7k.c: RM7000 cache management functions.
 *
 * Copyright (C) 1997, 2001, 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#undef DEBUG

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/addrspace.h>
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/cacheflush.h> /* for run_uncached() */
/* Secondary cache parameters. */
#define sc_lsize        32
#define scache_size     (256*1024)      /* Fixed to 256KiB on RM7000 */

/* Tertiary cache parameters. */
#define tc_lsize        32
#define tc_pagesize     (32*128)        /* 32*128 = 4KiB per tcache page op */
extern unsigned long icache_way_size, dcache_way_size;
static unsigned long tcache_size;

#include <asm/r4kcache.h>

static int rm7k_tcache_init;
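/*
 * The functions below are wired up through rm7k_sc_ops/bcops, the
 * board-cache hooks the kernel calls when flushing around DMA
 * (see the bcache_ops structure further down).
 */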
/*
 * Write back and invalidate the secondary cache before DMA.
 * (XXX These need to be fixed ...)
 */
static void rm7k_sc_wback_inv(unsigned long addr, unsigned long size)
{
        unsigned long end, a;

        pr_debug("rm7k_sc_wback_inv[%08lx,%08lx]", addr, size);

        /* Catch bad driver code */
        BUG_ON(size == 0);

        blast_scache_range(addr, addr + size);

        if (!rm7k_tcache_init)
                return;
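        /*
         * The tertiary cache is invalidated a page at a time: round
         * down to a tc_pagesize boundary and walk every page that
         * intersects [addr, addr + size).
         */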
        a = addr & ~(tc_pagesize - 1);
        end = (addr + size - 1) & ~(tc_pagesize - 1);
        while (1) {
                invalidate_tcache_page(a);      /* Page_Invalidate_T */
                if (a == end)
                        break;
                a += tc_pagesize;
        }
}
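/*
 * Invalidate the secondary cache for a DMA buffer without writing
 * dirty lines back (blast_inv_scache_range is invalidate-only).
 */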
static void rm7k_sc_inv(unsigned long addr, unsigned long size)
{
        unsigned long end, a;

        pr_debug("rm7k_sc_inv[%08lx,%08lx]", addr, size);

        /* Catch bad driver code */
        BUG_ON(size == 0);

        blast_inv_scache_range(addr, addr + size);

        if (!rm7k_tcache_init)
                return;

        a = addr & ~(tc_pagesize - 1);
        end = (addr + size - 1) & ~(tc_pagesize - 1);
        while (1) {
                invalidate_tcache_page(a);      /* Page_Invalidate_T */
                if (a == end)
                        break;
                a += tc_pagesize;
        }
}
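/*
 * Invalidate the entire tertiary cache, one tc_pagesize page at a
 * time, using the Page_Invalidate_T cache op.
 */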
static void blast_rm7k_tcache(void)
{
        unsigned long start = CKSEG0ADDR(0);
        unsigned long end = start + tcache_size;

        write_c0_taglo(0);

        while (start < end) {
                cache_op(Page_Invalidate_T, start);
                start += tc_pagesize;
        }
}
/*
 * This function is executed in uncached address space.
 */
static __cpuinit void __rm7k_tc_enable(void)
{
        int i;

        set_c0_config(RM7K_CONF_TE);

        write_c0_taglo(0);
        write_c0_taghi(0);
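        /* Store the zeroed tag to every line, marking it invalid. */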
        for (i = 0; i < tcache_size; i += tc_lsize)
                cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
}
static __cpuinit void rm7k_tc_enable(void)
{
        if (read_c0_config() & RM7K_CONF_TE)
                return;

        BUG_ON(tcache_size == 0);

        run_uncached(__rm7k_tc_enable);
}
/*
 * This function is executed in uncached address space.
 */
static __cpuinit void __rm7k_sc_enable(void)
{
        int i;

        set_c0_config(RM7K_CONF_SE);

        write_c0_taglo(0);
        write_c0_taghi(0);

        for (i = 0; i < scache_size; i += sc_lsize)
                cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
}
static __cpuinit void rm7k_sc_enable(void)
{
        if (read_c0_config() & RM7K_CONF_SE)
                return;

        pr_info("Enabling secondary cache...\n");
        run_uncached(__rm7k_sc_enable);

        if (rm7k_tcache_init)
                rm7k_tc_enable();
}
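/*
 * The tertiary cache must be flushed before the TE bit is cleared,
 * and with interrupts off so nothing refills it in between.
 */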
static void rm7k_tc_disable(void)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_rm7k_tcache();
        clear_c0_config(RM7K_CONF_TE);
        local_irq_restore(flags);
}
static void rm7k_sc_disable(void)
{
        clear_c0_config(RM7K_CONF_SE);

        if (rm7k_tcache_init)
                rm7k_tc_disable();
}
static struct bcache_ops rm7k_sc_ops = {
        .bc_enable = rm7k_sc_enable,
        .bc_disable = rm7k_sc_disable,
        .bc_wback_inv = rm7k_sc_wback_inv,
        .bc_inv = rm7k_sc_inv
};
/*
 * This is a probing function like the one found in c-r4k.c; we look
 * for the wrap-around point with different addresses.
 */
static __cpuinit void __probe_tcache(void)
{
        unsigned long flags, addr, begin, end, pow2;

        begin = (unsigned long) &_stext;
        begin &= ~((8 * 1024 * 1024) - 1);
        end = begin + (8 * 1024 * 1024);

        local_irq_save(flags);

        set_c0_config(RM7K_CONF_TE);
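        /*
         * Strategy: make lines at power-of-two offsets from begin
         * valid, store a zero tag at begin itself, then read tags
         * back at power-of-two offsets.  The first offset whose index
         * wraps around onto the base line reads back the zero tag,
         * and that offset is the tcache size.
         */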
        /* Fill size-multiple lines with a valid tag */
        pow2 = (256 * 1024);
        for (addr = begin; addr <= end; addr = (begin + pow2)) {
                unsigned long *p = (unsigned long *) addr;
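                /* Dummy read through p to pull this line into the tcache. */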
                __asm__ __volatile__("nop" : : "r" (*p));
                pow2 <<= 1;
        }
        /* Load first line with a 0 tag, to check after */
        write_c0_taglo(0);
        write_c0_taghi(0);
        cache_op(Index_Store_Tag_T, begin);

        /* Look for the wrap-around */
        pow2 = (512 * 1024);
        for (addr = begin + (512 * 1024); addr <= end; addr = begin + pow2) {
                cache_op(Index_Load_Tag_T, addr);
                if (!read_c0_taglo())
                        break;
                pow2 <<= 1;
        }
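        /*
         * addr now holds the lowest address that aliased back onto the
         * zero tag at begin, i.e. begin + tcache size.
         */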
        addr -= begin;
        tcache_size = addr;

        clear_c0_config(RM7K_CONF_TE);

        local_irq_restore(flags);
}
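/*
 * Probe and enable the secondary and tertiary caches.  Called from
 * the per-CPU cache setup code for RM7000 parts.
 */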
void __cpuinit rm7k_sc_init(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();

        if (config & RM7K_CONF_SC)
                return;
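        /*
         * The RM7000 scache is 4-way set associative, so each way is
         * scache_size / 4 = 64KiB; waybit is the index of the lowest
         * set bit of the way size.
         */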
        c->scache.linesz = sc_lsize;
        c->scache.ways = 4;
        c->scache.waybit = __ffs(scache_size / c->scache.ways);
        c->scache.waysize = scache_size / c->scache.ways;
        c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
        printk(KERN_INFO "Secondary cache size %dK, linesize %d bytes.\n",
               (scache_size >> 10), sc_lsize);
        if (!(config & RM7K_CONF_SE))
                rm7k_sc_enable();

        bcops = &rm7k_sc_ops;

        /*
         * While we're at it, let's deal with the tertiary cache.
         */

        rm7k_tcache_init = 0;
        tcache_size = 0;

        if (config & RM7K_CONF_TC)
                return;
        /*
         * There is no efficient way to ask the hardware for the size
         * of the tcache, so we must probe for it.
         */
        run_uncached(__probe_tcache);
        rm7k_tc_enable();
        rm7k_tcache_init = 1;
        c->tcache.linesz = tc_lsize;
        c->tcache.ways = 1;
        pr_info("Tertiary cache size %ldK.\n", (tcache_size >> 10));
}