/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>

static inline void __enable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;		\
				nop; "			\
			: : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrset	r0, %0;		\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory");
}

static inline void __disable_dcache_msr(void)
{
	__asm__ __volatile__ ("	msrclr	r0, %0;		\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory");
}

static inline void __enable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_ICE)			\
			: "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_ICE)			\
			: "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
	__asm__ __volatile__ ("	mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
			:				\
			: "i" (MSR_DCE)			\
			: "memory", "r12");
}

/* Helper macro for computing the limits of cache range loops
 *
 * The end address can be unaligned, which is fine for the C
 * implementation; the ASM implementation aligns it in the ASM macros.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
do {									\
	int align = ~(cache_line_length - 1);				\
	end = min(start + cache_size, end);				\
	start &= align;							\
} while (0);

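/*
 * Worked example with illustrative values: for cache_line_length = 16
 * and cache_size = 0x8000, a request for start = 0x1005, end = 0x1021
 * yields
 *	align = ~0xf
 *	end   = min(0x1005 + 0x8000, 0x1021) = 0x1021
 *	start = 0x1005 & ~0xf = 0x1000
 * i.e. start is rounded down to a line boundary and end is clamped so
 * a range loop never covers more than one whole cache.
 */
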
/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on that cacheline
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size - line_length;			\
	int step = -line_length;					\
	WARN_ON(step >= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, r0;		\
					bgtid	%0, 1b;		\
					addk	%0, %0, %1;	\
					" : : "r" (len), "r" (step)	\
					: "memory");			\
} while (0);

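/*
 * How the loop above works: %0 starts at the offset of the last cache
 * line (cache_size - line_length) and 'op' is applied with a zero index
 * register (r0). 'bgtid' branches back while %0 is still positive and,
 * as a delay-slot branch, always executes the following 'addk', which
 * adds the negative 'step' - so the offset walks down one line per
 * iteration until the line at offset 0 has been processed.
 */
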
/* Used for wdc.flush/clear, which can use rB for the offset; that is
 * not possible for simple wdc or wic.
 *
 * The start address is cache aligned.
 * The end address is not aligned: if end is aligned, I have to subtract
 * a cacheline length because I can't flush/invalidate the next cacheline.
 * If it is not, I align it because I will flush/invalidate the whole line.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
do {									\
	int step = -line_length;					\
	int align = ~(line_length - 1);					\
	int count;							\
	end = ((end & align) == end) ? end - line_length : end & align; \
	count = end - start;						\
	WARN_ON(count < 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, %1;		\
					bgtid	%1, 1b;		\
					addk	%1, %1, %2;	\
					" : : "r" (start), "r" (count),	\
					"r" (step) : "memory");		\
} while (0);

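/*
 * Example of the end fixup above (illustrative values, line_length = 16):
 * an already-aligned end = 0x1020 becomes 0x1010, since the line at
 * 0x1020 itself must not be touched; an unaligned end = 0x1021 is
 * rounded down to 0x1020, because that whole line is flushed or
 * invalidated anyway.
 */
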
/* Only the first parameter of OP is used - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
do {									\
	int volatile temp;						\
	int align = ~(line_length - 1);					\
	end = ((end & align) == end) ? end - line_length : end & align; \
	WARN_ON(end - start < 0);					\
									\
	__asm__ __volatile__ (" 1:	" #op "	%1, r0;		\
					cmpu	%0, %1, %2;	\
					bgtid	%0, 1b;		\
					addk	%1, %1, %3;	\
				" : : "r" (temp), "r" (start), "r" (end), \
					"r" (line_length) : "memory");	\
} while (0);

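/*
 * The two range loops differ because of the instruction encodings:
 * wdc.flush/wdc.clear take a real offset in rB, so CACHE_RANGE_LOOP_2
 * keeps 'start' fixed and counts the rB offset down to zero with bgtid
 * alone, while plain wic/wdc ignore rB, forcing CACHE_RANGE_LOOP_1 to
 * advance the address itself and test it against 'end' with an explicit
 * cmpu each iteration.
 */
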
#define ASM_LOOP

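/*
 * With ASM_LOOP defined, the functions below use the assembly loop
 * macros above; removing the define switches every one of them to the
 * equivalent C for-loop kept under #ifndef, issuing one wic/wdc per
 * cache line, which is useful when debugging the assembly variants.
 *
 * Naming convention for the flush/invalidate functions (broadly):
 *	*_msr_irq	IRQs off, cache disabled via msrclr/msrset
 *	*_nomsr_irq	IRQs off, cache disabled via rmsr read-modify-write
 *	*_noirq		no IRQ handling, cache left enabled
 */
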
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;" \
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}

static void __flush_icache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;" \
				: : "r" (i));
#endif

	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_range_noirq(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic	%0, r0;" \
				: : "r" (i));
#endif
}

static void __flush_icache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}

static void __flush_icache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
			__asm__ __volatile__ ("wic	%0, r0;" \
					: : "r" (i));
#endif
}

static void __invalidate_dcache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc)
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
}

static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
					wdc)
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc	%0, r0;" \
					: : "r" (i));
#endif
}

static void __invalidate_dcache_range_wb(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.clear	%0, r0;" \
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
						unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
						unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif

	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
						unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc	%0, r0;" \
				: : "r" (i));
#endif

	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
				wdc.flush);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
			__asm__ __volatile__ ("wdc.flush	%0, r0;" \
					: : "r" (i));
#endif
}

static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
				(unsigned int)start, (unsigned int) end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush	%0, r0;" \
				: : "r" (i));
#endif
}

/* struct for wb caches and for wt caches */
struct scache *mbc;

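/*
 * mbc acts as a dispatch table: the generic wrappers in
 * <asm/cacheflush.h> (enable_icache(), flush_dcache_range(), ...) are
 * expected to call through these function pointers, so selecting the
 * right scache instance below is all that is needed to match the
 * kernel to a particular cache configuration.
 */
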
/* new wb cache model */
const struct scache wb_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* The only difference is in the ie, id, de and dd functions */
const struct scache wb_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* Old WT cache model: disable IRQs and turn the cache off */
const struct scache wt_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_msr_irq,
	.iflr = __flush_icache_range_msr_irq,
	.iin = __flush_icache_all_msr_irq,
	.iinr = __flush_icache_range_msr_irq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_msr_irq,
	.dflr = __invalidate_dcache_range_msr_irq_wt,
	.din = __invalidate_dcache_all_msr_irq,
	.dinr = __invalidate_dcache_range_msr_irq_wt,
};

const struct scache wt_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_nomsr_irq,
	.iflr = __flush_icache_range_nomsr_irq,
	.iin = __flush_icache_all_nomsr_irq,
	.iinr = __flush_icache_range_nomsr_irq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_nomsr_irq,
	.dflr = __invalidate_dcache_range_nomsr_irq,
	.din = __invalidate_dcache_all_nomsr_irq,
	.dinr = __invalidate_dcache_range_nomsr_irq,
};

/* New WT cache model for newer MicroBlaze versions */
const struct scache wt_msr_noirq = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

const struct scache wt_nomsr_noirq = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

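/*
 * Summary of the six tables above, keyed by what microblaze_cache_init()
 * probes below:
 *
 *	dcache_wb	MSR instr	ver_code	table
 *	yes		yes		any		wb_msr
 *	yes		no		any		wb_nomsr
 *	no		yes		< 7.20.a	wt_msr
 *	no		no		< 7.20.a	wt_nomsr
 *	no		yes		>= 7.20.a	wt_msr_noirq
 *	no		no		>= 7.20.a	wt_nomsr_noirq
 */
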
/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
#define CPUVER_7_20_A	0x0c
#define CPUVER_7_20_D	0x0f

#define INFO(s)	printk(KERN_INFO "cache: " s "\n");

void microblaze_cache_init(void)
{
	if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
		if (cpuinfo.dcache_wb) {
			INFO("wb_msr");
			mbc = (struct scache *)&wb_msr;
			if (cpuinfo.ver_code < CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				INFO("WB won't work properly");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				INFO("wt_msr_noirq");
				mbc = (struct scache *)&wt_msr_noirq;
			} else {
				INFO("wt_msr");
				mbc = (struct scache *)&wt_msr;
			}
		}
	} else {
		if (cpuinfo.dcache_wb) {
			INFO("wb_nomsr");
			mbc = (struct scache *)&wb_nomsr;
			if (cpuinfo.ver_code < CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				INFO("WB won't work properly");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				INFO("wt_nomsr_noirq");
				mbc = (struct scache *)&wt_nomsr_noirq;
			} else {
				INFO("wt_nomsr");
				mbc = (struct scache *)&wt_nomsr;
			}
		}
	}

	/* invalidate_dcache(); */
	enable_dcache();

	invalidate_icache();
	enable_icache();
}