/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */
13 #include <asm/cacheflush.h>
14 #include <linux/cache.h>
15 #include <asm/cpuinfo.h>
18 static inline void __enable_icache_msr(void)
20 __asm__
__volatile__ (" msrset r0, %0; \
22 : : "i" (MSR_ICE
) : "memory");
25 static inline void __disable_icache_msr(void)
27 __asm__
__volatile__ (" msrclr r0, %0; \
29 : : "i" (MSR_ICE
) : "memory");
32 static inline void __enable_dcache_msr(void)
34 __asm__
__volatile__ (" msrset r0, %0; \
41 static inline void __disable_dcache_msr(void)
43 __asm__
__volatile__ (" msrclr r0, %0; \
50 static inline void __enable_icache_nomsr(void)
52 __asm__
__volatile__ (" mfs r12, rmsr; \
62 static inline void __disable_icache_nomsr(void)
64 __asm__
__volatile__ (" mfs r12, rmsr; \
74 static inline void __enable_dcache_nomsr(void)
76 __asm__
__volatile__ (" mfs r12, rmsr; \
86 static inline void __disable_dcache_nomsr(void)
88 __asm__
__volatile__ (" mfs r12, rmsr; \
/* Helper macro for computing the limits of cache range loops
 *
 * End address can be unaligned which is OK for C implementation.
 * ASM implementation align it in ASM macros
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)	\
do {									\
	int align = ~(cache_line_length - 1);				\
	/* Never walk more than one full cache worth of lines. */	\
	end = min(start + cache_size, end);				\
	/* Align start down to a cache-line boundary. */		\
	start &= align;							\
} while (0)
/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on that cacheline.  The asm loop counts the offset in %0
 * down from (cache_size - line_length) to 0 in steps of -line_length.
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)			\
do {									\
	unsigned int len = cache_size - line_length;			\
	int step = -line_length;					\
	WARN_ON(step >= 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, r0;		\
					bgtid	%0, 1b;		\
					addk	%0, %0, %1;	\
					" : : "r" (len), "r" (step)	\
					: "memory");			\
} while (0)
/* Used for wdc.flush/clear which can use rB for offset which is not possible
 * to use for simple wdc or wic.
 *
 * start address is cache aligned
 * end address is not aligned, if end is aligned then I have to subtract
 * cacheline length because I can't flush/invalidate the next cacheline.
 * If it is not, I align it because I will flush/invalidate whole line.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)			\
do {									\
	int step = -line_length;					\
	int align = ~(line_length - 1);					\
	int count;							\
	end = ((end & align) == end) ? end - line_length : end & align;	\
	count = end - start;						\
	WARN_ON(count < 0);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%0, %1;		\
					bgtid	%1, 1b;		\
					addk	%1, %1, %2;	\
					" : : "r" (start), "r" (count),	\
					"r" (step) : "memory");		\
} while (0)
/* It is used only first parameter for OP - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)			\
do {									\
	int volatile temp;						\
	int align = ~(line_length - 1);					\
	end = ((end & align) == end) ? end - line_length : end & align;	\
	/* start/end are unsigned, so compare directly instead of	\
	 * testing (end - start < 0), which can never be true.		\
	 */								\
	WARN_ON(end < start);						\
									\
	__asm__ __volatile__ (" 1:	" #op "	%1, r0;		\
					cmpu	%0, %1, %2;	\
					bgtid	%0, 1b;		\
					addk	%1, %1, %3;	\
					" : : "r" (temp), "r" (start), "r" (end),\
					"r" (line_length) : "memory");	\
} while (0)
170 static void __flush_icache_range_msr_irq(unsigned long start
, unsigned long end
)
176 pr_debug("%s: start 0x%x, end 0x%x\n", __func__
,
177 (unsigned int)start
, (unsigned int) end
);
179 CACHE_LOOP_LIMITS(start
, end
,
180 cpuinfo
.icache_line_length
, cpuinfo
.icache_size
);
182 local_irq_save(flags
);
183 __disable_icache_msr();
186 CACHE_RANGE_LOOP_1(start
, end
, cpuinfo
.icache_line_length
, wic
);
188 for (i
= start
; i
< end
; i
+= cpuinfo
.icache_line_length
)
189 __asm__
__volatile__ ("wic %0, r0;" \
192 __enable_icache_msr();
193 local_irq_restore(flags
);
196 static void __flush_icache_range_nomsr_irq(unsigned long start
,
203 pr_debug("%s: start 0x%x, end 0x%x\n", __func__
,
204 (unsigned int)start
, (unsigned int) end
);
206 CACHE_LOOP_LIMITS(start
, end
,
207 cpuinfo
.icache_line_length
, cpuinfo
.icache_size
);
209 local_irq_save(flags
);
210 __disable_icache_nomsr();
213 CACHE_RANGE_LOOP_1(start
, end
, cpuinfo
.icache_line_length
, wic
);
215 for (i
= start
; i
< end
; i
+= cpuinfo
.icache_line_length
)
216 __asm__
__volatile__ ("wic %0, r0;" \
220 __enable_icache_nomsr();
221 local_irq_restore(flags
);
224 static void __flush_icache_range_noirq(unsigned long start
,
230 pr_debug("%s: start 0x%x, end 0x%x\n", __func__
,
231 (unsigned int)start
, (unsigned int) end
);
233 CACHE_LOOP_LIMITS(start
, end
,
234 cpuinfo
.icache_line_length
, cpuinfo
.icache_size
);
236 CACHE_RANGE_LOOP_1(start
, end
, cpuinfo
.icache_line_length
, wic
);
238 for (i
= start
; i
< end
; i
+= cpuinfo
.icache_line_length
)
239 __asm__
__volatile__ ("wic %0, r0;" \
244 static void __flush_icache_all_msr_irq(void)
250 pr_debug("%s\n", __func__
);
252 local_irq_save(flags
);
253 __disable_icache_msr();
255 CACHE_ALL_LOOP(cpuinfo
.icache_size
, cpuinfo
.icache_line_length
, wic
);
257 for (i
= 0; i
< cpuinfo
.icache_size
;
258 i
+= cpuinfo
.icache_line_length
)
259 __asm__
__volatile__ ("wic %0, r0;" \
262 __enable_icache_msr();
263 local_irq_restore(flags
);
266 static void __flush_icache_all_nomsr_irq(void)
272 pr_debug("%s\n", __func__
);
274 local_irq_save(flags
);
275 __disable_icache_nomsr();
277 CACHE_ALL_LOOP(cpuinfo
.icache_size
, cpuinfo
.icache_line_length
, wic
);
279 for (i
= 0; i
< cpuinfo
.icache_size
;
280 i
+= cpuinfo
.icache_line_length
)
281 __asm__
__volatile__ ("wic %0, r0;" \
284 __enable_icache_nomsr();
285 local_irq_restore(flags
);
288 static void __flush_icache_all_noirq(void)
293 pr_debug("%s\n", __func__
);
295 CACHE_ALL_LOOP(cpuinfo
.icache_size
, cpuinfo
.icache_line_length
, wic
);
297 for (i
= 0; i
< cpuinfo
.icache_size
;
298 i
+= cpuinfo
.icache_line_length
)
299 __asm__
__volatile__ ("wic %0, r0;" \
304 static void __invalidate_dcache_all_msr_irq(void)
310 pr_debug("%s\n", __func__
);
312 local_irq_save(flags
);
313 __disable_dcache_msr();
315 CACHE_ALL_LOOP(cpuinfo
.dcache_size
, cpuinfo
.dcache_line_length
, wdc
);
317 for (i
= 0; i
< cpuinfo
.dcache_size
;
318 i
+= cpuinfo
.dcache_line_length
)
319 __asm__
__volatile__ ("wdc %0, r0;" \
322 __enable_dcache_msr();
323 local_irq_restore(flags
);
326 static void __invalidate_dcache_all_nomsr_irq(void)
332 pr_debug("%s\n", __func__
);
334 local_irq_save(flags
);
335 __disable_dcache_nomsr();
337 CACHE_ALL_LOOP(cpuinfo
.dcache_size
, cpuinfo
.dcache_line_length
, wdc
);
339 for (i
= 0; i
< cpuinfo
.dcache_size
;
340 i
+= cpuinfo
.dcache_line_length
)
341 __asm__
__volatile__ ("wdc %0, r0;" \
344 __enable_dcache_nomsr();
345 local_irq_restore(flags
);
348 static void __invalidate_dcache_all_noirq_wt(void)
353 pr_debug("%s\n", __func__
);
355 CACHE_ALL_LOOP(cpuinfo
.dcache_size
, cpuinfo
.dcache_line_length
, wdc
)
357 for (i
= 0; i
< cpuinfo
.dcache_size
;
358 i
+= cpuinfo
.dcache_line_length
)
359 __asm__
__volatile__ ("wdc %0, r0;" \
364 static void __invalidate_dcache_all_wb(void)
369 pr_debug("%s\n", __func__
);
371 CACHE_ALL_LOOP(cpuinfo
.dcache_size
, cpuinfo
.dcache_line_length
,
374 for (i
= 0; i
< cpuinfo
.dcache_size
;
375 i
+= cpuinfo
.dcache_line_length
)
376 __asm__
__volatile__ ("wdc %0, r0;" \
381 static void __invalidate_dcache_range_wb(unsigned long start
,
387 pr_debug("%s: start 0x%x, end 0x%x\n", __func__
,
388 (unsigned int)start
, (unsigned int) end
);
390 CACHE_LOOP_LIMITS(start
, end
,
391 cpuinfo
.dcache_line_length
, cpuinfo
.dcache_size
);
393 CACHE_RANGE_LOOP_2(start
, end
, cpuinfo
.dcache_line_length
, wdc
.clear
);
395 for (i
= start
; i
< end
; i
+= cpuinfo
.dcache_line_length
)
396 __asm__
__volatile__ ("wdc.clear %0, r0;" \
401 static void __invalidate_dcache_range_nomsr_wt(unsigned long start
,
407 pr_debug("%s: start 0x%x, end 0x%x\n", __func__
,
408 (unsigned int)start
, (unsigned int) end
);
409 CACHE_LOOP_LIMITS(start
, end
,
410 cpuinfo
.dcache_line_length
, cpuinfo
.dcache_size
);
413 CACHE_RANGE_LOOP_1(start
, end
, cpuinfo
.dcache_line_length
, wdc
);
415 for (i
= start
; i
< end
; i
+= cpuinfo
.dcache_line_length
)
416 __asm__
__volatile__ ("wdc %0, r0;" \
421 static void __invalidate_dcache_range_msr_irq_wt(unsigned long start
,
428 pr_debug("%s: start 0x%x, end 0x%x\n", __func__
,
429 (unsigned int)start
, (unsigned int) end
);
430 CACHE_LOOP_LIMITS(start
, end
,
431 cpuinfo
.dcache_line_length
, cpuinfo
.dcache_size
);
433 local_irq_save(flags
);
434 __disable_dcache_msr();
437 CACHE_RANGE_LOOP_1(start
, end
, cpuinfo
.dcache_line_length
, wdc
);
439 for (i
= start
; i
< end
; i
+= cpuinfo
.dcache_line_length
)
440 __asm__
__volatile__ ("wdc %0, r0;" \
444 __enable_dcache_msr();
445 local_irq_restore(flags
);
448 static void __invalidate_dcache_range_nomsr_irq(unsigned long start
,
455 pr_debug("%s: start 0x%x, end 0x%x\n", __func__
,
456 (unsigned int)start
, (unsigned int) end
);
458 CACHE_LOOP_LIMITS(start
, end
,
459 cpuinfo
.dcache_line_length
, cpuinfo
.dcache_size
);
461 local_irq_save(flags
);
462 __disable_dcache_nomsr();
465 CACHE_RANGE_LOOP_1(start
, end
, cpuinfo
.dcache_line_length
, wdc
);
467 for (i
= start
; i
< end
; i
+= cpuinfo
.dcache_line_length
)
468 __asm__
__volatile__ ("wdc %0, r0;" \
472 __enable_dcache_nomsr();
473 local_irq_restore(flags
);
476 static void __flush_dcache_all_wb(void)
481 pr_debug("%s\n", __func__
);
483 CACHE_ALL_LOOP(cpuinfo
.dcache_size
, cpuinfo
.dcache_line_length
,
486 for (i
= 0; i
< cpuinfo
.dcache_size
;
487 i
+= cpuinfo
.dcache_line_length
)
488 __asm__
__volatile__ ("wdc.flush %0, r0;" \
493 static void __flush_dcache_range_wb(unsigned long start
, unsigned long end
)
498 pr_debug("%s: start 0x%x, end 0x%x\n", __func__
,
499 (unsigned int)start
, (unsigned int) end
);
501 CACHE_LOOP_LIMITS(start
, end
,
502 cpuinfo
.dcache_line_length
, cpuinfo
.dcache_size
);
504 CACHE_RANGE_LOOP_2(start
, end
, cpuinfo
.dcache_line_length
, wdc
.flush
);
506 for (i
= start
; i
< end
; i
+= cpuinfo
.dcache_line_length
)
507 __asm__
__volatile__ ("wdc.flush %0, r0;" \
512 /* struct for wb caches and for wt caches */
515 /* new wb cache model */
516 const struct scache wb_msr
= {
517 .ie
= __enable_icache_msr
,
518 .id
= __disable_icache_msr
,
519 .ifl
= __flush_icache_all_noirq
,
520 .iflr
= __flush_icache_range_noirq
,
521 .iin
= __flush_icache_all_noirq
,
522 .iinr
= __flush_icache_range_noirq
,
523 .de
= __enable_dcache_msr
,
524 .dd
= __disable_dcache_msr
,
525 .dfl
= __flush_dcache_all_wb
,
526 .dflr
= __flush_dcache_range_wb
,
527 .din
= __invalidate_dcache_all_wb
,
528 .dinr
= __invalidate_dcache_range_wb
,
531 /* There is only difference in ie, id, de, dd functions */
532 const struct scache wb_nomsr
= {
533 .ie
= __enable_icache_nomsr
,
534 .id
= __disable_icache_nomsr
,
535 .ifl
= __flush_icache_all_noirq
,
536 .iflr
= __flush_icache_range_noirq
,
537 .iin
= __flush_icache_all_noirq
,
538 .iinr
= __flush_icache_range_noirq
,
539 .de
= __enable_dcache_nomsr
,
540 .dd
= __disable_dcache_nomsr
,
541 .dfl
= __flush_dcache_all_wb
,
542 .dflr
= __flush_dcache_range_wb
,
543 .din
= __invalidate_dcache_all_wb
,
544 .dinr
= __invalidate_dcache_range_wb
,
547 /* Old wt cache model with disabling irq and turn off cache */
548 const struct scache wt_msr
= {
549 .ie
= __enable_icache_msr
,
550 .id
= __disable_icache_msr
,
551 .ifl
= __flush_icache_all_msr_irq
,
552 .iflr
= __flush_icache_range_msr_irq
,
553 .iin
= __flush_icache_all_msr_irq
,
554 .iinr
= __flush_icache_range_msr_irq
,
555 .de
= __enable_dcache_msr
,
556 .dd
= __disable_dcache_msr
,
557 .dfl
= __invalidate_dcache_all_msr_irq
,
558 .dflr
= __invalidate_dcache_range_msr_irq_wt
,
559 .din
= __invalidate_dcache_all_msr_irq
,
560 .dinr
= __invalidate_dcache_range_msr_irq_wt
,
563 const struct scache wt_nomsr
= {
564 .ie
= __enable_icache_nomsr
,
565 .id
= __disable_icache_nomsr
,
566 .ifl
= __flush_icache_all_nomsr_irq
,
567 .iflr
= __flush_icache_range_nomsr_irq
,
568 .iin
= __flush_icache_all_nomsr_irq
,
569 .iinr
= __flush_icache_range_nomsr_irq
,
570 .de
= __enable_dcache_nomsr
,
571 .dd
= __disable_dcache_nomsr
,
572 .dfl
= __invalidate_dcache_all_nomsr_irq
,
573 .dflr
= __invalidate_dcache_range_nomsr_irq
,
574 .din
= __invalidate_dcache_all_nomsr_irq
,
575 .dinr
= __invalidate_dcache_range_nomsr_irq
,
578 /* New wt cache model for newer Microblaze versions */
579 const struct scache wt_msr_noirq
= {
580 .ie
= __enable_icache_msr
,
581 .id
= __disable_icache_msr
,
582 .ifl
= __flush_icache_all_noirq
,
583 .iflr
= __flush_icache_range_noirq
,
584 .iin
= __flush_icache_all_noirq
,
585 .iinr
= __flush_icache_range_noirq
,
586 .de
= __enable_dcache_msr
,
587 .dd
= __disable_dcache_msr
,
588 .dfl
= __invalidate_dcache_all_noirq_wt
,
589 .dflr
= __invalidate_dcache_range_nomsr_wt
,
590 .din
= __invalidate_dcache_all_noirq_wt
,
591 .dinr
= __invalidate_dcache_range_nomsr_wt
,
594 const struct scache wt_nomsr_noirq
= {
595 .ie
= __enable_icache_nomsr
,
596 .id
= __disable_icache_nomsr
,
597 .ifl
= __flush_icache_all_noirq
,
598 .iflr
= __flush_icache_range_noirq
,
599 .iin
= __flush_icache_all_noirq
,
600 .iinr
= __flush_icache_range_noirq
,
601 .de
= __enable_dcache_nomsr
,
602 .dd
= __disable_dcache_nomsr
,
603 .dfl
= __invalidate_dcache_all_noirq_wt
,
604 .dflr
= __invalidate_dcache_range_nomsr_wt
,
605 .din
= __invalidate_dcache_all_noirq_wt
,
606 .dinr
= __invalidate_dcache_range_nomsr_wt
,
/* CPU version code for 7.20.c - see arch/microblaze/kernel/cpu/cpuinfo.c */
#define CPUVER_7_20_A	0x0c
#define CPUVER_7_20_D	0x0f

/* Boot-time cache configuration note.  No trailing semicolon, so the
 * macro expands to a single expression-statement and is safe inside
 * unbraced if/else bodies.
 */
#define INFO(s)	printk(KERN_INFO "cache: " s "\n")
615 void microblaze_cache_init(void)
617 if (cpuinfo
.use_instr
& PVR2_USE_MSR_INSTR
) {
618 if (cpuinfo
.dcache_wb
) {
620 mbc
= (struct scache
*)&wb_msr
;
621 if (cpuinfo
.ver_code
< CPUVER_7_20_D
) {
622 /* MS: problem with signal handling - hw bug */
623 INFO("WB won't work properly");
626 if (cpuinfo
.ver_code
>= CPUVER_7_20_A
) {
627 INFO("wt_msr_noirq");
628 mbc
= (struct scache
*)&wt_msr_noirq
;
631 mbc
= (struct scache
*)&wt_msr
;
635 if (cpuinfo
.dcache_wb
) {
637 mbc
= (struct scache
*)&wb_nomsr
;
638 if (cpuinfo
.ver_code
< CPUVER_7_20_D
) {
639 /* MS: problem with signal handling - hw bug */
640 INFO("WB won't work properly");
643 if (cpuinfo
.ver_code
>= CPUVER_7_20_A
) {
644 INFO("wt_nomsr_noirq");
645 mbc
= (struct scache
*)&wt_nomsr_noirq
;
648 mbc
= (struct scache
*)&wt_nomsr
;
652 /* invalidate_dcache(); */