/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 */
#ifndef __ASM_R4KCACHE_H
#define __ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set	noreorder			\n"		\
	"	.set	mips3				\n"		\
	"	cache	%0, %1				\n"		\
	"	.set	mips0				\n"		\
	"	.set	reorder"					\
	:								\
	: "i" (op), "m" (*(unsigned char *)(addr)))
static inline void flush_icache_line_indexed(unsigned long addr)
{
	cache_op(Index_Invalidate_I, addr);
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_D, addr);
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_I, addr);
}

static inline void flush_dcache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_D, addr);
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}
/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n"
		"1:\tcache %0,(%1)\n"
		"2:\t.set mips0\n\t"
		".set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		STR(PTR)"\t1b,2b\n\t"
		".previous"
		:
		: "i" (Hit_Invalidate_I), "r" (addr));
}
/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  We're talking about one cacheline unnecessarily getting invalidated
 * here so the penalty isn't overly severe.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		".set mips3\n"
		"1:\tcache %0,(%1)\n"
		"2:\t.set mips0\n\t"
		".set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		STR(PTR)"\t1b,2b\n\t"
		".previous"
		:
		: "i" (Hit_Writeback_Inv_D), "r" (addr));
}
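
/*
 * In both functions above the __ex_table entry pairs the cacheop at
 * label 1 with the continuation at label 2: if the access faults
 * (e.g. the trampoline page isn't mapped), the exception fixup code
 * resumes at 2 and the cacheop is silently skipped.
 */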
/*
 * This one is RM7000-specific: Page_Invalidate_T drops a whole page
 * worth of lines from the RM7000's tertiary cache.
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}
#define cache16_unroll32(base,op)					\
	__asm__ __volatile__("						\
		.set noreorder;						\
		.set mips3;						\
		cache %1, 0x000(%0); cache %1, 0x010(%0);		\
		cache %1, 0x020(%0); cache %1, 0x030(%0);		\
		cache %1, 0x040(%0); cache %1, 0x050(%0);		\
		cache %1, 0x060(%0); cache %1, 0x070(%0);		\
		cache %1, 0x080(%0); cache %1, 0x090(%0);		\
		cache %1, 0x0a0(%0); cache %1, 0x0b0(%0);		\
		cache %1, 0x0c0(%0); cache %1, 0x0d0(%0);		\
		cache %1, 0x0e0(%0); cache %1, 0x0f0(%0);		\
		cache %1, 0x100(%0); cache %1, 0x110(%0);		\
		cache %1, 0x120(%0); cache %1, 0x130(%0);		\
		cache %1, 0x140(%0); cache %1, 0x150(%0);		\
		cache %1, 0x160(%0); cache %1, 0x170(%0);		\
		cache %1, 0x180(%0); cache %1, 0x190(%0);		\
		cache %1, 0x1a0(%0); cache %1, 0x1b0(%0);		\
		cache %1, 0x1c0(%0); cache %1, 0x1d0(%0);		\
		cache %1, 0x1e0(%0); cache %1, 0x1f0(%0);		\
		.set mips0;						\
		.set reorder"						\
		:							\
		: "r" (base),						\
		  "i" (op));
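
/*
 * Each *_unroll32 macro issues 32 cacheops at the line-size stride, so
 * one invocation covers 0x200 bytes for 16-byte lines (0x400, 0x800 and
 * 0x1000 for the 32-, 64- and 128-byte variants below) - hence the loop
 * increments in the blast_* functions.  The indexed blasts walk one way
 * worth of index addresses and OR in the way-select bits (ws), stepping
 * ws by 1 << waybit until every way has been covered.
 */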
static inline void blast_dcache16(void)
{
	unsigned long start = KSEG0;
	unsigned long end = start + current_cpu_data.dcache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
	unsigned long ws_end = current_cpu_data.dcache.ways <<
	                       current_cpu_data.dcache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x200)
			cache16_unroll32(addr|ws, Index_Writeback_Inv_D);
}
static inline void blast_dcache16_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	while (start < end) {
		cache16_unroll32(start, Hit_Writeback_Inv_D);
		start += 0x200;
	}
}
static inline void blast_dcache16_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
	unsigned long ws_end = current_cpu_data.dcache.ways <<
	                       current_cpu_data.dcache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x200)
			cache16_unroll32(addr|ws, Index_Writeback_Inv_D);
}
static inline void blast_icache16(void)
{
	unsigned long start = KSEG0;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x200)
			cache16_unroll32(addr|ws, Index_Invalidate_I);
}
static inline void blast_icache16_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	while (start < end) {
		cache16_unroll32(start, Hit_Invalidate_I);
		start += 0x200;
	}
}
static inline void blast_icache16_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x200)
			cache16_unroll32(addr|ws, Index_Invalidate_I);
}
static inline void blast_scache16(void)
{
	unsigned long start = KSEG0;
	unsigned long end = start + current_cpu_data.scache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x200)
			cache16_unroll32(addr|ws, Index_Writeback_Inv_SD);
}
static inline void blast_scache16_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	while (start < end) {
		cache16_unroll32(start, Hit_Writeback_Inv_SD);
		start += 0x200;
	}
}
static inline void blast_scache16_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x200)
			cache16_unroll32(addr|ws, Index_Writeback_Inv_SD);
}
#define cache32_unroll32(base,op)					\
	__asm__ __volatile__("						\
		.set noreorder;						\
		.set mips3;						\
		cache %1, 0x000(%0); cache %1, 0x020(%0);		\
		cache %1, 0x040(%0); cache %1, 0x060(%0);		\
		cache %1, 0x080(%0); cache %1, 0x0a0(%0);		\
		cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);		\
		cache %1, 0x100(%0); cache %1, 0x120(%0);		\
		cache %1, 0x140(%0); cache %1, 0x160(%0);		\
		cache %1, 0x180(%0); cache %1, 0x1a0(%0);		\
		cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);		\
		cache %1, 0x200(%0); cache %1, 0x220(%0);		\
		cache %1, 0x240(%0); cache %1, 0x260(%0);		\
		cache %1, 0x280(%0); cache %1, 0x2a0(%0);		\
		cache %1, 0x2c0(%0); cache %1, 0x2e0(%0);		\
		cache %1, 0x300(%0); cache %1, 0x320(%0);		\
		cache %1, 0x340(%0); cache %1, 0x360(%0);		\
		cache %1, 0x380(%0); cache %1, 0x3a0(%0);		\
		cache %1, 0x3c0(%0); cache %1, 0x3e0(%0);		\
		.set mips0;						\
		.set reorder"						\
		:							\
		: "r" (base),						\
		  "i" (op));
static inline void blast_dcache32(void)
{
	unsigned long start = KSEG0;
	unsigned long end = start + current_cpu_data.dcache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
	unsigned long ws_end = current_cpu_data.dcache.ways <<
	                       current_cpu_data.dcache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws, Index_Writeback_Inv_D);
}
static inline void blast_dcache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	while (start < end) {
		cache32_unroll32(start, Hit_Writeback_Inv_D);
		start += 0x400;
	}
}
static inline void blast_dcache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
	unsigned long ws_end = current_cpu_data.dcache.ways <<
	                       current_cpu_data.dcache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws, Index_Writeback_Inv_D);
}
static inline void blast_icache32(void)
{
	unsigned long start = KSEG0;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
static inline void blast_icache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	while (start < end) {
		cache32_unroll32(start, Hit_Invalidate_I);
		start += 0x400;
	}
}
static inline void blast_icache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
static inline void blast_scache32(void)
{
	unsigned long start = KSEG0;
	unsigned long end = start + current_cpu_data.scache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws, Index_Writeback_Inv_SD);
}
static inline void blast_scache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	while (start < end) {
		cache32_unroll32(start, Hit_Writeback_Inv_SD);
		start += 0x400;
	}
}
static inline void blast_scache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400)
			cache32_unroll32(addr|ws, Index_Writeback_Inv_SD);
}
#define cache64_unroll32(base,op)					\
	__asm__ __volatile__("						\
		.set noreorder;						\
		.set mips3;						\
		cache %1, 0x000(%0); cache %1, 0x040(%0);		\
		cache %1, 0x080(%0); cache %1, 0x0c0(%0);		\
		cache %1, 0x100(%0); cache %1, 0x140(%0);		\
		cache %1, 0x180(%0); cache %1, 0x1c0(%0);		\
		cache %1, 0x200(%0); cache %1, 0x240(%0);		\
		cache %1, 0x280(%0); cache %1, 0x2c0(%0);		\
		cache %1, 0x300(%0); cache %1, 0x340(%0);		\
		cache %1, 0x380(%0); cache %1, 0x3c0(%0);		\
		cache %1, 0x400(%0); cache %1, 0x440(%0);		\
		cache %1, 0x480(%0); cache %1, 0x4c0(%0);		\
		cache %1, 0x500(%0); cache %1, 0x540(%0);		\
		cache %1, 0x580(%0); cache %1, 0x5c0(%0);		\
		cache %1, 0x600(%0); cache %1, 0x640(%0);		\
		cache %1, 0x680(%0); cache %1, 0x6c0(%0);		\
		cache %1, 0x700(%0); cache %1, 0x740(%0);		\
		cache %1, 0x780(%0); cache %1, 0x7c0(%0);		\
		.set mips0;						\
		.set reorder"						\
		:							\
		: "r" (base),						\
		  "i" (op));
static inline void blast_icache64(void)
{
	unsigned long start = KSEG0;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x800)
			cache64_unroll32(addr|ws, Index_Invalidate_I);
}
static inline void blast_icache64_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	while (start < end) {
		cache64_unroll32(start, Hit_Invalidate_I);
		start += 0x800;
	}
}
static inline void blast_icache64_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x800)
			cache64_unroll32(addr|ws, Index_Invalidate_I);
}
static inline void blast_scache64(void)
{
	unsigned long start = KSEG0;
	unsigned long end = start + current_cpu_data.scache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x800)
			cache64_unroll32(addr|ws, Index_Writeback_Inv_SD);
}
static inline void blast_scache64_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	while (start < end) {
		cache64_unroll32(start, Hit_Writeback_Inv_SD);
		start += 0x800;
	}
}
static inline void blast_scache64_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x800)
			cache64_unroll32(addr|ws, Index_Writeback_Inv_SD);
}
#define cache128_unroll32(base,op)					\
	__asm__ __volatile__("						\
		.set noreorder;						\
		.set mips3;						\
		cache %1, 0x000(%0); cache %1, 0x080(%0);		\
		cache %1, 0x100(%0); cache %1, 0x180(%0);		\
		cache %1, 0x200(%0); cache %1, 0x280(%0);		\
		cache %1, 0x300(%0); cache %1, 0x380(%0);		\
		cache %1, 0x400(%0); cache %1, 0x480(%0);		\
		cache %1, 0x500(%0); cache %1, 0x580(%0);		\
		cache %1, 0x600(%0); cache %1, 0x680(%0);		\
		cache %1, 0x700(%0); cache %1, 0x780(%0);		\
		cache %1, 0x800(%0); cache %1, 0x880(%0);		\
		cache %1, 0x900(%0); cache %1, 0x980(%0);		\
		cache %1, 0xa00(%0); cache %1, 0xa80(%0);		\
		cache %1, 0xb00(%0); cache %1, 0xb80(%0);		\
		cache %1, 0xc00(%0); cache %1, 0xc80(%0);		\
		cache %1, 0xd00(%0); cache %1, 0xd80(%0);		\
		cache %1, 0xe00(%0); cache %1, 0xe80(%0);		\
		cache %1, 0xf00(%0); cache %1, 0xf80(%0);		\
		.set mips0;						\
		.set reorder"						\
		:							\
		: "r" (base),						\
		  "i" (op));
static inline void blast_scache128(void)
{
	unsigned long start = KSEG0;
	unsigned long end = start + current_cpu_data.scache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x1000)
			cache128_unroll32(addr|ws, Index_Writeback_Inv_SD);
}
static inline void blast_scache128_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;

	while (start < end) {
		cache128_unroll32(start, Hit_Writeback_Inv_SD);
		start += 0x1000;
	}
}
static inline void blast_scache128_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
	unsigned long ws_end = current_cpu_data.scache.ways <<
	                       current_cpu_data.scache.waybit;
	unsigned long ws, addr;

	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x1000)
			cache128_unroll32(addr|ws, Index_Writeback_Inv_SD);
}
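
/*
 * Usage sketch (not part of this header): a caller such as the R4000
 * cache setup code would typically select a blast variant at probe time
 * from the detected line size.  A minimal, hypothetical example - the
 * helper name and the linesz field lookup are assumptions, not an API
 * defined here:
 *
 *	static void r4k_flush_dcache_all(void)
 *	{
 *		switch (current_cpu_data.dcache.linesz) {
 *		case 16: blast_dcache16(); break;
 *		case 32: blast_dcache32(); break;
 *		}
 *	}
 */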
#endif /* __ASM_R4KCACHE_H */