#ifndef XTENSA_CACHEASM_H
#define XTENSA_CACHEASM_H

/*
 * THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
 *
 * include/asm-xtensa/xtensa/cacheasm.h -- assembler-specific cache
 * related definitions that depend on CORE configuration.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2002 Tensilica Inc.
 */

#include <xtensa/coreasm.h>
/*
 * This header file defines assembler macros of the form:
 *	<x>cache_<func>
 * where <x> is 'i' or 'd' for instruction and data caches,
 * and <func> indicates the function of the macro.
 *
 * The following functions <func> are defined,
 * and apply only to the specified cache (I or D):
 *
 * reset
 *	Resets the cache.
 *
 * sync
 *	Makes sure any previous cache instructions have completed;
 *	ie. makes sure any previous cache control operations
 *	have had full effect and been synchronized to memory.
 *	Eg. any invalidate has completed (so as not to generate a hit),
 *	and any writebacks or other pipelined writes have reached memory.
 *
 * invalidate_line	(single cache line)
 * invalidate_region	(specified memory range)
 * invalidate_all	(entire cache)
 *	Invalidates all cache entries that cache
 *	data from the specified memory range.
 *	NOTE: locked entries are not invalidated.
 *
 * writeback_line	(single cache line)
 * writeback_region	(specified memory range)
 * writeback_all	(entire cache)
 *	Writes back to memory all dirty cache entries
 *	that cache data from the specified memory range,
 *	and marks these entries as clean.
 *	NOTE: on some future implementations, this might also invalidate.
 *	NOTE: locked entries are written back, but never invalidated.
 *	NOTE: instruction caches never implement writeback.
 *
 * writeback_inv_line	(single cache line)
 * writeback_inv_region	(specified memory range)
 * writeback_inv_all	(entire cache)
 *	Writes back to memory all dirty cache entries
 *	that cache data from the specified memory range,
 *	and invalidates these entries (including all clean
 *	cache entries that cache data from that range).
 *	NOTE: locked entries are written back but not invalidated.
 *	NOTE: instruction caches never implement writeback.
 *
 * lock_line		(single cache line)
 * lock_region		(specified memory range)
 *	Prefetches and locks the specified memory range into the cache.
 *	NOTE: if any part of the specified memory range cannot
 *	be locked, a ??? exception occurs. These macros don't
 *	do anything special (yet, anyway) to handle this situation.
 *
 * unlock_line		(single cache line)
 * unlock_region	(specified memory range)
 * unlock_all		(entire cache)
 *	Unlocks cache entries that cache the specified memory range.
 *	Entries not already locked are unaffected.
 *
 * A short usage sketch follows this comment.
 */
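/*
 * Usage sketch (hedged; register choices are arbitrary, and 'buf' and
 * BUF_SIZE are hypothetical names): each <x>cache_<func> macro is invoked
 * like an instruction, with address/temporary registers as operands, eg.:
 *
 *	movi	a2, buf			// start of region (clobbered)
 *	movi	a3, BUF_SIZE		// size in bytes (clobbered)
 *	dcache_writeback_inv_region	a2, a3, a4
 *
 *	icache_invalidate_all	a2, a3
 */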
/*************************** GENERIC -- ALL CACHES ***************************/


/*
 * The following macros assume the following cache size/parameter limits
 * in the current Xtensa core implementation:
 *	cache size:	1024 bytes minimum
 *	line size:	16 - 64 bytes
 *	way count:	1 - 4
 *
 * Minimum entries per way (ie. per associativity) = 1024 / 64 / 4 = 4
 * Hence the assumption that each loop can execute four cache instructions.
 *
 * Correspondingly, the offset range of the instructions is assumed able to
 * cover four lines, ie. offsets {0,1,2,3} * line_size are assumed valid for
 * both hit and indexed cache instructions. Ie. these offsets are all
 * valid: 0, 16, 32, 48, 64, 96, 128, 192 (for line sizes 16, 32, 64).
 * This is true of all original cache instructions
 * (dhi, ihi, dhwb, dhwbi, dii, iii), which have offsets
 * of 0 to 1020 in multiples of 4 (ie. 8 bits shifted by 2).
 * This is also true of subsequent cache instructions
 * (dhu, ihu, diu, iiu, diwb, diwbi, dpfl, ipfl), which have offsets
 * of 0 to 240 in multiples of 16 (ie. 4 bits shifted by 4).
 *
 * (Maximum cache size, currently 32k, doesn't affect the following macros.
 * Cache ways > MMU min page size cause aliasing, but that's another matter.)
 */
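/*
 * Worked example of the offset assumption (hypothetical 32-byte lines):
 * each loop iteration below issues cache instructions at offsets
 * 0, 32, 64, 96. All four fit both the 0..1020 (step 4) range of the
 * original instructions and the 0..240 (step 16) range of the later ones,
 * so the four-instructions-per-loop scheme is safe at every supported
 * line size.
 */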
/*
 * Macro to apply an 'indexed' cache instruction to the entire cache.
 *
 * Parameters:
 *	cainst		instruction/macro that takes an address register parameter
 *			and an offset parameter (in range 0 .. 3*linesize).
 *	size		size of cache in bytes
 *	linesize	size of cache line in bytes
 *	assoc_or1	number of associativities (ways/sets) in cache
 *			if all sets are affected by cainst,
 *			or 1 if only one set (or not all sets) of the cache
 *			is affected by cainst (eg. DIWB or DIWBI [not yet ISA defined]).
 *	aa, ab		unique address registers (temporaries)
 */
	.macro	cache_index_all		cainst, size, linesize, assoc_or1, aa, ab

	// Sanity-check on cache parameters:
	.ifne	(\size % (\linesize * \assoc_or1 * 4))
	.err	// cache configuration outside expected/supported range!
	.endif

	// \size byte cache, \linesize byte lines, \assoc_or1 way(s) affected by each \cainst.
	movi	\aa, (\size / (\linesize * \assoc_or1 * 4))
	// Possible improvement: need only loop if \aa > 1;
	// however, that particular condition is highly unlikely.
	movi	\ab, 0			// to iterate over cache
	floop	\aa, cachex\@
	\cainst	\ab, 0*\linesize
	\cainst	\ab, 1*\linesize
	\cainst	\ab, 2*\linesize
	\cainst	\ab, 3*\linesize
	addi	\ab, \ab, 4*\linesize	// move to next line
	floopend	\aa, cachex\@

	.endm
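/*
 * Usage sketch (hedged; the configuration numbers are hypothetical, and
 * a2/a3 are arbitrary scratch registers). Callers normally reach this
 * through the *_all macros below, but a direct expansion for an 8KB,
 * 2-way data cache with 32-byte lines would be:
 *
 *	cache_index_all	dii, 8192, 32, 2, a2, a3
 *
 * ie. a loop of 8192 / (32 * 2 * 4) = 32 iterations, each issuing four
 * dii instructions and advancing the index register by 128 bytes.
 */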
/*
 * Macro to apply a 'hit' cache instruction to a memory region,
 * ie. to any cache entries that cache a specified portion (region) of memory.
 * Takes care of the unaligned cases, ie. may apply to one
 * more cache line than \asize / linesize if \addr is not aligned.
 *
 * Parameters are:
 *	cainst		instruction/macro that takes an address register parameter
 *			and an offset parameter (currently always zero)
 *			and generates a cache instruction (eg. "dhi", "dhwb", "ihi", etc.)
 *	linesize_log2	log2(size of cache line in bytes)
 *	addr		register containing start address of region (clobbered)
 *	asize		register containing size of the region in bytes (clobbered)
 *	askew		unique register used as temporary
 *
 * !?!?! 2DO: optimization: iterate max(cache_size and \asize) / linesize
 */
	.macro	cache_hit_region	cainst, linesize_log2, addr, asize, askew

	// Make \asize the number of iterations:
	extui	\askew, \addr, 0, \linesize_log2	// get unalignment amount of \addr
	add	\asize, \asize, \askew			// ... and add it to \asize
	addi	\asize, \asize, (1 << \linesize_log2) - 1	// round up!
	srli	\asize, \asize, \linesize_log2

	// Iterate over region:
	floopnez	\asize, cacheh\@
	\cainst		\addr, 0
	addi		\addr, \addr, (1 << \linesize_log2)	// move to next line
	floopend	\asize, cacheh\@

	.endm
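/*
 * Usage sketch (hedged; a2-a4 are arbitrary scratch registers): writing
 * back a 96-byte region whose start address is in a2, with 16-byte lines
 * (linesize_log2 = 4):
 *
 *	movi	a3, 96
 *	cache_hit_region	dhwb, 4, a2, a3, a4
 *
 * If a2 is skewed 8 bytes into its line, the loop count comes out as
 * (96 + 8 + 15) >> 4 = 7, one line more than 96/16 = 6, so the partially
 * covered last line is handled too.
 */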
/*************************** INSTRUCTION CACHE ***************************/


/*
 * Reset/initialize the instruction cache by simply invalidating it
 * (need to unlock first also, if cache locking is implemented):
 *
 * Parameters:
 *	aa, ab		unique address registers (temporaries)
 */
	.macro	icache_reset	aa, ab
	icache_unlock_all	\aa, \ab
	icache_invalidate_all	\aa, \ab
	.endm
/*
 * Synchronize after an instruction cache operation,
 * to be sure everything is in sync with memory, as expected
 * following any previous instruction cache control operations.
 *
 * Parameters are:
 *	ar	an address register (temporary) (currently unused, but may be used in the future)
 */
	.macro	icache_sync	ar
#if XCHAL_ICACHE_SIZE > 0
	isync
#endif
	.endm
/*
 * Invalidate a single line of the instruction cache.
 *
 * Parameters are:
 *	ar	address register that contains (virtual) address to invalidate
 *		(may get clobbered in a future implementation, but not currently)
 *	offset	(optional) offset to add to \ar to compute effective address to invalidate
 *		(note: some number of lsbits are ignored)
 */
	.macro	icache_invalidate_line	ar, offset
#if XCHAL_ICACHE_SIZE > 0
	ihi	\ar, \offset		// invalidate icache line
	/*
	 * NOTE: in some version of the silicon [!!!SHOULD HAVE BEEN DOCUMENTED!!!]
	 * 'ihi' doesn't work, so it had been replaced with 'iii'
	 * (which would just invalidate more than it should,
	 * which should be okay other than the performance hit,
	 * because cache locking did not exist in that version,
	 * unless the user somehow relies on something being cached).
	 * [WHAT VERSION IS IT!!?!?
	 * IS THERE ANY WAY TO TEST FOR THAT HERE, TO OUTPUT 'III' ONLY IF NEEDED!?!?]
	 *
	 *	iii	\ar, \offset
	 */
	icache_sync	\ar
#endif
	.endm
/*
 * Invalidate instruction cache entries that cache a specified portion of memory.
 *
 * Parameters are:
 *	astart	start address (register gets clobbered)
 *	asize	size of the region in bytes (register gets clobbered)
 *	ac	unique register used as temporary
 */
	.macro	icache_invalidate_region	astart, asize, ac
#if XCHAL_ICACHE_SIZE > 0
	// Instruction cache region invalidation:
	cache_hit_region	ihi, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
	icache_sync	\ac
	// End of instruction cache region invalidation
#endif
	.endm
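/*
 * Usage sketch (hedged; a common pattern, not part of this file's API;
 * code_buf/CODE_LEN and the registers are hypothetical): after writing
 * code to memory (eg. a loader), push the new bytes out of the dcache and
 * drop any stale icache lines before executing them. Both region macros
 * clobber their address/size arguments, hence the copies:
 *
 *	movi	a5, code_buf
 *	movi	a6, CODE_LEN
 *	mov	a2, a5
 *	mov	a3, a6
 *	dcache_writeback_region	a2, a3, a4	// defined below
 *	icache_invalidate_region	a5, a6, a7
 */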
/*
 * Invalidate entire instruction cache.
 *
 * Parameters:
 *	aa, ab		unique address registers (temporaries)
 */
	.macro	icache_invalidate_all	aa, ab
#if XCHAL_ICACHE_SIZE > 0
	// Instruction cache invalidation:
	cache_index_all	iii, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, XCHAL_ICACHE_WAYS, \aa, \ab
	icache_sync	\aa
	// End of instruction cache invalidation
#endif
	.endm
/*
 * Lock (prefetch & lock) a single line of the instruction cache.
 *
 * Parameters are:
 *	ar	address register that contains (virtual) address to lock
 *		(may get clobbered in a future implementation, but not currently)
 *	offset	offset to add to \ar to compute effective address to lock
 *		(note: some number of lsbits are ignored)
 */
	.macro	icache_lock_line	ar, offset
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
	ipfl	\ar, \offset	/* prefetch and lock icache line */
	icache_sync	\ar
#endif
	.endm
/*
 * Lock (prefetch & lock) a specified portion of memory into the instruction cache.
 *
 * Parameters are:
 *	astart	start address (register gets clobbered)
 *	asize	size of the region in bytes (register gets clobbered)
 *	ac	unique register used as temporary
 */
	.macro	icache_lock_region	astart, asize, ac
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
	// Instruction cache region lock:
	cache_hit_region	ipfl, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
	icache_sync	\ac
	// End of instruction cache region lock
#endif
	.endm
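/*
 * Usage sketch (hedged; fast_handler and FAST_HANDLER_SIZE are hypothetical
 * names, a2-a4 arbitrary scratch registers): pinning a latency-critical
 * handler into the icache so it never takes a miss:
 *
 *	movi	a2, fast_handler
 *	movi	a3, FAST_HANDLER_SIZE
 *	icache_lock_region	a2, a3, a4
 */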
/*
 * Unlock a single line of the instruction cache.
 *
 * Parameters are:
 *	ar	address register that contains (virtual) address to unlock
 *		(may get clobbered in a future implementation, but not currently)
 *	offset	offset to add to \ar to compute effective address to unlock
 *		(note: some number of lsbits are ignored)
 */
	.macro	icache_unlock_line	ar, offset
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
	ihu	\ar, \offset	/* unlock icache line */
	icache_sync	\ar
#endif
	.endm
/*
 * Unlock a specified portion of memory from the instruction cache.
 *
 * Parameters are:
 *	astart	start address (register gets clobbered)
 *	asize	size of the region in bytes (register gets clobbered)
 *	ac	unique register used as temporary
 */
	.macro	icache_unlock_region	astart, asize, ac
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
	// Instruction cache region unlock:
	cache_hit_region	ihu, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac
	icache_sync	\ac
	// End of instruction cache region unlock
#endif
	.endm
/*
 * Unlock entire instruction cache.
 *
 * Parameters:
 *	aa, ab		unique address registers (temporaries)
 */
	.macro	icache_unlock_all	aa, ab
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
	// Instruction cache unlock:
	cache_index_all	iiu, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, 1, \aa, \ab
	icache_sync	\aa
	// End of instruction cache unlock
#endif
	.endm
/*************************** DATA CACHE ***************************/


/*
 * Reset/initialize the data cache by simply invalidating it
 * (need to unlock first also, if cache locking is implemented):
 *
 * Parameters:
 *	aa, ab		unique address registers (temporaries)
 */
	.macro	dcache_reset	aa, ab
	dcache_unlock_all	\aa, \ab
	dcache_invalidate_all	\aa, \ab
	.endm
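/*
 * Usage sketch (hedged; a2/a3 are arbitrary scratch registers): typical
 * reset-vector initialization brings both caches to a known-empty,
 * unlocked state before any cached accesses:
 *
 *	icache_reset	a2, a3
 *	dcache_reset	a2, a3
 */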
/*
 * Synchronize after a data cache operation,
 * to be sure everything is in sync with memory, as expected
 * following any previous data cache control operations.
 *
 * Parameters are:
 *	ar	an address register (temporary) (currently unused, but may be used in the future)
 */
	.macro	dcache_sync	ar
#if XCHAL_DCACHE_SIZE > 0
	// The commented-out sequence below errs on the conservative side
	// (too much so); a DSYNC should be sufficient:
	//memw		// synchronize data cache changes relative to subsequent memory accesses
	//isync		// be conservative and ISYNC as well (just to be sure)

	dsync
#endif
	.endm
/*
 * Synchronize after a data store operation,
 * to be sure the stored data is completely off the processor
 * (and, assuming there is no buffering outside the processor,
 * that the data is in memory). This may be required to
 * ensure that the processor's write buffers are emptied.
 * A MEMW followed by a read guarantees this, by definition.
 * We also try to make sure the read itself completes.
 *
 * Parameters are:
 *	ar	an address register (temporary)
 */
	.macro	write_sync	ar
	memw			// ensure previous memory accesses are complete prior to subsequent memory accesses
	l32i	\ar, sp, 0	// completing this read ensures any previous write has completed, because of MEMW
	//slot
	add	\ar, \ar, \ar	// use the result of the read to help ensure the read completes (in future architectures)
	.endm
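/*
 * Usage sketch (hedged; the device address is hypothetical, a2 an arbitrary
 * scratch register): flushing a store to a memory-mapped device register
 * out of the write buffers before relying on the device seeing it. Note
 * that the macro loads from (sp+0), so sp must point to readable memory:
 *
 *	movi	a3, 0xD0000000		// hypothetical device register
 *	movi	a4, 1
 *	s32i	a4, a3, 0		// issue command to device
 *	write_sync	a2
 */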
/*
 * Invalidate a single line of the data cache.
 *
 * Parameters are:
 *	ar	address register that contains (virtual) address to invalidate
 *		(may get clobbered in a future implementation, but not currently)
 *	offset	(optional) offset to add to \ar to compute effective address to invalidate
 *		(note: some number of lsbits are ignored)
 */
	.macro	dcache_invalidate_line	ar, offset
#if XCHAL_DCACHE_SIZE > 0
	dhi	\ar, \offset
	dcache_sync	\ar
#endif
	.endm
/*
 * Invalidate data cache entries that cache a specified portion of memory.
 *
 * Parameters are:
 *	astart	start address (register gets clobbered)
 *	asize	size of the region in bytes (register gets clobbered)
 *	ac	unique register used as temporary
 */
	.macro	dcache_invalidate_region	astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0
	// Data cache region invalidation:
	cache_hit_region	dhi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
	dcache_sync	\ac
	// End of data cache region invalidation
#endif
	.endm
#if 0
/*
 * This is a work-around for a bug in SiChip1 (???).
 * There should be a proper mechanism for not outputting
 * these instructions when not needed.
 * To enable the work-around, uncomment this and replace 'dii'
 * with 'dii_s1' everywhere, eg. in the dcache_invalidate_all
 * macro below.
 */
	.macro	dii_s1	ar, offset
	dii	\ar, \offset
	or	\ar, \ar, \ar
	or	\ar, \ar, \ar
	or	\ar, \ar, \ar
	or	\ar, \ar, \ar
	.endm
#endif
/*
 * Invalidate entire data cache.
 *
 * Parameters:
 *	aa, ab		unique address registers (temporaries)
 */
	.macro	dcache_invalidate_all	aa, ab
#if XCHAL_DCACHE_SIZE > 0
	// Data cache invalidation:
	cache_index_all	dii, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, XCHAL_DCACHE_WAYS, \aa, \ab
	dcache_sync	\aa
	// End of data cache invalidation
#endif
	.endm
/*
 * Write back a single line of the data cache.
 *
 * Parameters are:
 *	ar	address register that contains (virtual) address to write back
 *		(may get clobbered in a future implementation, but not currently)
 *	offset	offset to add to \ar to compute effective address to write back
 *		(note: some number of lsbits are ignored)
 */
	.macro	dcache_writeback_line	ar, offset
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	\ar, \offset
	dcache_sync	\ar
#endif
	.endm
/*
 * Write back dirty data cache entries that cache a specified portion of memory.
 *
 * Parameters are:
 *	astart	start address (register gets clobbered)
 *	asize	size of the region in bytes (register gets clobbered)
 *	ac	unique register used as temporary
 */
	.macro	dcache_writeback_region	astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
	// Data cache region writeback:
	cache_hit_region	dhwb, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
	dcache_sync	\ac
	// End of data cache region writeback
#endif
	.endm
/*
 * Write back entire data cache.
 *
 * Parameters:
 *	aa, ab		unique address registers (temporaries)
 */
	.macro	dcache_writeback_all	aa, ab
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
	// Data cache writeback:
	cache_index_all	diwb, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab
	dcache_sync	\aa
	// End of data cache writeback
#endif
	.endm
/*
 * Write back and invalidate a single line of the data cache.
 *
 * Parameters are:
 *	ar	address register that contains (virtual) address to write back and invalidate
 *		(may get clobbered in a future implementation, but not currently)
 *	offset	offset to add to \ar to compute effective address to write back and invalidate
 *		(note: some number of lsbits are ignored)
 */
	.macro	dcache_writeback_inv_line	ar, offset
#if XCHAL_DCACHE_SIZE > 0
	dhwbi	\ar, \offset	/* writeback and invalidate dcache line */
	dcache_sync	\ar
#endif
	.endm
/*
 * Write back and invalidate data cache entries that cache a specified portion of memory.
 *
 * Parameters are:
 *	astart	start address (register gets clobbered)
 *	asize	size of the region in bytes (register gets clobbered)
 *	ac	unique register used as temporary
 */
	.macro	dcache_writeback_inv_region	astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0
	// Data cache region writeback and invalidate:
	cache_hit_region	dhwbi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
	dcache_sync	\ac
	// End of data cache region writeback and invalidate
#endif
	.endm
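/*
 * Usage sketch (hedged; tx_buf/rx_buf, the sizes, and the registers are
 * hypothetical): on a non-coherent system the region macros are the usual
 * tool around DMA. Before an outbound transfer, write dirty lines back so
 * the device reads current data; before reading an inbound buffer, drop
 * stale cached copies:
 *
 *	movi	a2, tx_buf
 *	movi	a3, TX_LEN
 *	dcache_writeback_region	a2, a3, a4	// device will read tx_buf
 *
 *	movi	a2, rx_buf
 *	movi	a3, RX_LEN
 *	dcache_invalidate_region	a2, a3, a4	// device wrote rx_buf
 */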
/*
 * Write back and invalidate entire data cache.
 *
 * Parameters:
 *	aa, ab		unique address registers (temporaries)
 */
	.macro	dcache_writeback_inv_all	aa, ab
#if XCHAL_DCACHE_SIZE > 0
	// Data cache writeback and invalidate:
#if XCHAL_DCACHE_IS_WRITEBACK
	cache_index_all	diwbi, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab
	dcache_sync	\aa
#else /*writeback*/
	// Data cache does not support writeback, so just invalidate:
	dcache_invalidate_all	\aa, \ab
#endif /*writeback*/
	// End of data cache writeback and invalidate
#endif
	.endm
/*
 * Lock (prefetch & lock) a single line of the data cache.
 *
 * Parameters are:
 *	ar	address register that contains (virtual) address to lock
 *		(may get clobbered in a future implementation, but not currently)
 *	offset	offset to add to \ar to compute effective address to lock
 *		(note: some number of lsbits are ignored)
 */
	.macro	dcache_lock_line	ar, offset
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
	dpfl	\ar, \offset	/* prefetch and lock dcache line */
	dcache_sync	\ar
#endif
	.endm
/*
 * Lock (prefetch & lock) a specified portion of memory into the data cache.
 *
 * Parameters are:
 *	astart	start address (register gets clobbered)
 *	asize	size of the region in bytes (register gets clobbered)
 *	ac	unique register used as temporary
 */
	.macro	dcache_lock_region	astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
	// Data cache region lock:
	cache_hit_region	dpfl, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
	dcache_sync	\ac
	// End of data cache region lock
#endif
	.endm
/*
 * Unlock a single line of the data cache.
 *
 * Parameters are:
 *	ar	address register that contains (virtual) address to unlock
 *		(may get clobbered in a future implementation, but not currently)
 *	offset	offset to add to \ar to compute effective address to unlock
 *		(note: some number of lsbits are ignored)
 */
	.macro	dcache_unlock_line	ar, offset
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
	dhu	\ar, \offset	/* unlock dcache line */
	dcache_sync	\ar
#endif
	.endm
/*
 * Unlock a specified portion of memory from the data cache.
 *
 * Parameters are:
 *	astart	start address (register gets clobbered)
 *	asize	size of the region in bytes (register gets clobbered)
 *	ac	unique register used as temporary
 */
	.macro	dcache_unlock_region	astart, asize, ac
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
	// Data cache region unlock:
	cache_hit_region	dhu, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac
	dcache_sync	\ac
	// End of data cache region unlock
#endif
	.endm
/*
 * Unlock entire data cache.
 *
 * Parameters:
 *	aa, ab		unique address registers (temporaries)
 */
	.macro	dcache_unlock_all	aa, ab
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
	// Data cache unlock:
	cache_index_all	diu, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab
	dcache_sync	\aa
	// End of data cache unlock
#endif
	.endm

#endif /*XTENSA_CACHEASM_H*/