/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */

static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;

static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1;
}
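
/*
 * Worked example, assuming a 16k direct-mapped D-cache with 32-byte
 * lines (512 sets, entry_shift == 5) and 4k pages:
 *
 *	alias_mask = ((512 - 1) << 5) & ~(4096 - 1) = 0x3000
 *	n_aliases  = (0x3000 >> 12) + 1             = 4
 *
 * i.e. four different virtual page colours can index the same
 * physical line.
 */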

static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		current_cpu_data.icache.ways,
		current_cpu_data.icache.sets,
		current_cpu_data.icache.way_incr);
	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		current_cpu_data.icache.entry_mask,
		current_cpu_data.icache.alias_mask,
		current_cpu_data.icache.n_aliases);
	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		current_cpu_data.dcache.ways,
		current_cpu_data.dcache.sets,
		current_cpu_data.dcache.way_incr);
	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		current_cpu_data.dcache.entry_mask,
		current_cpu_data.dcache.alias_mask,
		current_cpu_data.dcache.n_aliases);

	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}

/*
 * SH-4 has a virtually indexed and physically tagged cache.
 */

/* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
#define MAX_P3_MUTEXES 16

struct mutex p3map_mutex[MAX_P3_MUTEXES];

void __init p3_cache_init(void)
{
	int i;

	compute_alias(&current_cpu_data.icache);
	compute_alias(&current_cpu_data.dcache);

	switch (current_cpu_data.dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

	emit_cache_params();

	if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
		panic("%s failed.", __FUNCTION__);

	for (i = 0; i < current_cpu_data.dcache.n_aliases; i++)
		mutex_init(&p3map_mutex[i]);
}

/*
 * Write back the dirty D-caches, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbwb	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
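
/*
 * This and the two region primitives below differ only in the operand
 * cache instruction issued per line:
 *
 *	ocbwb - write a dirty line back to memory, leaving it valid
 *	ocbp  - write a dirty line back, then invalidate it (purge)
 *	ocbi  - invalidate a line without writing it back
 *
 * Typical (sketched, hypothetical buf/len) usage: call
 * __flush_wback_region(buf, len) before a device DMA-reads the buffer,
 * and __flush_invalidate_region(buf, len) before the CPU reads data a
 * device has DMA-written.
 */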

/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbp	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * No write back please
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbi	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and the a.out loader.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	flush_cache_all();
}

/*
 * Write back the D-cache and purge the I-cache for the signal trampoline.
 * .. which happens to be the same behavior as flush_icache_range().
 * So, we simply flush out a line.
 */
void flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;
	int i;

	v = addr & ~(L1_CACHE_BYTES-1);
	asm volatile("ocbwb	%0"
		     : /* no output */
		     : "m" (__m(v)));

	index = CACHE_IC_ADDRESS_ARRAY |
			(v & current_cpu_data.icache.entry_mask);

	local_irq_save(flags);
	jump_to_P2();

	for (i = 0; i < current_cpu_data.icache.ways;
	     i++, index += current_cpu_data.icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */

	back_to_P1();
	wmb();
	local_irq_restore(flags);
}
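
/*
 * The loop above cannot tell which way currently holds the trampoline
 * line, so it writes a zero (clearing the valid bit) to the matching
 * entry in every I-cache way through the memory-mapped IC address
 * array, which must be done with the PC in P2.
 */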

static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((current_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}
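
/*
 * ORing SH_CACHE_ASSOC into the array address requests an associative
 * write: the hardware compares the tag supplied in 'phys' and only
 * operates on a line that actually matches, so unrelated data in the
 * same set is left untouched.
 */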

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 *
 * This uses a lazy write-back on UP, which is explicitly
 * disabled on SMP.
 */
void flush_dcache_page(struct page *page)
{
#ifndef CONFIG_SMP
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop all the D-cache */
		n = current_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}
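
/*
 * The PG_dcache_dirty path above is the lazy case: for a page-cache
 * page with no userspace mappings the flush is merely recorded, and
 * the actual writeback is deferred until the page is later mapped
 * into a process and an alias can actually exist.
 */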

/* TODO: Selective icache invalidation through IC address array.. */
static inline void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_P2();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_P1() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_P1();
	local_irq_restore(flags);
}

void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, current_cpu_data.dcache.way_size);
	wmb();
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}

static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = current_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = current_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;

	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}
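
/*
 * In the walk above, 'd' accumulates a bitmap of D-cache colours: a
 * bit is set for each colour (of either the virtual or the physical
 * address) seen on a page whose colours mismatch.  Once every colour
 * is marked (all_aliases_mask) the PTE walk stops early, since the
 * final loop will flush one page-sized slice of the cache per marked
 * colour regardless.
 */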

/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.  So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that.  We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (current_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	alias_mask = current_cpu_data.dcache.alias_mask;

	/* We only need to flush the D-cache when we have aliases */
	if ((address^phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = current_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (current_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}

/**
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}

/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &current_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}
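
/*
 * The plain stores through 'a' above land in the memory-mapped cache
 * address array (OC or IC) rather than RAM: each write supplies a tag
 * ('p') for the line at that array offset, with the low bits of the
 * written data selecting write-back and/or invalidation, as noted in
 * the comment above the function.
 */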

/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the
 * function bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache.  -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}
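
/*
 * The movca.l/ocbi pairs above implement flush-by-displacement:
 * movca.l allocates a cache line for the target address without
 * fetching its old contents from memory, evicting (and writing back,
 * if dirty) whatever line previously occupied that set, and the
 * following ocbi discards the just-allocated line.  SR.BL (bit 28) is
 * set around each group so no interrupt can touch the line in between.
 */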

static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}