sh: Add kmap_coherent()/kunmap_coherent() interface for SH-4.
/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
/*
 * The maximum number of pages we support up to when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);
/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1;
}
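
/*
 * Worked example (illustrative only; the figures are assumed, not taken
 * from any particular probed part): a 16KB way made of 512 32-byte
 * entries (entry_shift = 5) with 4KB pages gives
 *
 *	alias_mask = ((512 - 1) << 5) & ~(4096 - 1) = 0x3000
 *	n_aliases  = (0x3000 >> 12) + 1 = 4
 *
 * i.e. four page colours, so one physical page may be cached at up to
 * four different offsets within a way.
 */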
static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		current_cpu_data.icache.ways,
		current_cpu_data.icache.sets,
		current_cpu_data.icache.way_incr);
	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		current_cpu_data.icache.entry_mask,
		current_cpu_data.icache.alias_mask,
		current_cpu_data.icache.n_aliases);
	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		current_cpu_data.dcache.ways,
		current_cpu_data.dcache.sets,
		current_cpu_data.dcache.way_incr);
	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		current_cpu_data.dcache.entry_mask,
		current_cpu_data.dcache.alias_mask,
		current_cpu_data.dcache.n_aliases);

	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}
/*
 * SH-4 has a virtually indexed and physically tagged cache.
 */
void __init p3_cache_init(void)
{
	compute_alias(&current_cpu_data.icache);
	compute_alias(&current_cpu_data.dcache);

	switch (current_cpu_data.dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

	emit_cache_params();

	if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
		panic("%s failed.", __FUNCTION__);
}
/*
 * Write back the dirty D-cache lines, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbwb	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
/*
 * Write back the dirty D-cache lines and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbp	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
/*
 * Invalidate the D-cache lines; no write back, please.
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbi	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and from the a.out loader.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	flush_cache_all();
}
/*
 * Write back the D-cache and purge the I-cache for the signal trampoline,
 * which happens to be the same behaviour as flush_icache_range().
 * So, we simply flush out a line.
 */
void flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;
	int i;

	v = addr & ~(L1_CACHE_BYTES-1);
	asm volatile("ocbwb	%0"
		     : /* no output */
		     : "m" (__m(v)));

	index = CACHE_IC_ADDRESS_ARRAY |
			(v & current_cpu_data.icache.entry_mask);

	local_irq_save(flags);
	jump_to_P2();

	for (i = 0; i < current_cpu_data.icache.ways;
	     i++, index += current_cpu_data.icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */

	back_to_P1();
	wmb();
	local_irq_restore(flags);
}
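
/*
 * For reference: the loop above writes 0 to the I-cache address array
 * at CACHE_IC_ADDRESS_ARRAY | (v & entry_mask), stepping by way_incr so
 * the same entry is hit in every way.  Writing a zero data word clears
 * the line's valid bit, so every line that could hold the trampoline is
 * invalidated without an associative compare.
 */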
static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((current_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop through all the D-cache colours */
		n = current_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}
/* TODO: Selective icache invalidation through IC address array.. */
static inline void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_P2();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_P1() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_P1();
	local_irq_restore(flags);
}
void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, current_cpu_data.dcache.way_size);
	wmb();
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}
static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = current_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = current_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;

	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}
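
/*
 * Illustration of the colour bitmask above (numbers assumed): with
 * n_aliases = 4, a PTE mapping virtual ...3000 to physical ...1000
 * differs from its physical address in the alias bits, so both
 * colours get marked:
 *
 *	d |= 1 << 3;	d |= 1 << 1;
 *
 * and the final loop flushes only those two page-sized slices of each
 * way instead of the whole D-cache.
 */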
/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.  So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that.  We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * If the cache is only 4k-per-way, there are never any 'aliases'.
	 * Since the cache is physically tagged, the data can just be left
	 * in there.
	 */
	if (current_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * so iterate through the VMA list and take care of any
		 * aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}
/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	alias_mask = current_cpu_data.dcache.alias_mask;

	/* We only need to flush the D-cache when there is an alias */
	if ((address ^ phys) & alias_mask) {
		/* Flush the 4K colour matching the virtual address */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* And the 4K colour matching the physical address */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = current_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}
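
/*
 * Example of the alias check (values assumed): with alias_mask = 0x3000,
 * address = 0x10003000 and phys = 0x0c001000 give
 * (address ^ phys) & alias_mask = 0x2000, so the page is flushed twice:
 * once at the virtual colour (offset 0x3000) and once at the physical
 * colour (offset 0x1000), covering both slots where a stale line could
 * live.
 */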
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * If the cache is only 4k-per-way, there are never any 'aliases'.
	 * Since the cache is physically tagged, the data can just be left
	 * in there.
	 */
	if (current_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see
		 * if this matters.
		 */
		flush_icache_all();
	}
}
/*
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}
/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &current_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}
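
/*
 * Note on the stores above (an explanatory gloss, not part of the
 * original source; semantics as described in the SH-4 cache
 * documentation): a write to the OC address array with the 'A'
 * (associative) bit set in the address compares the stored tag at that
 * entry against the tag bits of the data written.  On a match, the
 * line's U and V bits are updated from the low bits of the data, which
 * is how the lower 2 bits of 'phys' select write-back versus purge, as
 * the comment above describes.
 */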
/*
 * Break the 1, 2 and 4 way variants of this out into separate functions
 * to avoid nearly all the overhead of having the conditional stuff in
 * the function bodies (+ the 1 and 2 way cases avoid saving any
 * registers too).
 */
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of
	 * all existing SH-4 D-caches.  Whilst I don't see a need to have
	 * this aligned to any better than the cache line size (which it
	 * will be anyway by construction), let's align it to at least the
	 * way_size of any existing or conceivable SH-4 D-cache.  -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}
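
/*
 * Why the movca.l/ocbi pair flushes anything (explanatory note, not
 * from the original source): movca.l r0, @Rn allocates the cache line
 * for Rn's address without fetching it from memory, which evicts (and
 * writes back, if dirty) whatever the line previously held; the ocbi
 * that follows immediately invalidates the junk line just allocated.
 * The net effect is that each slot of the way is flushed with no read
 * traffic, and SR.BL (bit 28) is held set across each group so an
 * interrupt cannot run between the allocate and the invalidate.
 */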
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}