/* Modified by Broadcom Corp. Portions Copyright (c) Broadcom Corp, 2012. */
 * linux/arch/arm/boot/compressed/head.S
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
#include <linux/linkage.h>
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
#if defined(CONFIG_DEBUG_ICEDCC)
		.macro	loadsp, rb, tmp
		mcr	p14, 0, \ch, c0, c5, 0
#elif defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp
wait:		mrc	p14, 0, pc, c0, c1, 0
		mcr	p14, 0, \ch, c0, c5, 0
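/*
 * Note on the v7 DCC path above: reading the debug status register with
 * Rd = pc transfers its top bits into the CPSR flags, so the conditional
 * branch back to "wait:" (not shown in this excerpt) spins while the
 * transmit register is still busy before the character in \ch is written
 * to the debug comms channel.
 */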
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp
		mcr	p14, 0, \ch, c8, c0, 0
		.macro	loadsp, rb, tmp
		mcr	p14, 0, \ch, c1, c0, 0
#include <mach/debug-macro.S>
#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
		add	\rb, \rb, #0x00010000	@ Ser1
#elif defined(CONFIG_ARCH_S3C2410)
		.macro	loadsp, rb, tmp
		add	\rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
		.macro	loadsp, rb, tmp
		.macro	debug_reloc_start
		kphex	r6, 8		/* processor id */
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
		kphex	r5, 8		/* decompressed kernel start */
		kphex	r9, 8		/* decompressed kernel end */
		kphex	r4, 8		/* kernel execution address */
		.macro	debug_reloc_end
		kphex	r5, 8		/* end of kernel */
		bl	memdump		/* dump 256 bytes at start of kernel */
		.section ".start", #alloc, #execinstr
 * sort out different calling conventions
		.type	start,#function
		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
1:		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer
#ifndef __ARM_ARCH_2__
 * Booting from Angel - need to enter SVC mode and disable
 * FIQs/IRQs (numeric definitions from angel arm.h source).
 * We only do this if we were in user mode on entry.
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
		mrs	r2, cpsr		@ turn off interrupts to
		orr	r2, r2, #0xc0		@ prevent angel from running
		teqp	pc, #0x0c000003		@ turn off interrupts
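/*
 * On 26-bit CPUs (the __ARM_ARCH_2__ case) there is no CPSR: the mode and
 * the I/F interrupt masks live in the top bits of r15, and TEQP is the
 * traditional way of updating them. The constant 0x0c000003 corresponds to
 * both interrupt-disable bits plus SVC mode, i.e. the pre-ARMv3 equivalent
 * of the MRS/ORR sequence used above for newer cores.
 */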
 * Note that some cache flushing and other stuff may
 * be needed here - is there an Angel SWI call for this?
 * some architecture specific code can be inserted
 * by the linker here, but it should preserve r7, r8, and r9.
		ldmia	r0, {r1, r2, r3, r5, r6, r11, ip}
#ifdef CONFIG_AUTO_ZRELADDR
		@ determine final kernel image address
		and	r4, pc, #0xf8000000
		add	r4, r4, #TEXT_OFFSET
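/*
 * With CONFIG_AUTO_ZRELADDR the final kernel address is derived rather
 * than hard-coded: the current PC is rounded down to a 128MB boundary and
 * TEXT_OFFSET (platform-defined, commonly 0x8000) is added. For example,
 * a zImage executing at 0x80a01000 gives r4 = 0x80000000 + 0x8000 =
 * 0x80008000.
 */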
		subs	r0, r0, r1		@ calculate the delta offset
						@ if delta is zero, we are
		beq	not_relocated		@ running at the address we
 * We're running at a different address.  We need to fix
 * up various pointers:
 *   r5 - zImage base address (_start)
 *   r6 - size of decompressed image
#ifndef CONFIG_ZBOOT_ROM
 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
 * we need to fix up pointers into the BSS region.
 * Relocate all entries in the GOT table.
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
 * Relocate entries in the GOT table.  We only relocate
 * the entries that are outside the (relocated) BSS region.
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
 * The C runtime environment should now be set up
 * sufficiently.  Turn the cache on, set up some
 * pointers, and start decompressing.
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
 * Check to see if we will overwrite ourselves.
 *   r4 = final kernel address
 *   r5 = start of this image
 *   r6 = size of decompressed image
 *   r2 = end of malloc space (and therefore this image)
 *   r4 + image length <= r5 -> OK
		mov	r5, r2			@ decompress after malloc space
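/*
 * Illustration of the check above (example values only): with a final
 * kernel address of r4 = 0x80008000, a decompressed size of r6 = 4MB and
 * this zImage itself loaded at r5 = 0x80008000, r4 + r6 reaches well past
 * r5, so decompressing in place would trample the running code. In that
 * case we decompress just after the malloc space instead (r5 = r2) and
 * let the relocation code copied below move the kernel to r4 afterwards.
 */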
		add	r0, r0, #127 + 128	@ alignment + stack
		bic	r0, r0, #127		@ align the kernel length
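/*
 * The two instructions above compute r0 = (length + 255) & ~127: the
 * length is rounded up to the next 128-byte boundary and a further 128
 * bytes are reserved for the temporary stack. For example, a length of
 * 0x300123 becomes 0x300200 - always at least length + 128 and at most
 * length + 255.
 */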
 * r0     = decompressed kernel length
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r7     = architecture ID
 * r9-r12,r14 = corrupted
		add	r1, r5, r0		@ end of decompressed kernel
1:		ldmia	r2!, {r9 - r12, r14}	@ copy relocation code
		stmia	r1!, {r9 - r12, r14}
		ldmia	r2!, {r9 - r12, r14}
		stmia	r1!, {r9 - r12, r14}
		add	sp, sp, #128		@ relocate the stack
 ARM(		add	pc, r5, r0	)	@ call relocation code
 THUMB(		add	r12, r5, r0	)
 THUMB(		mov	pc, r12		)	@ call relocation code
 * We're not in danger of overwriting ourselves.  Do this the simple way.
 * r4     = kernel execution address
 * r7     = architecture ID
wont_overwrite:	mov	r0, r4
		.word	__bss_start		@ r2
		.word	_image_size		@ r6
		.word	_got_start		@ r11
		.word	user_stack_end		@ sp
LC1:		.word	reloc_end - reloc_start
#ifdef CONFIG_ARCH_RPC
params:		ldr	r0, =0x10000100		@ params_phys for RPC
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * r4 = kernel execution address
 * r7 = architecture number
 * r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
cache_on:	mov	r3, #8			@ cache_on function
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on
		mcr	p15, 0, r0, c5, c0, 0	@ access permission
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
 * ?? ARMv3 MMU does not allow reading the control register,
 * does this really work on ARMv3 MPU?
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg
		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
1:		cmp	r1, r9			@ if virt > start of RAM
		orrhs	r1, r1, #0x0c		@ set cacheable, bufferable
		cmp	r1, r10			@ if virt > end of RAM
		bichs	r1, r1, #0x0c		@ clear cacheable, bufferable
		str	r1, [r0], #4		@ 1:1 mapping
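/*
 * Each word stored above is a first-level section descriptor giving a
 * 1:1 virtual-to-physical mapping of 1MB. The loop (partly elided in
 * this excerpt) walks all 4096 entries covering 4GB; only sections that
 * fall inside the assumed 256MB of RAM starting at r9 keep the C and B
 * bits (0x0c), so the decompressor runs cached and bufferable while
 * everything else stays uncached.
 */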
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
__armv4_mmu_cache_on:
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
		bl	__common_mmu_cache_on
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
__armv7_mmu_cache_on:
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
		orrne	r0, r0, #1		@ MMU enabled
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		bl	__common_mmu_cache_on
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
		orr	r0, r0, #0x000d		@ Write buffer, mmu
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
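/*
 * The mcr/mrc pair above makes sure the control register write has taken
 * effect, and "sub pc, lr, r0, lsr #32" is simply a return (a logical
 * shift right by 32 yields zero) that carries a data dependency on the
 * value read back, so the core cannot run ahead of the MMU/cache enable.
 * The .align 5 keeps the sequence within one cache line, which matters
 * because the I-cache may come on part-way through it.
 */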
 * All code following this line is relocatable.  It is relocated by
 * the above code to the end of the decompressed kernel image and
 * executed there.  During this time, we have no stacks.
 * r0     = decompressed kernel length
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r7     = architecture ID
 * r9-r12,r14 = corrupted
reloc_start:	add	r9, r5, r0
		sub	r9, r9, #128		@ do not copy the stack
		ldmia	r5!, {r0, r2, r3, r10 - r12, r14}	@ relocate kernel
		stmia	r1!, {r0, r2, r3, r10 - r12, r14}
		add	sp, sp, #128		@ relocate the stack
call_kernel:	bl	cache_clean_flush
		mov	r0, #0			@ must be zero
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer
		mov	pc, r4			@ call kernel
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
		ldr	r9, =CONFIG_PROCESSOR_ID
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
 ARM(		addeq	pc, r12, r3	)	@ call cache function
 THUMB(		addeq	r12, r3		)
 THUMB(		moveq	pc, r12		)	@ call cache function
 * Table for cache operations.  This is basically:
 *  - 'cache on' method instruction
 *  - 'cache off' method instruction
 *  - 'cache flush' method instruction
 * We match an entry using: ((real_id ^ match) & mask) == 0
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
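/*
 * Matching example: each proc_types entry is an ID value followed by a
 * mask (several of the mask words are elided in this excerpt), and an
 * entry is selected when ((real_id ^ match) & mask) == 0. The "new CPU
 * Id" entry below uses value 0x000f0000 with mask 0x000f0000, so any
 * processor reporting 0xf in the architecture field of its main ID
 * register falls through to the ARMv7 cache handlers.
 */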
		.type	proc_types,#object
		.word	0x41560600		@ ARM6/610
		W(b)	__arm6_mmu_cache_off	@ works, but slow
		W(b)	__arm6_mmu_cache_off
		@ b	__arm6_mmu_cache_on		@ untested
		@ b	__arm6_mmu_cache_off
		@ b	__armv3_mmu_cache_flush
		.word	0x00000000		@ old ARM ID
		.word	0x41007000		@ ARM7/710
		W(b)	__arm7_mmu_cache_off
		W(b)	__arm7_mmu_cache_off
		.word	0x41807200		@ ARM720T (writethrough)
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		.word	0x41007400		@ ARM74x
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush
		.word	0x41009400		@ ARM94x
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush
		.word	0x00007000		@ ARM7 IDs
		@ Everything from here on will be the new ID system.
		.word	0x4401a100		@ sa110 / sa1100
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush
		.word	0x6901b110		@ sa1110
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush
		.word	0x56158000		@ PXA168
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush
		.word	0x56050000		@ Feroceon
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush
#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
		.word	0x66015261		@ FA526
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush
		@ These match on the architecture ID
		.word	0x00020000		@ ARMv4T
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush
		.word	0x00050000		@ ARMv5TE
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush
		.word	0x00060000		@ ARMv5TEJ
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush
		.word	0x0007b000		@ ARMv6
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush
		.word	0x560f5810		@ Marvell PJ4 ARMv6
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush
		.word	0x000f0000		@ new CPU Id
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush
		.word	0			@ unrecognised type
		.size	proc_types, . - proc_types
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 * r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
cache_off:	mov	r3, #12			@ cache_off function
__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
__armv4_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		bl	__armv7_mmu_cache_flush
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
__arm6_mmu_cache_off:
		mov	r0, #0x00000030		@ ARM6 control reg.
		b	__armv3_mmu_cache_off
__arm7_mmu_cache_off:
		mov	r0, #0x00000070		@ ARM7 control reg.
		b	__armv3_mmu_cache_off
__armv3_mmu_cache_off:
		mcr	p15, 0, r0, c1, c0, 0	@ turn MMU and cache off
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
 * Clean and flush the cache to maintain consistency.
 * r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
__armv4_mpu_cache_flush:
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		bcs	1b			@ segments 7 to 0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
__armv6_mmu_cache_flush:
		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
__armv7_mmu_cache_flush:
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask of the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr & csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ands	r4, r4, r1, lsr #3	@ extract maximum way number (ways - 1)
		clz	r5, r4			@ find bit position of way size increment
		ands	r7, r7, r1, lsr #13	@ extract maximum set number (sets - 1)
		mov	r9, r4			@ create working copy of max way size
 ARM(		orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	)	@ factor index number into r11
 THUMB(		lsl	r6, r9, r5	)
 THUMB(		orr	r11, r10, r6	)	@ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2	)
 THUMB(		orr	r11, r11, r6	)	@ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		subs	r7, r7, #1		@ decrement the index
		add	r10, r10, #2		@ increment cache number
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
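/*
 * The set/way loop above builds each clean/invalidate operand as
 * (way << clz(ways - 1)) | (set << line_shift) | (level << 1). As an
 * illustrative example, for a 32KB 4-way level-1 D-cache with 64-byte
 * lines: ways - 1 = 3 so the way shift is 30, the line-length offset is
 * 6, and cleaning way 2 / set 5 at level 0 writes 0x80000140
 * (2 << 30 | 5 << 6 | 0 << 1) to c7, c14, 2.
 */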
__armv5tej_mmu_cache_flush:
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
__armv4_mmu_cache_flush:
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r11, r11, lsl r3	@ cache line size in bytes
		bic	r1, r1, #63		@ align to longest cache line
 ARM(		ldr	r3, [r1], r11	)	@ s/w flush D cache
 THUMB(		ldr	r3, [r1]	)	@ s/w flush D cache
 THUMB(		add	r1, r1, r11	)
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
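/*
 * __armv4_mmu_cache_flush above cleans the D-cache by reading: starting
 * from a cache-line-aligned address near the current PC (the load of pc
 * into r1 is elided in this excerpt), it loads twice the reported D-cache
 * size in line-size steps, displacing every dirty line so it is written
 * back before the I- and D-caches are invalidated. The size and line
 * length come from the cache type register when present, otherwise the
 * 32K / 32-byte defaults above are used.
 */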
__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
		.type	phexbuf,#object
		.size	phexbuf, . - phexbuf
@ phex corrupts {r0, r1, r2, r3}
phex:		adr	r3, phexbuf
@ puts corrupts {r0, r1, r2, r3}
1:		ldrb	r2, [r0], #1
@ putc corrupts {r0, r1, r2, r3}
@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0
2:		mov	r0, r11, lsl #2
		ldr	r0, [r12, r11, lsl #2]
		.section ".stack", "w"
user_stack:	.space	4096