 *  linux/arch/arm/kernel/entry-armv.S
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *  Low-level vector interface routines
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
#include <asm/memory.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include "entry-header.S"
 * Interrupt handling.  Preserves r7, r8, r9
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r5, =handle_arch_irq
	get_irqnr_preamble r5, lr
1:	get_irqnr_and_base r0, r6, r5, lr
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	 * this macro assumes that irqstat (r6) and base (r5) are
	 * preserved from get_irqnr_and_base above
	ALT_SMP(test_for_ipi r0, r6, r5, lr)
#ifdef CONFIG_LOCAL_TIMERS
	test_for_ltirq r0, r6, r5, lr
	.section	.kprobes.text,"ax",%progbits
 * Invalid mode handlers
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	inv_entry BAD_PREFETCH
ENDPROC(__pabt_invalid)
ENDPROC(__dabt_invalid)
ENDPROC(__irq_invalid)
	inv_entry BAD_UNDEFINSTR
	@ XXX fall through to common_invalid
@ common_invalid - generic code for failed exception (re-entrant version of handlers must be used here)
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"
ENDPROC(__und_invalid)
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#define SPFIX(code...)
	.macro	svc_entry, stack_hole=0
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
 SPFIX(	subeq	sp, sp, #4	)
	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""      ""       ""
	add	r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r0, r0, #4	)
	str	r1, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack
	@ We are now ready to fill in the remaining blanks on the stack:
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@ get ready to re-enable interrupts if appropriate
	biceq	r9, r9, #PSR_I_BIT
	@ Call the processor-specific abort handler:
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
	bl	CPU_DABORT_HANDLER
	@ set desired IRQ state, then call main handler
	@ IRQs off again before pulling preserved data off the stack
	@ restore SPSR and restart the instruction
	svc_exit r2				@ return from exception
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#ifdef CONFIG_PREEMPT
	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	ldr	r4, [sp, #S_PSR]		@ irqs are already disabled
#ifdef CONFIG_TRACE_IRQFLAGS
	bleq	trace_hardirqs_on
	svc_exit r4				@ return from exception
#ifdef CONFIG_PREEMPT
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get the new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to the saved context.
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
#ifndef CONFIG_THUMB2_KERNEL
	ldrh	r0, [r2, #-2]			@ Thumb instruction at LR - 2
	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
	ldrhhs	r9, [r2]			@ bottom 16 bits
	orrhs	r0, r9, r0, lsl #16
	mov	r0, sp				@ struct pt_regs *regs
	@ IRQs off again before pulling preserved data off the stack
1:	disable_irq_notrace
	@ restore SPSR and restart the instruction
	ldr	r2, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r2				@ return from exception
	@ re-enable interrupts if appropriate
	biceq	r9, r9, #PSR_I_BIT
	mov	r0, r2			@ pass address of aborted instruction.
	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
	bl	CPU_PABORT_HANDLER
	msr	cpsr_c, r9			@ Maybe enable interrupts
	bl	do_PrefetchAbort		@ call abort handler
	@ IRQs off again before pulling preserved data off the stack
	@ restore SPSR and restart the instruction
	svc_exit r2				@ return from exception
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE should be too
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r4, #-1			@  ""  ""     ""        ""
	str	r1, [sp]		@ save the "real" r0 copied
					@ from the exception stack
	@ We are now ready to fill in the remaining blanks on the stack:
	@  r2 - lr_<exception>, already fixed up for correct return/restart
	@  r3 - spsr_<exception>
	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@ Also, separately save sp_usr and lr_usr
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
	@ Enable the alignment trap while in kernel mode
	@ Clear FP to mark the first stack frame
	.macro	kuser_cmpxchg_check
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#warning "NPTL on non MMU needs fixing"
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	blhs	kuser_cmpxchg_fixup
	@ Call the processor-specific abort handler:
	@  r2 - aborted context pc
	@  r3 - aborted context cpsr
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
	bl	CPU_DABORT_HANDLER
	@ IRQs on, then call the main handler
	adr	lr, BSYM(ret_from_exception)
#ifdef CONFIG_PREEMPT
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	add	r7, r8, #1			@ increment it
	str	r7, [tsk, #TI_PREEMPT]
#ifdef CONFIG_PREEMPT
	ldr	r0, [tsk, #TI_PREEMPT]
	str	r8, [tsk, #TI_PREEMPT]
 ARM(	strne	r0, [r0, -r0]	)
 THUMB(	movne	r0, #0		)
 THUMB(	strne	r0, [r0]	)
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	adr	r9, BSYM(ret_from_exception)
	adr	lr, BSYM(__und_usr_unknown)
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	itet	eq				@ explicit IT needed for the 1f label
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#if __LINUX_ARM_ARCH__ >= 7
 ARM(	ldrht	r5, [r4], #2	)
 THUMB(	ldrht	r5, [r4]	)
 THUMB(	add	r4, r4, #2	)
	and	r0, r5, #0xf800			@ mask bits 111x x... .... ....
	cmp	r0, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_unknown
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	orr	r0, r0, r5, lsl #16
	@ fallthrough to call_fpe
 * The out of line fixup for the ldrt above.
	.pushsection .fixup, "ax"
	.pushsection __ex_table,"a"
#if __LINUX_ARM_ARCH__ >= 7
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the NEON handler code.
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
	@ Fall-through from Thumb-2 __und_usr
	adr	r6, .LCneon_thumb_opcodes
	adr	r6, .LCneon_arm_opcodes
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode
	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode
	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode
	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode
	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point
 * The FP module is called with these registers set:
 *  r9  = normal "successful" return address
 *  lr  = unrecognised FP instruction return address
	adr	lr, BSYM(ret_from_exception)
ENDPROC(__und_usr_unknown)
	mov	r0, r2			@ pass address of aborted instruction.
	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
	bl	CPU_PABORT_HANDLER
	enable_irq				@ Enable interrupts
	bl	do_PrefetchAbort		@ call abort handler
 * This is the return code to user mode for abort handlers
ENTRY(ret_from_exception)
ENDPROC(ret_from_exception)
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
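 *
 * (Viewed from C, the register contract above corresponds roughly to a
 * prototype of this shape; an illustrative sketch, not a declaration taken
 * from this file:
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *		struct thread_info *prev_ti, struct thread_info *next_ti);
 * )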
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 * This is a segment of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs.  The idea is for
 * this code to be executed directly in user mode for best efficiency but
 * which is too intimate with the kernel counterpart to be left to user
 * libraries.  In fact this code might even differ from one CPU to another
 * depending on the available instruction set and restrictions like on
 * SMP systems.  In other words, the kernel reserves the right to change
 * this code as needed without warning.  Only the entry points and their
 * results are guaranteed to be stable.
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already going to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words, don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other purposes.
#ifdef CONFIG_ARM_THUMB
	.globl	__kuser_helper_start
__kuser_helper_start:
 * Reference prototype:
 *	void __kernel_memory_barrier(void)
 *	lr = return address
 * Definition and user space usage example:
 *	typedef void (__kernel_dmb_t)(void);
 *	#define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 * This could be used as follows:
 * #define __kernel_dmb() \
 *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *	        : : : "r0", "lr","cc" )
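 *
 * (For reference: the #95 above is simply 0xffff0fff - 0xffff0fa0, i.e. the
 * offset of this helper's entry point from the base value loaded into r0.)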
__kuser_memory_barrier:				@ 0xffff0fa0
 * Reference prototype:
 *	int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *	lr = return address
 *	r0 = returned value (zero or non-zero)
 *	C flag = set if r0 == 0, clear if r0 != 0
 * Definition and user space usage example:
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 * - This routine already includes memory barriers as needed.
 * For example, a user space atomic_add implementation could look like this:
 * #define atomic_add(ptr, val) \
 *	({ register unsigned int *__ptr asm("r2") = (ptr); \
 *	   register unsigned int __result asm("r1"); \
 *	       "1: @ atomic_add\n\t" \
 *	       "ldr	r0, [r2]\n\t" \
 *	       "mov	r3, #0xffff0fff\n\t" \
 *	       "add	lr, pc, #4\n\t" \
 *	       "add	r1, r0, %2\n\t" \
 *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *	       : "=&r" (__result) \
 *	       : "r" (__ptr), "rIL" (val) \
 *	       : "r0","r3","ip","lr","cc","memory" ); \
__kuser_cmpxchg:				@ 0xffff0fc0
#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	ldr	r7, =1f			@ it's 20 bits
1:	.word	__ARM_NR_cmpxchg
#elif __LINUX_ARM_ARCH__ < 6
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	@ Called from kuser_cmpxchg_check macro.
	@ r2 = address of interrupted insn (must be preserved).
	@ sp = saved regs.  r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#warning "NPTL on non MMU needs fixing"
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
 * Reference prototype:
 *	int __kernel_get_tls(void)
 *	lr = return address
 * Definition and user space usage example:
 *	typedef int (__kernel_get_tls_t)(void);
 *	#define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 * This could be used as follows:
 * #define __kernel_get_tls() \
 *	({ register unsigned int __val asm("r0"); \
 *         asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *	        : "=r" (__val) : : "lr","cc" ); \
__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version
 * Reference declaration:
 *	extern unsigned int __kernel_helper_version;
 * Definition and user space usage example:
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 * User space may read this to determine the current number of helpers available.
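 *
 * A user space check could look like this (an illustrative sketch only:
 * use_fallback() is a hypothetical application function, and the minimum
 * version value required for __kernel_cmpxchg is an assumption here, not
 * something defined in this file):
 *
 *	#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 *	if (__kernel_helper_version < 2)
 *		use_fallback();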
__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
	.globl	__kuser_helper_end
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
	.macro	vector_stub, name, mode, correction=0
	sub	lr, lr, #\correction
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	stmia	sp, {r0, lr}		@ save r0, lr
	str	lr, [sp, #8]		@ save spsr
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
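					@ (the eor flips the mode bits from
					@ \mode to SVC_MODE; PSR_ISETSTATE
					@ additionally selects the kernel's
					@ instruction set, i.e. Thumb state
					@ on a Thumb-2 kernel)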
	@ the branch table must immediately follow this code
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)
	@ handler addresses follow this label
	.globl	__stubs_start
 * Interrupt dispatcher
	vector_stub	irq, IRQ_MODE, 4
	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
	vector_stub	dabt, ABT_MODE, 8
	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
	vector_stub	pabt, ABT_MODE, 4
	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
	vector_stub	und, UND_MODE
	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f
/*=============================================================================
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
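	@ (each "vector_xxx + stubs_offset" branch below is assembled against
	@ the stubs' link-time address; adding stubs_offset rebases the target
	@ to where the stub will live once it has been copied to 0xffff0200,
	@ just above the vector page copied to 0xffff0000)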
	.globl	__vectors_start
 ARM(	swi	SYS_ERROR0	)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset
	.globl	__vectors_end
	.globl	cr_no_alignment
#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq