/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *   Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 *   Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>

#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>

#undef SHOW_SYSCALLS_TASK
/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
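/*
 * Illustrative expansion (a sketch, not generated code): when the MSR
 * value does not fit a signed 16-bit immediate, as on 4xx/Book-E,
 * LOAD_MSR_KERNEL(r10, MSR_KERNEL) assembles to the pair
 *	lis	r10,MSR_KERNEL@h
 *	ori	r10,r10,MSR_KERNEL@l
 * whereas the small-constant case is a single li, saving an instruction
 * on classic 6xx parts.
 */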
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_FSL_BOOKE
#ifdef CONFIG_PHYS_64BIT
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_FSL_BOOKE */
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	stw	r0,crit_srr0@l(0)
	stw	r0,crit_srr1@l(0)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
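/*
 * A typical caller establishes that condition with something like
 * (a sketch only; the real exception prologues vary in register choice):
 *	mfspr	r9,SPRN_SRR1
 *	andi.	r10,r9,MSR_PR
 * since andi. sets cr0.eq exactly when MSR_PR was clear, i.e. when the
 * interrupted context was the kernel.
 */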
	.globl	transfer_to_handler_full
transfer_to_handler_full:

	.globl	transfer_to_handler
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
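	/*
	 * Note on the lis/addi pair above: @ha is the high 16 bits
	 * adjusted for the sign of the low half, so that
	 *	lis	r11,global_dbcr0@ha
	 *	addi	r11,r11,global_dbcr0@l
	 * always yields the full 32-bit address even when the @l part is
	 * negative as a signed 16-bit immediate (plain @h would be off by
	 * 0x10000 in that case).
	 */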
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	rlwinm	r9,r1,0,0,31-THREAD_SHIFT
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	RFI				/* jump to handler, enable MMU */
#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
	/* sometimes we use a statically-allocated stack, which is OK. */
	ble	5b			/* r1 <= &_end is OK */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
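	/*
	 * lwzx fetches the word at r10+r0, i.e. sys_call_table plus the
	 * syscall number scaled to a byte offset (each table entry is a
	 * 32-bit handler pointer), so the load amounts to
	 * handler = *(sys_call_table + 4*n).
	 */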
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl			/* Call handler */
	.globl	ret_from_syscall
	bl	do_show_syscall_exit
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	oris	r11,r11,0x1000	/* Set SO bit in CR */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
#endif /* CONFIG_44x */
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	stw	r7,icache_44x_need_flush@l(r4)
#endif /* CONFIG_44x */
	/* Traced system call support */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers */
	b	syscall_dotrace_cont

	andi.	r0,r9,_TIF_RESTOREALL
	andi.	r0,r9,_TIF_NOERROR
	lwz	r11,_CCR(r1)	/* Load CR */
	oris	r11,r11,0x1000	/* Set SO bit in CR */

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)

	/* Clear per-syscall TIF flags if any are set.  */
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
#ifdef CONFIG_IBM405_ERR77
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	/* Re-enable interrupts */
	/* Save NVGPRS if they're not saved already */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	stw	r3,RESULT(r1)	/* Save result */

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"

#ifdef SHOW_SYSCALLS_TASK
	.globl	show_syscalls_task
#endif /* SHOW_SYSCALLS */
/*
 * The fork/clone functions need to copy the full register set into
 * the child process.  Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
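/*
 * The low bit of the saved trap word records whether the frame holds
 * only the volatile registers; clearing it advertises a full set.
 * rlwinm rD,rS,0,0,30 (used below) keeps IBM-numbered bits 0..30 and
 * masks off bit 31, the least-significant bit, i.e. r0 &= ~1.
 */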
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */

	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */

	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */

	.globl	ppc_swapcontext
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
	addi	r3,r1,STACK_FRAME_OVERHEAD
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	ret_from_except_full
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
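/*
 * In outline (a sketch of the code below, not a substitute for it):
 *	stwu	r1,-INT_FRAME_SIZE(r1)	- make a frame on the old stack
 *	... save LR, MSR and the nonvolatile GPRs of the outgoing task ...
 *	stw	r1,KSP(r3)		- old THREAD remembers its stack
 *	lwz	r1,KSP(r4)		- adopt the new task's saved stack
 *	... restore NVGPRs and LR, pop the frame, return in the new task ...
 */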
	stwu	r1,-INT_FRAME_SIZE(r1)
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	stw	r0,_NIP(r1)	/* Return to switch caller */
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	stw	r1,KSP(r3)	/* Set old stack pointer */

	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
#endif /* CONFIG_SMP */
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	/* r3-r12 are destroyed -- Cort */
	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	addi	r1,r1,INT_FRAME_SIZE
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
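	/*
	 * MSR_RI clear means SRR0/SRR1 no longer describe the interrupted
	 * context, so another exception here could not return; the 1f path
	 * below counts the event in fee_restarts and resumes execution at
	 * exc_exit_restart instead.
	 */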
2:	REST_4GPRS(3, r11)

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	lis	r3,fee_restarts@ha
	lwz	r5,fee_restarts@l(r3)
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
END_FTR_SECTION_IFSET(CPU_FTR_601)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
	.globl	ret_from_except_full
ret_from_except_full:

	.globl	ret_from_except
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	andi.	r0,r9,_TIF_USER_WORK_MASK

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
#ifdef CONFIG_PREEMPT
/* N.B. the only way to get here is from the beq following ret_from_except. */
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	andi.	r0,r0,_TIF_NEED_RESCHED
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	andi.	r0,r3,_TIF_NEED_RESCHED
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	stw	r6,icache_44x_need_flush@l(r4)
#endif /* CONFIG_44x */
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
/*
 * Once we put values in SRR0 and SRR1, we are in a state
 * where exceptions are not recoverable, since taking an
 * exception will trash SRR0 and SRR1.  Therefore we clear the
 * MSR:RI bit to indicate this.  If we do take an exception,
 * we can't return to the point of the exception but we
 * can restart the exception exit path at the label
 * exc_exit_restart below.  -- paulus
 */
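/*
 * A sketch of the resulting sequence (details in the code below):
 *	mtmsr	r10		- MSR_RI now clear, the window begins
 * exc_exit_restart:
 *	... reload SRR0/SRR1 from the exception frame, restore GPRs ...
 *	RFI			- atomically restores MSR and PC
 * An exception taken inside the window simply re-enters at
 * exc_exit_restart, which is why everything in it only reads the
 * frame and can safely be re-executed.
 */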
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
	.globl exc_exit_restart_end
exc_exit_restart_end:
#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
/*
 * This is a bit different on 4xx/Book-E because it doesn't have
 * the RI bit in the MSR.
 * The TLB miss handler checks if we have interrupted
 * the exception exit path and restarts it if so
 * (well maybe one day it will... :).
 */
	.globl exc_exit_restart
	.globl exc_exit_restart_end
exc_exit_restart_end:
	b	.			/* prevent prefetch past rfi */
/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we     \
	 * assume the instructions here are mapped by a pinned TLB entry */ \

#define PPC_40x_TURN_OFF_MSR_DR

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	mtspr	SPRN_XER,r10;						\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	PPC_40x_TURN_OFF_MSR_DR;					\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	b	.;		/* prevent prefetch past exc_lvl_rfi */
#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)	\
	lwz	r9,_##exc_lvl_srr0(r1);			\
	lwz	r10,_##exc_lvl_srr1(r1);		\
	mtspr	SPRN_##exc_lvl_srr0,r9;			\
	mtspr	SPRN_##exc_lvl_srr1,r10;
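/*
 * Example expansion (by ## token pasting; a sketch for readability):
 * RESTORE_xSRR(CSRR0,CSRR1) becomes
 *	lwz	r9,_CSRR0(r1)
 *	lwz	r10,_CSRR1(r1)
 *	mtspr	SPRN_CSRR0,r9
 *	mtspr	SPRN_CSRR1,r10
 */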
#if defined(CONFIG_FSL_BOOKE)
#ifdef CONFIG_PHYS_64BIT
#define RESTORE_MAS7							\
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	mtspr	SPRN_MAS0,r9;						\
	mtspr	SPRN_MAS1,r10;						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\

#define RESTORE_MMU_REGS
	.globl	ret_from_crit_exc
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
#endif /* CONFIG_40x */

	.globl	ret_from_crit_exc
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
	.globl	ret_from_debug_exc
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */
/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
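/*
 * In outline (sketched from the code below): debug exceptions are
 * masked by clearing MSR_DE, the old DBCR0 is parked in global_dbcr0,
 * the task's value from r0 is installed in its place, and any pending
 * events are discarded by writing all-ones to DBSR.
 */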
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED

do_resched:			/* r10 contains MSR_KERNEL here */
	MTMSRD(r10)		/* hard-enable interrupts */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	andi.	r0,r9,_TIF_NEED_RESCHED
	andi.	r0,r9,_TIF_USER_WORK_MASK
do_user_signal:			/* r10 contains MSR_KERNEL here */
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	addi	r4,r1,STACK_FRAME_OVERHEAD
/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */

3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
END_FTR_SECTION_IFSET(CPU_FTR_601)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
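/*
 * The essential trick (a sketch; details in the code below): RTAS is
 * entered through an rfi whose SRR0 holds the physical entry point
 * (RTASENTRY) and whose SRR1 holds MSR_KERNEL & ~(MSR_IR|MSR_DR), so
 * both instruction and data translation are off on arrival; the 1f
 * return path then re-enables the MMU the same way.
 */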
	stwu	r1,-INT_FRAME_SIZE(r1)
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	lwz	r8,RTASENTRY(r4)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */
#ifdef CONFIG_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE
	subi	r3, r3, MCOUNT_INSN_SIZE
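	/*
	 * The caller's LR points just past the mcount call site, so
	 * backing r3 up by MCOUNT_INSN_SIZE hands the tracer the address
	 * of the call instruction itself.
	 */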
_GLOBAL(ftrace_caller)
	/* Based on the objdump output from glibc */
	subi	r3, r3, MCOUNT_INSN_SIZE
	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
_GLOBAL(ftrace_stub)
#endif /* CONFIG_FTRACE */