2 * arch/ppc/kernel/entry.S
4 * $Id: entry.S,v 1.4 1999/09/14 05:18:14 dmalek Exp $
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
9 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
10 * Adapted for Power Macintosh by Paul Mackerras.
11 * Low-level exception handlers and MMU support
12 * rewritten by Paul Mackerras.
13 * Copyright (C) 1996 Paul Mackerras.
14 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
16 * This file contains the system call entry code, context switch
17 * code, and exception/interrupt return code for PowerPC.
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
/* Kernel headers: register/thread offsets, errno values, syscall count,
 * and the kernel configuration options tested below (CONFIG_8xx etc.). */
27 #include <asm/processor.h>
30 #include <linux/errno.h>
31 #include <linux/sys.h>
32 #include <linux/config.h>
/* Debug switch: when defined, syscall tracing output (the SHOW_SYSCALLS
 * paths below) is restricted to the task named by show_syscalls_task.
 * NOTE(review): the matching code for this #ifdef is not visible in this
 * excerpt -- lines are missing between here and the syscall entry code. */
35 #define SHOW_SYSCALLS_TASK
37 #ifdef SHOW_SYSCALLS_TASK
/*
 * System call entry/exit path.
 *
 * NOTE(review): this is a non-contiguous excerpt -- several instructions
 * between the lines shown here are missing from this view, so branches
 * (e.g. to labels 10:, 20:, 22:, 30:) do not show their full targets.
 *
 * Register conventions established by the visible code:
 *   r0 = system call number (saved to thread.last_syscall, bounds-checked
 *        against NR_syscalls, used to index sys_call_table)
 *   r1 = kernel stack exception frame (_CCR/RESULT/GPR0/GPR3 offsets)
 *   r2 = current task_struct (TASK_FLAGS/THREAD offsets)
 *   r3 = handler return value on the way out
 */
44 * Handle a system call.
/* Record the syscall number for debuggers (thread.last_syscall). */
48 stw r0,THREAD+LAST_SYSCALL(r2)
49 lwz r11,_CCR(r1) /* Clear SO bit in CR */
/* Debug-only: load the task pointer that syscall tracing is limited to
 * (compared against current in lines missing from this excerpt). */
54 #ifdef SHOW_SYSCALLS_TASK
55 lis r31,show_syscalls_task@ha
56 lwz r31,show_syscalls_task@l(r31)
83 #endif /* SHOW_SYSCALLS */
84 cmpi 0,r0,0x7777 /* Special case for 'sys_sigreturn' */
/* Take the traced-syscall path below if the task is being ptraced. */
86 lwz r10,TASK_FLAGS(r2)
87 andi. r10,r10,PF_TRACESYS
/* Range-check the syscall number, then fetch the handler pointer by
 * indexing sys_call_table with r0 (word index; scaling of r0 happens in
 * lines not visible here -- verify against the full file). */
89 cmpli 0,r0,NR_syscalls
91 lis r10,sys_call_table@h
92 ori r10,r10,sys_call_table@l
94 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
/* Extra argument: pointer to the saved register frame. */
98 addi r9,r1,STACK_FRAME_OVERHEAD
99 blrl /* Call handler */
100 .globl ret_from_syscall_1
102 20: stw r3,RESULT(r1) /* Save result */
104 #ifdef SHOW_SYSCALLS_TASK
/* r3 holds the handler's return value; ERESTARTNOHAND gets special
 * restart handling (the restart logic itself is in missing lines). */
119 cmpi 0,r3,ERESTARTNOHAND
122 22: lwz r10,_CCR(r1) /* Set SO bit in CR */
125 30: stw r3,GPR3(r1) /* Update return value */
130 10: addi r3,r1,STACK_FRAME_OVERHEAD
132 cmpi 0,r3,0 /* Check for restarted system call */
135 /* Traced system call support */
/* Same dispatch sequence as above, re-run after the tracer has had a
 * chance to inspect/modify the saved registers (presumably via
 * syscall_trace in the missing lines -- TODO confirm in full file). */
137 lwz r0,GPR0(r1) /* Restore original registers */
145 cmpli 0,r0,NR_syscalls
147 lis r10,sys_call_table@h
148 ori r10,r10,sys_call_table@l
150 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
154 addi r9,r1,STACK_FRAME_OVERHEAD
155 blrl /* Call handler */
156 .globl ret_from_syscall_2
158 stw r3,RESULT(r1) /* Save result */
159 stw r3,GPR0(r1) /* temporary gross hack to make strace work */
164 cmpi 0,r3,ERESTARTNOHAND
167 52: lwz r10,_CCR(r1) /* Set SO bit in CR */
170 60: stw r3,GPR3(r1) /* Update return value */
/* printf-style format strings used by the SHOW_SYSCALLS debug output
 * (continuation string 77 follows 7; 79 prints the result). */
176 7: .string "syscall %d(%x, %x, %x, %x, %x, "
177 77: .string "%x, %x), current=%p\n"
178 79: .string " -> %x\n"
/*
 * _switch -- task context switch.
 * NOTE(review): non-contiguous excerpt; the _switch label itself and
 * many save/restore instructions are in lines missing from this view.
 */
183 * This routine switches between two different tasks. The process
184 * state of one is saved on its kernel stack. Then the state
185 * of the other is restored from its kernel stack. The memory
186 * management hardware is updated to the second process's state.
187 * Finally, we can return to the second process, via ret_from_except.
188 * On entry, r3 points to the THREAD for the current task, r4
189 * points to the THREAD for the new task.
191 * Note: there are two ways to get to the "going out" portion
192 * of this code; either by coming in via the entry (_switch)
193 * or via "fork" which must set up an environment equivalent
194 * to the "_switch" path. If you change this (or in particular, the
195 * SAVE_REGS macro), you'll have to change the fork code also.
197 * The code which creates the new task context is in 'copy_thread'
198 * in arch/ppc/kernel/process.c
/* Push a full frame on the outgoing task's kernel stack. */
201 stwu r1,-INT_FRAME_SIZE(r1)
205 /* r3-r13 are caller saved -- Cort */
209 mflr r20 /* Return to switch caller */
/* Lazy FPU: drop MSR_FP so the new task faults on first FP use.
 * (The mfmsr/andc/mtmsr sequence applying r0 is in missing lines.) */
211 li r0,MSR_FP /* Disable floating-point */
212 #ifdef CONFIG_ALTIVEC
214 #endif /* CONFIG_ALTIVEC */
227 stw r1,KSP(r3) /* Set old stack pointer */
230 mtspr SPRG3,r0 /* Update current THREAD phys addr */
/* 8xx only: point the MMU table-walk base at the new task's page
 * directory (matching #ifdef CONFIG_8xx is in a missing line). */
232 /* XXX it would be nice to find a SPRGx for this on 6xx,7xx too */
233 lwz r9,PGDIR(r4) /* cache the page table root */
234 tophys(r9,r9) /* convert to phys addr */
235 mtspr M_TWB,r9 /* Update MMU base address */
236 #endif /* CONFIG_8xx */
237 lwz r1,KSP(r4) /* Load new stack pointer */
238 /* save the old current 'last' for return value */
/* r4 points at the new task's THREAD; back up to its task_struct. */
240 addi r2,r4,-THREAD /* Update current */
241 lwz r9,_MSR(r1) /* Returning to user mode? */
243 beq+ 10f /* if not, don't adjust kernel stack */
/* Returning to user: record the top of the kernel stack so the next
 * exception from user mode starts with a fresh frame. */
244 8: addi r4,r1,INT_FRAME_SIZE /* size of frame */
245 stw r4,THREAD+KSP(r2) /* save kernel stack pointer */
247 mtspr SPRG2,r9 /* phys exception stack pointer */
256 /* r3-r13 are destroyed -- Cort */
261 lwz r2,_NIP(r1) /* Restore environment */
263 * We need to hard disable here even if RTL is active since
264 * being interrupted after here trashes SRR{0,1}
/* Hard-disable external interrupts: rlwinm with mask 17..15 clears
 * bit 16, i.e. MSR_EE, leaving all other MSR bits intact. */
267 mfmsr r0 /* Get current interrupt state */
268 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
269 mtmsr r0 /* Update machine state */
/*
 * Exception/interrupt return paths: ret_from_smpfork, ret_from_intercept,
 * ret_from_except, do_bottom_half_ret.
 * NOTE(review): non-contiguous excerpt -- the bodies under most of these
 * labels, and the branches between the numbered local labels, are in
 * lines missing from this view.
 */
281 .globl ret_from_smpfork
287 .globl ret_from_intercept
290 * We may be returning from RTL and cannot do the normal checks
296 * If we're returning from user mode we do things differently
304 .globl ret_from_except
/* "RTL" here is the soft interrupt-control layer: interrupts are masked
 * via the int_control structure rather than MSR_EE alone. */
306 0: /* disable interrupts */
307 lis r30,int_control@h
308 ori r30,r30,int_control@l
/* Replay any interrupts that arrived while soft-disabled. */
318 3: lis r4,ppc_n_lost_interrupts@ha
319 lwz r4,ppc_n_lost_interrupts@l(r4)
322 addi r3,r1,STACK_FRAME_OVERHEAD
/* Check for pending softirqs (softirq_state is per-CPU on SMP). */
325 1: lis r4,softirq_state@ha
326 addi r4,r4,softirq_state@l
328 /* get processor # */
333 #error not 64-bit ready
336 #endif /* CONFIG_SMP */
342 .globl do_bottom_half_ret
344 2: /* disable interrupts */
345 lis r30,int_control@h
346 ori r30,r30,int_control@l
/* Only do reschedule/signal checks when returning to user mode. */
350 lwz r3,_MSR(r1) /* Returning to user mode? */
352 beq+ 10f /* if so, check need_resched and signals */
353 lwz r3,NEED_RESCHED(r2)
354 cmpi 0,r3,0 /* check need_resched flag */
357 7:
/*
 * fake_interrupt -- synthesize an interrupt frame from kernel mode.
 * NOTE(review): only the frame push, the do_IRQ-style argument setup,
 * and the frame pop are visible here; the register saves, the actual
 * handler call, and the return are in lines missing from this excerpt.
 */
410 * Fake an interrupt from kernel mode.
411 * This is used when enable_irq loses an interrupt.
412 * We only fill in the stack frame minimally.
414 _GLOBAL(fake_interrupt)
/* Allocate a (minimally filled) exception frame on the kernel stack. */
417 stwu r1,-INT_FRAME_SIZE(r1)
/* r3 = pointer to the saved-register area within the frame. */
424 addi r3,r1,STACK_FRAME_OVERHEAD
/* Pop the frame before returning. */
427 addi r1,r1,INT_FRAME_SIZE
/*
 * RTAS call path (CHRP firmware services, MMU off).
 * NOTE(review): non-contiguous excerpt -- the entry label (presumably
 * enter_rtas -- confirm in full file), the matching lis instructions for
 * several @l loads, and the SRR0/SRR1 setup before rfi are in lines
 * missing from this view.
 */
434 * PROM code for specific machines follows. Put it
435 * here so it's easy to add arch-specific sections later.
438 #if defined(CONFIG_ALL_PPC)
440 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
441 * called with the MMU off.
/* Build physical addresses by subtracting KERNELBASE from the kernel
 * virtual addresses: RTAS argument block, return point, stack frame. */
448 lwz r4,rtas_data@l(r4)
449 addis r4,r4,-KERNELBASE@h
450 lis r6,1f@ha /* physical return address for rtas */
452 addis r6,r6,-KERNELBASE@h
453 subi r7,r1,INT_FRAME_SIZE
454 addis r7,r7,-KERNELBASE@h
456 lwz r8,rtas_entry@l(r8)
/* Build the MSR images: r0 keeps EE/single-step/branch-trace set for
 * our side; r9 keeps only machine-check/recoverable bits for RTAS. */
460 ori r0,r0,MSR_EE|MSR_SE|MSR_BE
462 andi. r9,r9,MSR_ME|MSR_RI
463 sync /* disable interrupts so SRR0/1 */
464 mtmsr r0 /* don't get trashed */
/* 1f: return point after RTAS; recover saved values from the physical
 * view of the stack (offsets 20/8 -- frame layout set up in missing
 * lines; verify against the full file). */
470 1: addis r9,r1,-KERNELBASE@h
471 lwz r8,20(r9) /* get return address */
472 lwz r9,8(r9) /* original msr value */
/* rfi resumes at SRR0 with MSR restored from SRR1. */
477 rfi /* return to caller */
478 #endif /* CONFIG_ALL_PPC */