2 * arch/ppc/kernel/head.S
4 * $Id: head.S,v 1.143 1999/09/05 11:56:28 paulus Exp $
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
9 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
10 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
11 * Adapted for Power Macintosh by Paul Mackerras.
12 * Low-level exception handlers and MMU support
13 * rewritten by Paul Mackerras.
14 * Copyright (C) 1996 Paul Mackerras.
15 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
16 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
18 * This file contains the low-level support and setup for the
19 * PowerPC platform, including trap and interrupt dispatch.
20 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
30 #include <asm/processor.h>
32 #include <linux/config.h>
36 #include <asm/amigappc.h>
/*
 * LOAD_BAT(n, reg, RA, RB): load the n'th instruction/data BAT register
 * pair from a table pointed to by `reg`, using RA/RB as scratch.
 * Two variants: 64-bit (8-byte entries, `ld`) and 32-bit (4-byte
 * entries, `lwz`).
 * NOTE(review): this listing is a sparse excerpt — the opening
 * #ifdef CONFIG_PPC64 and the 601 skip-DBAT branch are elided.
 */
40 #define LOAD_BAT(n, reg, RA, RB) \
41 ld RA,(n*32)+0(reg); \
42 ld RB,(n*32)+8(reg); \
43 mtspr IBAT##n##U,RA; \
44 mtspr IBAT##n##L,RB; \
45 ld RA,(n*32)+16(reg); \
46 ld RB,(n*32)+24(reg); \
47 mtspr DBAT##n##U,RA; \
48 mtspr DBAT##n##L,RB; \
50 #else /* CONFIG_PPC64 */
52 /* the 601 only has IBATs; cr0.eq is set on 601 when using this macro */
53 #define LOAD_BAT(n, reg, RA, RB) \
54 lwz RA,(n*16)+0(reg); \
55 lwz RB,(n*16)+4(reg); \
56 mtspr IBAT##n##U,RA; \
57 mtspr IBAT##n##L,RB; \
59 lwz RA,(n*16)+8(reg); \
60 lwz RB,(n*16)+12(reg); \
61 mtspr DBAT##n##U,RA; \
62 mtspr DBAT##n##L,RB; \
64 #endif /* CONFIG_PPC64 */
/*
 * Kernel boot entry: documents the register contracts for the three
 * boot paths (OpenFirmware, BootX, PReP), then maps the first 16MB
 * of RAM with BAT0, optionally relocates the image, and turns on the
 * MMU via rfi.
 * NOTE(review): sparse excerpt — the _start label, mfspr PVR read,
 * and several mtspr/branch lines between the visible ones are elided;
 * do not assume the visible instructions are adjacent.
 */
71 * _start is defined this way because the XCOFF loader in the OpenFirmware
72 * on the powermac expects the entry point to be a procedure descriptor.
78 * These are here for legacy reasons, the kernel used to
79 * need to look like a coff function entry for the pmac
80 * but we're always started by some kind of bootloader now.
88 * Enter here with the kernel text, data and bss loaded starting at
89 * 0, running with virtual == physical mapping.
90 * r5 points to the prom entry point (the client interface handler
91 * address). Address translation is turned on, with the prom
92 * managing the hash table. Interrupts are disabled. The stack
93 * pointer (r1) points to just below the end of the half-meg region
94 * from 0x380000 - 0x400000, which is mapped in already.
96 * If we are booted from MacOS via BootX, we enter with the kernel
97 * image loaded somewhere, and the following values in registers:
98 * r3: 'BooX' (0x426f6f58)
99 * r4: virtual address of boot_infos_t
104 * r4: physical address of memory base
105 * Linux/m68k style BootInfo structure at &_end.
108 * This is jumped to on prep systems right after the kernel is relocated
109 * to its proper place in memory by the boot loader. The expected layout
111 * r3: ptr to residual data
112 * r4: initrd_start or if no initrd then 0
113 * r5: initrd_end - unused if r4 is 0
114 * r6: Start of command line string
115 * r7: End of command line string
117 * This just gets a minimal mmu environment setup so we can call
118 * start_here() to do the real work.
126 * Go into 32-bit mode to boot. OF should do this for
127 * us already but just in case...
135 * We have to do any OF calls before we map ourselves to KERNELBASE,
136 * because OF may have I/O devices mapped in that area
137 * (particularly on CHRP).
139 mr r31,r3 /* save parameters */
148 /* On APUS the __va/__pa constants need to be set to the correct
149 * values before continuing.
156 * Use the first pair of BAT registers to map the 1st 16MB
157 * of RAM to KERNELBASE. From this point on we can't safely
161 #ifndef CONFIG_PPC64xxx
163 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
/* 601 BAT format differs from later 6xx: the valid bit lives in the
 * lower (L) register, and blocks are limited to 8MB, so two BAT pairs
 * are needed to cover 16MB. */
166 ori r11,r11,4 /* set up BAT registers for 601 */
167 li r8,0x7f /* valid, block length = 8MB */
168 oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
169 oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
170 mtspr IBAT0U,r11 /* N.B. 601 has valid bit in */
171 mtspr IBAT0L,r8 /* lower BAT register */
175 #endif /* CONFIG_PPC64 */
178 ori r8,r8,2 /* R/W access */
179 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
181 /* clear out the high 32 bits in the BAT */
184 /* turn off the pagetable mappings just in case */
187 #else /* CONFIG_PPC64 */
189 * allow secondary cpus to get at all of ram in early bootup
190 * since their init_task may be up there -- Cort
193 oris r18,r8,0x10000000@h
194 oris r21,r11,(KERNELBASE+0x10000000)@h
201 mtspr DBAT1L,r18 /* N.B. 6xx (not 601) have valid */
202 mtspr DBAT1U,r21 /* bit in upper BAT register */
206 #if 0 /* for now, otherwise we overflow the 0x100 bytes we have here */
207 oris r18,r8,0x20000000@h
208 oris r21,r11,(KERNELBASE+0x20000000)@h
209 mtspr DBAT2L,r18 /* N.B. 6xx (not 601) have valid */
210 mtspr DBAT2U,r21 /* bit in upper BAT register */
214 #endif /* CONFIG_PPC64 */
215 mtspr DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
216 mtspr DBAT0U,r11 /* bit in upper BAT register */
222 /* Unfortunately the APUS specific instructions bloat the
223 * code so it cannot fit in the 0x100 bytes available. We have
224 * to do it the crude way. */
226 /* Map 0xfff00000 so we can access VTOP/PTOV constant when
229 ori r11,r8,0x2 /* r/w */
230 ori r8,r8,0x2 /* 128KB, supervisor */
234 /* Copy exception code to exception vector base. */
237 lis r3,0xfff0 /* Copy to 0xfff00000 on APUS */
238 li r5,0x4000 /* # bytes of memory to copy */
240 bl copy_and_flush /* copy the first 0x4000 bytes */
241 #else /* CONFIG_APUS */
243 * We need to run with _start at physical address 0.
244 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
245 * the exception vectors at 0 (and therefore this copy
246 * overwrites OF's exception vectors with our own).
247 * If the MMU is already turned on, we copy stuff to KERNELBASE,
248 * otherwise we copy it to 0.
252 addis r4,r3,KERNELBASE@h /* current address of _start */
253 cmpwi 0,r4,0 /* are we already running at 0? */
254 beq 2f /* assume it's OK if so */
257 andi. r0,r0,MSR_DR /* MMU enabled? */
259 lis r3,KERNELBASE@h /* if so, are we */
260 cmpw 0,r4,r3 /* already running at KERNELBASE? */
263 #endif /* CONFIG_APUS */
265 * we now have the 1st 16M of ram mapped with the bats.
266 * prep needs the mmu to be turned on here, but pmac already has it on.
267 * this shouldn't bother the pmac since it just gets turned on again
268 * as we jump to our code at KERNELBASE. -- Cort
/* Turn on translation by loading SRR1 with MSR_DR|MSR_IR and SRR0
 * with start_here's address, then rfi — the canonical 6xx idiom for
 * atomically enabling the MMU while jumping. */
273 ori r0,r0,MSR_DR|MSR_IR
276 ori r0,r0,start_here@l
279 rfi /* enables MMU */
/*
 * EXCEPTION_PROLOG: common exception entry. Runs untranslated
 * (physical addresses); builds an exception frame on the kernel stack
 * (or on the SPRG2 exception stack when coming from user mode / RTAS)
 * and saves volatile registers into it.
 * STD_EXCEPTION(n, label, hdlr): boilerplate vector that runs the
 * prolog and dispatches to `hdlr` via transfer_to_handler.
 * NOTE(review): sparse excerpt — the mtspr SPRG0/SPRG1 scratch saves
 * and several stw lines of the frame save sequence are elided.
 */
282 * Exception entry code. This code runs with address translation
283 * turned off, i.e. using physical addresses.
284 * We assume sprg3 has the physical address of the current
285 * task's thread_struct.
287 #define EXCEPTION_PROLOG \
291 mfspr r21,SPRG2; /* exception stack to use from */ \
292 cmpwi 0,r21,0; /* user mode or RTAS */ \
294 tophys(r21,r1); /* use tophys(kernel sp) otherwise */ \
295 subi r21,r21,INT_FRAME_SIZE; /* alloc exc. frame */\
296 1: stw r20,_CCR(r21); /* save registers */ \
297 stw r22,GPR22(r21); \
298 stw r23,GPR23(r21); \
300 stw r20,GPR20(r21); \
302 stw r22,GPR21(r21); \
304 stw r20,_LINK(r21); \
315 tovirt(r1,r21); /* set new kernel sp */ \
318 * Note: code which follows this uses cr0.eq (set if from kernel),
319 * r21, r22 (SRR0), and r23 (SRR1).
325 #define STD_EXCEPTION(n, label, hdlr) \
329 addi r3,r1,STACK_FRAME_OVERHEAD; \
331 bl transfer_to_handler; \
333 .long ret_from_except
/*
 * Exception vectors 0x100-0xf00. Most are STD_EXCEPTION boilerplate;
 * DataAccess (0x300) and InstructionAccess (0x400) first try a fast
 * hash-table PTE insert before falling back to the C fault handler,
 * and the APUS external-interrupt path reads the emulated m68k IPL.
 * NOTE(review): sparse excerpt — vector labels, EXCEPTION_PROLOG
 * invocations, and the `bl hash_page` calls between the visible lines
 * are elided.
 */
336 #ifdef CONFIG_SMP /* MVME/MTX start the secondary here */
337 STD_EXCEPTION(0x100, Reset, __secondary_start_psurge)
339 STD_EXCEPTION(0x100, Reset, UnknownException)
343 STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)
345 /* Data access exception. */
350 andis. r0,r20,0xa470 /* weird error? */
351 bne 1f /* if not, try to put a PTE */
352 mfspr r3,DAR /* into the hash table */
353 rlwinm r4,r23,32-13,30,30 /* MSR_PR -> _PAGE_USER */
354 rlwimi r4,r20,32-23,29,29 /* DSISR_STORE -> _PAGE_RW */
355 mfspr r5,SPRG3 /* phys addr of THREAD */
357 1: stw r20,_DSISR(r21)
361 addi r3,r1,STACK_FRAME_OVERHEAD
363 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
364 bl transfer_to_handler
366 .long ret_from_except
368 /* Instruction access exception. */
372 andis. r0,r23,0x4000 /* no pte found? */
373 beq 1f /* if so, try to put a PTE */
374 mr r3,r22 /* into the hash table */
375 rlwinm r4,r23,32-13,30,30 /* MSR_PR -> _PAGE_USER */
376 mr r20,r23 /* SRR1 has reason bits */
377 mfspr r5,SPRG3 /* phys addr of THREAD */
379 1: addi r3,r1,STACK_FRAME_OVERHEAD
383 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
384 bl transfer_to_handler
386 .long ret_from_except
388 /* External interrupt */
393 /* This is horrible, but there's no way around it. Enable the
394 data cache so the IRQ hardware register can be accessed
395 without cache intervention. Then disable interrupts and get
396 the current emulated m68k IPL value. */
404 lis r3,APUS_IPL_EMU@h
406 li r20,(IPLEMU_SETRESET|IPLEMU_DISABLEINT)
407 stb r20,APUS_IPL_EMU@l(r3)
410 lbz r3,APUS_IPL_EMU@l(r3)
/* stash the IPL value in the frame (word after _CCR) for the handler */
418 stw r3,(_CCR+4)(r21);
420 addi r3,r1,STACK_FRAME_OVERHEAD
423 bl transfer_to_handler
425 .long ret_from_except
428 /* Alignment exception */
436 addi r3,r1,STACK_FRAME_OVERHEAD
438 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
439 bl transfer_to_handler
440 .long AlignmentException
441 .long ret_from_except
443 /* Program check exception */
447 addi r3,r1,STACK_FRAME_OVERHEAD
449 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
450 bl transfer_to_handler
451 .long ProgramCheckException
452 .long ret_from_except
454 /* Floating-point unavailable */
458 bne load_up_fpu /* if from user, just load it up */
460 bl transfer_to_handler /* if from kernel, take a trap */
462 .long ret_from_except
464 STD_EXCEPTION(0x900, Decrementer, timer_interrupt)
465 STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
466 STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
/* System call (0xc00): save original r3 so the syscall can be restarted */
472 stw r3,ORIG_GPR3(r21)
474 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
475 bl transfer_to_handler
477 .long ret_from_except
479 /* Single step - not used on 601 */
480 STD_EXCEPTION(0xd00, SingleStep, SingleStepException)
482 STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
483 STD_EXCEPTION(0xf00, Trap_0f, UnknownException)
/*
 * InstructionTLBMiss (vector 0x1000): software TLB reload for the
 * 603/603e, which has no hardware table walk. Runs with MSR[TGPR]
 * set, so r0-r3 are the shadow miss registers. Walks the two-level
 * linux page table, checks _PAGE_PRESENT/_PAGE_USER, sets
 * _PAGE_ACCESSED, converts the linux PTE to the hardware RPN word,
 * and (in elided lines) writes it via tlbli. On failure, falls
 * through to InstructionAddressInvalid, which fakes an ISI.
 * NOTE(review): sparse excerpt — the IMISS/ICMP/HASH reads, the
 * tlbli, and the final rfi of both paths are elided.
 */
486 * Handle TLB miss for instruction on 603/603e.
487 * Note: we get an alternate set of r0 - r3 to use automatically.
493 * r1: linux style pte ( later becomes ppc hardware pte )
494 * r2: ptr to linux-style pte
498 /* Get PTE (linux-style) and check access */
503 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
504 lwz r2,0(r2) /* get pmd entry */
505 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
506 beq- InstructionAddressInvalid /* return if no mapping */
508 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
509 lwz r1,0(r2) /* get linux-style pte */
510 /* setup access flags in r3 */
512 rlwinm r3,r3,32-13,30,30 /* MSR_PR -> _PAGE_USER */
513 ori r3,r3,1 /* set _PAGE_PRESENT bit in access */
514 andc. r3,r3,r1 /* check access & ~permission */
515 bne- InstructionAddressInvalid /* return if access not permitted */
516 ori r1,r1,0x100 /* set _PAGE_ACCESSED in pte */
517 stw r1,0(r2) /* update PTE (accessed bit) */
518 /* Convert linux-style PTE to low word of PPC-style PTE */
519 /* this computation could be done better -- Cort */
520 rlwinm r3,r1,32-9,31,31 /* _PAGE_HWWRITE -> PP lsb */
521 rlwimi r1,r1,32-1,31,31 /* _PAGE_USER -> PP (both bits now) */
522 ori r3,r3,0xe04 /* clear out reserved bits */
523 andc r1,r1,r3 /* PP=2 or 0, when _PAGE_HWWRITE */
527 mfspr r3,SRR1 /* Need to restore CR0 */
530 InstructionAddressInvalid:
532 rlwinm r1,r3,9,6,6 /* Get load/store bit */
535 mtspr DSISR,r1 /* (shouldn't be needed) */
536 mtctr r0 /* Restore CTR */
537 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
540 mfspr r1,IMISS /* Get failing address */
541 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
542 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
544 mtspr DAR,r1 /* Set fault address */
545 mfmsr r0 /* Restore "normal" registers */
546 xoris r0,r0,MSR_TGPR>>16
547 mtcrf 0x80,r3 /* Restore CR0 */
548 sync /* Some chip revs have problems here... */
/*
 * DataLoadTLBMiss (vector 0x1100): 603/603e software DTLB reload for
 * loads. Same page-table walk and PTE conversion as the instruction
 * miss handler, but uses DMISS and falls through to
 * DataAddressInvalid (fakes a DSI) on failure.
 * NOTE(review): sparse excerpt — DMISS/DCMP reads, the tlbld, and
 * the rfi are elided; statement order here is exact and must not be
 * disturbed (TGPR shadow-register mode).
 */
553 * Handle TLB miss for DATA Load operation on 603/603e
559 * r1: linux style pte ( later becomes ppc hardware pte )
560 * r2: ptr to linux-style pte
564 /* Get PTE (linux-style) and check access */
569 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
570 lwz r2,0(r2) /* get pmd entry */
571 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
572 beq- DataAddressInvalid /* return if no mapping */
574 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
575 lwz r1,0(r2) /* get linux-style pte */
576 /* setup access flags in r3 */
578 rlwinm r3,r3,32-13,30,30 /* MSR_PR -> _PAGE_USER */
579 ori r3,r3,1 /* set _PAGE_PRESENT bit in access */
580 /* save r2 and use it as scratch for the andc. */
581 andc. r3,r3,r1 /* check access & ~permission */
582 bne- DataAddressInvalid /* return if access not permitted */
583 ori r1,r1,0x100 /* set _PAGE_ACCESSED in pte */
584 stw r1,0(r2) /* update PTE (accessed bit) */
585 /* Convert linux-style PTE to low word of PPC-style PTE */
586 /* this computation could be done better -- Cort */
587 rlwinm r3,r1,32-9,31,31 /* _PAGE_HWWRITE -> PP lsb */
588 rlwimi r1,r1,32-1,31,31 /* _PAGE_USER -> PP (both bits now) */
589 ori r3,r3,0xe04 /* clear out reserved bits */
590 andc r1,r1,r3 /* PP=2 or 0, when _PAGE_HWWRITE */
594 mfspr r3,SRR1 /* Need to restore CR0 */
/* DataAddressInvalid: synthesize a DSI for the C fault path */
599 rlwinm r1,r3,9,6,6 /* Get load/store bit */
602 mtctr r0 /* Restore CTR */
603 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
605 mfspr r1,DMISS /* Get failing address */
606 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
607 beq 20f /* Jump if big endian */
609 20: mtspr DAR,r1 /* Set fault address */
610 mfmsr r0 /* Restore "normal" registers */
611 xoris r0,r0,MSR_TGPR>>16
612 mtcrf 0x80,r3 /* Restore CR0 */
613 sync /* Some chip revs have problems here... */
/*
 * DataStoreTLBMiss (vector 0x1200): 603/603e software DTLB reload for
 * stores. Like the load path, but requires _PAGE_RW in addition to
 * _PAGE_PRESENT, and marks the PTE accessed+dirty+hwwrite on success.
 * NOTE(review): sparse excerpt — DMISS/DCMP reads, the tlbld, and the
 * rfi are elided.
 */
618 * Handle TLB miss for DATA Store on 603/603e
624 * r1: linux style pte ( later becomes ppc hardware pte )
625 * r2: ptr to linux-style pte
629 /* Get PTE (linux-style) and check access */
634 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
635 lwz r2,0(r2) /* get pmd entry */
636 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
637 beq- DataAddressInvalid /* return if no mapping */
639 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
640 lwz r1,0(r2) /* get linux-style pte */
641 /* setup access flags in r3 */
643 rlwinm r3,r3,32-13,30,30 /* MSR_PR -> _PAGE_USER */
644 ori r3,r3,0x5 /* _PAGE_PRESENT|_PAGE_RW */
645 /* save r2 and use it as scratch for the andc. */
646 andc. r3,r3,r1 /* check access & ~permission */
647 bne- DataAddressInvalid /* return if access not permitted */
648 ori r1,r1,0x384 /* set _PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_RW|_PAGE_HWWRITE in pte */
649 stw r1,0(r2) /* update PTE (accessed/dirty bits) */
650 /* Convert linux-style PTE to low word of PPC-style PTE */
651 /* this computation could be done better -- Cort */
652 rlwinm r3,r1,32-9,31,31 /* _PAGE_HWWRITE -> PP lsb */
653 rlwimi r1,r1,32-1,31,31 /* _PAGE_USER -> PP (both bits now) */
654 ori r3,r3,0xe04 /* clear out reserved bits */
655 andc r1,r1,r3 /* PP=2 or 0, when _PAGE_HWWRITE */
659 mfspr r3,SRR1 /* Need to restore CR0 */
/*
 * Remaining exception vectors (0x1300-0x2f00). All boilerplate
 * STD_EXCEPTION entries; most route to UnknownException since the
 * 6xx family does not define these vectors (0x1700 is the thermal
 * assist unit on some parts, hence TAUException).
 */
663 /* Instruction address breakpoint exception (on 603/604) */
664 STD_EXCEPTION(0x1300, Trap_13, InstructionBreakpoint)
666 /* System management exception (603?) */
667 STD_EXCEPTION(0x1400, Trap_14, UnknownException)
669 STD_EXCEPTION(0x1500, Trap_15, UnknownException)
670 STD_EXCEPTION(0x1600, Trap_16, UnknownException)
671 STD_EXCEPTION(0x1700, Trap_17, TAUException)
672 STD_EXCEPTION(0x1800, Trap_18, UnknownException)
673 STD_EXCEPTION(0x1900, Trap_19, UnknownException)
674 STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
675 STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
676 STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
677 STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
678 STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
679 STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
681 /* Run mode exception */
682 STD_EXCEPTION(0x2000, RunMode, RunModeException)
684 STD_EXCEPTION(0x2100, Trap_21, UnknownException)
685 STD_EXCEPTION(0x2200, Trap_22, UnknownException)
686 STD_EXCEPTION(0x2300, Trap_23, UnknownException)
687 STD_EXCEPTION(0x2400, Trap_24, UnknownException)
688 STD_EXCEPTION(0x2500, Trap_25, UnknownException)
689 STD_EXCEPTION(0x2600, Trap_26, UnknownException)
690 STD_EXCEPTION(0x2700, Trap_27, UnknownException)
691 STD_EXCEPTION(0x2800, Trap_28, UnknownException)
692 STD_EXCEPTION(0x2900, Trap_29, UnknownException)
693 STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
694 STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
695 STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
696 STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
697 STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
698 STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)
/*
 * transfer_to_handler: finishes building the exception frame, checks
 * for kernel stack overflow, then rfi's into the C handler (whose
 * address and return label follow the `bl transfer_to_handler` as two
 * .long words — loaded via LR, in elided lines).
 * stack_ovf: on overflow, switch to init_task's stack and call
 * StackOverflow(regs), which should not return.
 * NOTE(review): sparse excerpt — the mflr, the SRR0/SRR1 setup before
 * the rfi, and the branch structure around label 2 are elided.
 */
703 * This code finishes saving the registers to the exception frame
704 * and jumps to the appropriate handler for the exception, turning
705 * on address translation.
707 .globl transfer_to_handler
718 mfspr r23,SPRG3 /* if from user, fix up THREAD.regs */
720 addi r24,r1,STACK_FRAME_OVERHEAD
722 2: addi r2,r23,-THREAD /* set r2 to current */
725 andi. r24,r23,0x3f00 /* get vector offset */
728 stwcx. r22,r22,r21 /* to clear the reservation */
731 mtspr SPRG2,r22 /* r1 is now kernel sp */
732 addi r24,r2,TASK_STRUCT_SIZE /* check for kernel stack overflow */
736 bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */
737 lwz r24,0(r23) /* virtual address of handler */
738 lwz r23,4(r23) /* where to go when done */
743 rfi /* jump to handler, enable MMU */
746 * On kernel stack overflow, load up an initial stack pointer
747 * and call StackOverflow(regs), which should not return.
750 addi r3,r1,STACK_FRAME_OVERHEAD
751 lis r1,init_task_union@ha
752 addi r1,r1,init_task_union@l
753 addi r1,r1,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
754 lis r24,StackOverflow@ha
755 addi r24,r24,StackOverflow@l
/*
 * Lazy FPU context switching:
 *  - load_up_fpu: reached from the 0x800 vector when a user task
 *    touches the FPU. Saves the previous owner's FP state (UP only),
 *    loads this task's FP registers/FPSCR, and returns with MSR_FP set.
 *  - KernelFP: FP used from kernel mode — log a message but allow it.
 *  - giveup_fpu(task): save `task`'s FP state and clear its MSR_FP;
 *    called from switch_to (always, on SMP).
 * NOTE(review): sparse excerpt — the mffs/mtfsf and the lfd/stfd
 * register-block loops, plus SMP #ifdef boundaries, are elided.
 */
763 * Disable FP for the task which had the FPU previously,
764 * and save its floating-point registers in its thread_struct.
765 * Enables the FPU for use in the kernel on return.
766 * On SMP we know the fpu is free, since we give it up every
773 mtmsr r5 /* enable use of fpu now */
776 * For SMP, we don't do lazy FPU switching because it just gets too
777 * horrendously complex, especially when a task switches from one CPU
778 * to another. Instead we call giveup_fpu in switch_to.
781 lis r6,0 /* get __pa constant */
783 addis r3,r6,last_task_used_math@ha
784 lwz r4,last_task_used_math@l(r3)
788 addi r4,r4,THREAD /* want THREAD of last_task_used_math */
791 stfd fr0,THREAD_FPSCR-4(r4)
794 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
795 li r20,MSR_FP|MSR_FE0|MSR_FE1
796 andc r4,r4,r20 /* disable FP for previous task */
797 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
800 /* enable use of FP after return */
801 ori r23,r23,MSR_FP|MSR_FE0|MSR_FE1
802 mfspr r5,SPRG3 /* current task's THREAD (phys) */
803 lfd fr0,THREAD_FPSCR-4(r5)
809 stw r4,last_task_used_math@l(r3)
811 /* restore registers and return */
818 /* we haven't used ctr or xer */
828 * FP unavailable trap from kernel - print a message, but let
829 * the task use FP in the kernel until it returns to user mode.
834 stw r3,_MSR(r1) /* enable use of FP after return */
837 mr r4,r2 /* current */
841 86: .string "floating point used in kernel (task=%p, pc=%x)\n"
846 * Disable FP for the task given as the argument,
847 * and save the floating-point registers in its thread_struct.
848 * Enables the FPU for use in the kernel on return.
855 mtmsr r5 /* enable use of fpu now */
858 beqlr- /* if no previous owner, done */
859 addi r3,r3,THREAD /* want THREAD of task */
864 stfd fr0,THREAD_FPSCR-4(r3)
866 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
867 li r3,MSR_FP|MSR_FE0|MSR_FE1
868 andc r4,r4,r3 /* disable FP for previous task */
869 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
873 lis r4,last_task_used_math@ha
874 stw r5,last_task_used_math@l(r4)
/*
 * Relocation stub + copy_and_flush. Copies the kernel image down to
 * physical 0 (skipping source-address translation when booted via
 * BootX, detected by the 'BooX' magic 0x426f6f58 saved in r31).
 * The first 0x4000 bytes are copied, then execution jumps into the
 * copy (label 4f) to copy the remainder up to klimit.
 * copy_and_flush copies a cache line at a time and does
 * dcbst + icbi so the copied code is coherent in the I-cache.
 * NOTE(review): sparse excerpt — the lwz loop body, the cmplw/blt
 * loop control, and the blr are elided.
 */
879 * This code is jumped to from the startup code to copy
880 * the kernel image to physical address 0.
883 lis r9,0x426f /* if booted from BootX, don't */
884 addi r9,r9,0x6f58 /* translate source addr */
885 cmpw r31,r9 /* (we have to on chrp) */
887 rlwinm r4,r4,0,8,31 /* translate source address */
888 add r4,r4,r3 /* to region mapped with BATs */
889 7: addis r9,r26,klimit@ha /* fetch klimit */
891 addis r25,r25,-KERNELBASE@h
892 li r6,0 /* Destination offset */
893 li r5,0x4000 /* # bytes of memory to copy */
894 bl copy_and_flush /* copy the first 0x4000 bytes */
895 addi r0,r3,4f@l /* jump to the address of 4f */
896 mtctr r0 /* in copy and do the rest. */
897 bctr /* jump to the copy */
899 bl copy_and_flush /* copy the rest */
903 * Copy routine used to copy the kernel to start at physical address 0
904 * and flush and invalidate the caches as needed.
905 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
906 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
913 3: addi r6,r6,4 /* copy a cache line */
917 dcbst r6,r3 /* write it to memory */
919 icbi r6,r3 /* flush the icache line */
/*
 * fix_mem_constants (APUS): patches the low halfword of every
 * instruction recorded in the __vtop/__ptov tables with the runtime
 * virt->phys (r10) or phys->virt (r11) offset, then flushes each
 * patched line (dcbst/icbi) and isyncs. r4 = physical kernel base.
 * NOTE(review): sparse excerpt — the loop-control compares/branches
 * between the visible lwzu iterations, and the sync instructions
 * between dcbst and icbi, are elided.
 */
929 * On APUS the physical base address of the kernel is not known at compile
930 * time, which means the __pa/__va constants used are incorrect. In the
931 * __init section is recorded the virtual addresses of instructions using
932 * these constants, so all that has to be done is fix these before
933 * continuing the kernel boot.
935 * r4 = The physical address of the kernel base.
939 addis r10,r10,-KERNELBASE@h /* virt_to_phys constant */
940 neg r11,r10 /* phys_to_virt constant */
942 lis r12,__vtop_table_begin@h
943 ori r12,r12,__vtop_table_begin@l
944 add r12,r12,r10 /* table begin phys address */
945 lis r13,__vtop_table_end@h
946 ori r13,r13,__vtop_table_end@l
947 add r13,r13,r10 /* table end phys address */
950 1: lwzu r14,4(r12) /* virt address of instruction */
951 add r14,r14,r10 /* phys address of instruction */
952 lwz r15,0(r14) /* instruction, now insert top */
953 rlwimi r15,r10,16,16,31 /* half of vp const in low half */
954 stw r15,0(r14) /* of instruction and restore. */
955 dcbst r0,r14 /* write it to memory */
957 icbi r0,r14 /* flush the icache line */
961 lis r12,__ptov_table_begin@h
962 ori r12,r12,__ptov_table_begin@l
963 add r12,r12,r10 /* table begin phys address */
964 lis r13,__ptov_table_end@h
965 ori r13,r13,__ptov_table_end@l
966 add r13,r13,r10 /* table end phys address */
969 1: lwzu r14,4(r12) /* virt address of instruction */
970 add r14,r14,r10 /* phys address of instruction */
971 lwz r15,0(r14) /* instruction, now insert top */
972 rlwimi r15,r11,16,16,31 /* half of pv const in low half */
973 stw r15,0(r14) /* of instruction and restore. */
974 dcbst r0,r14 /* write it to memory */
976 icbi r0,r14 /* flush the icache line */
980 isync /* No speculative loading until now */
983 /* On APUS the first 0x4000 bytes of the kernel will be mapped
984 * at a different physical address than the rest. For this
985 * reason, the exception code cannot use relative branches to
986 * access the code below.
/*
 * SMP secondary-CPU bringup:
 *  - __secondary_hold: spin until the master releases us, then jump
 *    to __secondary_start with our cpu# in r24.
 *  - __secondary_start_psurge: PowerSurge dual-CPU entry (cpu 1),
 *    entered with IR=0, DR=1.
 *  - __secondary_start: pick up current_set[cpu#], build a stack,
 *    load the MMU, and rfi into start_secondary.
 * NOTE(review): sparse excerpt — the hold-loop itself, the bl
 * load_up_mmu call, and the SRR0/SRR1 setup before the final rfi
 * are elided.
 */
992 .globl __secondary_hold
994 /* tell the master we're here */
1003 /* wait until we're told to start */
1006 /* our cpu # was at addr 0 - go */
1007 lis r5,__secondary_start@h
1008 ori r5,r5,__secondary_start@l
1011 mr r24,r3 /* cpu # */
1014 .globl __secondary_start_psurge
1015 __secondary_start_psurge:
1016 li r24,1 /* cpu # */
1017 /* we come in here with IR=0 and DR=1, and DBAT 0
1018 set to map the 0xf0000000 - 0xffffffff region */
1020 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
1025 .globl __secondary_start
1030 lis r2,current_set@h
1031 ori r2,r2,current_set@l
1033 slwi r24,r24,2 /* get current_set[cpu#] */
1037 addi r1,r2,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
1042 /* load up the MMU */
1045 /* ptr to phys current thread */
1047 addi r4,r4,THREAD /* phys address of our thread_struct */
1050 mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
1052 /* enable MMU and jump to start_secondary */
1054 lis r3,start_secondary@h
1055 ori r3,r3,start_secondary@l
1060 #endif /* CONFIG_SMP */
/*
 * CPU feature setup + load_up_mmu:
 *  - Enable and (if they were off) invalidate the I/D caches via HID0;
 *    skipped on 601. On 604/604e/mach5, also enable superscalar
 *    execution aids (SIED/BHTE, plus BTCD where applicable).
 *  - load_up_mmu: program SDR1 (hash table base/size), the 16 segment
 *    registers for context 0, and the four BAT pairs prepared by
 *    MMU_init (via the LOAD_BAT macro).
 * NOTE(review): sparse excerpt — the mfspr PVR/HID0 reads, the
 * isync/sync fences around the HID0 writes, the mtsrin loop body,
 * and the blr are elided.
 */
1063 * Enable caches and 604-specific features if necessary.
1067 rlwinm r9,r9,16,16,31
1069 beq 4f /* not needed for 601 */
1071 andi. r0,r11,HID0_DCE
1072 ori r11,r11,HID0_ICE|HID0_DCE
1073 ori r8,r11,HID0_ICFI
1074 bne 3f /* don't invalidate the D-cache */
1075 ori r8,r8,HID0_DCI /* unless it wasn't enabled */
1078 mtspr HID0,r8 /* enable and invalidate caches */
1080 mtspr HID0,r11 /* enable caches */
1083 cmpi 0,r9,4 /* check for 604 */
1084 cmpi 1,r9,9 /* or 604e */
1085 cmpi 2,r9,10 /* or mach5 */
1089 ori r11,r11,HID0_SIED|HID0_BHTE /* for 604[e], enable */
1091 ori r11,r11,HID0_BTCD
1092 5: mtspr HID0,r11 /* superscalar exec & br history tbl */
1096 * Load stuff into the MMU. Intended to be called with
1100 /* Load the SDR1 register (hash table base & size) */
1106 /* clear the v bit in the ASR so we can
1107 * behave as if we have segment registers
1115 #endif /* CONFIG_PPC64 */
1116 li r0,16 /* load up segment register values */
1117 mtctr r0 /* for context 0 */
1118 lis r3,0x2000 /* Ku = 1, VSID = 0 */
1121 addi r3,r3,1 /* increment VSID */
1122 addis r4,r4,0x1000 /* address of next segment */
1124 /* Load the BAT registers with the values set up by MMU_init.
1125 MMU_init takes care of whether we're on a 601 or not. */
1132 LOAD_BAT(0,r3,r4,r5)
1133 LOAD_BAT(1,r3,r4,r5)
1134 LOAD_BAT(2,r3,r4,r5)
1135 LOAD_BAT(3,r3,r4,r5)
/*
 * start_here: main C-entry path. Sets r2 = init_task_union, clears
 * the BSS, builds the initial stack, calls MMU_init (elided), drops
 * back to real mode to load SDR1/segments/BATs and flush the TLB,
 * installs SPRG3/SPRG2 for the exception path, then rfi's into
 * start_kernel.
 * set_context(context): reprogram the 12 user segment registers for
 * a new address-space context (VSID = context << 4, Ks/Ku set).
 * NOTE(review): sparse excerpt — the BSS-clear loop body, the bl
 * MMU_init/load_up_mmu calls, and the mtsrin loop of set_context are
 * elided.
 */
1139 * This is where the main kernel code starts.
1144 /* ptr to current */
1145 lis r2,init_task_union@h
1146 ori r2,r2,init_task_union@l
1147 /* Clear out the BSS */
1150 lis r8,__bss_start@ha
1151 addi r8,r8,__bss_start@l
1154 rlwinm. r11,r11,30,2,31 /* BSS length in words */
1163 addi r1,r2,TASK_UNION_SIZE
1165 stwu r0,-STACK_FRAME_OVERHEAD(r1)
1167 * Decide what sort of machine this is and initialize the MMU.
1178 * Go back to running unmapped so we can load up new values
1179 * for SDR1 (hash table pointer) and the segment registers
1180 * and change to using our exception vectors.
1185 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1189 /* Load up the kernel context */
1191 SYNC /* Force all PTE updates to finish */
1192 tlbia /* Clear all TLB entries */
1193 sync /* wait for tlbia/tlbie to finish */
1195 tlbsync /* ... on all CPUs */
1200 /* Set up for using our exception vectors */
1201 /* ptr to phys current thread */
1203 addi r4,r4,THREAD /* init task's THREAD */
1206 mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
1207 /* Now turn on the MMU for real! */
1209 lis r3,start_kernel@h
1210 ori r3,r3,start_kernel@l
1213 rfi /* enable MMU and jump to start_kernel */
1216 * Set up the segment registers for a new context.
1218 _GLOBAL(set_context)
1219 rlwinm r3,r3,4,8,27 /* VSID = context << 4 */
1220 addis r3,r3,0x6000 /* Set Ks, Ku bits */
1221 li r0,12 /* TASK_SIZE / SEGMENT_SIZE */
1225 addi r3,r3,1 /* next VSID */
1226 addis r4,r4,0x1000 /* address of next segment */
/*
 * Page-aligned data at the start of the data segment:
 * empty_zero_page, swapper_pg_dir (the kernel page directory), and
 * the bootstrap command-line/parameter copy area.
 * NOTE(review): sparse excerpt — the .space/.align directives that
 * actually reserve the pages are elided.
 */
1232 * We put a few things here that have to be page-aligned.
1233 * This stuff goes at the beginning of the data segment,
1234 * which is page-aligned.
1239 .globl empty_zero_page
1243 .globl swapper_pg_dir
1248 * This space gets a copy of optional info passed to us by the bootstrap
1249 * Used to pass parameters into the kernel like root=/dev/sda1, etc.