2 * arch/ppc/kernel/head.S
4 * $Id: head.S,v 1.142 1999/08/23 02:53:18 paulus Exp $
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
9 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
10 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
11 * Adapted for Power Macintosh by Paul Mackerras.
12 * Low-level exception handlers and MMU support
13 * rewritten by Paul Mackerras.
14 * Copyright (C) 1996 Paul Mackerras.
15 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
16 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
18 * This file contains the low-level support and setup for the
19 * PowerPC platform, including trap and interrupt dispatch.
20 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
30 #include <asm/processor.h>
32 #include <linux/config.h>
36 #include <asm/amigappc.h>
/*
 * LOAD_BAT(n, reg, RA, RB): load BAT slot n (IBAT and DBAT, upper and
 * lower halves) from the in-memory table pointed to by `reg`, using
 * RA/RB as scratch registers.
 * 64-bit variant: table entries are loaded with ld, 8 bytes apart.
 */
40 #define LOAD_BAT(n, reg, RA, RB) \
41 ld RA,(n*32)+0(reg); \
42 ld RB,(n*32)+8(reg); \
43 mtspr IBAT##n##U,RA; \
44 mtspr IBAT##n##L,RB; \
45 ld RA,(n*32)+16(reg); \
46 ld RB,(n*32)+24(reg); \
47 mtspr DBAT##n##U,RA; \
48 mtspr DBAT##n##L,RB; \
50 #else /* CONFIG_PPC64 */
52 /* the 601 only has IBATs; cr0.eq is set on a 601 when using this macro */
/* 32-bit variant: register images are loaded with lwz, 4 bytes apart. */
53 #define LOAD_BAT(n, reg, RA, RB) \
54 lwz RA,(n*16)+0(reg); \
55 lwz RB,(n*16)+4(reg); \
56 mtspr IBAT##n##U,RA; \
57 mtspr IBAT##n##L,RB; \
59 lwz RA,(n*16)+8(reg); \
60 lwz RB,(n*16)+12(reg); \
61 mtspr DBAT##n##U,RA; \
62 mtspr DBAT##n##L,RB; \
64 #endif /* CONFIG_PPC64 */
71 * _start is defined this way because the XCOFF loader in the OpenFirmware
72 * on the powermac expects the entry point to be a procedure descriptor.
78 * These are here for legacy reasons, the kernel used to
79 * need to look like a coff function entry for the pmac
80 * but we're always started by some kind of bootloader now.
88 * Enter here with the kernel text, data and bss loaded starting at
89 * 0, running with virtual == physical mapping.
90 * r5 points to the prom entry point (the client interface handler
91 * address). Address translation is turned on, with the prom
92 * managing the hash table. Interrupts are disabled. The stack
93 * pointer (r1) points to just below the end of the half-meg region
94 * from 0x380000 - 0x400000, which is mapped in already.
96 * If we are booted from MacOS via BootX, we enter with the kernel
97 * image loaded somewhere, and the following values in registers:
98 * r3: 'BooX' (0x426f6f58)
99 * r4: virtual address of boot_infos_t
104 * r4: physical address of memory base
105 * Linux/m68k style BootInfo structure at &_end.
108 * This is jumped to on prep systems right after the kernel is relocated
109 * to its proper place in memory by the boot loader. The expected layout
111 * r3: ptr to residual data
112 * r4: initrd_start or if no initrd then 0
113 * r5: initrd_end - unused if r4 is 0
114 * r6: Start of command line string
115 * r7: End of command line string
117 * This just gets a minimal mmu environment setup so we can call
118 * start_here() to do the real work.
126 * Go into 32-bit mode to boot. OF should do this for
127 * us already but just in case...
135 * We have to do any OF calls before we map ourselves to KERNELBASE,
136 * because OF may have I/O devices mapped in, in that area
137 * (particularly on CHRP).
139 mr r31,r3 /* save parameters */
147 /* On APUS the __va/__pa constants need to be set to the correct
148 * values before continuing.
153 .globl __secondary_start
156 * Use the first pair of BAT registers to map the 1st 16MB
157 * of RAM to KERNELBASE. From this point on we can't safely
163 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
/* 601 path: the valid bit lives in the *lower* BAT register; two 8MB blocks */
166 ori r11,r11,4 /* set up BAT registers for 601 */
167 li r8,0x7f /* valid, block length = 8MB */
168 oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
169 oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
170 mtspr IBAT0U,r11 /* N.B. 601 has valid bit in */
171 mtspr IBAT0L,r8 /* lower BAT register */
175 #endif /* CONFIG_PPC64 */
/* non-601 path: a single R/W BAT0 mapping, block length BL_256M (below) */
178 ori r8,r8,2 /* R/W access */
179 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
181 /* clear out the high 32 bits in the BAT */
184 /* turn off the pagetable mappings just in case */
187 #else /* CONFIG_PPC64 */
189 * allow secondary cpus to get at all of ram in early bootup
190 * since their init_task may be up there -- Cort
192 oris r18,r8,0x10000000@h
193 oris r21,r11,(KERNELBASE+0x10000000)@h
194 mtspr DBAT1L,r18 /* N.B. 6xx (not 601) have valid */
195 mtspr DBAT1U,r21 /* bit in upper BAT register */
199 #if 0 /* for now, otherwise we overflow the 0x100 bytes we have here */
200 oris r18,r8,0x20000000@h
201 oris r21,r11,(KERNELBASE+0x20000000)@h
202 mtspr DBAT2L,r18 /* N.B. 6xx (not 601) have valid */
203 mtspr DBAT2U,r21 /* bit in upper BAT register */
207 #endif /* CONFIG_PPC64 */
/* finally install the primary DBAT0 translation set up above */
208 mtspr DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
209 mtspr DBAT0U,r11 /* bit in upper BAT register */
215 /* Unfortunately the APUS specific instructions bloat the
216 * code so it cannot fit in the 0x100 bytes available. We have
217 * to do it the crude way. */
219 /* Map 0xfff00000 so we can access VTOP/PTOV constant when
222 ori r11,r8,0x2 /* r/w */
223 ori r8,r8,0x2 /* 128KB, supervisor */
227 /* Copy exception code to exception vector base. */
230 lis r3,0xfff0 /* Copy to 0xfff00000 on APUS */
231 li r5,0x4000 /* # bytes of memory to copy */
233 bl copy_and_flush /* copy the first 0x4000 bytes */
234 #else /* CONFIG_APUS */
236 * We need to run with _start at physical address 0.
237 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
238 * the exception vectors at 0 (and therefore this copy
239 * overwrites OF's exception vectors with our own).
240 * If the MMU is already turned on, we copy stuff to KERNELBASE,
241 * otherwise we copy it to 0.
245 addis r4,r3,KERNELBASE@h /* current address of _start */
246 cmpwi 0,r4,0 /* are we already running at 0? */
247 beq 2f /* assume it's OK if so */
250 andi. r0,r0,MSR_DR /* MMU enabled? */
252 lis r3,KERNELBASE@h /* if so, are we */
253 cmpw 0,r4,r3 /* already running at KERNELBASE? */
256 #endif /* CONFIG_APUS */
258 * we now have the 1st 16M of ram mapped with the bats.
259 * prep needs the mmu to be turned on here, but pmac already has it on.
260 * this shouldn't bother the pmac since it just gets turned on again
261 * as we jump to our code at KERNELBASE. -- Cort
266 ori r0,r0,MSR_DR|MSR_IR
/* NOTE(review): the SRR0/SRR1 mtspr set-up is not visible in this fragment;
 * the rfi below is expected to land in start_here with translation enabled. */
269 ori r0,r0,start_here@l
272 rfi /* enables MMU */
275 * Exception entry code. This code runs with address translation
276 * turned off, i.e. using physical addresses.
277 * We assume sprg3 has the physical address of the current
278 * task's thread_struct.
280 #define EXCEPTION_PROLOG \
284 mfspr r21,SPRG2; /* exception stack to use from */ \
285 cmpwi 0,r21,0; /* user mode or RTAS */ \
287 tophys(r21,r1); /* use tophys(kernel sp) otherwise */ \
288 subi r21,r21,INT_FRAME_SIZE; /* alloc exc. frame */\
289 1: stw r20,_CCR(r21); /* save registers */ \
290 stw r22,GPR22(r21); \
291 stw r23,GPR23(r21); \
293 stw r20,GPR20(r21); \
295 stw r22,GPR21(r21); \
297 stw r20,_LINK(r21); \
308 tovirt(r1,r21); /* set new kernel sp */ \
311 * Note: code which follows this uses cr0.eq (set if from kernel),
312 * r21, r22 (SRR0), and r23 (SRR1).
318 #define STD_EXCEPTION(n, label, hdlr) \
322 addi r3,r1,STACK_FRAME_OVERHEAD; \
324 bl transfer_to_handler; \
326 .long ret_from_except
/*
 * STD_EXCEPTION(n, label, hdlr): boilerplate body for the exception
 * vector at offset n.  It points r3 at the saved register area and
 * branches to transfer_to_handler; the .long word(s) after the bl name
 * the C handler (hdlr - not visible in this fragment) and the common
 * return path, ret_from_except.
 */
/*
 * Exception vectors, starting with system reset at 0x100.
 * State has already been saved by the exception prolog; these bodies
 * decide which C handler gets the trap.
 */
329 #ifdef CONFIG_SMP /* MVME/MTX start the secondary here */
330 STD_EXCEPTION(0x100, Reset, __secondary_start_psurge)
332 STD_EXCEPTION(0x100, Reset, UnknownException)
336 STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)
338 /* Data access exception. */
/* First see whether this is a simple hash-table miss we can fix inline. */
343 andis. r0,r20,0xa470 /* weird error? */
344 bne 1f /* if not, try to put a PTE */
345 mfspr r3,DAR /* into the hash table */
346 rlwinm r4,r23,32-13,30,30 /* MSR_PR -> _PAGE_USER */
347 rlwimi r4,r20,32-23,29,29 /* DSISR_STORE -> _PAGE_RW */
348 mfspr r5,SPRG3 /* phys addr of THREAD */
350 1: stw r20,_DSISR(r21)
354 addi r3,r1,STACK_FRAME_OVERHEAD
356 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
357 bl transfer_to_handler
359 .long ret_from_except
361 /* Instruction access exception. */
365 andis. r0,r23,0x4000 /* no pte found? */
366 beq 1f /* if so, try to put a PTE */
367 mr r3,r22 /* into the hash table */
368 rlwinm r4,r23,32-13,30,30 /* MSR_PR -> _PAGE_USER */
369 mr r20,r23 /* SRR1 has reason bits */
370 mfspr r5,SPRG3 /* phys addr of THREAD */
372 1: addi r3,r1,STACK_FRAME_OVERHEAD
376 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
377 bl transfer_to_handler
379 .long ret_from_except
381 /* External interrupt */
386 /* This is horrible, but there's no way around it. Enable the
387 data cache so the IRQ hardware register can be accessed
388 without cache intervention. Then disable interrupts and get
389 the current emulated m68k IPL value. */
397 lis r3,APUS_IPL_EMU@h
399 li r20,(IPLEMU_SETRESET|IPLEMU_DISABLEINT)
400 stb r20,APUS_IPL_EMU@l(r3)
403 lbz r3,APUS_IPL_EMU@l(r3)
411 stw r3,(_CCR+4)(r21);
413 addi r3,r1,STACK_FRAME_OVERHEAD
416 bl transfer_to_handler
418 .long ret_from_except
421 /* Alignment exception */
429 addi r3,r1,STACK_FRAME_OVERHEAD
431 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
432 bl transfer_to_handler
433 .long AlignmentException
434 .long ret_from_except
436 /* Program check exception */
440 addi r3,r1,STACK_FRAME_OVERHEAD
442 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
443 bl transfer_to_handler
444 .long ProgramCheckException
445 .long ret_from_except
447 /* Floating-point unavailable */
451 bne load_up_fpu /* if from user, just load it up */
453 bl transfer_to_handler /* if from kernel, take a trap */
455 .long ret_from_except
457 STD_EXCEPTION(0x900, Decrementer, timer_interrupt)
458 STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
459 STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
/* presumably the 0xc00 system-call entry: r3 is preserved as ORIG_GPR3 */
465 stw r3,ORIG_GPR3(r21)
467 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
468 bl transfer_to_handler
470 .long ret_from_except
472 /* Single step - not used on 601 */
473 STD_EXCEPTION(0xd00, SingleStep, SingleStepException)
475 STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
476 STD_EXCEPTION(0xf00, Trap_0f, UnknownException)
479 * Handle TLB miss for instruction on 603/603e.
480 * Note: we get an alternate set of r0 - r3 to use automatically.
486 * r1: linux style pte ( later becomes ppc hardware pte )
487 * r2: ptr to linux-style pte
491 /* Get PTE (linux-style) and check access */
496 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
497 lwz r2,0(r2) /* get pmd entry */
498 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
499 beq- InstructionAddressInvalid /* return if no mapping */
501 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
502 lwz r1,0(r2) /* get linux-style pte */
503 /* setup access flags in r3 */
505 rlwinm r3,r3,32-13,30,30 /* MSR_PR -> _PAGE_USER */
506 ori r3,r3,1 /* set _PAGE_PRESENT bit in access */
507 andc. r3,r3,r1 /* check access & ~permission */
508 bne- InstructionAddressInvalid /* return if access not permitted */
509 ori r1,r1,0x100 /* set _PAGE_ACCESSED in pte */
510 stw r1,0(r2) /* update PTE (accessed bit) */
511 /* Convert linux-style PTE to low word of PPC-style PTE */
512 /* this computation could be done better -- Cort */
513 rlwinm r3,r1,32-9,31,31 /* _PAGE_HWWRITE -> PP lsb */
514 rlwimi r1,r1,32-1,31,31 /* _PAGE_USER -> PP (both bits now) */
515 ori r3,r3,0xe04 /* clear out reserved bits */
516 andc r1,r1,r3 /* PP=2 or 0, when _PAGE_HWWRITE */
520 mfspr r3,SRR1 /* Need to restore CR0 */
/* No (or not-permitted) translation: fake up an instruction access fault. */
523 InstructionAddressInvalid:
525 rlwinm r1,r3,9,6,6 /* Get load/store bit */
528 mtspr DSISR,r1 /* (shouldn't be needed) */
529 mtctr r0 /* Restore CTR */
530 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
533 mfspr r1,IMISS /* Get failing address */
534 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
535 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
537 mtspr DAR,r1 /* Set fault address */
538 mfmsr r0 /* Restore "normal" registers */
539 xoris r0,r0,MSR_TGPR>>16
540 mtcrf 0x80,r3 /* Restore CR0 */
541 sync /* Some chip revs have problems here... */
546 * Handle TLB miss for DATA Load operation on 603/603e
552 * r1: linux style pte ( later becomes ppc hardware pte )
553 * r2: ptr to linux-style pte
557 /* Get PTE (linux-style) and check access */
562 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
563 lwz r2,0(r2) /* get pmd entry */
564 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
565 beq- DataAddressInvalid /* return if no mapping */
567 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
568 lwz r1,0(r2) /* get linux-style pte */
569 /* setup access flags in r3 */
571 rlwinm r3,r3,32-13,30,30 /* MSR_PR -> _PAGE_USER */
572 ori r3,r3,1 /* set _PAGE_PRESENT bit in access */
573 /* save r2 and use it as scratch for the andc. */
574 andc. r3,r3,r1 /* check access & ~permission */
575 bne- DataAddressInvalid /* return if access not permitted */
576 ori r1,r1,0x100 /* set _PAGE_ACCESSED in pte */
577 stw r1,0(r2) /* update PTE (accessed bit) */
578 /* Convert linux-style PTE to low word of PPC-style PTE */
579 /* this computation could be done better -- Cort */
580 rlwinm r3,r1,32-9,31,31 /* _PAGE_HWWRITE -> PP lsb */
581 rlwimi r1,r1,32-1,31,31 /* _PAGE_USER -> PP (both bits now) */
582 ori r3,r3,0xe04 /* clear out reserved bits */
583 andc r1,r1,r3 /* PP=2 or 0, when _PAGE_HWWRITE */
587 mfspr r3,SRR1 /* Need to restore CR0 */
/* Below: synthesize a data access fault for the invalid/forbidden case
 * (the DataAddressInvalid label itself is not visible in this fragment). */
592 rlwinm r1,r3,9,6,6 /* Get load/store bit */
595 mtctr r0 /* Restore CTR */
596 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
598 mfspr r1,DMISS /* Get failing address */
599 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
600 beq 20f /* Jump if big endian */
602 20: mtspr DAR,r1 /* Set fault address */
603 mfmsr r0 /* Restore "normal" registers */
604 xoris r0,r0,MSR_TGPR>>16
605 mtcrf 0x80,r3 /* Restore CR0 */
606 sync /* Some chip revs have problems here... */
611 * Handle TLB miss for DATA Store on 603/603e
617 * r1: linux style pte ( later becomes ppc hardware pte )
618 * r2: ptr to linux-style pte
622 /* Get PTE (linux-style) and check access */
627 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
628 lwz r2,0(r2) /* get pmd entry */
629 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
630 beq- DataAddressInvalid /* return if no mapping */
632 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
633 lwz r1,0(r2) /* get linux-style pte */
634 /* setup access flags in r3 */
636 rlwinm r3,r3,32-13,30,30 /* MSR_PR -> _PAGE_USER */
/* unlike the load-miss path, a store additionally requires _PAGE_RW */
637 ori r3,r3,0x5 /* _PAGE_PRESENT|_PAGE_RW */
638 /* save r2 and use it as scratch for the andc. */
639 andc. r3,r3,r1 /* check access & ~permission */
640 bne- DataAddressInvalid /* return if access not permitted */
641 ori r1,r1,0x384 /* set _PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_RW|_PAGE_HWWRITE in pte */
642 stw r1,0(r2) /* update PTE (accessed/dirty bits) */
643 /* Convert linux-style PTE to low word of PPC-style PTE */
644 /* this computation could be done better -- Cort */
645 rlwinm r3,r1,32-9,31,31 /* _PAGE_HWWRITE -> PP lsb */
646 rlwimi r1,r1,32-1,31,31 /* _PAGE_USER -> PP (both bits now) */
647 ori r3,r3,0xe04 /* clear out reserved bits */
648 andc r1,r1,r3 /* PP=2 or 0, when _PAGE_HWWRITE */
652 mfspr r3,SRR1 /* Need to restore CR0 */
/* Higher exception vectors (0x1300 - 0x2f00); most are unused traps. */
656 /* Instruction address breakpoint exception (on 603/604) */
657 STD_EXCEPTION(0x1300, Trap_13, InstructionBreakpoint)
659 /* System management exception (603?) */
660 STD_EXCEPTION(0x1400, Trap_14, UnknownException)
662 STD_EXCEPTION(0x1500, Trap_15, UnknownException)
663 STD_EXCEPTION(0x1600, Trap_16, UnknownException)
664 STD_EXCEPTION(0x1700, Trap_17, TAUException)
665 STD_EXCEPTION(0x1800, Trap_18, UnknownException)
666 STD_EXCEPTION(0x1900, Trap_19, UnknownException)
667 STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
668 STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
669 STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
670 STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
671 STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
672 STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
674 /* Run mode exception */
675 STD_EXCEPTION(0x2000, RunMode, RunModeException)
677 STD_EXCEPTION(0x2100, Trap_21, UnknownException)
678 STD_EXCEPTION(0x2200, Trap_22, UnknownException)
679 STD_EXCEPTION(0x2300, Trap_23, UnknownException)
680 STD_EXCEPTION(0x2400, Trap_24, UnknownException)
681 STD_EXCEPTION(0x2500, Trap_25, UnknownException)
682 STD_EXCEPTION(0x2600, Trap_26, UnknownException)
683 STD_EXCEPTION(0x2700, Trap_27, UnknownException)
684 STD_EXCEPTION(0x2800, Trap_28, UnknownException)
685 STD_EXCEPTION(0x2900, Trap_29, UnknownException)
686 STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
687 STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
688 STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
689 STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
690 STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
691 STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)
696 * This code finishes saving the registers to the exception frame
697 * and jumps to the appropriate handler for the exception, turning
698 * on address translation.
700 .globl transfer_to_handler
/* On entry (per the prolog's contract noted earlier in this file):
 * r21 = exception frame, r22 = saved SRR0, r23 = saved SRR1,
 * cr0.eq set when the exception came from kernel mode. */
711 mfspr r23,SPRG3 /* if from user, fix up THREAD.regs */
713 addi r24,r1,STACK_FRAME_OVERHEAD
715 2: addi r2,r23,-THREAD /* set r2 to current */
718 andi. r24,r23,0x3f00 /* get vector offset */
721 stwcx. r22,r22,r21 /* to clear the reservation */
724 mtspr SPRG2,r22 /* r1 is now kernel sp */
725 addi r24,r2,TASK_STRUCT_SIZE /* check for kernel stack overflow */
729 bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */
730 lwz r24,0(r23) /* virtual address of handler */
731 lwz r23,4(r23) /* where to go when done */
736 rfi /* jump to handler, enable MMU */
739 * On kernel stack overflow, load up an initial stack pointer
740 * and call StackOverflow(regs), which should not return.
743 addi r3,r1,STACK_FRAME_OVERHEAD
744 lis r1,init_task_union@ha
745 addi r1,r1,init_task_union@l
746 addi r1,r1,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
/* r24 = address of StackOverflow() */
747 lis r24,StackOverflow@ha
748 addi r24,r24,StackOverflow@l
756 * Disable FP for the task which had the FPU previously,
757 * and save its floating-point registers in its thread_struct.
758 * Enables the FPU for use in the kernel on return.
759 * On SMP we know the fpu is free, since we give it up every
766 mtmsr r5 /* enable use of fpu now */
769 * For SMP, we don't do lazy FPU switching because it just gets too
770 * horrendously complex, especially when a task switches from one CPU
771 * to another. Instead we call giveup_fpu in switch_to.
774 lis r6,0 /* get __pa constant */
776 addis r3,r6,last_task_used_math@ha
777 lwz r4,last_task_used_math@l(r3)
/* r4 = previous FPU owner; save its FP state before taking the FPU over */
781 addi r4,r4,THREAD /* want THREAD of last_task_used_math */
784 stfd fr0,THREAD_FPSCR-4(r4)
787 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
788 li r20,MSR_FP|MSR_FE0|MSR_FE1
789 andc r4,r4,r20 /* disable FP for previous task */
790 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
793 /* enable use of FP after return */
794 ori r23,r23,MSR_FP|MSR_FE0|MSR_FE1
795 mfspr r5,SPRG3 /* current task's THREAD (phys) */
/* reload the new owner's saved FPSCR image via fr0 */
796 lfd fr0,THREAD_FPSCR-4(r5)
802 stw r4,last_task_used_math@l(r3)
804 /* restore registers and return */
811 /* we haven't used ctr or xer */
821 * FP unavailable trap from kernel - print a message, but let
822 * the task use FP in the kernel until it returns to user mode.
827 stw r3,_MSR(r1) /* enable use of FP after return */
830 mr r4,r2 /* current */
834 86: .string "floating point used in kernel (task=%p, pc=%x)\n"
839 * Disable FP for the task given as the argument,
840 * and save the floating-point registers in its thread_struct.
841 * Enables the FPU for use in the kernel on return.
848 mtmsr r5 /* enable use of fpu now */
/* giveup_fpu: r3 = task losing the FPU (see header comment above) */
851 beqlr- /* if no previous owner, done */
852 addi r3,r3,THREAD /* want THREAD of task */
857 stfd fr0,THREAD_FPSCR-4(r3)
859 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
860 li r3,MSR_FP|MSR_FE0|MSR_FE1
861 andc r4,r4,r3 /* disable FP for previous task */
862 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
866 lis r4,last_task_used_math@ha
867 stw r5,last_task_used_math@l(r4)
872 * This code is jumped to from the startup code to copy
873 * the kernel image to physical address 0.
876 lis r9,0x426f /* if booted from BootX, don't */
877 addi r9,r9,0x6f58 /* translate source addr */
878 cmpw r31,r9 /* (we have to on chrp) */
/* r31 holds the boot magic saved at entry; 0x426f6f58 == 'BooX' */
880 rlwinm r4,r4,0,8,31 /* translate source address */
881 add r4,r4,r3 /* to region mapped with BATs */
882 7: addis r9,r26,klimit@ha /* fetch klimit */
884 addis r25,r25,-KERNELBASE@h
885 li r6,0 /* Destination offset */
886 li r5,0x4000 /* # bytes of memory to copy */
887 bl copy_and_flush /* copy the first 0x4000 bytes */
/* continue execution from the copied image: branch into the copy at 4f */
888 addi r0,r3,4f@l /* jump to the address of 4f */
889 mtctr r0 /* in copy and do the rest. */
890 bctr /* jump to the copy */
892 bl copy_and_flush /* copy the rest */
896 * Copy routine used to copy the kernel to start at physical address 0
897 * and flush and invalidate the caches as needed.
898 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
899 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
906 3: addi r6,r6,4 /* copy a cache line */
910 dcbst r6,r3 /* write it to memory */
912 icbi r6,r3 /* flush the icache line */
922 * On APUS the physical base address of the kernel is not known at compile
923 * time, which means the __pa/__va constants used are incorrect. In the
924 * __init section is recorded the virtual addresses of instructions using
925 * these constants, so all that has to be done is fix these before
926 * continuing the kernel boot.
928 * r4 = The physical address of the kernel base.
932 addis r10,r10,-KERNELBASE@h /* virt_to_phys constant */
933 neg r11,r10 /* phys_to_virt constant */
/* first pass: patch every recorded virt->phys (vtop) instruction site */
935 lis r12,__vtop_table_begin@h
936 ori r12,r12,__vtop_table_begin@l
937 add r12,r12,r10 /* table begin phys address */
938 lis r13,__vtop_table_end@h
939 ori r13,r13,__vtop_table_end@l
940 add r13,r13,r10 /* table end phys address */
943 1: lwzu r14,4(r12) /* virt address of instruction */
944 add r14,r14,r10 /* phys address of instruction */
945 lwz r15,0(r14) /* instruction, now insert top */
946 rlwimi r15,r10,16,16,31 /* half of vp const in low half */
947 stw r15,0(r14) /* of instruction and restore. */
948 dcbst r0,r14 /* write it to memory */
950 icbi r0,r14 /* flush the icache line */
/* second pass: patch every recorded phys->virt (ptov) instruction site */
954 lis r12,__ptov_table_begin@h
955 ori r12,r12,__ptov_table_begin@l
956 add r12,r12,r10 /* table begin phys address */
957 lis r13,__ptov_table_end@h
958 ori r13,r13,__ptov_table_end@l
959 add r13,r13,r10 /* table end phys address */
962 1: lwzu r14,4(r12) /* virt address of instruction */
963 add r14,r14,r10 /* phys address of instruction */
964 lwz r15,0(r14) /* instruction, now insert top */
965 rlwimi r15,r11,16,16,31 /* half of pv const in low half*/
966 stw r15,0(r14) /* of instruction and restore. */
967 dcbst r0,r14 /* write it to memory */
969 icbi r0,r14 /* flush the icache line */
973 isync /* No speculative loading until now */
976 /* On APUS the first 0x4000 bytes of the kernel will be mapped
977 * at a different physical address than the rest. For this
978 * reason, the exception code cannot use relative branches to
979 * access the code below.
985 .globl __secondary_start_psurge
986 __secondary_start_psurge:
990 .globl __secondary_hold
992 /* tell the master we're here */
1001 /* wait until we're told to start */
1004 /* our cpu # was at addr 0 - go */
1005 lis r5,__secondary_start@h
1006 ori r5,r5,__secondary_start@l
1009 mr r24,r3 /* cpu # */
1011 #endif /* CONFIG_SMP */
1014 * This is where the main kernel code starts.
1018 * Enable caches and 604-specific features if necessary.
1021 rlwinm r9,r9,16,16,31
/* NOTE(review): r9 presumably holds PVR>>16 here (the mfspr that loads it
 * is not visible in this fragment) -- 1 identifies a 601, per comments above */
1023 beq 4f /* not needed for 601 */
1025 andi. r0,r11,HID0_DCE
1026 ori r11,r11,HID0_ICE|HID0_DCE
1027 ori r8,r11,HID0_ICFI
1028 bne 3f /* don't invalidate the D-cache */
1029 ori r8,r8,HID0_DCI /* unless it wasn't enabled */
1032 mtspr HID0,r8 /* enable and invalidate caches */
1034 mtspr HID0,r11 /* enable caches */
1037 cmpi 0,r9,4 /* check for 604 */
1038 cmpi 1,r9,9 /* or 604e */
1039 cmpi 2,r9,10 /* or mach5 */
1043 ori r11,r11,HID0_SIED|HID0_BHTE /* for 604[e], enable */
1045 ori r11,r11,HID0_BTCD
1046 5: mtspr HID0,r11 /* superscalar exec & br history tbl */
1049 /* if we're the second cpu stack and r2 are different
1050 * and we want to not clear the bss -- Cort */
1051 lis r5,first_cpu_booted@h
1052 ori r5,r5,first_cpu_booted@l
1058 lis r2,current_set@h
1059 ori r2,r2,current_set@l
1060 slwi r24,r24,2 /* cpu # to current_set[cpu#] */
1065 #endif /* __SMP__ */
1066 /* ptr to current */
1067 lis r2,init_task_union@h
1068 ori r2,r2,init_task_union@l
1069 /* Clear out the BSS */
1072 lis r8,__bss_start@ha
1073 addi r8,r8,__bss_start@l
1076 rlwinm. r11,r11,30,2,31
1086 #endif /* __SMP__ */
/* set up the initial kernel stack just below the top of the task union */
1088 addi r1,r2,TASK_UNION_SIZE
1090 stwu r0,-STACK_FRAME_OVERHEAD(r1)
1092 * Decide what sort of machine this is and initialize the MMU.
1103 * Go back to running unmapped so we can load up new values
1104 * for SDR1 (hash table pointer) and the segment registers
1105 * and change to using our exception vectors.
1116 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1120 /* Load up the kernel context */
1122 SYNC /* Force all PTE updates to finish */
1123 tlbia /* Clear all TLB entries */
1124 sync /* wait for tlbia/tlbie to finish */
1126 tlbsync /* ... on all CPUs */
1131 /* clear the v bit in the ASR so we can
1132 * behave as if we have segment registers
1137 #endif /* CONFIG_PPC64 */
/* initialize all 16 segment registers for kernel context 0 */
1138 li r0,16 /* load up segment register values */
1139 mtctr r0 /* for context 0 */
1140 lis r3,0x2000 /* Ku = 1, VSID = 0 */
1143 addi r3,r3,1 /* increment VSID */
1144 addis r4,r4,0x1000 /* address of next segment */
1146 /* Load the BAT registers with the values set up by MMU_init.
1147 MMU_init takes care of whether we're on a 601 or not. */
/* NOTE(review): the load of r3 with the BAT table address is not visible here */
1155 LOAD_BAT(0,r3,r4,r5)
1156 LOAD_BAT(1,r3,r4,r5)
1157 LOAD_BAT(2,r3,r4,r5)
1158 LOAD_BAT(3,r3,r4,r5)
1159 #else /* CONFIG_PPC64 */
1160 LOAD_BAT(0,r3,r4,r5)
1161 LOAD_BAT(1,r3,r4,r5)
1162 LOAD_BAT(2,r3,r4,r5)
1163 LOAD_BAT(3,r3,r4,r5)
1164 #endif /* CONFIG_PPC64 */
1166 /* Set up for using our exception vectors */
1167 /* ptr to phys current thread */
1169 addi r4,r4,THREAD /* init task's THREAD */
1172 mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
1173 /* Now turn on the MMU for real! */
1175 lis r3,start_kernel@h
1176 ori r3,r3,start_kernel@l
1178 /* the second time through here we go to
1179 * start_secondary(). -- Cort
1181 lis r5,first_cpu_booted@h
1182 ori r5,r5,first_cpu_booted@l
1187 lis r3,start_secondary@h
1188 ori r3,r3,start_secondary@l
1190 #endif /* __SMP__ */
1193 rfi /* enable MMU and jump to start_kernel */
1196 * Set up the segment registers for a new context.
1198 _GLOBAL(set_context)
/* r3 = context number; derive the per-context VSIDs and reload the
 * 12 user segment registers (TASK_SIZE / SEGMENT_SIZE, per below) */
1199 rlwinm r3,r3,4,8,27 /* VSID = context << 4 */
1200 addis r3,r3,0x6000 /* Set Ks, Ku bits */
1201 li r0,12 /* TASK_SIZE / SEGMENT_SIZE */
1205 addi r3,r3,1 /* next VSID */
1206 addis r4,r4,0x1000 /* address of next segment */
1212 * We put a few things here that have to be page-aligned.
1213 * This stuff goes at the beginning of the data segment,
1214 * which is page-aligned.
1219 .globl empty_zero_page
1223 .globl swapper_pg_dir
1228 * This space gets a copy of optional info passed to us by the bootstrap
1229 * Used to pass parameters into the kernel like root=/dev/sda1, etc.