2 * arch/ppc/kernel/head.S
4 * $Id: head.S,v 1.154 1999/10/12 00:33:31 cort Exp $
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
9 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
10 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
11 * Adapted for Power Macintosh by Paul Mackerras.
12 * Low-level exception handlers and MMU support
13 * rewritten by Paul Mackerras.
14 * Copyright (C) 1996 Paul Mackerras.
15 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
16 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
18 * This file contains the low-level support and setup for the
19 * PowerPC platform, including trap and interrupt dispatch.
20 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
30 #include <asm/processor.h>
32 #include <linux/config.h>
37 #include <asm/amigappc.h>
40 #ifndef CONFIG_PPC64BRIDGE
42 LG_CACHELINE_BYTES = 5
47 LG_CACHELINE_BYTES = 7
50 #endif /* CONFIG_PPC64BRIDGE */
52 #ifdef CONFIG_PPC64BRIDGE
/*
 * PPC64BRIDGE variant of LOAD_BAT: load the IBATn and DBATn register
 * pair (upper then lower) for BAT number `n` from the BAT table at
 * `reg`, using RA/RB as scratch.  Each table entry is 32 bytes of
 * 8-byte doubleword fields, hence the n*32 offsets and `ld` loads.
 */
53 #define LOAD_BAT(n, reg, RA, RB) \
54 ld RA,(n*32)+0(reg); \
55 ld RB,(n*32)+8(reg); \
56 mtspr IBAT##n##U,RA; \
57 mtspr IBAT##n##L,RB; \
58 ld RA,(n*32)+16(reg); \
59 ld RB,(n*32)+24(reg); \
60 mtspr DBAT##n##U,RA; \
61 mtspr DBAT##n##L,RB; \
63 #else /* CONFIG_PPC64BRIDGE */
65 /* the 601 only has IBATs; cr0.eq is set on the 601 when using this macro */
/*
 * 32-bit variant of LOAD_BAT: table entries are 16 bytes of word
 * fields, hence the n*16 offsets and `lwz` loads.  The upper BAT
 * registers are stored to first (from RA) before the real contents
 * are loaded, so no stale/partial translation is ever valid.
 * NOTE(review): this extract is missing lines of the macro -- RA is
 * presumably zeroed, and a branch skips the DBAT stores on the 601
 * (which has no DBATs, per cr0.eq) -- confirm against the full file.
 */
66 #define LOAD_BAT(n, reg, RA, RB) \
67 /* see the comment for clear_bats() -- Cort */ \
69 mtspr IBAT##n##U,RA; \
70 mtspr DBAT##n##U,RA; \
71 lwz RA,(n*16)+0(reg); \
72 lwz RB,(n*16)+4(reg); \
73 mtspr IBAT##n##U,RA; \
74 mtspr IBAT##n##L,RB; \
76 lwz RA,(n*16)+8(reg); \
77 lwz RB,(n*16)+12(reg); \
78 mtspr DBAT##n##U,RA; \
79 mtspr DBAT##n##L,RB; \
81 #endif /* CONFIG_PPC64BRIDGE */
88 * _start is defined this way because the XCOFF loader in the OpenFirmware
89 * on the powermac expects the entry point to be a procedure descriptor.
95 * These are here for legacy reasons, the kernel used to
96 * need to look like a coff function entry for the pmac
97 * but we're always started by some kind of bootloader now.
105 * Enter here with the kernel text, data and bss loaded starting at
106 * 0, running with virtual == physical mapping.
107 * r5 points to the prom entry point (the client interface handler
108 * address). Address translation is turned on, with the prom
109 * managing the hash table. Interrupts are disabled. The stack
110 * pointer (r1) points to just below the end of the half-meg region
111 * from 0x380000 - 0x400000, which is mapped in already.
113 * If we are booted from MacOS via BootX, we enter with the kernel
114 * image loaded somewhere, and the following values in registers:
115 * r3: 'BooX' (0x426f6f58)
116 * r4: virtual address of boot_infos_t
121 * r4: physical address of memory base
122 * Linux/m68k style BootInfo structure at &_end.
125 * This is jumped to on prep systems right after the kernel is relocated
126 * to its proper place in memory by the boot loader. The expected layout
128 * r3: ptr to residual data
129 * r4: initrd_start or if no initrd then 0
130 * r5: initrd_end - unused if r4 is 0
131 * r6: Start of command line string
132 * r7: End of command line string
134 * This just gets a minimal mmu environment setup so we can call
135 * start_here() to do the real work.
142 * We have to do any OF calls before we map ourselves to KERNELBASE,
143 * because OF may have I/O devices mapped into that area
144 * (particularly on CHRP).
146 mr r31,r3 /* save parameters */
155 /* On APUS the __va/__pa constants need to be set to the correct
156 * values before continuing.
160 #endif /* CONFIG_APUS */
162 #ifndef CONFIG_GEMINI
163 /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
164 * the physical address we are running at, returned by prom_init()
172 #ifndef CONFIG_POWER4
173 /* POWER4 doesn't have BATs */
175 #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
178 #else /* CONFIG_POWER4 */
180 * Load up the SDR1 and segment register values now
181 * since we don't have the BATs.
184 addis r4,r3,_SDR1@ha /* get the value from _SDR1 */
185 lwz r4,_SDR1@l(r4) /* assume hash table below 4GB */
188 lis r5,0x2000 /* set pseudo-segment reg 12 */
191 #endif /* CONFIG_POWER4 */
195 * We need to run with _start at physical address 0.
196 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
197 * the exception vectors at 0 (and therefore this copy
198 * overwrites OF's exception vectors with our own).
199 * If the MMU is already turned on, we copy stuff to KERNELBASE,
200 * otherwise we copy it to 0.
204 addis r4,r3,KERNELBASE@h /* current address of _start */
205 cmpwi 0,r4,0 /* are we already running at 0? */
207 #endif /* CONFIG_APUS */
209 * we now have the 1st 16M of ram mapped with the bats.
210 * prep needs the mmu to be turned on here, but pmac already has it on.
211 * this shouldn't bother the pmac since it just gets turned on again
212 * as we jump to our code at KERNELBASE. -- Cort
213 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
214 * off, and in other cases, we now turn it off before changing BATs above.
218 ori r0,r0,MSR_DR|MSR_IR
221 ori r0,r0,start_here@l
224 RFI /* enables MMU */
227 .globl __secondary_hold
229 /* tell the master we're here */
232 /* wait until we're told to start */
235 /* our cpu # was at addr 0 - go */
236 mr r24,r3 /* cpu # */
241 * Exception entry code. This code runs with address translation
242 * turned off, i.e. using physical addresses.
243 * We assume sprg3 has the physical address of the current
244 * task's thread_struct.
/*
 * Common exception prologue.  Runs with translation off (physical
 * addresses).  Picks an exception stack: SPRG2 if nonzero (user mode
 * or RTAS), otherwise tophys() of the current kernel sp, and carves
 * an INT_FRAME_SIZE frame on it (r21 = frame base).  Volatile state
 * (CR, GPRs, LR, ...) is saved into the frame, r1 is switched to the
 * virtual address of the new frame, and r3-r6 are saved.
 * NOTE(review): many lines of this macro are missing from this
 * extract (e.g. the mfcr/mflr feeding the r20 stores, SRR0/SRR1
 * capture into r22/r23) -- the sequence below is incomplete.
 */
246 #define EXCEPTION_PROLOG \
250 mfspr r21,SPRG2; /* exception stack to use from */ \
251 cmpwi 0,r21,0; /* user mode or RTAS */ \
253 tophys(r21,r1); /* use tophys(kernel sp) otherwise */ \
254 subi r21,r21,INT_FRAME_SIZE; /* alloc exc. frame */\
256 stw r20,_CCR(r21); /* save registers */ \
257 stw r22,GPR22(r21); \
258 stw r23,GPR23(r21); \
260 stw r20,GPR20(r21); \
262 stw r22,GPR21(r21); \
264 stw r20,_LINK(r21); \
275 tovirt(r1,r21); /* set new kernel sp */ \
276 SAVE_4GPRS(3, r21); \
279 * Note: code which follows this uses cr0.eq (set if from kernel),
280 * r21, r22 (SRR0), and r23 (SRR1).
/*
 * Boilerplate for a standard exception vector at offset `n`: after
 * the (not shown) prologue, point r3 at the saved register frame and
 * branch to transfer_to_handler, which jumps to `hdlr` and returns
 * via ret_from_except.  (Lines of this macro are missing from this
 * extract.)
 */
286 #define STD_EXCEPTION(n, label, hdlr) \
290 addi r3,r1,STACK_FRAME_OVERHEAD; \
292 bl transfer_to_handler; \
294 .long ret_from_except
/*
 * As STD_EXCEPTION, but takes a Mac-on-Linux `hook` number so MOL can
 * intercept the vector.  NOTE(review): the missing lines presumably
 * invoke the MOL hook -- confirm against the full file.
 */
296 #define STD_MOL_EXCEPTION(n, label, hdlr, hook) \
301 addi r3,r1,STACK_FRAME_OVERHEAD; \
303 bl transfer_to_handler; \
305 .long ret_from_except
308 #ifdef CONFIG_SMP /* MVME/MTX and gemini start the secondary here */
311 b __secondary_start_gemini
312 #else /* CONFIG_GEMINI */
313 STD_EXCEPTION(0x100, Reset, __secondary_start_psurge)
314 #endif /* CONFIG_GEMINI */
316 STD_EXCEPTION(0x100, Reset, UnknownException)
320 STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)
322 /* Data access exception. */
324 #ifdef CONFIG_PPC64BRIDGE
330 #endif /* CONFIG_PPC64BRIDGE */
333 andis. r0,r20,0xa470 /* weird error? */
334 bne 1f /* if not, try to put a PTE */
335 mfspr r3,DAR /* into the hash table */
336 rlwinm r4,r23,32-13,30,30 /* MSR_PR -> _PAGE_USER */
337 rlwimi r4,r20,32-23,29,29 /* DSISR_STORE -> _PAGE_RW */
339 1: stw r20,_DSISR(r21)
343 addi r3,r1,STACK_FRAME_OVERHEAD
345 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
346 bl transfer_to_handler
348 .long ret_from_except
350 #ifdef CONFIG_PPC64BRIDGE
351 /* SLB fault on data access. */
357 addi r3,r1,STACK_FRAME_OVERHEAD
359 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
360 bl transfer_to_handler
361 .long UnknownException
362 .long ret_from_except
363 #endif /* CONFIG_PPC64BRIDGE */
365 /* Instruction access exception. */
367 #ifdef CONFIG_PPC64BRIDGE
369 InstructionAccessCont:
373 #endif /* CONFIG_PPC64BRIDGE */
375 andis. r0,r23,0x4000 /* no pte found? */
376 beq 1f /* if so, try to put a PTE */
377 mr r3,r22 /* into the hash table */
378 rlwinm r4,r23,32-13,30,30 /* MSR_PR -> _PAGE_USER */
379 mr r20,r23 /* SRR1 has reason bits */
381 1: addi r3,r1,STACK_FRAME_OVERHEAD
385 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
386 bl transfer_to_handler
388 .long ret_from_except
390 #ifdef CONFIG_PPC64BRIDGE
391 /* SLB fault on instruction access. */
394 InstructionSegmentCont:
395 addi r3,r1,STACK_FRAME_OVERHEAD
397 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
398 bl transfer_to_handler
399 .long UnknownException
400 .long ret_from_except
401 #endif /* CONFIG_PPC64BRIDGE */
403 /* External interrupt */
407 addi r3,r1,STACK_FRAME_OVERHEAD
411 bl transfer_to_handler
412 .globl do_IRQ_intercept
415 .long ret_from_intercept
417 bl apus_interrupt_entry
418 #endif /* CONFIG_APUS */
420 /* Alignment exception */
428 addi r3,r1,STACK_FRAME_OVERHEAD
430 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
431 bl transfer_to_handler
432 .long AlignmentException
433 .long ret_from_except
435 /* Program check exception */
440 addi r3,r1,STACK_FRAME_OVERHEAD
442 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
443 bl transfer_to_handler
444 .long ProgramCheckException
445 .long ret_from_except
447 /* Floating-point unavailable */
452 bne load_up_fpu /* if from user, just load it up */
454 bl transfer_to_handler /* if from kernel, take a trap */
456 .long ret_from_except
462 addi r3,r1,STACK_FRAME_OVERHEAD
464 bl transfer_to_handler
465 .globl timer_interrupt_intercept
466 timer_interrupt_intercept:
467 .long timer_interrupt
468 .long ret_from_intercept
470 STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
471 STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
477 stw r3,ORIG_GPR3(r21)
479 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
480 bl transfer_to_handler
482 .long ret_from_except
484 /* Single step - not used on 601 */
485 STD_MOL_EXCEPTION(0xd00, SingleStep, SingleStepException, 5)
486 STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
489 * The Altivec unavailable trap is at 0x0f20. Foo.
490 * We effectively remap it to 0x3000.
495 addi r3,r1,STACK_FRAME_OVERHEAD
497 bl transfer_to_handler
498 .long UnknownException
499 .long ret_from_except
502 #ifdef CONFIG_ALTIVEC
510 * Handle TLB miss for instruction on 603/603e.
511 * Note: we get an alternate set of r0 - r3 to use automatically.
515 MOL_HOOK_TLBMISS( 14 )
518 * r1: linux style pte ( later becomes ppc hardware pte )
519 * r2: ptr to linux-style pte
/*
 * 603/603e instruction TLB miss, PTE-lookup portion.  Runs with the
 * alternate r0-r3 register set (MSR[TGPR]).  Walks the two-level
 * page table for the faulting address (r3), checks permissions, sets
 * the accessed bit, and converts the Linux PTE to the low word of a
 * PPC hardware PTE for the TLB reload.  On any failure it branches
 * to InstructionAddressInvalid to raise a real ISI exception.
 */
523 /* Get PTE (linux-style) and check access */
525 lis r1,KERNELBASE@h /* check if kernel address */
530 lis r2,swapper_pg_dir@ha /* if kernel address, use */
531 addi r2,r2,swapper_pg_dir@l /* kernel page table */
533 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
534 lwz r2,0(r2) /* get pmd entry */
535 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
536 beq- InstructionAddressInvalid /* return if no mapping */
538 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
539 lwz r1,0(r2) /* get linux-style pte */
540 /* setup access flags in r3 */
542 rlwinm r3,r3,32-13,30,30 /* MSR_PR -> _PAGE_USER */
543 ori r3,r3,1 /* set _PAGE_PRESENT bit in access */
544 andc. r3,r3,r1 /* check access & ~permission */
545 bne- InstructionAddressInvalid /* return if access not permitted */
546 ori r1,r1,0x100 /* set _PAGE_ACCESSED in pte */
547 stw r1,0(r2) /* update PTE (accessed bit) */
548 /* Convert linux-style PTE to low word of PPC-style PTE */
549 /* this computation could be done better -- Cort */
550 rlwinm r3,r1,32-9,31,31 /* _PAGE_HWWRITE -> PP lsb */
551 rlwimi r1,r1,32-1,31,31 /* _PAGE_USER -> PP (both bits now) */
552 ori r3,r3,0xe04 /* clear out reserved bits */
553 andc r1,r1,r3 /* PP=2 or 0, when _PAGE_HWWRITE */
557 mfspr r3,SRR1 /* Need to restore CR0 */
/*
 * Miss could not be resolved: synthesize SRR1/DSISR/DAR state for a
 * genuine instruction-access exception, restore the CPU state
 * clobbered by the miss handler (CTR, CR0, MSR[TGPR]), and fall
 * through/branch (not visible here) to the ISI vector.
 */
560 InstructionAddressInvalid:
562 rlwinm r1,r3,9,6,6 /* Get load/store bit */
565 mtspr DSISR,r1 /* (shouldn't be needed) */
566 mtctr r0 /* Restore CTR */
567 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
570 mfspr r1,IMISS /* Get failing address */
571 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
572 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
574 mtspr DAR,r1 /* Set fault address */
575 mfmsr r0 /* Restore "normal" registers */
576 xoris r0,r0,MSR_TGPR>>16
577 mtcrf 0x80,r3 /* Restore CR0 */
578 sync /* Some chip revs have problems here... */
583 * Handle TLB miss for DATA Load operation on 603/603e
587 MOL_HOOK_TLBMISS( 15 )
590 * r1: linux style pte ( later becomes ppc hardware pte )
591 * r2: ptr to linux-style pte
/*
 * 603/603e data-load TLB miss, PTE-lookup portion.  Same two-level
 * walk as the instruction miss: check permissions for a read access
 * (_PAGE_PRESENT, plus _PAGE_USER if the access came from user
 * mode), set the accessed bit, and convert the Linux PTE to the low
 * word of a PPC hardware PTE.  Failures branch to DataAddressInvalid
 * to raise a real DSI exception.
 */
595 /* Get PTE (linux-style) and check access */
597 lis r1,KERNELBASE@h /* check if kernel address */
602 lis r2,swapper_pg_dir@ha /* if kernel address, use */
603 addi r2,r2,swapper_pg_dir@l /* kernel page table */
605 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
606 lwz r2,0(r2) /* get pmd entry */
607 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
608 beq- DataAddressInvalid /* return if no mapping */
610 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
611 lwz r1,0(r2) /* get linux-style pte */
612 /* setup access flags in r3 */
614 rlwinm r3,r3,32-13,30,30 /* MSR_PR -> _PAGE_USER */
615 ori r3,r3,1 /* set _PAGE_PRESENT bit in access */
616 /* save r2 and use it as scratch for the andc. */
617 andc. r3,r3,r1 /* check access & ~permission */
618 bne- DataAddressInvalid /* return if access not permitted */
619 ori r1,r1,0x100 /* set _PAGE_ACCESSED in pte */
620 stw r1,0(r2) /* update PTE (accessed bit) */
621 /* Convert linux-style PTE to low word of PPC-style PTE */
622 /* this computation could be done better -- Cort */
623 rlwinm r3,r1,32-9,31,31 /* _PAGE_HWWRITE -> PP lsb */
624 rlwimi r1,r1,32-1,31,31 /* _PAGE_USER -> PP (both bits now) */
625 ori r3,r3,0xe04 /* clear out reserved bits */
626 andc r1,r1,r3 /* PP=2 or 0, when _PAGE_HWWRITE */
630 mfspr r3,SRR1 /* Need to restore CR0 */
/*
 * NOTE(review): the DataAddressInvalid: label itself is not visible
 * in this extract; the lines below are its body -- synthesize the
 * DSI fault state (DSISR from SRR1, DAR from DMISS) and restore the
 * CPU state clobbered by the miss handler.
 */
635 rlwinm r1,r3,9,6,6 /* Get load/store bit */
638 mtctr r0 /* Restore CTR */
639 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
641 mfspr r1,DMISS /* Get failing address */
642 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
643 beq 20f /* Jump if big endian */
645 20: mtspr DAR,r1 /* Set fault address */
646 mfmsr r0 /* Restore "normal" registers */
647 xoris r0,r0,MSR_TGPR>>16
648 mtcrf 0x80,r3 /* Restore CR0 */
649 sync /* Some chip revs have problems here... */
654 * Handle TLB miss for DATA Store on 603/603e
658 MOL_HOOK_TLBMISS( 16 )
661 * r1: linux style pte ( later becomes ppc hardware pte )
662 * r2: ptr to linux-style pte
/*
 * 603/603e data-store TLB miss, PTE-lookup portion.  As the load
 * miss, but the required access bits are _PAGE_PRESENT|_PAGE_RW
 * (0x5), and a successful store sets ACCESSED|DIRTY|RW|HWWRITE
 * (0x384) in the PTE so the page is marked dirty up front.
 * Failures reuse DataAddressInvalid to raise a DSI.
 */
666 /* Get PTE (linux-style) and check access */
668 lis r1,KERNELBASE@h /* check if kernel address */
673 lis r2,swapper_pg_dir@ha /* if kernel address, use */
674 addi r2,r2,swapper_pg_dir@l /* kernel page table */
676 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
677 lwz r2,0(r2) /* get pmd entry */
678 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
679 beq- DataAddressInvalid /* return if no mapping */
681 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
682 lwz r1,0(r2) /* get linux-style pte */
683 /* setup access flags in r3 */
685 rlwinm r3,r3,32-13,30,30 /* MSR_PR -> _PAGE_USER */
686 ori r3,r3,0x5 /* _PAGE_PRESENT|_PAGE_RW */
687 /* save r2 and use it as scratch for the andc. */
688 andc. r3,r3,r1 /* check access & ~permission */
689 bne- DataAddressInvalid /* return if access not permitted */
690 ori r1,r1,0x384 /* set _PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_RW|_PAGE_HWWRITE in pte */
691 stw r1,0(r2) /* update PTE (accessed bit) */
692 /* Convert linux-style PTE to low word of PPC-style PTE */
693 /* this computation could be done better -- Cort */
694 rlwinm r3,r1,32-9,31,31 /* _PAGE_HWWRITE -> PP lsb */
695 rlwimi r1,r1,32-1,31,31 /* _PAGE_USER -> PP (both bits now) */
696 ori r3,r3,0xe04 /* clear out reserved bits */
697 andc r1,r1,r3 /* PP=2 or 0, when _PAGE_HWWRITE */
701 mfspr r3,SRR1 /* Need to restore CR0 */
705 STD_MOL_EXCEPTION(0x1300, Trap_13, InstructionBreakpoint, 11)
706 STD_EXCEPTION(0x1400, SMI, SMIException)
707 STD_EXCEPTION(0x1500, Trap_15, UnknownException)
708 STD_EXCEPTION(0x1600, Trap_16, UnknownException)
709 STD_EXCEPTION(0x1700, Trap_17, TAUException)
710 STD_EXCEPTION(0x1800, Trap_18, UnknownException)
711 STD_EXCEPTION(0x1900, Trap_19, UnknownException)
712 STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
713 STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
714 STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
715 STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
716 STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
717 STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
718 STD_MOL_EXCEPTION(0x2000, RunMode, RunModeException, 5)
719 STD_EXCEPTION(0x2100, Trap_21, UnknownException)
720 STD_EXCEPTION(0x2200, Trap_22, UnknownException)
721 STD_EXCEPTION(0x2300, Trap_23, UnknownException)
722 STD_EXCEPTION(0x2400, Trap_24, UnknownException)
723 STD_EXCEPTION(0x2500, Trap_25, UnknownException)
724 STD_EXCEPTION(0x2600, Trap_26, UnknownException)
725 STD_EXCEPTION(0x2700, Trap_27, UnknownException)
726 STD_EXCEPTION(0x2800, Trap_28, UnknownException)
727 STD_EXCEPTION(0x2900, Trap_29, UnknownException)
728 STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
729 STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
730 STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
731 STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
732 STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
733 STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)
737 #ifdef CONFIG_ALTIVEC
741 bne load_up_altivec /* if from user, just load it up */
743 bl transfer_to_handler /* if from kernel, take a trap */
745 .long ret_from_except
746 #endif /* CONFIG_ALTIVEC */
748 #ifdef CONFIG_PPC64BRIDGE
754 b InstructionAccessCont
760 b InstructionSegmentCont
761 #endif /* CONFIG_PPC64BRIDGE */
764 * This code finishes saving the registers to the exception frame
765 * and jumps to the appropriate handler for the exception, turning
766 * on address translation.
/*
 * Finish saving registers into the exception frame and jump to the
 * handler whose (physical) address follows the caller's `bl`, turning
 * address translation back on via RFI.  Also: saves VRSAVE for G4s,
 * sets r2 = current, clears any outstanding lwarx reservation with a
 * stwcx., marks SPRG2 = 0 ("r1 holds kernel sp"), and checks for
 * kernel stack overflow (branching to stack_ovf).
 * NOTE(review): the transfer_to_handler: label line and several body
 * lines are missing from this extract -- the sequence is incomplete.
 */
768 .globl transfer_to_handler
776 mfspr r23,SPRG3 /* if from user, fix up THREAD.regs */
778 addi r24,r1,STACK_FRAME_OVERHEAD
780 #ifdef CONFIG_ALTIVEC
781 mfpvr r24 /* check if we are on a G4 */
785 mfspr r22,SPRN_VRSAVE /* if so, save vrsave register value */
786 stw r22,THREAD_VRSAVE(r23)
787 #endif /* CONFIG_ALTIVEC */
788 2: addi r2,r23,-THREAD /* set r2 to current */
791 andi. r24,r23,0x3f00 /* get vector offset */
794 stwcx. r22,r22,r21 /* to clear the reservation */
797 mtspr SPRG2,r22 /* r1 is now kernel sp */
798 addi r24,r2,TASK_STRUCT_SIZE /* check for kernel stack overflow */
802 bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */
803 lwz r24,0(r23) /* virtual address of handler */
804 lwz r23,4(r23) /* where to go when done */
811 RFI /* jump to handler, enable MMU */
814 * On kernel stack overflow, load up an initial stack pointer
815 * and call StackOverflow(regs), which should not return.
/*
 * Point r3 at the register frame, switch r1 to the top of the
 * init task's stack, and set r24 to StackOverflow (the jump itself
 * is not visible in this extract -- presumably via srr0/RFI as in
 * transfer_to_handler).
 */
818 addi r3,r1,STACK_FRAME_OVERHEAD
819 lis r1,init_task_union@ha
820 addi r1,r1,init_task_union@l
821 addi r1,r1,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
822 lis r24,StackOverflow@ha
823 addi r24,r24,StackOverflow@l
832 * Disable FP for the task which had the FPU previously,
833 * and save its floating-point registers in its thread_struct.
834 * Enables the FPU for use in the kernel on return.
835 * On SMP we know the fpu is free, since we give it up every
841 #ifdef CONFIG_PPC64BRIDGE
842 clrldi r5,r5,1 /* turn off 64-bit mode */
843 #endif /* CONFIG_PPC64BRIDGE */
845 MTMSRD(r5) /* enable use of fpu now */
848 * For SMP, we don't do lazy FPU switching because it just gets too
849 * horrendously complex, especially when a task switches from one CPU
850 * to another. Instead we call giveup_fpu in switch_to.
853 lis r6,0 /* get __pa constant */
855 addis r3,r6,last_task_used_math@ha
856 lwz r4,last_task_used_math@l(r3)
860 addi r4,r4,THREAD /* want THREAD of last_task_used_math */
863 stfd fr0,THREAD_FPSCR-4(r4)
866 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
867 li r20,MSR_FP|MSR_FE0|MSR_FE1
868 andc r4,r4,r20 /* disable FP for previous task */
869 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
871 #endif /* CONFIG_SMP */
872 /* enable use of FP after return */
873 ori r23,r23,MSR_FP|MSR_FE0|MSR_FE1
874 mfspr r5,SPRG3 /* current task's THREAD (phys) */
875 lfd fr0,THREAD_FPSCR-4(r5)
881 stw r4,last_task_used_math@l(r3)
882 #endif /* CONFIG_SMP */
883 /* restore registers and return */
890 /* we haven't used ctr or xer */
900 * FP unavailable trap from kernel - print a message, but let
901 * the task use FP in the kernel until it returns to user mode.
906 stw r3,_MSR(r1) /* enable use of FP after return */
909 mr r4,r2 /* current */
913 86: .string "floating point used in kernel (task=%p, pc=%x)\n"
916 #ifdef CONFIG_ALTIVEC
917 /* Note that the AltiVec support is closely modeled after the FP
918 * support. Changes to one are likely to be applicable to the
922 * Disable AltiVec for the task which had AltiVec previously,
923 * and save its AltiVec registers in its thread_struct.
924 * Enables AltiVec for use in the kernel on return.
925 * On SMP we know the AltiVec units are free, since we give it up every
931 mtmsr r5 /* enable use of AltiVec now */
934 * For SMP, we don't do lazy AltiVec switching because it just gets too
935 * horrendously complex, especially when a task switches from one CPU
936 * to another. Instead we call giveup_altivec in switch_to.
945 addis r3,r6,last_task_used_altivec@ha
946 lwz r4,last_task_used_altivec@l(r3)
950 addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */
957 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
959 andc r4,r4,r20 /* disable altivec for previous task */
960 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
962 #endif /* CONFIG_SMP */
963 /* enable use of AltiVec after return */
964 oris r23,r23,MSR_VEC@h
965 mfspr r5,SPRG3 /* current task's THREAD (phys) */
973 stw r4,last_task_used_altivec@l(r3)
974 #endif /* CONFIG_SMP */
975 /* restore registers and return */
982 /* we haven't used ctr or xer */
992 * AltiVec unavailable trap from kernel - print a message, but let
993 * the task use AltiVec in the kernel until it returns to user mode.
998 stw r3,_MSR(r1) /* enable use of AltiVec after return */
1001 mr r4,r2 /* current */
1005 87: .string "AltiVec used in kernel (task=%p, pc=%x) \n"
1009 * giveup_altivec(tsk)
1010 * Disable AltiVec for the task given as the argument,
1011 * and save the AltiVec registers in its thread_struct.
1012 * Enables AltiVec for use in the kernel on return.
1015 .globl giveup_altivec
1019 MOL_HOOK_MMU(13, r5)
1023 oris r5,r5,MSR_VEC@h
1025 mtmsr r5 /* enable use of AltiVec now */
1028 beqlr- /* if no previous owner, done */
1029 addi r3,r3,THREAD /* want THREAD of task */
1032 SAVE_32VR(0, r4, r3)
1037 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1039 andc r4,r4,r3 /* disable AltiVec for previous task */
1040 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1044 lis r4,last_task_used_altivec@ha
1045 stw r5,last_task_used_altivec@l(r4)
1046 #endif /* CONFIG_SMP */
1048 #endif /* CONFIG_ALTIVEC */
1052 * Disable FP for the task given as the argument,
1053 * and save the floating-point registers in its thread_struct.
1054 * Enables the FPU for use in the kernel on return.
1066 mtmsr r5 /* enable use of fpu now */
1069 beqlr- /* if no previous owner, done */
1070 addi r3,r3,THREAD /* want THREAD of task */
1075 stfd fr0,THREAD_FPSCR-4(r3)
1077 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1078 li r3,MSR_FP|MSR_FE0|MSR_FE1
1079 andc r4,r4,r3 /* disable FP for previous task */
1080 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1084 lis r4,last_task_used_math@ha
1085 stw r5,last_task_used_math@l(r4)
1086 #endif /* CONFIG_SMP */
1090 * This code is jumped to from the startup code to copy
1091 * the kernel image to physical address 0.
/*
 * r25 = physical klimit (end of kernel image); copy the first 0x4000
 * bytes (the exception vectors plus this code) to address 0, then
 * jump into the copy at label 4: and let it copy the remainder.
 */
1094 addis r9,r26,klimit@ha /* fetch klimit */
1095 lwz r25,klimit@l(r9)
1096 addis r25,r25,-KERNELBASE@h
1097 li r3,0 /* Destination base address */
1098 li r6,0 /* Destination offset */
1099 li r5,0x4000 /* # bytes of memory to copy */
1100 bl copy_and_flush /* copy the first 0x4000 bytes */
1101 addi r0,r3,4f@l /* jump to the address of 4f */
1102 mtctr r0 /* in copy and do the rest. */
1103 bctr /* jump to the copy */
1105 bl copy_and_flush /* copy the rest */
1109 * Copy routine used to copy the kernel to start at physical address 0
1110 * and flush and invalidate the caches as needed.
1111 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1112 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
/* NOTE(review): most of the copy loop is missing from this extract;
 * only the per-cacheline dcbst/icbi maintenance lines are visible. */
1117 4: li r0,CACHELINE_WORDS
1119 3: addi r6,r6,4 /* copy a cache line */
1123 dcbst r6,r3 /* write it to memory */
1125 icbi r6,r3 /* flush the icache line */
1135 * On APUS the physical base address of the kernel is not known at compile
1136 time, which means the __pa/__va constants used are incorrect. In the
1137 * __init section is recorded the virtual addresses of instructions using
1138 * these constants, so all that has to be done is fix these before
1139 * continuing the kernel boot.
1141 * r4 = The physical address of the kernel base.
1145 addis r10,r10,-KERNELBASE@h /* virt_to_phys constant */
1146 neg r11,r10 /* phys_to_virt constant */
1148 lis r12,__vtop_table_begin@h
1149 ori r12,r12,__vtop_table_begin@l
1150 add r12,r12,r10 /* table begin phys address */
1151 lis r13,__vtop_table_end@h
1152 ori r13,r13,__vtop_table_end@l
1153 add r13,r13,r10 /* table end phys address */
1156 1: lwzu r14,4(r12) /* virt address of instruction */
1157 add r14,r14,r10 /* phys address of instruction */
1158 lwz r15,0(r14) /* instruction, now insert top */
1159 rlwimi r15,r10,16,16,31 /* half of vp const in low half */
1160 stw r15,0(r14) /* of instruction and restore. */
1161 dcbst r0,r14 /* write it to memory */
1163 icbi r0,r14 /* flush the icache line */
1168 * Map the memory where the exception handlers will
1169 * be copied to when hash constants have been patched.
1171 #ifdef CONFIG_APUS_FAST_EXCEPT
1176 ori r8,r8,0x2 /* 128KB, supervisor */
1180 lis r12,__ptov_table_begin@h
1181 ori r12,r12,__ptov_table_begin@l
1182 add r12,r12,r10 /* table begin phys address */
1183 lis r13,__ptov_table_end@h
1184 ori r13,r13,__ptov_table_end@l
1185 add r13,r13,r10 /* table end phys address */
1188 1: lwzu r14,4(r12) /* virt address of instruction */
1189 add r14,r14,r10 /* phys address of instruction */
1190 lwz r15,0(r14) /* instruction, now insert top */
1191 rlwimi r15,r11,16,16,31 /* half of pv const in low half*/
1192 stw r15,0(r14) /* of instruction and restore. */
1193 dcbst r0,r14 /* write it to memory */
1195 icbi r0,r14 /* flush the icache line */
1199 isync /* No speculative loading until now */
1202 apus_interrupt_entry:
1203 /* This is horrible, but there's no way around it. Enable the
1204 * data cache so the IRQ hardware register can be accessed
1205 * without cache intervention. Then disable interrupts and get
1206 * the current emulated m68k IPL value.
1215 lis r4,APUS_IPL_EMU@h
1217 li r20,(IPLEMU_SETRESET|IPLEMU_DISABLEINT)
1218 stb r20,APUS_IPL_EMU@l(r4)
1221 lbz r3,APUS_IPL_EMU@l(r4)
1223 li r2,IPLEMU_IPLMASK
1224 rlwinm. r20,r3,32-3,29,31
1226 mr r20,r2 /* lvl7! Need to reset state machine. */
1231 stb r2,APUS_IPL_EMU@l(r4)
1232 ori r20,r20,IPLEMU_SETRESET
1234 stb r20,APUS_IPL_EMU@l(r4)
1236 li r20,IPLEMU_DISABLEINT
1237 stb r20,APUS_IPL_EMU@l(r4)
1239 /* At this point we could do some magic to avoid the overhead
1240 * of calling the C interrupt handler in case of a spurious
1241 * interrupt. Could not get a simple hack to work though.
1250 stw r3,(_CCR+4)(r21);
1252 addi r3,r1,STACK_FRAME_OVERHEAD;
1254 bl transfer_to_handler;
1256 .long ret_from_except
1258 /***********************************************************************
1259 * Please note that on APUS the exception handlers are located at the
1260 * physical address 0xfff0000. For this reason, the exception handlers
1261 * cannot use relative branches to access the code below.
1262 ***********************************************************************/
1263 #endif /* CONFIG_APUS */
1266 #ifdef CONFIG_GEMINI
1267 .globl __secondary_start_gemini
1268 __secondary_start_gemini:
1278 #endif /* CONFIG_GEMINI */
1280 .globl __secondary_start_psurge
1281 __secondary_start_psurge:
1282 li r24,1 /* cpu # */
1283 /* we come in here with IR=0 and DR=1, and DBAT 0
1284 set to map the 0xf0000000 - 0xffffffff region */
1286 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
1291 .globl __secondary_start
1293 #ifdef CONFIG_PPC64BRIDGE
1295 clrldi r0,r0,1 /* make sure it's in 32-bit mode */
1304 lis r2,current_set@h
1305 ori r2,r2,current_set@l
1307 slwi r24,r24,2 /* get current_set[cpu#] */
1311 addi r1,r2,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
1316 /* load up the MMU */
1319 /* ptr to phys current thread */
1321 addi r4,r4,THREAD /* phys address of our thread_struct */
1325 mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
1327 /* enable MMU and jump to start_secondary */
1329 lis r3,start_secondary@h
1330 ori r3,r3,start_secondary@l
1335 #endif /* CONFIG_SMP */
1338 * Enable caches and 604-specific features if necessary.
1342 rlwinm r9,r9,16,16,31
1344 beq 6f /* not needed for 601 */
1346 andi. r0,r11,HID0_DCE
1347 ori r11,r11,HID0_ICE|HID0_DCE
1348 ori r8,r11,HID0_ICFI
1349 bne 3f /* don't invalidate the D-cache */
1350 ori r8,r8,HID0_DCI /* unless it wasn't enabled */
1353 mtspr HID0,r8 /* enable and invalidate caches */
1355 mtspr HID0,r11 /* enable caches */
1358 cmpi 0,r9,4 /* check for 604 */
1359 cmpi 1,r9,9 /* or 604e */
1360 cmpi 2,r9,10 /* or mach5 / 604r */
1361 cmpi 3,r9,8 /* check for 750 (G3) */
1362 cmpi 4,r9,12 /* or 7400 (G4) */
1366 ori r11,r11,HID0_SIED|HID0_BHTE /* for 604[e|r], enable */
1368 ori r11,r11,HID0_BTCD /* superscalar exec & br history tbl */
1374 * enable Store Gathering (SGE), Address Broadcast (ABE),
1375 * Branch History Table (BHTE), Branch Target ICache (BTIC)
1377 ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
1378 oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
1380 andc r11,r11,r3 /* clear SPD: enable speculative */
1382 mtspr ICTC,r3 /* Instruction Cache Throttling off */
1390 * Load stuff into the MMU. Intended to be called with
1394 /* Load the SDR1 register (hash table base & size) */
1399 #ifdef CONFIG_PPC64BRIDGE
1400 /* clear the ASR so we only use the pseudo-segment registers. */
1403 #endif /* CONFIG_PPC64BRIDGE */
1404 li r0,16 /* load up segment register values */
1405 mtctr r0 /* for context 0 */
1406 lis r3,0x2000 /* Ku = 1, VSID = 0 */
1409 addi r3,r3,1 /* increment VSID */
1410 addis r4,r4,0x1000 /* address of next segment */
1412 #ifndef CONFIG_POWER4
1413 /* Load the BAT registers with the values set up by MMU_init.
1414 MMU_init takes care of whether we're on a 601 or not. */
1421 LOAD_BAT(0,r3,r4,r5)
1422 LOAD_BAT(1,r3,r4,r5)
1423 LOAD_BAT(2,r3,r4,r5)
1424 LOAD_BAT(3,r3,r4,r5)
1425 #endif /* CONFIG_POWER4 */
1429 * This is where the main kernel code starts.
1432 #ifndef CONFIG_PPC64BRIDGE
1436 /* ptr to current */
1437 lis r2,init_task_union@h
1438 ori r2,r2,init_task_union@l
1439 /* Set up for using our exception vectors */
1440 /* ptr to phys current thread */
1442 addi r4,r4,THREAD /* init task's THREAD */
1446 mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
1448 /* Clear out the BSS */
1451 lis r8,__bss_start@ha
1452 addi r8,r8,__bss_start@l
1455 rlwinm. r11,r11,30,2,31
1464 addi r1,r2,TASK_UNION_SIZE
1466 stwu r0,-STACK_FRAME_OVERHEAD(r1)
1468 * Decide what sort of machine this is and initialize the MMU.
1479 /* Copy exception code to exception vector base on APUS. */
1481 #ifdef CONFIG_APUS_FAST_EXCEPT
1482 lis r3,0xfff0 /* Copy to 0xfff00000 */
1484 lis r3,0 /* Copy to 0x00000000 */
1486 li r5,0x4000 /* # bytes of memory to copy */
1488 bl copy_and_flush /* copy the first 0x4000 bytes */
1489 #endif /* CONFIG_APUS */
1492 * Go back to running unmapped so we can load up new values
1493 * for SDR1 (hash table pointer) and the segment registers
1494 * and change to using our exception vectors.
1499 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1505 /* Load up the kernel context */
1507 SYNC /* Force all PTE updates to finish */
1508 tlbia /* Clear all TLB entries */
1509 sync /* wait for tlbia/tlbie to finish */
1511 tlbsync /* ... on all CPUs */
1516 /* Now turn on the MMU for real! */
1519 lis r3,start_kernel@h
1520 ori r3,r3,start_kernel@l
1527 * Set up the segment registers for a new context.
1529 _GLOBAL(set_context)
1530 rlwinm r3,r3,4,8,27 /* VSID = context << 4 */
1531 addis r3,r3,0x6000 /* Set Ks, Ku bits */
1532 li r0,12 /* TASK_SIZE / SEGMENT_SIZE */
1536 #ifdef CONFIG_PPC64BRIDGE
1538 #endif /* CONFIG_PPC64BRIDGE */
1540 addi r3,r3,1 /* next VSID */
1541 addis r4,r4,0x1000 /* address of next segment */
1547 * An undocumented "feature" of 604e requires that the v bit
1548 * be cleared before changing BAT values.
1550 * Also, newer IBM firmware does not clear bat3 and 4 so
1551 * this makes sure it's done.
1555 #if !defined(CONFIG_GEMINI)
1558 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
1579 #endif /* !defined(CONFIG_GEMINI) */
1582 #ifndef CONFIG_GEMINI
1585 1: addic. r20, r20, -0x1000
1592 addi r4, r3, __after_mmu_off - _start
1594 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
1603 #ifndef CONFIG_POWER4
1605 * Use the first pair of BAT registers to map the 1st 16MB
1606 * of RAM to KERNELBASE. From this point on we can't safely
1610 lis r11,KERNELBASE@h
1611 #ifndef CONFIG_PPC64BRIDGE
1613 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
1616 ori r11,r11,4 /* set up BAT registers for 601 */
1617 li r8,0x7f /* valid, block length = 8MB */
1618 oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
1619 oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
1620 mtspr IBAT0U,r11 /* N.B. 601 has valid bit in */
1621 mtspr IBAT0L,r8 /* lower BAT register */
1626 #endif /* CONFIG_PPC64BRIDGE */
1630 ori r8,r8,0x12 /* R/W access, M=1 */
1632 ori r8,r8,2 /* R/W access */
1633 #endif /* CONFIG_SMP */
1635 ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */
1637 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
1638 #endif /* CONFIG_APUS */
1640 #ifdef CONFIG_PPC64BRIDGE
1641 /* clear out the high 32 bits in the BAT */
1644 #endif /* CONFIG_PPC64BRIDGE */
1645 mtspr DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
1646 mtspr DBAT0U,r11 /* bit in upper BAT register */
1652 #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
1655 * setup the display bat prepared for us in prom.c
1660 addis r8,r3,disp_BAT@ha
1661 addi r8,r8,disp_BAT@l
1665 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
1675 #endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */
1676 #endif /* CONFIG_POWER4 */
1679 /* Jump into the system reset for the rom.
1680 * We first disable the MMU, and then jump to the ROM reset address.
1682 * r3 is the board info structure, r4 is the location for starting.
1683 * I use this for building a small kernel that can load other kernels,
1684 * rather than trying to write or rely on a rom monitor that can tftp load.
1689 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
1695 ori r10,r10,HID0_ICE|HID0_DCE
1701 addis r6,r6,-KERNELBASE@h
1715 * Mac-on-linux hook_table. Don't put this in the data section -
1716 * the base address must be within the first 32KB of RAM.
1718 .globl mol_interface
1720 .long MOL_INTERFACE_VERSION
1721 .fill 24,4,0 /* space for 24 hooks */
1726 * We put a few things here that have to be page-aligned.
1727 * This stuff goes at the beginning of the data segment,
1728 * which is page-aligned.
1733 .globl empty_zero_page
1737 .globl swapper_pg_dir
1742 * This space gets a copy of optional info passed to us by the bootstrap
1743 * Used to pass parameters into the kernel like root=/dev/sda1, etc.