2 * arch/ppc/kernel/except_8xx.S
4 * $Id: head_8xx.S,v 1.4 1999/09/18 18:43:19 dmalek Exp $
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
9 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
10 * Low-level exception handlers and MMU support
11 * rewritten by Paul Mackerras.
12 * Copyright (C) 1996 Paul Mackerras.
13 * MPC8xx modifications by Dan Malek
14 * Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
16 * This file contains low-level support and setup for PowerPC 8xx
17 * embedded processors, including trap and interrupt dispatch.
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
27 #include <asm/processor.h>
29 #include <linux/config.h>
31 #include <asm/cache.h>
32 #include <asm/pgtable.h>
34 /* XXX need definitions here for 16 byte cachelines on some/all 8xx
37 LG_CACHELINE_BYTES = 5
46 * _start is defined this way because the XCOFF loader in the OpenFirmware
47 * on the powermac expects the entry point to be a procedure descriptor.
54 * This port was done on an MBX board with an 860. Right now I only
55 * support an ELF compressed (zImage) boot from EPPC-Bug because the
56 * code there loads up some registers before calling us:
57 * r3: ptr to board info data
58 * r4: initrd_start or if no initrd then 0
59 * r5: initrd_end - unused if r4 is 0
60 * r6: Start of command line string
61 * r7: End of command line string
63 * I decided to use conditional compilation instead of checking PVR and
64 * adding more processor specific branches around code I don't need.
65 * Since this is an embedded processor, I also appreciate any memory
68 * The MPC8xx does not have any BATs, but it supports large page sizes.
69 * We first initialize the MMU to support 8M byte pages, then load one
70 * entry into each of the instruction and data TLBs to map the first
71 * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to
72 * the "internal" processor registers before MMU_init is called.
74 * The TLB code currently contains a major hack. Since I use the condition
75 * code register, I have to save and restore it. I am out of registers, so
76 * I just store it in memory location 0 (the TLB handlers are not reentrant).
77 * To avoid making any decisions, I need to use the "segment" valid bit
78 * in the first level table, but that would require many changes to the
79 * Linux page directory/table functions that I don't want to do right now.
81 * I used to use SPRG2 for a temporary register in the TLB handler, but it
82 * has since been put to other uses. I now use a hack to save a register
83 * and the CCR at memory location 0.....Someday I'll fix this.....
/*
 * Early bootstrap, entered from the boot loader with the MMU off.
 * Saves the loader-provided board-info pointer (r3, see header comment),
 * zeroes the MMU instruction control register, pins a 1:1 8MB TLB entry
 * for the kernel at KERNELBASE and another 8MB (cache-inhibited) entry
 * covering the IMMR internal-register space, then enables translation
 * with an rfi that lands in start_here.
 * NOTE(review): this view of the file is missing intermediate lines
 * (several mtspr/mfspr and label lines referenced below are not visible).
 */
89 mr r31,r3 /* save parameters */
96 tlbia /* Invalidate all TLB entries */
98 mtspr MI_CTR, r8 /* Set instruction control to zero */
100 #ifndef CONFIG_8xx_COPYBACK
/* Without copyback configured, default the data cache to write-through */
101 oris r8, r8, MD_WTDEF@h
103 mtspr MD_CTR, r8 /* Set data TLB control */
105 /* Now map the lower 8 Meg into the TLBs. For this quick hack,
106 * we can load the instruction and data TLB registers with the
109 lis r8, KERNELBASE@h /* Create vaddr for TLB */
110 ori r8, r8, MI_EVALID /* Mark it valid */
113 li r8, MI_PS8MEG /* Set 8M byte page */
114 ori r8, r8, MI_SVALID /* Make it valid */
117 li r8, MI_BOOTINIT /* Create RPN for address 0 */
118 mtspr MI_RPN, r8 /* Store TLB entry */
120 lis r8, MI_Kp@h /* Set the protection mode */
124 /* Map another 8 MByte at the IMMR to get the processor
125 * internal registers (among other things).
127 mfspr r9, 638 /* Get current IMMR */
128 andis. r9, r9, 0xff80 /* Get 8Mbyte boundary */
130 mr r8, r9 /* Create vaddr for TLB */
131 ori r8, r8, MD_EVALID /* Mark it valid */
133 li r8, MD_PS8MEG /* Set 8M byte page */
134 ori r8, r8, MD_SVALID /* Make it valid */
136 mr r8, r9 /* Create paddr for TLB */
/* NOTE(review): MI_BOOTINIT is reused here for the data-side entry;
 * the |0x2 marks the IMMR mapping cache-inhibited. */
137 ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
140 /* Since the cache is enabled according to the information we
141 * just loaded into the TLB, invalidate and enable the caches here.
142 * We should probably check/set other modes....later.
149 #ifdef CONFIG_8xx_COPYBACK
152 /* For a debug option, I left this here to easily enable
153 * the write through cache mode
161 /* We now have the lower 8 Meg mapped into TLB entries, and the caches
/* Build an MSR with IR/DR set and a target of start_here, so the
 * rfi below simultaneously enables translation and jumps there. */
167 ori r0,r0,MSR_DR|MSR_IR
170 ori r0,r0,start_here@l
173 rfi /* enables MMU */
176 * Exception entry code. This code runs with address translation
177 * turned off, i.e. using physical addresses.
178 * We assume sprg3 has the physical address of the current
179 * task's thread_struct.
/*
 * EXCEPTION_PROLOG: common exception-entry macro.  Picks an exception
 * stack (SPRG2 if nonzero, otherwise tophys(r1)), carves an INT_FRAME_SIZE
 * frame, and saves CR, LR and the GPRs into it before switching r1 to the
 * new (virtual) kernel stack pointer.
 * NOTE(review): several continuation lines of this macro are missing from
 * this view (the mfspr/mtspr lines that load r20/r22/r23 before the stores).
 */
181 #define EXCEPTION_PROLOG \
185 mfspr r21,SPRG2; /* exception stack to use from */ \
186 cmpwi 0,r21,0; /* user mode or RTAS */ \
188 tophys(r21,r1); /* use tophys(kernel sp) otherwise */ \
189 subi r21,r21,INT_FRAME_SIZE; /* alloc exc. frame */\
190 1: stw r20,_CCR(r21); /* save registers */ \
191 stw r22,GPR22(r21); \
192 stw r23,GPR23(r21); \
194 stw r20,GPR20(r21); \
196 stw r22,GPR21(r21); \
198 stw r20,_LINK(r21); \
209 tovirt(r1,r21); /* set new kernel sp */ \
210 SAVE_4GPRS(3, r21); \
213 * Note: code which follows this uses cr0.eq (set if from kernel),
214 * r21, r22 (SRR0), and r23 (SRR1).
/*
 * STD_EXCEPTION(n, label, hdlr): boilerplate for a vector at offset n.
 * Points r3 at the saved register frame and tails into
 * transfer_to_handler; the .long words that follow the bl are the
 * handler address and the return path (read by transfer_to_handler).
 * NOTE(review): the macro's vector/label setup lines are missing here.
 */
220 #define STD_EXCEPTION(n, label, hdlr) \
224 addi r3,r1,STACK_FRAME_OVERHEAD; \
226 bl transfer_to_handler; \
228 .long ret_from_except
/*
 * Exception vector table, offsets 0x100..0x1000.  Most vectors use the
 * STD_EXCEPTION boilerplate; the data/instruction access, external,
 * alignment, program-check, decrementer and syscall vectors are spelled
 * out because they need extra work (EE-bit copy, intercept hooks, or
 * ORIG_GPR3 save).  NOTE(review): the vector labels and EXCEPTION_PROLOG
 * invocations themselves are among the lines missing from this view.
 */
231 #ifdef CONFIG_SMP /* MVME/MTX start the secondary here */
232 STD_EXCEPTION(0x100, Reset, __secondary_start_psurge)
234 STD_EXCEPTION(0x100, Reset, UnknownException)
238 STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)
240 /* Data access exception.
241 * This is "never generated" by the MPC8xx. We jump to it for other
242 * translation errors.
252 addi r3,r1,STACK_FRAME_OVERHEAD
254 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
255 bl transfer_to_handler
257 .long ret_from_except
259 /* Instruction access exception.
260 * This is "never generated" by the MPC8xx. We jump to it for other
261 * translation errors.
266 addi r3,r1,STACK_FRAME_OVERHEAD
270 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
271 bl transfer_to_handler
273 .long ret_from_except
275 /* External interrupt */
279 addi r3,r1,STACK_FRAME_OVERHEAD
282 bl transfer_to_handler
/* do_IRQ_intercept is exported so platform code can hook IRQ dispatch */
283 .globl do_IRQ_intercept
286 .long ret_from_intercept
289 /* Alignment exception */
297 addi r3,r1,STACK_FRAME_OVERHEAD
299 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
300 bl transfer_to_handler
301 .long AlignmentException
302 .long ret_from_except
304 /* Program check exception */
308 addi r3,r1,STACK_FRAME_OVERHEAD
310 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
311 bl transfer_to_handler
312 .long ProgramCheckException
313 .long ret_from_except
315 /* No FPU on MPC8xx. This exception is not supposed to happen.
317 STD_EXCEPTION(0x800, FPUnavailable, UnknownException)
/* Decrementer (timer) interrupt, via an interceptable dispatch word */
322 addi r3,r1,STACK_FRAME_OVERHEAD
324 bl transfer_to_handler
325 .globl timer_interrupt_intercept
326 timer_interrupt_intercept:
327 .long timer_interrupt
328 .long ret_from_intercept
330 STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
331 STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
/* System call (0xc00): also preserve the original r3 for restarting */
337 stw r3,ORIG_GPR3(r21)
339 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
340 bl transfer_to_handler
342 .long ret_from_except
344 /* Single step - not used on 601 */
345 STD_EXCEPTION(0xd00, SingleStep, SingleStepException)
347 STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
348 STD_EXCEPTION(0xf00, Trap_0f, UnknownException)
350 /* On the MPC8xx, this is a software emulation interrupt. It occurs
351 * for all unimplemented and illegal instructions.
353 STD_EXCEPTION(0x1000, SoftEmu, SoftwareEmulation)
357 * For the MPC8xx, this is a software tablewalk to load the instruction
358 * TLB. It is modelled after the example in the Motorola manual. The task
359 * switch loads the M_TWB register with the pointer to the first level table.
360 * If we discover there is no second level table (the value is zero), the
361 * plan was to load that into the TLB, which causes another fault into the
362 * TLB Error interrupt where we can handle such problems. However, that did
363 * not work, so if we discover there is no second level table, we restore
364 * registers and branch to the error exception. We have to use the MD_xxx
365 * registers for the tablewalk because the equivalent MI_xxx registers
366 * only perform the attribute functions.
/*
 * InstructionTLBMiss: r20/r21 are the working registers (saved to
 * M_TW and, per the header comment, memory location 0).  Walks the
 * two-level page table by hand and loads MI_TWC/MI_RPN.
 * NOTE(review): the CPU6-errata alternative bodies and some restore
 * lines inside each #ifdef are missing from this view.
 */
369 #ifdef CONFIG_8xx_CPU6
375 mtspr M_TW, r20 /* Save a couple of working registers */
379 mfspr r20, SRR0 /* Get effective address of fault */
380 #ifdef CONFIG_8xx_CPU6
385 mtspr MD_EPN, r20 /* Have to use MD_EPN for walk, MI_EPN can't */
386 mfspr r20, M_TWB /* Get level 1 table entry address */
388 /* If we are faulting a kernel address, we have to use the
389 * kernel page tables.
/* Bit 0x0800 in the M_TWB-derived value distinguishes kernel addresses */
391 andi. r21, r20, 0x0800 /* Address >= 0x80000000 */
393 lis r21, swapper_pg_dir@h
394 ori r21, r21, swapper_pg_dir@l
/* Splice the swapper_pg_dir base (bits 2..19) into the walk address */
395 rlwimi r20, r21, 0, 2, 19
397 lwz r21, 0(r20) /* Get the level 1 entry */
398 rlwinm. r20, r21,0,0,19 /* Extract page descriptor page address */
399 beq 2f /* If zero, don't try to find a pte */
401 /* We have a pte table, so load the MI_TWC with the attributes
402 * for this page, which has only bit 31 set.
405 ori r21,r21,1 /* Set valid bit */
406 #ifdef CONFIG_8xx_CPU6
411 mtspr MI_TWC, r21 /* Set page attributes */
412 #ifdef CONFIG_8xx_CPU6
/* MD_TWC is also loaded so the hardware computes the pte address for us */
417 mtspr MD_TWC, r21 /* Load pte table base address */
418 mfspr r21, MD_TWC /* ....and get the pte address */
419 lwz r20, 0(r21) /* Get the pte */
/* Mark the page referenced before loading the TLB entry */
421 ori r20, r20, _PAGE_ACCESSED
425 /* Set four subpage valid bits (24, 25, 26, and 27).
426 * Clear bit 28 (which should be in the PTE, but we do this anyway).
429 rlwimi r20, r21, 0, 24, 28
430 #ifdef CONFIG_8xx_CPU6
435 mtspr MI_RPN, r20 /* Update TLB entry */
437 mfspr r20, M_TW /* Restore registers */
441 #ifdef CONFIG_8xx_CPU6
/* 2: no second-level table: restore and fall through to the error path */
446 2: mfspr r20, M_TW /* Restore registers */
450 #ifdef CONFIG_8xx_CPU6
/*
 * DataTLBMiss: same hand-rolled tablewalk as InstructionTLBMiss, but
 * for the data side — here M_TWB/MD_TWC already point at the walk, so
 * no MD_EPN load is needed.  Loads MD_TWC (including the Guarded bit
 * copied from the Linux PTE) and MD_RPN.
 * NOTE(review): the CPU6-errata alternative bodies and the restore/
 * branch lines inside each #ifdef are missing from this view.
 */
457 #ifdef CONFIG_8xx_CPU6
463 mtspr M_TW, r20 /* Save a couple of working registers */
467 mfspr r20, M_TWB /* Get level 1 table entry address */
469 /* If we are faulting a kernel address, we have to use the
470 * kernel page tables.
472 andi. r21, r20, 0x0800
474 lis r21, swapper_pg_dir@h
475 ori r21, r21, swapper_pg_dir@l
476 rlwimi r20, r21, 0, 2, 19
478 lwz r21, 0(r20) /* Get the level 1 entry */
479 rlwinm. r20, r21,0,0,19 /* Extract page descriptor page address */
480 beq 2f /* If zero, don't try to find a pte */
482 /* We have a pte table, so fetch the pte from the table.
485 ori r21, r21, 1 /* Set valid bit in physical L2 page */
486 #ifdef CONFIG_8xx_CPU6
491 mtspr MD_TWC, r21 /* Load pte table base address */
492 mfspr r20, MD_TWC /* ....and get the pte address */
493 lwz r20, 0(r20) /* Get the pte */
495 /* Insert the Guarded flag into the TWC from the Linux PTE.
496 * It is bit 27 of both the Linux PTE and the TWC (at least
497 * I got that right :-). It will be better when we can put
498 * this into the Linux pgd/pmd and load it in the operation
501 rlwimi r21, r20, 0, 27, 27
502 #ifdef CONFIG_8xx_CPU6
509 /* Set four subpage valid bits (24, 25, 26, and 27).
510 * Clear bit 28 (which should be in the PTE, but we do this anyway).
516 rlwimi r20, r21, 0, 24, 28
518 #ifdef CONFIG_8xx_CPU6
523 mtspr MD_RPN, r20 /* Update TLB entry */
525 mfspr r20, M_TW /* Restore registers */
529 #ifdef CONFIG_8xx_CPU6
/* 2: no second-level table: restore and take the error exception */
534 2: mfspr r20, M_TW /* Restore registers */
538 #ifdef CONFIG_8xx_CPU6
543 /* This is an instruction TLB error on the MPC8xx. This could be due
544 * to many reasons, such as executing guarded memory or illegal instruction
545 * addresses. There is nothing to do but handle a big time error fault.
551 /* This is the data TLB error on the MPC8xx. This could be due to
552 * many reasons, including a dirty update to a pte. We can catch that
553 * one here, but anything else is an error. First, we track down the
554 * Linux pte. If it is valid, write access is allowed, but the
555 * page dirty bit is not set, we will set it and reload the TLB. For
556 * any other case, we bail out to a higher level function that can
/*
 * DataTLBError fast path: if the fault was a store to a present,
 * writeable page whose dirty bit is simply unset, set
 * DIRTY/HWWRITE/ACCESSED in the Linux pte, write it back, and reload
 * MD_RPN — avoiding a full C-level fault.  Anything else bails to 2:.
 * NOTE(review): the CPU6-errata bodies, the DSISR read feeding the
 * store test, and the branch after it are missing from this view.
 */
561 #ifdef CONFIG_8xx_CPU6
567 mtspr M_TW, r20 /* Save a couple of working registers */
572 /* First, make sure this was a store operation.
575 andis. r21, r20, 0x0200 /* If set, indicates store op */
578 mfspr r20, M_TWB /* Get level 1 table entry address */
580 /* If we are faulting a kernel address, we have to use the
581 * kernel page tables.
583 andi. r21, r20, 0x0800
585 lis r21, swapper_pg_dir@h
586 ori r21, r21, swapper_pg_dir@l
587 rlwimi r20, r21, 0, 2, 19
589 lwz r21, 0(r20) /* Get the level 1 entry */
590 rlwinm. r20, r21,0,0,19 /* Extract page descriptor page address */
591 beq 2f /* If zero, bail */
593 /* We have a pte table, so fetch the pte from the table.
596 ori r21, r21, 1 /* Set valid bit in physical L2 page */
597 #ifdef CONFIG_8xx_CPU6
602 mtspr MD_TWC, r21 /* Load pte table base address */
603 mfspr r21, MD_TWC /* ....and get the pte address */
604 lwz r20, 0(r21) /* Get the pte */
606 andi. r21, r20, _PAGE_RW /* Is it writeable? */
607 beq 2f /* Bail out if not */
609 /* Update 'changed', among others.
611 ori r20, r20, _PAGE_DIRTY|_PAGE_HWWRITE|_PAGE_ACCESSED
612 mfspr r21, MD_TWC /* Get pte address again */
613 stw r20, 0(r21) /* and update pte in table */
615 /* Set four subpage valid bits (24, 25, 26, and 27).
616 * Clear bit 28 (which should be in the PTE, but we do this anyway).
619 rlwimi r20, r21, 0, 24, 28
620 #ifdef CONFIG_8xx_CPU6
625 mtspr MD_RPN, r20 /* Update TLB entry */
627 mfspr r20, M_TW /* Restore registers */
631 #ifdef CONFIG_8xx_CPU6
/* Slow path: restore working registers and take the real fault */
636 mfspr r20, M_TW /* Restore registers */
640 #ifdef CONFIG_8xx_CPU6
/* Remaining 8xx vectors (0x1500-0x1f00): unused traps routed to
 * UnknownException, except 0x1700 which goes to TAUException. */
645 STD_EXCEPTION(0x1500, Trap_15, UnknownException)
646 STD_EXCEPTION(0x1600, Trap_16, UnknownException)
647 STD_EXCEPTION(0x1700, Trap_17, TAUException)
648 STD_EXCEPTION(0x1800, Trap_18, UnknownException)
649 STD_EXCEPTION(0x1900, Trap_19, UnknownException)
650 STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
651 STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
653 /* On the MPC8xx, these next four traps are used for development
654 * support of breakpoints and such. Someday I will get around to
657 STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
658 STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
659 STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
660 STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
665 * This code finishes saving the registers to the exception frame
666 * and jumps to the appropriate handler for the exception, turning
667 * on address translation.
/*
 * transfer_to_handler: expects the EXCEPTION_PROLOG state (cr0.eq set
 * if from kernel; r21 = frame, r22 = SRR0, r23 = SRR1) plus the two
 * .long words after the caller's bl (handler address, return address).
 * Also checks for kernel stack overflow before dispatching via rfi.
 * NOTE(review): the lines between the visible ones (label, user-mode
 * branch, MSR setup for the rfi) are missing from this view.
 */
669 .globl transfer_to_handler
679 mfspr r23,SPRG3 /* if from user, fix up THREAD.regs */
681 addi r24,r1,STACK_FRAME_OVERHEAD
683 2: addi r2,r23,-THREAD /* set r2 to current */
686 andi. r24,r23,0x3f00 /* get vector offset */
/* Dummy stwcx. so any outstanding lwarx reservation is cancelled */
689 stwcx. r22,r22,r21 /* to clear the reservation */
692 mtspr SPRG2,r22 /* r1 is now kernel sp */
693 addi r24,r2,TASK_STRUCT_SIZE /* check for kernel stack overflow */
697 bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */
/* r23 points at the two .long words following the caller's bl */
698 lwz r24,0(r23) /* virtual address of handler */
699 lwz r23,4(r23) /* where to go when done */
704 rfi /* jump to handler, enable MMU */
707 * On kernel stack overflow, load up an initial stack pointer
708 * and call StackOverflow(regs), which should not return.
711 addi r3,r1,STACK_FRAME_OVERHEAD
/* Switch to the top of init_task_union's stack so we can report safely */
712 lis r1,init_task_union@ha
713 addi r1,r1,init_task_union@l
714 addi r1,r1,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
715 lis r24,StackOverflow@ha
716 addi r24,r24,StackOverflow@l
728 * This code is jumped to from the startup code to copy
729 * the kernel image to physical address 0.
/*
 * Relocation: copies the first 0x4000 bytes, jumps into the copy at
 * label 4:, then copies the rest.  The 0x426f6f58 constant is ASCII
 * "BooX" — the BootX signature checked against the saved r31.
 * NOTE(review): label lines (relocate entry, 4:) and the loop's
 * lwzu/stwu body are missing from this view.
 */
732 lis r9,0x426f /* if booted from BootX, don't */
733 addi r9,r9,0x6f58 /* translate source addr */
734 cmpw r31,r9 /* (we have to on chrp) */
736 rlwinm r4,r4,0,8,31 /* translate source address */
737 add r4,r4,r3 /* to region mapped with BATs */
738 7: addis r9,r26,klimit@ha /* fetch klimit */
740 addis r25,r25,-KERNELBASE@h
741 li r6,0 /* Destination offset */
742 li r5,0x4000 /* # bytes of memory to copy */
743 bl copy_and_flush /* copy the first 0x4000 bytes */
744 addi r0,r3,4f@l /* jump to the address of 4f */
745 mtctr r0 /* in copy and do the rest. */
746 bctr /* jump to the copy */
748 bl copy_and_flush /* copy the rest */
752 * Copy routine used to copy the kernel to start at physical address 0
753 * and flush and invalidate the caches as needed.
754 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
755 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
760 4: li r0,CACHELINE_WORDS
762 3: addi r6,r6,4 /* copy a cache line */
/* Push the copied line to memory and invalidate the stale icache line,
 * since we may execute from the freshly-copied region */
766 dcbst r6,r3 /* write it to memory */
768 icbi r6,r3 /* flush the icache line */
/*
 * SMP secondary-CPU entry (CONFIG_SMP only): __secondary_hold signals
 * the master, spins until released, then jumps to __secondary_start
 * with the cpu number in r24.
 * NOTE(review): most instruction lines of this region are missing from
 * this view; only the labels and a few setup lines remain.
 */
777 .globl __secondary_start_psurge
778 __secondary_start_psurge:
782 .globl __secondary_hold
784 /* tell the master we're here */
793 /* wait until we're told to start */
796 /* our cpu # was at addr 0 - go */
797 lis r5,__secondary_start@h
798 ori r5,r5,__secondary_start@l
801 mr r24,r3 /* cpu # */
803 #endif /* CONFIG_SMP */
806 * This is where the main kernel code starts.
/*
 * start_here: entered via rfi with the MMU on.  Sets r2 = current
 * (init_task_union, or current_set[cpu] for a secondary), clears BSS
 * on the boot CPU only, builds the initial stack, calls MMU_init,
 * then drops translation to load swapper_pg_dir into M_TWB, flushes
 * the boot-time TLB entries, and rfi's into start_kernel (or
 * start_secondary the second time through).
 * NOTE(review): many lines (branches on first_cpu_booted, the bss
 * clear loop body, MMU_init call, mtspr M_TWB/SRR0/SRR1) are missing
 * from this view.
 */
810 /* if we're the second cpu stack and r2 are different
811 * and we want to not clear the bss -- Cort */
812 lis r5,first_cpu_booted@h
813 ori r5,r5,first_cpu_booted@l
820 ori r2,r2,current_set@l
821 slwi r24,r24,2 /* cpu # to current_set[cpu#] */
826 #endif /* CONFIG_SMP */
828 lis r2,init_task_union@h
829 ori r2,r2,init_task_union@l
830 /* Clear out the BSS */
833 lis r8,__bss_start@ha
834 addi r8,r8,__bss_start@l
/* Convert the byte count to words (divide by 4) for the clear loop */
837 rlwinm. r11,r11,30,2,31
847 #endif /* CONFIG_SMP */
/* Initial kernel stack sits at the top of the task union */
849 addi r1,r2,TASK_UNION_SIZE
851 stwu r0,-STACK_FRAME_OVERHEAD(r1)
853 * Decide what sort of machine this is and initialize the MMU.
864 * Go back to running unmapped so we can load up new values
865 * for SDR1 (hash table pointer) and the segment registers
866 * and change to using our exception vectors.
867 * On the 8xx, all we have to do is invalidate the TLB to clear
868 * the old 8M byte TLB mappings and load the page table base register.
870 /* The right way to do this would be to track it down through
871 * init's THREAD like the context switch code does, but this is
872 * easier......until someone changes init's static structures.
874 lis r6, swapper_pg_dir@h
876 ori r6, r6, swapper_pg_dir@l
877 #ifdef CONFIG_8xx_CPU6
878 lis r4, cpu6_errata_word@h
879 ori r4, r4, cpu6_errata_word@l
888 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
892 /* Load up the kernel context */
894 SYNC /* Force all PTE updates to finish */
895 tlbia /* Clear all TLB entries */
896 sync /* wait for tlbia/tlbie to finish */
898 tlbsync /* ... on all CPUs */
901 /* Set up for using our exception vectors */
902 /* ptr to phys current thread */
904 addi r4,r4,THREAD /* init task's THREAD */
907 mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
908 /* Now turn on the MMU for real! */
910 lis r3,start_kernel@h
911 ori r3,r3,start_kernel@l
913 /* the second time through here we go to
914 * start_secondary(). -- Cort
916 lis r5,first_cpu_booted@h
917 ori r5,r5,first_cpu_booted@l
922 lis r3,start_secondary@h
923 ori r3,r3,start_secondary@l
925 #endif /* CONFIG_SMP */
928 rfi /* enable MMU and jump to start_kernel */
931 * Set up to use a given MMU context.
933 * The MPC8xx has something that currently happens "automagically."
934 * Unshared user space address translations are subject to ASID (context)
935 * match. During each task switch, the ASID is incremented. We can
936 * guarantee (I hope :-) that no entries currently match this ASID
937 * because every task will cause at least a TLB entry to be loaded for
938 * the first instruction and data access, plus the kernel running will
939 * have displaced several more TLBs. The MMU contains 32 entries for
940 * each TLB, and there are 16 contexts, so we just need to make sure
941 * two pages get replaced for every context switch, which currently
942 * happens. There are other TLB management techniques that I will
943 * eventually implement, but this is the easiest for now. -- Dan
945 * On the MPC8xx, we place the physical address of the new task
946 * page directory loaded into the MMU base register, and set the
947 * ASID compare register with the new "context".
/*
 * set_context(r3 = new context/ASID, r4 = phys pgd): loads M_TWB and
 * M_CASID.  The two mtspr sequences below are the CPU6-errata path
 * (via cpu6_errata_word) and the normal path; the #else/#endif lines
 * separating them are missing from this view.
 */
950 #ifdef CONFIG_8xx_CPU6
951 lis r6, cpu6_errata_word@h
952 ori r6, r6, cpu6_errata_word@l
957 mtspr M_TWB, r4 /* Update MMU base address */
961 mtspr M_CASID, r3 /* Update context */
963 mtspr M_CASID,r3 /* Update context */
965 mtspr M_TWB, r4 /* and pgd */
971 /* Jump into the system reset for the rom.
972 * We first disable the MMU, and then jump to the ROM reset address.
974 * r3 is the board info structure, r4 is the location for starting.
975 * I use this for building a small kernel that can load other kernels,
976 * rather than trying to write or rely on a rom monitor that can tftp load.
/* Build an MSR with IR/DR clear and the physical jump target, then
 * (in lines not visible here) rfi into the ROM with the MMU off. */
980 li r5,MSR_KERNEL & ~(MSR_IR|MSR_DR)
982 addis r6,r6,-KERNELBASE@h
991 #ifdef CONFIG_8xx_CPU6
992 /* It's here because it is unique to the 8xx.
993 * It is important we get called with interrupts disabled. I used to
994 * do that, but it appears that all code that calls this already had
995 * interrupt disabled.
/* CPU6-errata decrementer write: stage through cpu6_errata_word before
 * touching SPR 22 (DEC), per the 8xx CPU6 workaround used elsewhere. */
999 lis r7, cpu6_errata_word@h
1000 ori r7, r7, cpu6_errata_word@l
1004 mtspr 22, r3 /* Update Decrementer */
1010 * We put a few things here that have to be page-aligned.
1011 * This stuff goes at the beginning of the data segment,
1012 * which is page-aligned.
/* Page of zeroes exported for zero-fill mappings */
1017 .globl empty_zero_page
/* Kernel's initial page directory, walked by the TLB-miss handlers above */
1021 .globl swapper_pg_dir
1026 * This space gets a copy of optional info passed to us by the bootstrap
1027 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
1033 #ifdef CONFIG_8xx_CPU6
/* Scratch word used by the CPU6-errata SPR-write sequences */
1034 .globl cpu6_errata_word