/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 *
 *		originally from: locore.s, by William F. Jolitz
 *
 *		Substantially rewritten by David Greenman, Rod Grimes,
 *			Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 */
#include "opt_bootp.h"
#include "opt_compat.h"
#include "opt_nfsroot.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>
/*
 * Note: This version has been greatly munged to avoid various assembler
 * errors that may be fixed in newer versions of gas.  Perhaps newer
 * versions will have a more pleasant appearance.
 */
/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)
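/*
 * Illustrative sketch (C, not assembled): given the recursive mapping
 * above, the PTE and PDE for a virtual address "va" can be read through
 * the page tables themselves.  The functions mirror the .set definitions;
 * the types are assumed to come from <machine/pmap.h>.
 *
 *	pt_entry_t *vtopte(vm_offset_t va)
 *	{
 *		return ((pt_entry_t *)PTmap + (va >> PAGE_SHIFT));
 *	}
 *
 *	pd_entry_t *vtopde(vm_offset_t va)
 *	{
 *		return ((pd_entry_t *)PTD + (va >> PDRSHIFT));
 *	}
 */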
/*
 * Compiled KERNBASE location and the kernel load address
 */
	.globl	kernbase
	.set	kernbase,KERNBASE
	.globl	kernload
	.set	kernload,KERNLOAD
	.data
	ALIGN_DATA			/* just to be sure */

	.space	0x2000			/* space for tmpstk - temporary stack */
tmpstk:
	.globl	bootinfo
bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

	.globl	KERNend
KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

	.globl	IdlePTD
IdlePTD:	.long	0		/* phys addr of kernel PTD */
#if defined(PAE) || defined(PAE_TABLES)
	.globl	IdlePDPT
IdlePDPT:	.long	0		/* phys addr of kernel PDPT */
#endif
	.globl	KPTmap
KPTmap:		.long	0		/* address of kernel page tables */

	.globl	KPTphys
KPTphys:	.long	0		/* phys addr of kernel page tables */

	.globl	proc0kstack
proc0kstack:	.long	0		/* address of proc 0 kstack space */
p0kpa:		.long	0		/* phys addr of proc0's STACK */

vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0		/* address of vm86 region */
vm86pa:		.long	0		/* phys addr of vm86 region */
	.globl	pc98_system_parameter
pc98_system_parameter:
	.space	0x240
/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)
#define ALLOCPAGES(foo) \
	movl	R(physfree), %esi ;	\
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax ;		\
	movl	%eax, R(physfree) ;	\
	movl	%esi, %edi ;		\
	movl	$((foo)*PAGE_SIZE),%ecx ; \
	xorl	%eax,%eax ;		\
	cld ;				\
	rep ;				\
	stosb
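/*
 * ALLOCPAGES is a boot-time bump allocator: it hands back "foo" zeroed
 * pages at the old physfree and advances physfree past them.  A rough C
 * equivalent (illustrative only; paging is still off at this point, so a
 * physical address is also usable as a pointer):
 *
 *	vm_paddr_t allocpages(u_int npages)
 *	{
 *		vm_paddr_t pa = physfree;
 *
 *		physfree += npages * PAGE_SIZE;
 *		bzero((void *)pa, npages * PAGE_SIZE);
 *		return (pa);		// the macro leaves this in %esi
 *	}
 */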
/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$PTESHIFT,%ebx		; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$PTESIZE,%ebx		; /* next pte */ \
	loop	1b
/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(KPTphys), prot)
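/*
 * In C terms (illustrative only), fillkpt() fills %ecx consecutive
 * entries starting at index %ebx, and fillkptphys() derives that index
 * from the physical address in %eax:
 *
 *	void fillkpt(pt_entry_t *base, u_int idx, vm_paddr_t pa,
 *	    u_int count, u_int prot)
 *	{
 *		while (count-- != 0) {
 *			base[idx++] = pa | PG_V | prot;
 *			pa += PAGE_SIZE;
 *		}
 *	}
 *
 * so that fillkptphys(pa, count, prot) behaves like
 * fillkpt(KPTphys, pa >> PAGE_SHIFT, pa, count, prot).
 */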
	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$R(pc98_system_parameter),%edi
/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

/*
 * Clear the bss.  Not all boot programs do it, and it is our job anyway.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * Note: we must be careful to not overwrite an active gdt or idt.  They
 * are inactive from now until we switch to new ones, since we don't load
 * any more segment registers or permit interrupts until after the switch.
 */
	call	recover_bootinfo
/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(tmpstk),%esp
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(pc98_system_parameter)+1
3:
	movw	R(pc98_system_parameter+0x86),%ax
	movw	%ax,R(cpu_id)
	call	identify_cpu
	call	create_pagetables
/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:
/* Now enable paging */
#if defined(PAE) || defined(PAE_TABLES)
	movl	R(IdlePDPT), %eax
	movl	%eax, %cr3
	movl	%cr4, %eax
	orl	$CR4_PAE, %eax
	movl	%eax, %cr4
#else
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
#endif
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */
	pushl	$begin			/* jump to high virtualized address */
	ret

begin:
	/* now running relocated at KERNBASE where the system is linked to run */
	/* set up bootstrap stack */
	movl	proc0kstack,%eax	/* location of in-kernel stack */
	/*
	 * Only use bottom page for init386().  init386() calculates the
	 * PCB + FPU save area size and returns the true top of stack.
	 */
	leal	PAGE_SIZE(%eax),%esp
	xorl	%ebp,%ebp		/* mark end of frames */
	pushl	physfree		/* value of first for init386(first) */
	call	init386			/* wire 386 chip for unix operation */
	/*
	 * Clean up the stack in a way that db_numargs() understands, so
	 * that backtraces in ddb don't underrun the stack.  Traps for
	 * inaccessible memory are more fatal than usual this early.
	 */
	addl	$4,%esp

	/* Switch to true top of stack. */
	movl	%eax,%esp
	call	mi_startup		/* autoconfiguration, mountroot etc */
	/* NOTREACHED */
	addl	$0,%esp			/* for db_numargs() again */
/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	calll	*SIGF_HANDLER(%esp)
	leal	SIGF_UC(%esp),%eax	/* get ucontext */
	pushl	%eax
	testl	$PSL_VM,UC_EFLAGS(%eax)
	jne	1f
	mov	UC_GS(%eax),%gs		/* restore %gs */
1:
	movl	$SYS_sigreturn,%eax
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b
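/*
 * In effect, the trampoline above behaves like this illustrative C,
 * where "ucp" names the ucontext the kernel pushed on the user stack:
 *
 *	(*handler)(sig, code, ucp);	// calll *SIGF_HANDLER(%esp)
 *	sigreturn(ucp);			// int $0x80; does not return
 */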
#ifdef COMPAT_FREEBSD4
	ALIGN_TEXT
freebsd4_sigcode:
	calll	*SIGF_HANDLER(%esp)
	leal	SIGF_UC4(%esp),%eax	/* get ucontext */
	pushl	%eax
	testl	$PSL_VM,UC4_EFLAGS(%eax)
	jne	1f
	mov	UC4_GS(%eax),%gs	/* restore %gs */
1:
	movl	$344,%eax		/* 4.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
					/* on stack */
1:
	jmp	1b
#endif
#ifdef COMPAT_43
	ALIGN_TEXT
osigcode:
	call	*SIGF_HANDLER(%esp)	/* call signal handler */
	lea	SIGF_SC(%esp),%eax	/* get sigcontext */
	pushl	%eax
	testl	$PSL_VM,SC_PS(%eax)
	jne	9f
	mov	SC_GS(%eax),%gs		/* restore %gs */
9:
	movl	$103,%eax		/* 3.x SYS_sigreturn */
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
0:	jmp	0b
#endif /* COMPAT_43 */
	ALIGN_TEXT
esigcode:

	.data
	.globl	szsigcode
szsigcode:
	.long	esigcode-sigcode
#ifdef COMPAT_FREEBSD4
	.globl	szfreebsd4_sigcode
szfreebsd4_sigcode:
	.long	esigcode-freebsd4_sigcode
#endif
#ifdef COMPAT_43
	.globl	szosigcode
szosigcode:
	.long	esigcode-osigcode
#endif
	.text
/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */
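	/*
	 * The detection below amounts to this illustrative C, where
	 * "retaddr" and "arg5" name the stack slots 4(%ebp) and
	 * 24(%ebp):
	 *
	 *	if (retaddr == 0)
	 *		goto olddiskboot;	// old disk boot blocks
	 *	else if (arg5 == 0)
	 *		goto newboot;		// new uniform boot code
	 *	else
	 *		halt();			// old diskless netboot
	 */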
	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument, if it is 0
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot
	/*
	 * Seems we have been loaded by the old diskless boot code, we
	 * don't stand a chance of running as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt
	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this error
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb
2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:
	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb
	/*
	 * If we have a nfs_diskless structure copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev, cyloffset and esym are no longer used
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(bootdev)

	ret
/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	/* NexGen CPU does not have alignment check flag. */
trynexgen:
	movl	$CPU_NX586,R(cpu)
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(cpu_vendor+4)	#  ("NexGenDriven")
	movl	$0x6e657669,R(cpu_vendor+8)
	movl	$0,R(cpu_vendor+12)
	jmp	3f
try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	/*
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	jmp	3f			/* You may use Intel CPU. */

trycyrix:
	/*
	 * IBM Bluelighting CPU also doesn't change the undefined flags.
	 * Because IBM doesn't disclose the information for Bluelighting
	 * CPU, we couldn't distinguish it from Cyrix's (including IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(cpu_vendor+4)	#  ("CyrixInstead")
	movl	$0x64616574,R(cpu_vendor+8)
	jmp	3f
trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid					# cpuid 0
	movl	%eax,R(cpu_high)		# highest capability
	movl	%ebx,R(cpu_vendor)		# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)
	movl	$1,%eax
	cpuid					# cpuid 1
	movl	%eax,R(cpu_id)			# store cpu_id
	movl	%ebx,R(cpu_procinfo)		# store cpu_procinfo
	movl	%edx,R(cpu_feature)		# store cpu_feature
	movl	%ecx,R(cpu_feature2)		# store cpu_feature2
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f
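	/*
	 * CPUID leaf 1 reports the family in bits 11:8 of %eax, so the
	 * rotate and mask above compute, in illustrative C:
	 *
	 *	family = (cpu_id >> 8) & 0xf;
	 */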
	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(cpu)
3:
	ret
/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi
/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:
/* If we are told where the end of the kernel space is, believe it. */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:
	addl	$PDRMASK,%esi		/* Play conservative for now, and */
	andl	$~PDRMASK,%esi		/* ... wrap to next 4M. */
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */
/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)
	addl	$(KERNBASE-(KPTDI<<(PDRSHIFT-PAGE_SHIFT+PTESHIFT))),%esi
	movl	%esi,R(KPTmap)
/* Allocate Page Table Directory */
#if defined(PAE) || defined(PAE_TABLES)
	/* XXX only need 32 bytes (easier for now) */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePDPT)
#endif
	ALLOCPAGES(NPGPTD)
	movl	%esi,R(IdlePTD)
/* Allocate KSTACK */
	ALLOCPAGES(TD0_KSTACK_PAGES)
	movl	%esi,R(p0kpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0kstack)
	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)
/*
 * Enable PSE and PGE.
 */
	testl	$CPUID_PSE, R(cpu_feature)
	jz	1f
	movl	$PG_PS, R(pseflag)
	movl	%cr4, %eax
	orl	$CR4_PSE, %eax
	movl	%eax, %cr4
1:
	testl	$CPUID_PGE, R(cpu_feature)
	jz	2f
	movl	$PG_G, R(pgeflag)
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
2:
/*
 * Initialize page table pages mapping physical address zero through the
 * end of the kernel.  All of the page table entries allow read and write
 * access.  Write access to the first physical page is required by bios32
 * calls, and write access to the first 1 MB of physical memory is required
 * by ACPI for implementing suspend and resume.  We do this even if we've
 * enabled PSE above; we'll just switch the corresponding kernel PDEs
 * before we turn on paging.
 *
 * XXX: We waste some pages here in the PSE case!
 */
	xorl	%eax, %eax
	movl	R(KERNend), %ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys($PG_RW)
/* Map page table pages. */
	movl	R(KPTphys), %eax
	movl	$NKPT, %ecx
	fillkptphys($PG_RW)
/* Map page directory. */
#if defined(PAE) || defined(PAE_TABLES)
	movl	R(IdlePDPT), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)
#endif

	movl	R(IdlePTD), %eax
	movl	$NPGPTD, %ecx
	fillkptphys($PG_RW)
/* Map proc0's KSTACK in the physical way ... */
	movl	R(p0kpa), %eax
	movl	$(TD0_KSTACK_PAGES), %ecx
	fillkptphys($PG_RW)
/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)
/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)
/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)
/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)
/*
 * Create an identity mapping for low physical memory, including the kernel.
 * The part of this mapping that covers the first 1 MB of physical memory
 * becomes a permanent part of the kernel's address space.  The rest of this
 * mapping is destroyed in pmap_bootstrap().  Ordinarily, the same page table
 * pages are shared by the identity mapping and the kernel's native mapping.
 * However, the permanent identity mapping cannot contain PG_G mappings.
 * Thus, if the kernel is loaded within the permanent identity mapping, that
 * page table page must be duplicated and not shared.
 *
 * N.B. Due to errata concerning large pages and physical address zero,
 * a PG_PS mapping is not used.
 */
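/*
 * Stated as illustrative C, the special case handled below is:
 *
 *	if (KERNLOAD < (1 << PDRSHIFT) && pgeflag != 0)
 *		copy_first_kpt_page();	// hypothetical helper: duplicate
 *					// the shared page table page so
 *					// the identity map stays PG_G-free
 */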
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
#if KERNLOAD < (1 << PDRSHIFT)
	testl	$PG_G, R(pgeflag)
	jz	1f
	ALLOCPAGES(1)
	movl	%esi, %edi
	movl	R(IdlePTD), %eax
	movl	(%eax), %esi
	movl	%edi, (%eax)
	movl	$PAGE_SIZE, %ecx
	cld
	rep
	movsb
1:
#endif
/*
 * For the non-PSE case, install PDEs for PTs covering the KVA.
 * For the PSE case, do the same, but clobber the ones corresponding
 * to the kernel (from btext to KERNend) with 4M (2M for PAE) ('PS')
 * PDEs immediately after.
 */
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
	cmpl	$0,R(pseflag)
	je	done_pde
	movl	R(KERNend), %ecx
	movl	$KERNLOAD, %eax
	subl	%eax, %ecx
	shrl	$PDRSHIFT, %ecx
	movl	$(KPTDI+(KERNLOAD/(1 << PDRSHIFT))), %ebx
	shll	$PDESHIFT, %ebx
	addl	R(IdlePTD), %ebx
	orl	$(PG_V|PG_RW|PG_PS), %eax
1:	movl	%eax, (%ebx)
	addl	$(1 << PDRSHIFT), %eax
	addl	$PDESIZE, %ebx
	loop	1b
done_pde:
/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$NPGPTD, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
#if defined(PAE) || defined(PAE_TABLES)
	movl	R(IdlePTD), %eax
	xorl	%ebx, %ebx
	movl	$NPGPTD, %ecx
	fillkpt(R(IdlePDPT), $0x0)
#endif

	ret
/* Xen Hypercall page */
	.p2align PAGE_SHIFT, 0x90	/* Hypercall_page needs to be PAGE aligned */

NON_GPROF_ENTRY(hypercall_page)
	.skip	0x1000, 0x90		/* Fill with "nop"s */
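/*
 * The hypervisor rewrites this page at boot with one 32-byte call stub
 * per hypercall, so (illustrative C) hypercall "n" is reached by an
 * indirect call to a fixed offset within the page:
 *
 *	typedef long (*hypercall_t)(long, long, long);
 *	hypercall_t hc = (hypercall_t)((char *)hypercall_page + n * 32);
 *	long rv = hc(arg1, arg2, arg3);
 */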