kernel - Fix some rare pmap races in i386 and x86_64.
[dragonfly.git] / sys / platform / pc32 / i386 / locore.s
blob d550de1f94eb0bea8bbfd2884e6cbb45e978695c
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD: src/sys/i386/i386/locore.s,v 1.132.2.10 2003/02/03 20:54:49 jhb Exp $
 * $DragonFly: src/sys/platform/pc32/i386/locore.s,v 1.13 2007/01/08 03:33:42 dillon Exp $
 *
 *	originally from: locore.s, by William F. Jolitz
 *
 *	Substantially rewritten by David Greenman, Rod Grimes,
 *	Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *	and many others.
 */
#include "opt_bootp.h"
#include "opt_nfsroot.h"

#include <sys/syscall.h>
#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"
/*
 *	XXX
 *
 * Note: This version was greatly munged to avoid various assembler errors
 * that may be fixed in newer versions of gas.  Perhaps newer versions
 * will have a more pleasant appearance.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)
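
/*
 * In effect, once the page directory is installed as its own page table
 * at index PTDPTDI, the pte for any virtual address va becomes visible at
 *
 *	PTmap + (va >> PAGE_SHIFT) * PDESIZE
 *
 * PTD and PTDpde above are simply that formula applied to the page
 * directory itself.
 */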
/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 */
	.globl	APTmap,APTD,APTDpde
	.set	APTmap,APTDPTDI << PDRSHIFT
	.set	APTD,APTmap + (APTDPTDI * PAGE_SIZE)
	.set	APTDpde,PTD + (APTDPTDI * PDESIZE)

/*
 * Compiled KERNBASE location
 */
	.globl	kernbase
	.set	kernbase,KERNBASE
/*
 * Globals
 */
	.data
	ALIGN_DATA			/* just to be sure */

	.globl	.tmpstk
	.space	0x2000			/* space for tmpstk - temporary stack */
.tmpstk:

	.globl	boothowto,bootdev,bootinfo

bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo buffer space */

KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

#if 0
	.globl	cpu0prvpage
cpu0prvpage:	.long	0		/* relocated version */
#endif
cpu0pp:		.long	0		/* phys addr cpu0 private pg */
cpu0idlestk:	.long	0		/* stack for the idle thread */

	.globl	SMPpt
SMPptpa:	.long	0		/* phys addr SMP page table */
SMPpt:		.long	0		/* relocated version */

	.globl	IdlePTD
IdlePTD:	.long	0		/* phys addr of kernel PTD */

	.globl	KPTphys
KPTphys:	.long	0		/* PA of kernel page tables */

	.globl	proc0paddr
proc0paddr:	.long	0		/* VA of proc 0 address space */
p0upa:		.long	0		/* PA of proc0's UPAGES */

vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0		/* address of vm86 region */
vm86pa:		.long	0		/* phys addr of vm86 region */

#ifdef BDE_DEBUGGER
	.globl	bdb_exists		/* BDE debugger is present */
bdb_exists:	.long	0
#endif
/**********************************************************************
 *
 * Some handy macros
 *
 */

#define R(foo) ((foo)-KERNBASE)
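
/*
 * Until paging is enabled we are executing at the physical load address,
 * while all symbols are linked at KERNBASE-relative virtual addresses;
 * R() converts a linked symbol address into the physical address it
 * currently occupies.
 */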
#define ALLOCPAGES(foo)			  \
	movl	R(physfree), %esi	; \
	movl	$((foo)*PAGE_SIZE), %eax ; \
	addl	%esi, %eax		; \
	movl	%eax, R(physfree)	; \
	movl	%esi, %edi		; \
	movl	$((foo)*PAGE_SIZE),%ecx	; \
	xorl	%eax,%eax		; \
	cld				; \
	rep				; \
	stosb
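
/*
 * ALLOCPAGES is a simple bump allocator: it carves (foo) pages off the
 * front of free physical memory at physfree, advances physfree past them,
 * zeroes the pages with rep stosb, and leaves the physical base address
 * of the allocation in %esi (%eax, %ecx and %edi are clobbered).
 */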
/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define fillkpt(base, prot)		  \
	shll	$2,%ebx			; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$4,%ebx			; /* next pte */ \
	loop	1b
/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(KPTphys), prot)
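
/*
 * For example, the "Map page directory" code below maps one read/write
 * page at the physical address in %eax:
 *
 *	movl	R(IdlePTD), %eax
 *	movl	$1, %ecx
 *	fillkptphys($PG_RW)
 *
 * fillkptphys derives the pte index from the physical address itself,
 * so everything mapped this way sits at physical == virtual - KERNBASE.
 */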
	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)
#ifdef BDE_DEBUGGER
#ifdef BIOS_STEALS_3K
	cmpl	$0x0375c339,0x95504
#else
	cmpl	$0x0375c339,0x96104	/* XXX - debugger signature */
#endif
	jne	1f
	movb	$1,R(bdb_exists)
1:
#endif
	/* Tell the bios to warmboot next time */
	movw	$0x1234,0x472
	/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

	/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

	/*
	 * Don't trust what the BIOS gives for %fs and %gs.  Trust the
	 * bootstrap to set %cs, %ds, %es and %ss.
	 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs
	/*
	 * Clear the bss.  Not all boot programs do it, and it is our job
	 * anyway.
	 *
	 * XXX we don't check that there is memory for our bss and page
	 * tables before using it.
	 *
	 * Note: we must be careful to not overwrite an active gdt or idt.
	 * They are inactive from now until we switch to new ones, since
	 * we don't load any more segment registers or permit interrupts
	 * until after the switch.
	 */
	movl	$R(_end),%ecx
	movl	$R(_edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb
	call	recover_bootinfo

	/* Get onto a stack that we can trust. */
	/*
	 * XXX this step is delayed in case recover_bootinfo needs to return
	 * via the old stack, but it need not be, since recover_bootinfo
	 * actually returns via the old frame.
	 */
	movl	$R(.tmpstk),%esp

	call	identify_cpu

	call	create_pagetables
	/*
	 * If the CPU has support for VME, turn it on.
	 */
	testl	$CPUID_VME, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

#ifdef BDE_DEBUGGER
	/*
	 * Adjust as much as possible for paging before enabling paging so
	 * that the adjustments can be traced.
	 */
	call	bdb_prepare_paging
#endif

	/* Now enable paging */
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

#ifdef BDE_DEBUGGER
	/*
	 * Complete the adjustments for paging so that we can keep tracing
	 * through init386() after the low (physical) addresses for the
	 * gdt and idt become invalid.
	 */
	call	bdb_commit_paging
#endif

	pushl	$begin			/* jump to high virtualized address */
	ret
	/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/*
	 * Set up the bootstrap stack.  The pcb sits at the end of the
	 * bootstrap stack.
	 */
	movl	proc0paddr,%esp		/* location of in-kernel pages */
	addl	$UPAGES*PAGE_SIZE-PCB_SIZE,%esp
	xorl	%eax,%eax		/* mark end of frames */
	movl	%eax,%ebp
	/*movl	proc0paddr,%eax*/
	movl	IdlePTD, %esi
	movl	%esi,PCB_CR3(%esp)

	testl	$CPUID_PGE, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:
	movl	physfree, %esi
	pushl	%esi			/* value of first for init386(first) */

	call	init386			/* wire 386 chip for unix operation */
	popl	%esi

	call	mi_startup		/* autoconfiguration, mountroot etc */

	hlt				/* never returns to here */
/*
 * Signal trampoline, copied to top of user stack
 */
NON_GPROF_ENTRY(sigcode)
	call	*SIGF_HANDLER(%esp)	/* call signal handler */
	lea	SIGF_UC(%esp),%eax	/* get ucontext_t */
	pushl	%eax
	testl	$PSL_VM,UC_EFLAGS(%eax)
	jne	9f
	movl	UC_GS(%eax),%gs		/* restore %gs */
9:
	movl	$SYS_sigreturn,%eax
	pushl	%eax			/* junk to fake return addr. */
	int	$0x80			/* enter kernel with args */
0:	jmp	0b

	ALIGN_TEXT
esigcode:

	.data
	.globl	szsigcode
szsigcode:
	.long	esigcode - sigcode
	.text
/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  This is used to detect how we get the
	 * arguments from the other code and what we do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */
	/*
	 * The old style disk boot blocks fake a frame on the stack and
	 * do an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
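	/*
	 * Either way the caller's arguments sit at the usual frame
	 * offsets: 4(%ebp) return address, 8(%ebp) howto, 12(%ebp)
	 * bootdev, and the 3rd..6th arguments at 16..28(%ebp).  The
	 * tests below key off 4(%ebp), 24(%ebp) (the 5th argument)
	 * and 28(%ebp) (&bootinfo).
	 */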
	cmpl	$0,4(%ebp)
	je	olddiskboot
	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code or the new uniform code.  That can
	 * be detected by looking at the 5th argument: if it is 0,
	 * we are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * Seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname copy it in
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb
2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:
	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb
#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have a nfs_diskless structure copy it in
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif
	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(bootdev)

	ret

/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:
	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl
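
	/*
	 * %eax is now nonzero iff the AC bit could actually be changed,
	 * i.e. the CPU is at least a 486; the pushl %ecx / popfl above
	 * restored the original eflags.
	 */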
	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(cpu)
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(cpu_vendor+4)
	movl	$0x6e657669,R(cpu_vendor+8)
	movl	$0,R(cpu_vendor+12)
	jmp	3f
try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(cpu)
	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* You may use Intel CPU. */

trycyrix:
	popfl
	/*
	 * IBM Blue Lightning CPU also doesn't change the undefined flags.
	 * Because IBM doesn't disclose the information for Blue Lightning
	 * CPU, we couldn't distinguish it from Cyrix's (including IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(cpu_vendor+4)
	movl	$0x64616574,R(cpu_vendor+8)
	jmp	3f
trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid					# cpuid 0
	movl	%eax,R(cpu_high)		# highest capability
	movl	%ebx,R(cpu_vendor)		# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)

	movl	$1,%eax
	cpuid					# cpuid 1
	movl	%eax,R(cpu_id)			# store cpu_id
	movl	%ebx,R(cpu_procinfo)		# store cpu_procinfo
	movl	%edx,R(cpu_feature)		# store cpu_feature
	movl	%ecx,R(cpu_feature2)		# store cpu_feature2
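
	/*
	 * cpuid 1 returns the processor signature in %eax: bits 3:0
	 * stepping, 7:4 model, 11:8 family.  The rorl/andl below rotate
	 * the family nibble into the low bits and mask it, so %eax ends
	 * up holding just the family (4 = 486, 5 = Pentium, 6 and up =
	 * Pentium Pro class).
	 */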
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(cpu)
3:
	ret
/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */
create_pagetables:
	/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(end),%esi

	/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:

	/* If we are told where the end of the kernel space is, believe it. */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PAGE_MASK,%esi
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */
	/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)

	/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePTD)

	/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(p0upa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0paddr)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)

	/* Allocate cpu0's mdglobaldata */
	ALLOCPAGES(MDGLOBALDATA_BASEALLOC_PAGES)
	movl	%esi,R(cpu0pp)
#if 0
	addl	$KERNBASE, %esi
	movl	%esi, R(cpu0prvpage)	/* relocated to KVM space */
#endif

	/* Allocate cpu0's idle stack */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(cpu0idlestk)

	/* Allocate SMP page table page */
	ALLOCPAGES(1)
	movl	%esi,R(SMPptpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(SMPpt)		/* relocated to KVM space */
	/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax
#ifdef BDE_DEBUGGER
	/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx

#if !defined(SMP)
	testl	$CPUID_PGE, R(cpu_feature)
	jz	2f
	orl	$PG_G,%edx
#endif

2:	movl	$R(etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)

	/* Map read-write, data, bss and symbols */
	movl	$R(etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax
map_read_write:
	movl	$PG_RW,%edx
#if !defined(SMP)
	testl	$CPUID_PGE, R(cpu_feature)
	jz	1f
	orl	$PG_G,%edx
#endif

1:	movl	R(KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys(%edx)
	/* Map page directory. */
	movl	R(IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

	/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$UPAGES, %ecx
	fillkptphys($PG_RW)

	/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

	/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

	/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

	/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)
#if 0
	/* Map cpu0's mdglobaldata into global kmem (N pages @ cpu0pp) */
	movl	R(cpu0pp), %eax
	movl	$MDGLOBALDATA_BASEALLOC_PAGES, %ecx
	fillkptphys($PG_RW)
#endif

	/* Map SMP page table page into global kmem FWIW */
	movl	R(SMPptpa), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

	/* Map the private page into the SMP page table */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	/* N private pages coming right up */
	movl	$MDGLOBALDATA_BASEALLOC_PAGES, %ecx
	fillkpt(R(SMPptpa), $PG_RW)

	/* Map the cpu0's idle thread stack */
	movl	R(cpu0idlestk), %eax
	movl	$PS_IDLESTACK_PAGE, %ebx
	movl	$UPAGES, %ecx
	fillkpt(R(SMPptpa), $PG_RW)

	/* ... and put the page table page in the pde. */
	movl	R(SMPptpa), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

	/* Fakeup VA for the local apic to allow early traps. */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$(NPTEPG-1), %ebx	/* pte offset = NPTEPG-1 */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

#ifdef SMP
	/* Initialize mp lock to allow early traps */
	movl	$0, R(mp_lock)
#endif	/* SMP */
	/* install a pde for temporary double map of bottom of VA */
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
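
	/*
	 * Without this identity mapping the instruction fetch immediately
	 * after paging is enabled would fault: we are still executing at
	 * low physical addresses until btext's "pushl $begin; ret"
	 * sequence (above) transfers control to the KERNBASE-relative
	 * addresses the kernel is linked at.
	 */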
	/* install pde's for pt's */
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)

	/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(IdlePTD), $PG_RW)

	ret
#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)
	movl	R(idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp

bdb_prepare_paging_exit:
	ret
/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)
	lidt	(%esp)
	addl	$6,%esp

	int	$3

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */