/*
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1994 David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from:   @(#)pmap.c	7.7 (Berkeley)	5/12/91
 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $
 * $DragonFly: src/sys/platform/pc32/i386/pmap.c,v 1.87 2008/08/25 17:01:38 dillon Exp $
 */
/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */
#include "opt_disable_pse.h"
#include "opt_msgbuf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/sysctl.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/globaldata.h>
#include <machine/pmap.h>
#include <machine/pmap_inval.h>

#define PMAP_KEEP_PDIRS
#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#if defined(DIAGNOSTIC)
#define PMAP_DIAGNOSTIC
#endif

#if !defined(PMAP_DIAGNOSTIC)
#define PMAP_INLINE __inline
#else
#define PMAP_INLINE
#endif
/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define pdir_pde(m, v)	(m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

/*
 * Given a map and a machine independent protection code,
 * convert to a vax protection code.
 */
#define pte_prot(m, p)	\
	(protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
static int protection_codes[8];
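
/*
 * Illustrative sketch, not part of the original source: protection_codes[]
 * is indexed by the VM_PROT_* bits and filled in by i386_protection_init()
 * (prototyped below), essentially mapping read-only combinations to 0 and
 * writable combinations to PG_RW, since i386 page tables cannot express
 * execute permission separately.  A caller would typically combine the
 * result with a frame and validity bits; the helper name
 * pmap_example_mkpte() is hypothetical.
 */
static __inline unsigned
pmap_example_mkpte(pmap_t pmap, vm_paddr_t pa, vm_prot_t prot)
{
	/* frame | R/W bit from the protection table | valid + user */
	return ((unsigned)(pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V | PG_U);
}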
struct pmap kernel_pmap;
static TAILQ_HEAD(,pmap) pmap_list = TAILQ_HEAD_INITIALIZER(pmap_list);

vm_paddr_t avail_start;		/* PA of first available physical page */
vm_paddr_t avail_end;		/* PA of last available physical page */
vm_offset_t virtual_start;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t KvaStart;		/* VA start of KVA space */
vm_offset_t KvaEnd;		/* VA end of KVA space (non-inclusive) */
vm_offset_t KvaSize;		/* max size of kernel virtual address space */
static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
static int pgeflag;		/* PG_G or-in */
static int pseflag;		/* PG_PS or-in */

static vm_object_t kptobj;

static int nkpt;
vm_offset_t kernel_vm_end;

/*
 * Data for the pv entry allocation mechanism
 */
static vm_zone_t pvzone;
static struct vm_zone pvzone_store;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static int pmap_pagedaemon_waken = 0;
static struct pv_entry *pvinit;
/*
 * All those kernel PT submaps that BSD is so fond of
 */
pt_entry_t *CMAP1 = 0, *ptmmap;
caddr_t CADDR1 = 0, ptvmmap = 0;
static pt_entry_t *msgbufmap;
struct msgbuf *msgbufp = 0;

static pt_entry_t *pt_crashdumpmap;
static caddr_t crashdumpmap;

extern pt_entry_t *SMPpt;
static PMAP_INLINE void	free_pv_entry (pv_entry_t pv);
static unsigned * get_ptbase (pmap_t pmap);
static pv_entry_t get_pv_entry (void);
static void	i386_protection_init (void);
static __inline void	pmap_clearbit (vm_page_t m, int bit);

static void	pmap_remove_all (vm_page_t m);
static void	pmap_enter_quick (pmap_t pmap, vm_offset_t va, vm_page_t m);
static int	pmap_remove_pte (struct pmap *pmap, unsigned *ptq,
				 vm_offset_t sva, pmap_inval_info_t info);
static void	pmap_remove_page (struct pmap *pmap,
				  vm_offset_t va, pmap_inval_info_t info);
static int	pmap_remove_entry (struct pmap *pmap, vm_page_t m,
				   vm_offset_t va, pmap_inval_info_t info);
static boolean_t pmap_testbit (vm_page_t m, int bit);
static void	pmap_insert_entry (pmap_t pmap, vm_offset_t va,
				   vm_page_t mpte, vm_page_t m);

static vm_page_t pmap_allocpte (pmap_t pmap, vm_offset_t va);

static int pmap_release_free_page (pmap_t pmap, vm_page_t p);
static vm_page_t _pmap_allocpte (pmap_t pmap, unsigned ptepindex);
static unsigned * pmap_pte_quick (pmap_t pmap, vm_offset_t va);
static vm_page_t pmap_page_lookup (vm_object_t object, vm_pindex_t pindex);
static int pmap_unuse_pt (pmap_t, vm_offset_t, vm_page_t, pmap_inval_info_t);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);

static unsigned pdir4mb;
/*
 * Move the kernel virtual free pointer to the next
 * 4MB.  This is used to help improve performance
 * by using a large (4MB) page for much of the kernel
 * (.text, .data, .bss)
 */
static vm_offset_t
pmap_kmem_choose(vm_offset_t addr)
{
	vm_offset_t newaddr = addr;
#ifndef DISABLE_PSE
	if (cpu_feature & CPUID_PSE) {
		newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
	}
#endif
	return newaddr;
}
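
/*
 * Worked example of the rounding above (illustrative only, assuming the
 * usual non-PAE NBPDR of 4MB = 0x400000): an addr of 0xc0531000 becomes
 * (0xc0531000 + 0x3fffff) & ~0x3fffff == 0xc0800000, i.e. the next 4MB
 * boundary, so the large kernel page can cover everything below it.
 */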
/*
 * Extract the page table entry associated with the given map/virtual
 * pair.
 *
 * This function may NOT be called from an interrupt.
 */
PMAP_INLINE unsigned *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	unsigned *pdeaddr;

	if (pmap) {
		pdeaddr = (unsigned *) pmap_pde(pmap, va);
		if (*pdeaddr & PG_PS)
			return pdeaddr;
		if (*pdeaddr) {
			return get_ptbase(pmap) + i386_btop(va);
		}
	}
	return (0);
}
/*
 * Super fast pmap_pte routine best used when scanning the pv lists.
 * This eliminates many course-grained invltlb calls.  Note that many of
 * the pv list scans are across different pmaps and it is very wasteful
 * to do an entire invltlb when checking a single mapping.
 *
 * Should only be called while in a critical section.
 */
static unsigned *
pmap_pte_quick(pmap_t pmap, vm_offset_t va)
{
	struct mdglobaldata *gd = mdcpu;
	unsigned pde, newpf;

	if ((pde = (unsigned) pmap->pm_pdir[va >> PDRSHIFT]) != 0) {
		unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
		unsigned index = i386_btop(va);
		/* are we current address space or kernel? */
		if ((pmap == &kernel_pmap) ||
		    (frame == (((unsigned) PTDpde) & PG_FRAME))) {
			return (unsigned *) PTmap + index;
		}
		newpf = pde & PG_FRAME;
		if ( ((* (unsigned *) gd->gd_PMAP1) & PG_FRAME) != newpf) {
			* (unsigned *) gd->gd_PMAP1 = newpf | PG_RW | PG_V;
			cpu_invlpg(gd->gd_PADDR1);
		}
		return gd->gd_PADDR1 + ((unsigned) index & (NPTEPG - 1));
	}
	return (0);
}
/*
 * Bootstrap the system enough to run with virtual memory.
 *
 * On the i386 this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address "KERNBASE" to the actual
 * (physical) address starting relative to 0]
 */
void
pmap_bootstrap(vm_paddr_t firstaddr, vm_paddr_t loadaddr)
{
	vm_offset_t va;
	pt_entry_t *pte;
	struct mdglobaldata *gd;
	int i;
	int pg;

	KvaStart = (vm_offset_t)VADDR(PTDPTDI, 0);
	KvaSize = (vm_offset_t)VADDR(APTDPTDI, 0) - KvaStart;
	KvaEnd = KvaStart + KvaSize;

	avail_start = firstaddr;

	/*
	 * XXX The calculation of virtual_start is wrong. It's NKPT*PAGE_SIZE
	 * too large. It should instead be correctly calculated in locore.s and
	 * not based on 'first' (which is a physical address, not a virtual
	 * address, for the start of unused physical memory). The kernel
	 * page tables are NOT double mapped and thus should not be included
	 * in this calculation.
	 */
	virtual_start = (vm_offset_t) KERNBASE + firstaddr;
	virtual_start = pmap_kmem_choose(virtual_start);
	virtual_end = VADDR(KPTDI+NKPDE-1, NPTEPG-1);

	/*
	 * Initialize protection array.
	 */
	i386_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't have to use
	 * pmap_create, which is unlikely to work correctly at this part of
	 * the boot sequence (XXX and which no longer exists).
	 */
	kernel_pmap.pm_pdir = (pd_entry_t *)(KERNBASE + (u_int)IdlePTD);
	kernel_pmap.pm_count = 1;
	kernel_pmap.pm_active = (cpumask_t)-1;	/* don't allow deactivation */
	TAILQ_INIT(&kernel_pmap.pm_pvlist);

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

	va = virtual_start;
	pte = (pt_entry_t *) pmap_pte(&kernel_pmap, va);

	/*
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
	 */
	SYSMAP(caddr_t, CMAP1, CADDR1, 1)

	/*
	 * Crashdump maps.
	 */
	SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);

	/*
	 * ptvmmap is used for reading arbitrary physical pages via
	 * /dev/mem.
	 */
	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)

	/*
	 * msgbufp is used to map the system message buffer.
	 * XXX msgbufmap is not used.
	 */
	SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
	       atop(round_page(MSGBUF_SIZE)))

	virtual_start = va;

	for (i = 0; i < NKPT; i++)
		PTD[i] = 0;

	/*
	 * PG_G is terribly broken on SMP because we IPI invltlb's in some
	 * cases rather then invl1pg.  Actually, I don't even know why it
	 * works under UP because self-referential page table mappings
	 * are used.
	 */
	if (cpu_feature & CPUID_PGE)
		pgeflag = PG_G;

	/*
	 * Initialize the 4MB page size flag
	 */
	pseflag = 0;

	/*
	 * The 4MB page version of the initial
	 * kernel page mapping.
	 */
	pdir4mb = 0;

#if !defined(DISABLE_PSE)
	if (cpu_feature & CPUID_PSE) {
		unsigned ptditmp;
		/*
		 * Note that we have enabled PSE mode
		 */
		pseflag = PG_PS;
		ptditmp = *((unsigned *)PTmap + i386_btop(KERNBASE));
		ptditmp &= ~(NBPDR - 1);
		ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
		pdir4mb = ptditmp;

		/*
		 * Enable the PSE mode.  If we are SMP we can't do this
		 * now because the APs will not be able to use it when
		 * they boot up.
		 */
		load_cr4(rcr4() | CR4_PSE);

		/*
		 * We can do the mapping here for the single processor
		 * case.  We simply ignore the old page table page from
		 * now on.
		 *
		 * For SMP, we still need 4K pages to bootstrap APs,
		 * PSE will be enabled as soon as all APs are up.
		 */
		PTD[KPTDI] = (pd_entry_t)ptditmp;
		kernel_pmap.pm_pdir[KPTDI] = (pd_entry_t)ptditmp;
	}
#endif

	if (cpu_apic_address == 0)
		panic("pmap_bootstrap: no local apic!");

	/* local apic is mapped on last page */
	SMPpt[NPTEPG - 1] = (pt_entry_t)(PG_V | PG_RW | PG_N | pgeflag |
	    (cpu_apic_address & PG_FRAME));

	/*
	 * We need to finish setting up the globaldata page for the BSP.
	 * locore has already populated the page table for the mdglobaldata
	 * portion.
	 */
	pg = MDGLOBALDATA_BASEALLOC_PAGES;
	gd = &CPU_prvspace[0].mdglobaldata;
	gd->gd_CMAP1 = &SMPpt[pg + 0];
	gd->gd_CMAP2 = &SMPpt[pg + 1];
	gd->gd_CMAP3 = &SMPpt[pg + 2];
	gd->gd_PMAP1 = &SMPpt[pg + 3];
	gd->gd_CADDR1 = CPU_prvspace[0].CPAGE1;
	gd->gd_CADDR2 = CPU_prvspace[0].CPAGE2;
	gd->gd_CADDR3 = CPU_prvspace[0].CPAGE3;
	gd->gd_PADDR1 = (unsigned *)CPU_prvspace[0].PPAGE1;

	cpu_invltlb();
}
/*
 * Set 4mb pdir for mp startup
 */
void
pmap_set_opt(void)
{
	if (pseflag && (cpu_feature & CPUID_PSE)) {
		load_cr4(rcr4() | CR4_PSE);
		if (pdir4mb && mycpu->gd_cpuid == 0) {	/* only on BSP */
			kernel_pmap.pm_pdir[KPTDI] =
			    PTD[KPTDI] = (pd_entry_t)pdir4mb;
			cpu_invltlb();
		}
	}
}
/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 * pmap_init has been enhanced to support in a fairly consistent
 * way, discontiguous physical memory.
 */
void
pmap_init(void)
{
	int i;
	int initial_pvs;
	vm_page_t m;

	/*
	 * object for kernel page table pages
	 */
	kptobj = vm_object_allocate(OBJT_DEFAULT, NKPDE);

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * per-page pv list headers.
	 */
	for(i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		TAILQ_INIT(&m->md.pv_list);
		m->md.pv_list_count = 0;
	}

	/*
	 * init the pv free list
	 */
	initial_pvs = vm_page_array_size;
	if (initial_pvs < MINPV)
		initial_pvs = MINPV;
	pvzone = &pvzone_store;
	pvinit = (struct pv_entry *) kmem_alloc(&kernel_map,
		initial_pvs * sizeof (struct pv_entry));
	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
		initial_pvs);

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	pmap_initialized = TRUE;
}
/*
 * Initialize the address space (zone) for the pv_entries.  Set a
 * high water mark so that the system can recover from excessive
 * numbers of pv entries.
 */
void
pmap_init2(void)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
}
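
/*
 * Sizing example (illustrative only, not from the original source): with
 * the default PMAP_SHPGPERPROC of 200, a hypothetical maxproc of 1000 and
 * vm_page_array_size of 250000 pages, pv_entry_max would start at
 * 200 * 1000 + 250000 = 450000 entries, and pv_entry_high_water at
 * 9 * (450000 / 10) = 405000, unless overridden by the vm.pmap.pv_entries
 * tunable.
 */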
/***************************************************
 * Low level helper routines.....
 ***************************************************/

#if defined(PMAP_DIAGNOSTIC)

/*
 * This code checks for non-writeable/modified pages.
 * This should be an invalid condition.
 */
static int
pmap_nw_modified(pt_entry_t ptea)
{
	int pte;

	pte = (int) ptea;

	if ((pte & (PG_M|PG_RW)) == PG_M)
		return 1;
	else
		return 0;
}
#endif

/*
 * this routine defines the region(s) of memory that should
 * not be tested for the modified bit.
 */
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va)
{
	if ((va < clean_sva) || (va >= clean_eva))
		return 1;
	else
		return 0;
}
static unsigned *
get_ptbase(pmap_t pmap)
{
	unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
	struct globaldata *gd = mycpu;

	/* are we current address space or kernel? */
	if (pmap == &kernel_pmap || frame == (((unsigned) PTDpde) & PG_FRAME)) {
		return (unsigned *) PTmap;
	}

	/* otherwise, we are alternate address space */
	KKASSERT(gd->gd_intr_nesting_level == 0 &&
		 (gd->gd_curthread->td_flags & TDF_INTTHREAD) == 0);

	if (frame != (((unsigned) APTDpde) & PG_FRAME)) {
		APTDpde = (pd_entry_t)(frame | PG_RW | PG_V);
		/* The page directory is not shared between CPUs */
		cpu_invltlb();
	}
	return (unsigned *) APTmap;
}
/*
 * Extract the physical page address associated with the map/VA pair.
 *
 * This function may not be called from an interrupt if the pmap is
 * not the kernel_pmap.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	vm_offset_t rtval;
	vm_offset_t pdirindex;

	pdirindex = va >> PDRSHIFT;
	if (pmap && (rtval = (unsigned) pmap->pm_pdir[pdirindex])) {
		unsigned *pte;
		if ((rtval & PG_PS) != 0) {
			rtval &= ~(NBPDR - 1);
			rtval |= va & (NBPDR - 1);
			return rtval;
		}
		pte = get_ptbase(pmap) + i386_btop(va);
		rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
		return rtval;
	}
	return (0);
}
/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Routine: pmap_kenter
 * Function:
 *	Add a wired page to the KVA
 *	NOTE! note that in order for the mapping to take effect -- you
 *	should do an invltlb after doing the pmap_kenter().
 */
void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{
	unsigned *pte;
	unsigned npte;
	pmap_inval_info info;

	pmap_inval_init(&info);
	npte = pa | PG_RW | PG_V | pgeflag;
	pte = (unsigned *)vtopte(va);
	pmap_inval_add(&info, &kernel_pmap, va);
	*pte = npte;
	pmap_inval_flush(&info);
}
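
/*
 * Usage sketch (illustrative only, not part of the original file): map a
 * physical page at a caller-supplied scratch KVA, copy it out, then tear
 * the mapping down with pmap_kremove() (defined further below, prototype
 * assumed visible via <machine/pmap.h>).  The helper name and scratch_va
 * argument are hypothetical.
 */
static __inline void
pmap_example_copy_phys(vm_offset_t scratch_va, vm_paddr_t pa, void *buf)
{
	pmap_kenter(scratch_va, pa);	/* enter + invalidate on all cpus */
	bcopy((void *)scratch_va, buf, PAGE_SIZE);
	pmap_kremove(scratch_va);	/* tear down the scratch mapping */
}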
/*
 * Routine: pmap_kenter_quick
 * Function:
 *	Similar to pmap_kenter(), except we only invalidate the
 *	mapping on the current CPU.
 */
void
pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
{
	unsigned *pte;
	unsigned npte;

	npte = pa | PG_RW | PG_V | pgeflag;
	pte = (unsigned *)vtopte(va);
	*pte = npte;
	cpu_invlpg((void *)va);
}
void
pmap_kenter_sync(vm_offset_t va)
{
	pmap_inval_info info;

	pmap_inval_init(&info);
	pmap_inval_add(&info, &kernel_pmap, va);
	pmap_inval_flush(&info);
}

void
pmap_kenter_sync_quick(vm_offset_t va)
{
	cpu_invlpg((void *)va);
}
/*
 * remove a page from the kernel pagetables
 */
void
pmap_kremove(vm_offset_t va)
{
	unsigned *pte;
	pmap_inval_info info;

	pmap_inval_init(&info);
	pte = (unsigned *)vtopte(va);
	pmap_inval_add(&info, &kernel_pmap, va);
	*pte = 0;
	pmap_inval_flush(&info);
}

void
pmap_kremove_quick(vm_offset_t va)
{
	unsigned *pte;

	pte = (unsigned *)vtopte(va);
	*pte = 0;
	cpu_invlpg((void *)va);
}
/*
 * XXX these need to be recoded.  They are not used in any critical path.
 */
void
pmap_kmodify_rw(vm_offset_t va)
{
	*vtopte(va) |= PG_RW;
	cpu_invlpg((void *)va);
}

void
pmap_kmodify_nc(vm_offset_t va)
{
	*vtopte(va) |= PG_N;
	cpu_invlpg((void *)va);
}
/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vm_offset_t
pmap_map(vm_offset_t virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	while (start < end) {
		pmap_kenter(virt, start);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return (virt);
}
/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	vm_offset_t end_va;

	end_va = va + count * PAGE_SIZE;

	while (va < end_va) {
		unsigned *pte;

		pte = (unsigned *)vtopte(va);
		*pte = VM_PAGE_TO_PHYS(*m) | PG_RW | PG_V | pgeflag;
		cpu_invlpg((void *)va);
		va += PAGE_SIZE;
		m++;
	}
#ifdef SMP
	smp_invltlb();	/* XXX */
#endif
}
void
pmap_qenter2(vm_offset_t va, vm_page_t *m, int count, cpumask_t *mask)
{
	vm_offset_t end_va;
	cpumask_t cmask = mycpu->gd_cpumask;

	end_va = va + count * PAGE_SIZE;

	while (va < end_va) {
		unsigned *pte;
		unsigned pteval;

		/*
		 * Install the new PTE.  If the pte changed from the prior
		 * mapping we must reset the cpu mask and invalidate the page.
		 * If the pte is the same but we have not seen it on the
		 * current cpu, invlpg the existing mapping.  Otherwise the
		 * entry is optimal and no invalidation is required.
		 */
		pte = (unsigned *)vtopte(va);
		pteval = VM_PAGE_TO_PHYS(*m) | PG_A | PG_RW | PG_V | pgeflag;
		if (*pte != pteval) {
			*mask = 0;
			*pte = pteval;
			cpu_invlpg((void *)va);
		} else if ((*mask & cmask) == 0) {
			cpu_invlpg((void *)va);
		}
		va += PAGE_SIZE;
		m++;
	}
	*mask |= cmask;
}
/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	vm_offset_t end_va;

	end_va = va + count*PAGE_SIZE;

	while (va < end_va) {
		unsigned *pte;

		pte = (unsigned *)vtopte(va);
		*pte = 0;
		cpu_invlpg((void *)va);
		va += PAGE_SIZE;
	}
}
/*
 * This routine works like vm_page_lookup() but also blocks as long as the
 * page is busy.  This routine does not busy the page it returns.
 *
 * Unless the caller is managing objects whos pages are in a known state,
 * the call should be made with a critical section held so the page's object
 * association remains valid on return.
 */
static vm_page_t
pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	do {
		m = vm_page_lookup(object, pindex);
	} while (m && vm_page_sleep_busy(m, FALSE, "pplookp"));
	return(m);
}
/*
 * Create a new thread and optionally associate it with a (new) process.
 * NOTE! the new thread's cpu may not equal the current cpu.
 */
void
pmap_init_thread(thread_t td)
{
	/* enforce pcb placement */
	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
	td->td_savefpu = &td->td_pcb->pcb_save;
	td->td_sp = (char *)td->td_pcb - 16;
}
/*
 * This routine directly affects the fork perf for a process.
 */
void
pmap_init_proc(struct proc *p)
{
}

/*
 * Dispose the UPAGES for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 */
void
pmap_dispose_proc(struct proc *p)
{
	KASSERT(p->p_lock == 0, ("attempt to dispose referenced proc! %p", p));
}
/***************************************************
 * Page table page management routines.....
 ***************************************************/

/*
 * This routine unholds page table pages, and if the hold count
 * drops to zero, then it decrements the wire count.
 */
static int
_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, pmap_inval_info_t info)
{
	/*
	 * Wait until we can busy the page ourselves.  We cannot have
	 * any active flushes if we block.
	 */
	if (m->flags & PG_BUSY) {
		pmap_inval_flush(info);
		while (vm_page_sleep_busy(m, FALSE, "pmuwpt"))
			;
	}
	KASSERT(m->queue == PQ_NONE,
		("_pmap_unwire_pte_hold: %p->queue != PQ_NONE", m));

	if (m->hold_count == 1) {
		/*
		 * Unmap the page table page
		 */
		pmap_inval_add(info, pmap, -1);
		pmap->pm_pdir[m->pindex] = 0;

		KKASSERT(pmap->pm_stats.resident_count > 0);
		--pmap->pm_stats.resident_count;

		if (pmap->pm_ptphint == m)
			pmap->pm_ptphint = NULL;

		/*
		 * This was our last hold, the page had better be unwired
		 * after we decrement wire_count.
		 *
		 * FUTURE NOTE: shared page directory page could result in
		 * multiple wire counts.
		 */
		vm_page_unhold(m);
		--m->wire_count;
		KKASSERT(m->wire_count == 0);
		--vmstats.v_wire_count;
		vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
		vm_page_free_zero(m);
		return 1;
	} else {
		KKASSERT(m->hold_count > 1);
		vm_page_unhold(m);
		return 0;
	}
}
static PMAP_INLINE int
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, pmap_inval_info_t info)
{
	KKASSERT(m->hold_count > 0);
	if (m->hold_count > 1) {
		vm_page_unhold(m);
		return 0;
	} else {
		return _pmap_unwire_pte_hold(pmap, m, info);
	}
}
/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte,
	      pmap_inval_info_t info)
{
	unsigned ptepindex;

	if (va >= UPT_MIN_ADDRESS)
		return 0;

	if (mpte == NULL) {
		ptepindex = (va >> PDRSHIFT);
		if (pmap->pm_ptphint &&
		    (pmap->pm_ptphint->pindex == ptepindex)) {
			mpte = pmap->pm_ptphint;
		} else {
			pmap_inval_flush(info);
			mpte = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
			pmap->pm_ptphint = mpte;
		}
	}

	return pmap_unwire_pte_hold(pmap, mpte, info);
}
/*
 * Initialize pmap0/vmspace0.  This pmap is not added to pmap_list because
 * it, and IdlePTD, represents the template used to update all other pmaps.
 *
 * On architectures where the kernel pmap is not integrated into the user
 * process pmap, this pmap represents the process pmap, not the kernel pmap.
 * kernel_pmap should be used to directly access the kernel_pmap.
 */
void
pmap_pinit0(struct pmap *pmap)
{
	pmap->pm_pdir =
		(pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
	pmap_kenter((vm_offset_t)pmap->pm_pdir, (vm_offset_t) IdlePTD);
	pmap->pm_count = 1;
	pmap->pm_active = 0;
	pmap->pm_ptphint = NULL;
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}
/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(struct pmap *pmap)
{
	vm_page_t ptdpg;

	/*
	 * No need to allocate page table space yet but we do need a valid
	 * page directory table.
	 */
	if (pmap->pm_pdir == NULL) {
		pmap->pm_pdir =
		    (pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
	}

	/*
	 * Allocate an object for the ptes
	 */
	if (pmap->pm_pteobj == NULL)
		pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1);

	/*
	 * Allocate the page directory page, unless we already have
	 * one cached.  If we used the cached page the wire_count will
	 * already be set appropriately.
	 */
	if ((ptdpg = pmap->pm_pdirm) == NULL) {
		ptdpg = vm_page_grab(pmap->pm_pteobj, PTDPTDI,
				     VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		pmap->pm_pdirm = ptdpg;
		vm_page_flag_clear(ptdpg, PG_MAPPED | PG_BUSY);
		ptdpg->valid = VM_PAGE_BITS_ALL;
		ptdpg->wire_count = 1;
		++vmstats.v_wire_count;
		pmap_kenter((vm_offset_t)pmap->pm_pdir, VM_PAGE_TO_PHYS(ptdpg));
	}
	if ((ptdpg->flags & PG_ZERO) == 0)
		bzero(pmap->pm_pdir, PAGE_SIZE);

	pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];

	/* install self-referential address mapping entry */
	*(unsigned *) (pmap->pm_pdir + PTDPTDI) =
		VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;

	pmap->pm_count = 1;
	pmap->pm_active = 0;
	pmap->pm_ptphint = NULL;
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
	pmap->pm_stats.resident_count = 1;
}
/*
 * Clean up a pmap structure so it can be physically freed.  This routine
 * is called by the vmspace dtor function.  A great deal of pmap data is
 * left passively mapped to improve vmspace management so we have a bit
 * of cleanup work to do here.
 */
void
pmap_puninit(pmap_t pmap)
{
	vm_page_t p;

	KKASSERT(pmap->pm_active == 0);
	if ((p = pmap->pm_pdirm) != NULL) {
		KKASSERT(pmap->pm_pdir != NULL);
		pmap_kremove((vm_offset_t)pmap->pm_pdir);
		p->wire_count--;
		vmstats.v_wire_count--;
		KKASSERT((p->flags & PG_BUSY) == 0);
		vm_page_busy(p);
		vm_page_free_zero(p);
		pmap->pm_pdirm = NULL;
	}
	if (pmap->pm_pdir) {
		kmem_free(&kernel_map, (vm_offset_t)pmap->pm_pdir, PAGE_SIZE);
		pmap->pm_pdir = NULL;
	}
	if (pmap->pm_pteobj) {
		vm_object_deallocate(pmap->pm_pteobj);
		pmap->pm_pteobj = NULL;
	}
}
/*
 * Wire in kernel global address entries.  To avoid a race condition
 * between pmap initialization and pmap_growkernel, this procedure
 * adds the pmap to the master list (which growkernel scans to update),
 * then copies the template.
 */
void
pmap_pinit2(struct pmap *pmap)
{
	crit_enter();
	TAILQ_INSERT_TAIL(&pmap_list, pmap, pm_pmnode);
	/* XXX copies current process, does not fill in MPPTDI */
	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
	crit_exit();
}
/*
 * Attempt to release and free a vm_page in a pmap.  Returns 1 on success,
 * 0 on failure (if the procedure had to sleep).
 *
 * When asked to remove the page directory page itself, we actually just
 * leave it cached so we do not have to incur the SMP inval overhead of
 * removing the kernel mapping.  pmap_puninit() will take care of it.
 */
static int
pmap_release_free_page(struct pmap *pmap, vm_page_t p)
{
	unsigned *pde = (unsigned *) pmap->pm_pdir;

	/*
	 * This code optimizes the case of freeing non-busy
	 * page-table pages.  Those pages are zero now, and
	 * might as well be placed directly into the zero queue.
	 */
	if (vm_page_sleep_busy(p, FALSE, "pmaprl"))
		return 0;

	vm_page_busy(p);

	/*
	 * Remove the page table page from the processes address space.
	 */
	pde[p->pindex] = 0;
	KKASSERT(pmap->pm_stats.resident_count > 0);
	--pmap->pm_stats.resident_count;

	if (p->hold_count)  {
		panic("pmap_release: freeing held page table page");
	}
	if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
		pmap->pm_ptphint = NULL;

	/*
	 * We leave the page directory page cached, wired, and mapped in
	 * the pmap until the dtor function (pmap_puninit()) gets called.
	 * However, still clean it up so we can set PG_ZERO.
	 */
	if (p->pindex == PTDPTDI) {
		bzero(pde + KPTDI, nkpt * PTESIZE);
		vm_page_flag_set(p, PG_ZERO);
		vm_page_wakeup(p);
	} else {
		p->wire_count--;
		vmstats.v_wire_count--;
		vm_page_free_zero(p);
	}
	return 1;
}
/*
 * this routine is called if the page table page is not
 * mapped correctly.
 */
static vm_page_t
_pmap_allocpte(pmap_t pmap, unsigned ptepindex)
{
	vm_offset_t pteva, ptepa;
	vm_page_t m;

	/*
	 * Find or fabricate a new pagetable page
	 */
	m = vm_page_grab(pmap->pm_pteobj, ptepindex,
			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);

	KASSERT(m->queue == PQ_NONE,
		("_pmap_allocpte: %p->queue != PQ_NONE", m));

	/*
	 * Increment the hold count for the page we will be returning to
	 * the caller.
	 */
	m->hold_count++;

	/*
	 * It is possible that someone else got in and mapped by the page
	 * directory page while we were blocked, if so just unbusy and
	 * return the held page.
	 */
	if ((ptepa = pmap->pm_pdir[ptepindex]) != 0) {
		KKASSERT((ptepa & PG_FRAME) == VM_PAGE_TO_PHYS(m));
		vm_page_wakeup(m);
		return(m);
	}

	if (m->wire_count == 0)
		vmstats.v_wire_count++;
	m->wire_count++;

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */
	++pmap->pm_stats.resident_count;

	ptepa = VM_PAGE_TO_PHYS(m);
	pmap->pm_pdir[ptepindex] =
		(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);

	/*
	 * Set the page table hint
	 */
	pmap->pm_ptphint = m;

	/*
	 * Try to use the new mapping, but if we cannot, then
	 * do it with the routine that maps the page explicitly.
	 */
	if ((m->flags & PG_ZERO) == 0) {
		if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) ==
		    (((unsigned) PTDpde) & PG_FRAME)) {
			pteva = UPT_MIN_ADDRESS + i386_ptob(ptepindex);
			bzero((caddr_t) pteva, PAGE_SIZE);
		} else {
			pmap_zero_page(ptepa);
		}
	}

	m->valid = VM_PAGE_BITS_ALL;
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_flag_set(m, PG_MAPPED);
	vm_page_wakeup(m);

	return(m);
}
static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va)
{
	unsigned ptepindex;
	vm_offset_t ptepa;
	vm_page_t m;

	/*
	 * Calculate pagetable page index
	 */
	ptepindex = va >> PDRSHIFT;

	/*
	 * Get the page directory entry
	 */
	ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];

	/*
	 * This supports switching from a 4MB page to a
	 * normal 4K page.
	 */
	if (ptepa & PG_PS) {
		pmap->pm_pdir[ptepindex] = 0;
		ptepa = 0;
		cpu_invltlb();
	}

	/*
	 * If the page table page is mapped, we just increment the
	 * hold count, and activate it.
	 */
	if (ptepa) {
		/*
		 * In order to get the page table page, try the
		 * hint first.
		 */
		if (pmap->pm_ptphint &&
		    (pmap->pm_ptphint->pindex == ptepindex)) {
			m = pmap->pm_ptphint;
		} else {
			m = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
			pmap->pm_ptphint = m;
		}
		m->hold_count++;
		return m;
	}
	/*
	 * Here if the pte page isn't mapped, or if it has been deallocated.
	 */
	return _pmap_allocpte(pmap, ptepindex);
}
/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static int pmap_release_callback(struct vm_page *p, void *data);
void
pmap_release(struct pmap *pmap)
{
	vm_object_t object = pmap->pm_pteobj;
	struct rb_vm_page_scan_info info;

	KASSERT(pmap->pm_active == 0, ("pmap still active! %08x", pmap->pm_active));
#if defined(DIAGNOSTIC)
	if (object->ref_count != 1)
		panic("pmap_release: pteobj reference count != 1");
#endif

	info.pmap = pmap;
	info.object = object;
	crit_enter();
	TAILQ_REMOVE(&pmap_list, pmap, pm_pmnode);
	crit_exit();

	do {
		info.error = 0;
		info.mpte = NULL;
		info.limit = object->generation;

		vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
					pmap_release_callback, &info);
		if (info.error == 0 && info.mpte) {
			if (!pmap_release_free_page(pmap, info.mpte))
				info.error = 1;
		}
	} while (info.error);
}
static int
pmap_release_callback(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	if (p->pindex == PTDPTDI) {
		info->mpte = p;
		return(0);
	}
	if (!pmap_release_free_page(info->pmap, p)) {
		info->error = 1;
		return(-1);
	}
	if (info->object->generation != info->limit) {
		info->error = 1;
		return(-1);
	}
	return(0);
}
/*
 * Grow the number of kernel page table entries, if needed.
 */
void
pmap_growkernel(vm_offset_t addr)
{
	struct pmap *pmap;
	vm_offset_t ptppaddr;
	vm_page_t nkpg;
	pd_entry_t newpdir;

	crit_enter();
	if (kernel_vm_end == 0) {
		kernel_vm_end = KERNBASE;
		nkpt = 0;
		while (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
			nkpt++;
		}
	}
	addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
	while (kernel_vm_end < addr) {
		if (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
			continue;
		}

		/*
		 * This index is bogus, but out of the way
		 */
		nkpg = vm_page_alloc(kptobj, nkpt,
				     VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT);
		if (nkpg == NULL)
			panic("pmap_growkernel: no memory to grow kernel");

		nkpt++;
		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
		pmap_zero_page(ptppaddr);
		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
		pdir_pde(PTD, kernel_vm_end) = newpdir;
		*pmap_pde(&kernel_pmap, kernel_vm_end) = newpdir;

		/*
		 * This update must be interlocked with pmap_pinit2.
		 */
		TAILQ_FOREACH(pmap, &pmap_list, pm_pmnode) {
			*pmap_pde(pmap, kernel_vm_end) = newpdir;
		}
		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
				~(PAGE_SIZE * NPTEPG - 1);
	}
	crit_exit();
}
/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
	int count;

	if (pmap == NULL)
		return;

	count = --pmap->pm_count;
	if (count == 0) {
		pmap_release(pmap);
		panic("destroying a pmap is not yet implemented");
	}
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{
	if (pmap != NULL) {
		pmap->pm_count++;
	}
}
/***************************************************
 * page management routines.
 ***************************************************/

/*
 * free the pv_entry back to the free list.  This function may be
 * called from an interrupt.
 */
static PMAP_INLINE void
free_pv_entry(pv_entry_t pv)
{
	pv_entry_count--;
	zfree(pvzone, pv);
}

/*
 * get a new pv_entry, allocating a block from the system
 * when needed.  This function may be called from an interrupt.
 */
static pv_entry_t
get_pv_entry(void)
{
	pv_entry_count++;
	if (pv_entry_high_water &&
	    (pv_entry_count > pv_entry_high_water) &&
	    (pmap_pagedaemon_waken == 0)) {
		pmap_pagedaemon_waken = 1;
		wakeup (&vm_pages_needed);
	}
	return zalloc(pvzone);
}
/*
 * This routine is very drastic, but can save the system
 * a lot of pv entries.
 */
void
pmap_collect(void)
{
	int i;
	vm_page_t m;
	static int warningdone=0;

	if (pmap_pagedaemon_waken == 0)
		return;
	pmap_pagedaemon_waken = 0;

	if (warningdone < 5) {
		kprintf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}

	for(i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & PG_BUSY))
			continue;
		pmap_remove_all(m);
	}
}
/*
 * If it is the first entry on the list, it is actually
 * in the header and we must copy the following entry up
 * to the header.  Otherwise we must search the list for
 * the entry.  In either case we free the now unused entry.
 */
static int
pmap_remove_entry(struct pmap *pmap, vm_page_t m,
		  vm_offset_t va, pmap_inval_info_t info)
{
	pv_entry_t pv;
	int rtval;

	crit_enter();
	if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
			if (pmap == pv->pv_pmap && va == pv->pv_va)
				break;
		}
	} else {
		TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
			if (va == pv->pv_va)
				break;
		}
	}

	rtval = 0;
	if (pv) {
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		m->md.pv_list_count--;
		if (TAILQ_EMPTY(&m->md.pv_list))
			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
		++pmap->pm_generation;
		rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem, info);
		free_pv_entry(pv);
	}
	crit_exit();
	return rtval;
}
/*
 * Create a pv entry for page at pa for (pmap, va).
 */
static void
pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m)
{
	pv_entry_t pv;

	crit_enter();
	pv = get_pv_entry();
	pv->pv_va = va;
	pv->pv_pmap = pmap;
	pv->pv_ptem = mpte;

	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
	m->md.pv_list_count++;
	crit_exit();
}
/*
 * pmap_remove_pte: do the things to unmap a page in a process
 */
static int
pmap_remove_pte(struct pmap *pmap, unsigned *ptq, vm_offset_t va,
		pmap_inval_info_t info)
{
	unsigned oldpte;
	vm_page_t m;

	pmap_inval_add(info, pmap, va);
	oldpte = loadandclear(ptq);
	if (oldpte & PG_W)
		pmap->pm_stats.wired_count -= 1;
	/*
	 * Machines that don't support invlpg, also don't support
	 * PG_G.  XXX PG_G is disabled for SMP so don't worry about
	 * the SMP case.
	 */
	if (oldpte & PG_G)
		cpu_invlpg((void *)va);
	KKASSERT(pmap->pm_stats.resident_count > 0);
	--pmap->pm_stats.resident_count;
	if (oldpte & PG_MANAGED) {
		m = PHYS_TO_VM_PAGE(oldpte);
		if (oldpte & PG_M) {
#if defined(PMAP_DIAGNOSTIC)
			if (pmap_nw_modified((pt_entry_t) oldpte)) {
				kprintf(
	"pmap_remove: modified page not writable: va: 0x%x, pte: 0x%x\n",
				    va, oldpte);
			}
#endif
			if (pmap_track_modified(va))
				vm_page_dirty(m);
		}
		if (oldpte & PG_A)
			vm_page_flag_set(m, PG_REFERENCED);
		return pmap_remove_entry(pmap, m, va, info);
	} else {
		return pmap_unuse_pt(pmap, va, NULL, info);
	}
}
/*
 * Remove a single page from a process address space.
 *
 * This function may not be called from an interrupt if the pmap is
 * not the kernel_pmap.
 */
static void
pmap_remove_page(struct pmap *pmap, vm_offset_t va, pmap_inval_info_t info)
{
	unsigned *ptq;

	/*
	 * if there is no pte for this address, just skip it!!!  Otherwise
	 * get a local va for mappings for this pmap and remove the entry.
	 */
	if (*pmap_pde(pmap, va) != 0) {
		ptq = get_ptbase(pmap) + i386_btop(va);
		if (*ptq) {
			pmap_remove_pte(pmap, ptq, va, info);
		}
	}
}
/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 *
 * This function may not be called from an interrupt if the pmap is
 * not the kernel_pmap.
 */
void
pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
{
	unsigned *ptbase;
	vm_offset_t pdnxt;
	vm_offset_t ptpaddr;
	vm_offset_t sindex, eindex;
	struct pmap_inval_info info;

	if (pmap == NULL)
		return;

	if (pmap->pm_stats.resident_count == 0)
		return;

	pmap_inval_init(&info);

	/*
	 * special handling of removing one page.  a very
	 * common operation and easy to short circuit some
	 * code.
	 */
	if (((sva + PAGE_SIZE) == eva) &&
	    (((unsigned) pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
		pmap_remove_page(pmap, sva, &info);
		pmap_inval_flush(&info);
		return;
	}

	/*
	 * Get a local virtual address for the mappings that are being
	 * worked with.
	 */
	sindex = i386_btop(sva);
	eindex = i386_btop(eva);

	for (; sindex < eindex; sindex = pdnxt) {
		unsigned pdirindex;

		/*
		 * Calculate index for next page table.
		 */
		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
		if (pmap->pm_stats.resident_count == 0)
			break;

		pdirindex = sindex / NPDEPG;
		if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
			pmap_inval_add(&info, pmap, -1);
			pmap->pm_pdir[pdirindex] = 0;
			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
			continue;
		}

		/*
		 * Weed out invalid mappings. Note: we assume that the page
		 * directory table is always allocated, and in kernel virtual.
		 */
		if (ptpaddr == 0)
			continue;

		/*
		 * Limit our scan to either the end of the va represented
		 * by the current page table page, or to the end of the
		 * range being removed.
		 */
		if (pdnxt > eindex)
			pdnxt = eindex;

		/*
		 * NOTE: pmap_remove_pte() can block.
		 */
		for (; sindex != pdnxt; sindex++) {
			vm_offset_t va;

			ptbase = get_ptbase(pmap);
			if (ptbase[sindex] == 0)
				continue;
			va = i386_ptob(sindex);
			if (pmap_remove_pte(pmap, ptbase + sindex, va, &info))
				break;
		}
	}
	pmap_inval_flush(&info);
}
/*
 * Removes this physical page from all physical maps in which it resides.
 * Reflects back modify bits to the pager.
 *
 * This routine may not be called from an interrupt.
 */
static void
pmap_remove_all(vm_page_t m)
{
	struct pmap_inval_info info;
	unsigned *pte, tpte;
	pv_entry_t pv;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return;

	pmap_inval_init(&info);
	crit_enter();
	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		KKASSERT(pv->pv_pmap->pm_stats.resident_count > 0);
		--pv->pv_pmap->pm_stats.resident_count;

		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
		pmap_inval_add(&info, pv->pv_pmap, pv->pv_va);
		tpte = loadandclear(pte);

		if (tpte & PG_W)
			pv->pv_pmap->pm_stats.wired_count--;

		if (tpte & PG_A)
			vm_page_flag_set(m, PG_REFERENCED);

		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if (tpte & PG_M) {
#if defined(PMAP_DIAGNOSTIC)
			if (pmap_nw_modified((pt_entry_t) tpte)) {
				kprintf(
	"pmap_remove_all: modified page not writable: va: 0x%x, pte: 0x%x\n",
				    pv->pv_va, tpte);
			}
#endif
			if (pmap_track_modified(pv->pv_va))
				vm_page_dirty(m);
		}
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
		++pv->pv_pmap->pm_generation;
		m->md.pv_list_count--;
		if (TAILQ_EMPTY(&m->md.pv_list))
			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem, &info);
		free_pv_entry(pv);
	}
	crit_exit();
	KKASSERT((m->flags & (PG_MAPPED|PG_WRITEABLE)) == 0);
	pmap_inval_flush(&info);
}
/*
 * Set the physical protection on the specified range of this map
 * as requested.
 *
 * This function may not be called from an interrupt if the map is
 * not the kernel_pmap.
 */
void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	unsigned *ptbase;
	vm_offset_t pdnxt, ptpaddr;
	vm_pindex_t sindex, eindex;
	pmap_inval_info info;

	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}

	if (prot & VM_PROT_WRITE)
		return;

	pmap_inval_init(&info);

	ptbase = get_ptbase(pmap);

	sindex = i386_btop(sva);
	eindex = i386_btop(eva);

	for (; sindex < eindex; sindex = pdnxt) {
		unsigned pdirindex;

		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));

		pdirindex = sindex / NPDEPG;
		if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
			pmap_inval_add(&info, pmap, -1);
			pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
			continue;
		}

		/*
		 * Weed out invalid mappings. Note: we assume that the page
		 * directory table is always allocated, and in kernel virtual.
		 */
		if (ptpaddr == 0)
			continue;

		if (pdnxt > eindex)
			pdnxt = eindex;

		for (; sindex != pdnxt; sindex++) {
			unsigned pbits;
			vm_page_t m;

			/*
			 * XXX non-optimal.  Note also that there can be
			 * no pmap_inval_flush() calls until after we modify
			 * ptbase[sindex] (or otherwise we have to do another
			 * pmap_inval_add() call).
			 */
			pmap_inval_add(&info, pmap, i386_ptob(sindex));
			pbits = ptbase[sindex];

			if (pbits & PG_MANAGED) {
				m = NULL;
				if (pbits & PG_A) {
					m = PHYS_TO_VM_PAGE(pbits);
					vm_page_flag_set(m, PG_REFERENCED);
					pbits &= ~PG_A;
				}
				if (pbits & PG_M) {
					if (pmap_track_modified(i386_ptob(sindex))) {
						if (m == NULL)
							m = PHYS_TO_VM_PAGE(pbits);
						vm_page_dirty(m);
						pbits &= ~PG_M;
					}
				}
			}

			pbits &= ~PG_RW;

			if (pbits != ptbase[sindex]) {
				ptbase[sindex] = pbits;
			}
		}
	}
	pmap_inval_flush(&info);
}
/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
	vm_paddr_t pa;
	unsigned *pte;
	vm_paddr_t opa;
	vm_offset_t origpte, newpte;
	vm_page_t mpte;
	pmap_inval_info info;

	if (pmap == NULL)
		return;

	va &= PG_FRAME;
#ifdef PMAP_DIAGNOSTIC
	if (va >= UPT_MAX_ADDRESS)
		panic("pmap_enter: toobig");
	if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
		panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va);
#endif
	if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
		kprintf("Warning: pmap_enter called on UVA with kernel_pmap\n");
	}
	if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
		kprintf("Warning: pmap_enter called on KVA without kernel_pmap\n");
	}

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
	if (va < UPT_MIN_ADDRESS)
		mpte = pmap_allocpte(pmap, va);
	else
		mpte = NULL;

	pmap_inval_init(&info);
	pte = pmap_pte(pmap, va);

	/*
	 * Page Directory table entry not valid, we need a new PT page
	 */
	if (pte == NULL) {
		panic("pmap_enter: invalid page directory pdir=%x, va=0x%x\n",
		      (unsigned) pmap->pm_pdir[PTDPTDI], va);
	}

	pa = VM_PAGE_TO_PHYS(m) & PG_FRAME;
	origpte = *(vm_offset_t *)pte;
	opa = origpte & PG_FRAME;

	if (origpte & PG_PS)
		panic("pmap_enter: attempted pmap_enter on 4MB page");

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (origpte && (opa == pa)) {
		/*
		 * Wiring change, just update stats. We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them. Hence, if a user page is wired,
		 * the PT page will be also.
		 */
		if (wired && ((origpte & PG_W) == 0))
			pmap->pm_stats.wired_count++;
		else if (!wired && (origpte & PG_W))
			pmap->pm_stats.wired_count--;

#if defined(PMAP_DIAGNOSTIC)
		if (pmap_nw_modified((pt_entry_t) origpte)) {
			kprintf(
	"pmap_enter: modified page not writable: va: 0x%x, pte: 0x%x\n",
			    va, origpte);
		}
#endif

		/*
		 * Remove the extra pte reference.  Note that we cannot
		 * optimize the RO->RW case because we have adjusted the
		 * wiring count above and may need to adjust the wiring
		 * bits below.
		 */
		if (mpte)
			mpte->hold_count--;

		/*
		 * We might be turning off write access to the page,
		 * so we go ahead and sense modify status.
		 */
		if (origpte & PG_MANAGED) {
			if ((origpte & PG_M) && pmap_track_modified(va)) {
				vm_page_t om;
				om = PHYS_TO_VM_PAGE(opa);
				vm_page_dirty(om);
			}
			pa |= PG_MANAGED;
			KKASSERT(m->flags & PG_MAPPED);
		}
		goto validate;
	}
	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
		int err;
		err = pmap_remove_pte(pmap, pte, va, &info);
		if (err)
			panic("pmap_enter: pte vanished, va: 0x%x", va);
	}

	/*
	 * Enter on the PV list if part of our managed memory. Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
	if (pmap_initialized &&
	    (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
		pmap_insert_entry(pmap, va, mpte, m);
		pa |= PG_MANAGED;
		vm_page_flag_set(m, PG_MAPPED);
	}

	/*
	 * Increment counters
	 */
	++pmap->pm_stats.resident_count;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V);

	if (wired)
		newpte |= PG_W;
	if (va < UPT_MIN_ADDRESS)
		newpte |= PG_U;
	if (pmap == &kernel_pmap)
		newpte |= pgeflag;

	/*
	 * if the mapping or permission bits are different, we need
	 * to update the pte.
	 */
	if ((origpte & ~(PG_M|PG_A)) != newpte) {
		pmap_inval_add(&info, pmap, va);
		*pte = newpte | PG_A;
		if (newpte & PG_RW)
			vm_page_flag_set(m, PG_WRITEABLE);
	}
	KKASSERT((newpte & PG_MANAGED) == 0 || (m->flags & PG_MAPPED));
	pmap_inval_flush(&info);
}
/*
 * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired.
 * This code also assumes that the pmap has no pre-existing entry for this
 * VA.
 *
 * This code currently may only be used on user pmaps, not kernel_pmap.
 */
static void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	unsigned *pte;
	vm_paddr_t pa;
	vm_page_t mpte;
	unsigned ptepindex;
	vm_offset_t ptepa;
	pmap_inval_info info;

	pmap_inval_init(&info);

	if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
		kprintf("Warning: pmap_enter_quick called on UVA with kernel_pmap\n");
	}
	if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
		kprintf("Warning: pmap_enter_quick called on KVA without kernel_pmap\n");
	}

	KKASSERT(va < UPT_MIN_ADDRESS);	/* assert used on user pmaps only */

	/*
	 * Calculate the page table page (mpte), allocating it if necessary.
	 *
	 * A held page table page (mpte), or NULL, is passed onto the
	 * section following.
	 */
	if (va < UPT_MIN_ADDRESS) {
		/*
		 * Calculate pagetable page index
		 */
		ptepindex = va >> PDRSHIFT;

		do {
			/*
			 * Get the page directory entry
			 */
			ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];

			/*
			 * If the page table page is mapped, we just increment
			 * the hold count, and activate it.
			 */
			if (ptepa) {
				if (ptepa & PG_PS)
					panic("pmap_enter_quick: unexpected mapping into 4MB page");
				if (pmap->pm_ptphint &&
				    (pmap->pm_ptphint->pindex == ptepindex)) {
					mpte = pmap->pm_ptphint;
				} else {
					mpte = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
					pmap->pm_ptphint = mpte;
				}
				if (mpte)
					mpte->hold_count++;
			} else {
				mpte = _pmap_allocpte(pmap, ptepindex);
			}
		} while (mpte == NULL);
	} else {
		mpte = NULL;
		/* this code path is not yet used */
	}

	/*
	 * With a valid (and held) page directory page, we can just use
	 * vtopte() to get to the pte.  If the pte is already present
	 * we do not disturb it.
	 */
	pte = (unsigned *)vtopte(va);
	if (*pte & PG_V) {
		if (mpte)
			pmap_unwire_pte_hold(pmap, mpte, &info);
		pa = VM_PAGE_TO_PHYS(m);
		KKASSERT(((*pte ^ pa) & PG_FRAME) == 0);
		pmap_inval_flush(&info);
		return;
	}

	/*
	 * Enter on the PV list if part of our managed memory
	 */
	if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
		pmap_insert_entry(pmap, va, mpte, m);
		vm_page_flag_set(m, PG_MAPPED);
	}

	/*
	 * Increment counters
	 */
	++pmap->pm_stats.resident_count;

	pa = VM_PAGE_TO_PHYS(m);

	/*
	 * Now validate mapping with RO protection
	 */
	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
		*pte = pa | PG_V | PG_U;
	else
		*pte = pa | PG_V | PG_U | PG_MANAGED;
/*	pmap_inval_add(&info, pmap, va); shouldn't be needed inval->valid */
	pmap_inval_flush(&info);
}
/*
 * Make a temporary mapping for a physical address.  This is only intended
 * to be used for panic dumps.
 */
void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{
	pmap_kenter((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
	return ((void *)crashdumpmap);
}
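
/*
 * Usage sketch (illustrative only, hypothetical caller): a panic-dump
 * routine can window physical memory through crashdumpmap one page at a
 * time, e.g.
 *
 *	for (i = 0; i < MAXDUMPPGS; ++i, pa += PAGE_SIZE)
 *		dump_write(di, pmap_kenter_temporary(pa, i), PAGE_SIZE);
 *
 * where dump_write() and di are hypothetical names, not APIs defined here.
 */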
#define MAX_INIT_PT (96)

/*
 * This routine preloads the ptes for a given object into the specified pmap.
 * This eliminates the blast of soft faults on process startup and
 * immediately after an mmap.
 */
static int pmap_object_init_pt_callback(vm_page_t p, void *data);

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
		    vm_object_t object, vm_pindex_t pindex,
		    vm_size_t size, int limit)
{
	struct rb_vm_page_scan_info info;
	struct lwp *lp;
	int psize;

	/*
	 * We can't preinit if read access isn't set or there is no pmap
	 * or object.
	 */
	if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL)
		return;

	/*
	 * We can't preinit if the pmap is not the current pmap
	 */
	lp = curthread->td_lwp;
	if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
		return;

	psize = i386_btop(size);

	if ((object->type != OBJT_VNODE) ||
	    ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
	     (object->resident_page_count > MAX_INIT_PT))) {
		return;
	}

	if (psize + pindex > object->size) {
		if (object->size < pindex)
			return;
		psize = object->size - pindex;
	}

	if (psize == 0)
		return;

	/*
	 * Use a red-black scan to traverse the requested range and load
	 * any valid pages found into the pmap.
	 *
	 * We cannot safely scan the object's memq unless we are in a
	 * critical section since interrupts can remove pages from objects.
	 */
	info.start_pindex = pindex;
	info.end_pindex = pindex + psize - 1;
	info.limit = limit;
	info.addr = addr;
	info.pmap = pmap;

	crit_enter();
	vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
				pmap_object_init_pt_callback, &info);
	crit_exit();
}
static int
pmap_object_init_pt_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	vm_pindex_t rel_index;

	/*
	 * don't allow an madvise to blow away our really
	 * free pages allocating pv entries.
	 */
	if ((info->limit & MAP_PREFAULT_MADVISE) &&
	    vmstats.v_free_count < vmstats.v_free_reserved) {
		return(-1);
	}
	if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
	    (p->busy == 0) && (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
		if ((p->queue - p->pc) == PQ_CACHE)
			vm_page_deactivate(p);
		vm_page_busy(p);
		rel_index = p->pindex - info->start_pindex;
		pmap_enter_quick(info->pmap,
				 info->addr + i386_ptob(rel_index), p);
		vm_page_wakeup(p);
	}
	return(0);
}
/*
 * pmap_prefault provides a quick way of clustering pagefaults into a
 * processes address space.  It is a "cousin" of pmap_object_init_pt,
 * except it runs at page fault time instead of mmap time.
 */
#define PFBAK 4
#define PFFOR 4
#define PAGEORDER_SIZE (PFBAK+PFFOR)

static int pmap_prefault_pageorder[] = {
	-PAGE_SIZE, PAGE_SIZE,
	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
	-4 * PAGE_SIZE, 4 * PAGE_SIZE
};

void
pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{
	int i;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	vm_object_t object;
	struct lwp *lp;

	/*
	 * We do not currently prefault mappings that use virtual page
	 * tables.  We do not prefault foreign pmaps.
	 */
	if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
		return;
	lp = curthread->td_lwp;
	if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
		return;

	object = entry->object.vm_object;

	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start)
		starta = entry->start;
	else if (starta > addra)
		starta = 0;

	/*
	 * critical section protection is required to maintain the
	 * page/object association, interrupts can free pages and remove
	 * them from their objects.
	 */
	crit_enter();
	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t lobject;
		unsigned *pte;

		addr = addra + pmap_prefault_pageorder[i];
		if (addr > addra + (PFFOR * PAGE_SIZE))
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if ((*pmap_pde(pmap, addr)) == 0)
			continue;

		pte = (unsigned *) vtopte(addr);
		if (*pte)
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;

		for (m = vm_page_lookup(lobject, pindex);
		    (!m && (lobject->type == OBJT_DEFAULT) &&
		     (lobject->backing_object));
		    lobject = lobject->backing_object
		) {
			if (lobject->backing_object_offset & PAGE_MASK)
				break;
			pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
			m = vm_page_lookup(lobject->backing_object, pindex);
		}

		/*
		 * give-up when a page is not in memory
		 */
		if (m == NULL)
			break;

		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
		    (m->busy == 0) &&
		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {

			if ((m->queue - m->pc) == PQ_CACHE) {
				vm_page_deactivate(m);
			}
			vm_page_busy(m);
			pmap_enter_quick(pmap, addr, m);
			vm_page_wakeup(m);
		}
	}
	crit_exit();
}
/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	unsigned *pte;

	if (pmap == NULL)
		return;

	pte = pmap_pte(pmap, va);

	if (wired && !pmap_pte_w(pte))
		pmap->pm_stats.wired_count++;
	else if (!wired && pmap_pte_w(pte))
		pmap->pm_stats.wired_count--;

	/*
	 * Wiring is not a hardware characteristic so there is no need to
	 * invalidate TLB.  However, in an SMP environment we must use
	 * a locked bus cycle to update the pte (if we are not using
	 * the pmap_inval_*() API that is)... it's ok to do this for simple
	 * wiring changes.
	 */
#ifdef SMP
	if (wired)
		atomic_set_int(pte, PG_W);
	else
		atomic_clear_int(pte, PG_W);
#else
	if (wired)
		atomic_set_int_nonlocked(pte, PG_W);
	else
		atomic_clear_int_nonlocked(pte, PG_W);
#endif
}
/*
 *      Copy the range specified by src_addr/len
 *      from the source map to the range dst_addr/len
 *      in the destination map.
 *
 *      This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
          vm_size_t len, vm_offset_t src_addr)
{
        pmap_inval_info info;
        vm_offset_t addr;
        vm_offset_t end_addr = src_addr + len;
        vm_offset_t pdnxt;
        unsigned src_frame, dst_frame;
        vm_page_t m;

        if (dst_addr != src_addr)
                return;
        /*
         * XXX BUGGY.  Among other things srcmpte is assumed to remain
         * valid through blocking calls, and that's just not going to
         * be the case.
         *
         * FIXME!
         */
        return;

        src_frame = ((unsigned) src_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
        if (src_frame != (((unsigned) PTDpde) & PG_FRAME)) {
                return;
        }

        dst_frame = ((unsigned) dst_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
        if (dst_frame != (((unsigned) APTDpde) & PG_FRAME)) {
                APTDpde = (pd_entry_t) (dst_frame | PG_RW | PG_V);
                /* The page directory is not shared between CPUs */
                cpu_invltlb();
        }
        pmap_inval_init(&info);
        pmap_inval_add(&info, dst_pmap, -1);
        pmap_inval_add(&info, src_pmap, -1);

        /*
         * critical section protection is required to maintain the page/object
         * association, interrupts can free pages and remove them from
         * their objects.
         */
        crit_enter();
        for (addr = src_addr; addr < end_addr; addr = pdnxt) {
                unsigned *src_pte, *dst_pte;
                vm_page_t dstmpte, srcmpte;
                vm_offset_t srcptepaddr;
                unsigned ptepindex;

                if (addr >= UPT_MIN_ADDRESS)
                        panic("pmap_copy: invalid to pmap_copy page tables\n");

                /*
                 * Don't let optional prefaulting of pages make us go
                 * way below the low water mark of free pages or way
                 * above the high water mark of used pv entries.
                 */
                if (vmstats.v_free_count < vmstats.v_free_reserved ||
                    pv_entry_count > pv_entry_high_water)
                        break;

                pdnxt = ((addr + PAGE_SIZE*NPTEPG) & ~(PAGE_SIZE*NPTEPG - 1));
                ptepindex = addr >> PDRSHIFT;

                srcptepaddr = (vm_offset_t) src_pmap->pm_pdir[ptepindex];
                if (srcptepaddr == 0)
                        continue;

                if (srcptepaddr & PG_PS) {
                        if (dst_pmap->pm_pdir[ptepindex] == 0) {
                                dst_pmap->pm_pdir[ptepindex] = (pd_entry_t) srcptepaddr;
                                dst_pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
                        }
                        continue;
                }

                srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
                if ((srcmpte == NULL) || (srcmpte->hold_count == 0) ||
                    (srcmpte->flags & PG_BUSY)) {
                        continue;
                }

                if (pdnxt > end_addr)
                        pdnxt = end_addr;

                src_pte = (unsigned *) vtopte(addr);
                dst_pte = (unsigned *) avtopte(addr);
                while (addr < pdnxt) {
                        unsigned ptetemp;

                        ptetemp = *src_pte;
                        /*
                         * we only virtual copy managed pages
                         */
                        if ((ptetemp & PG_MANAGED) != 0) {
                                /*
                                 * We have to check after allocpte for the
                                 * pte still being around...  allocpte can
                                 * block.
                                 *
                                 * pmap_allocpte() can block.  If we lose
                                 * our page directory mappings we stop.
                                 */
                                dstmpte = pmap_allocpte(dst_pmap, addr);

                                if (src_frame != (((unsigned) PTDpde) & PG_FRAME) ||
                                    dst_frame != (((unsigned) APTDpde) & PG_FRAME)
                                ) {
                                        kprintf("WARNING: pmap_copy: detected and corrected race\n");
                                        pmap_unwire_pte_hold(dst_pmap, dstmpte, &info);
                                        goto failed;
                                } else if ((*dst_pte == 0) &&
                                           (ptetemp = *src_pte) != 0 &&
                                           (ptetemp & PG_MANAGED)) {
                                        /*
                                         * Clear the modified and
                                         * accessed (referenced) bits
                                         * during the copy.
                                         */
                                        m = PHYS_TO_VM_PAGE(ptetemp);
                                        *dst_pte = ptetemp & ~(PG_M | PG_A);
                                        ++dst_pmap->pm_stats.resident_count;
                                        pmap_insert_entry(dst_pmap, addr,
                                                          dstmpte, m);
                                        KKASSERT(m->flags & PG_MAPPED);
                                } else {
                                        kprintf("WARNING: pmap_copy: dst_pte race detected and corrected\n");
                                        pmap_unwire_pte_hold(dst_pmap, dstmpte, &info);
                                        goto failed;
                                }
                                if (dstmpte->hold_count >= srcmpte->hold_count)
                                        break;
                        }
                        addr += PAGE_SIZE;
                        src_pte++;
                        dst_pte++;
                }
        }
failed:
        crit_exit();
        pmap_inval_flush(&info);
}

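/*
 * The page zeroing/copying routines below use the per-cpu scratch ptes
 * (gd_CMAP1/2/3) and their matching kernel virtual addresses
 * (gd_CADDR1/2/3) in struct mdglobaldata to temporarily map the target
 * physical pages.  A critical section keeps an interrupt on the same cpu
 * from reusing a scratch mapping while it is busy.
 */
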
/*
 * pmap_zero_page:
 *
 *      Zero the specified PA by mapping the page into KVM and clearing its
 *      contents.
 *
 *      This function may be called from an interrupt and no locking is
 *      required.
 */
void
pmap_zero_page(vm_paddr_t phys)
{
        struct mdglobaldata *gd = mdcpu;

        crit_enter();
        if (*(int *)gd->gd_CMAP3)
                panic("pmap_zero_page: CMAP3 busy");
        *(int *)gd->gd_CMAP3 =
                    PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
        cpu_invlpg(gd->gd_CADDR3);

#if defined(I686_CPU)
        if (cpu_class == CPUCLASS_686)
                i686_pagezero(gd->gd_CADDR3);
        else
#endif
                bzero(gd->gd_CADDR3, PAGE_SIZE);
        *(int *) gd->gd_CMAP3 = 0;
        crit_exit();
}

/*
 * pmap_page_assertzero:
 *
 *      Assert that a page is empty, panic if it isn't.
 */
void
pmap_page_assertzero(vm_paddr_t phys)
{
        struct mdglobaldata *gd = mdcpu;
        int i;

        crit_enter();
        if (*(int *)gd->gd_CMAP3)
                panic("pmap_page_assertzero: CMAP3 busy");
        *(int *)gd->gd_CMAP3 =
                    PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
        cpu_invlpg(gd->gd_CADDR3);
        for (i = 0; i < PAGE_SIZE; i += 4) {
                if (*(int *)((char *)gd->gd_CADDR3 + i) != 0) {
                        panic("pmap_page_assertzero() @ %p not zero!\n",
                              (void *)gd->gd_CADDR3);
                }
        }
        *(int *) gd->gd_CMAP3 = 0;
        crit_exit();
}

/*
 * pmap_zero_page_area:
 *
 *      Zero part of a physical page by mapping it into memory and clearing
 *      its contents with bzero.
 *
 *      off and size may not cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_paddr_t phys, int off, int size)
{
        struct mdglobaldata *gd = mdcpu;

        crit_enter();
        if (*(int *) gd->gd_CMAP3)
                panic("pmap_zero_page_area: CMAP3 busy");
        *(int *) gd->gd_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
        cpu_invlpg(gd->gd_CADDR3);

#if defined(I686_CPU)
        if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
                i686_pagezero(gd->gd_CADDR3);
        else
#endif
                bzero((char *)gd->gd_CADDR3 + off, size);
        *(int *) gd->gd_CMAP3 = 0;
        crit_exit();
}

/*
 * pmap_copy_page:
 *
 *      Copy the physical page from the source PA to the target PA.
 *      This function may be called from an interrupt.  No locking
 *      is required.
 */
void
pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
{
        struct mdglobaldata *gd = mdcpu;

        crit_enter();
        if (*(int *) gd->gd_CMAP1)
                panic("pmap_copy_page: CMAP1 busy");
        if (*(int *) gd->gd_CMAP2)
                panic("pmap_copy_page: CMAP2 busy");

        *(int *) gd->gd_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
        *(int *) gd->gd_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;

        cpu_invlpg(gd->gd_CADDR1);
        cpu_invlpg(gd->gd_CADDR2);

        bcopy(gd->gd_CADDR1, gd->gd_CADDR2, PAGE_SIZE);

        *(int *) gd->gd_CMAP1 = 0;
        *(int *) gd->gd_CMAP2 = 0;
        crit_exit();
}

/*
 * pmap_copy_page_frag:
 *
 *      Copy part of the physical page from the source PA to the target PA.
 *      This function may be called from an interrupt.  No locking
 *      is required.
 */
void
pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
{
        struct mdglobaldata *gd = mdcpu;

        crit_enter();
        if (*(int *) gd->gd_CMAP1)
                panic("pmap_copy_page_frag: CMAP1 busy");
        if (*(int *) gd->gd_CMAP2)
                panic("pmap_copy_page_frag: CMAP2 busy");

        *(int *) gd->gd_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
        *(int *) gd->gd_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;

        cpu_invlpg(gd->gd_CADDR1);
        cpu_invlpg(gd->gd_CADDR2);

        bcopy((char *)gd->gd_CADDR1 + (src & PAGE_MASK),
              (char *)gd->gd_CADDR2 + (dst & PAGE_MASK),
              bytes);

        *(int *) gd->gd_CMAP1 = 0;
        *(int *) gd->gd_CMAP2 = 0;
        crit_exit();
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
        pv_entry_t pv;
        int loops = 0;

        if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
                return FALSE;

        crit_enter();

        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                if (pv->pv_pmap == pmap) {
                        crit_exit();
                        return TRUE;
                }
                loops++;
                if (loops >= 16)
                        break;
        }
        crit_exit();
        return (FALSE);
}

/*
 * Remove all pages from the specified address space; this aids process
 * exit speeds.  Also, this code is special cased for the current process
 * only, but can have the more generic (and slightly slower) mode enabled.
 * This is much faster than pmap_remove in the case of running down an
 * entire address space.
 */
void
pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
        struct lwp *lp;
        unsigned *pte, tpte;
        pv_entry_t pv, npv;
        vm_page_t m;
        pmap_inval_info info;
        int iscurrentpmap;
        int32_t save_generation;

        lp = curthread->td_lwp;
        if (lp && pmap == vmspace_pmap(lp->lwp_vmspace))
                iscurrentpmap = 1;
        else
                iscurrentpmap = 0;

        pmap_inval_init(&info);
        crit_enter();
        for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
                if (pv->pv_va >= eva || pv->pv_va < sva) {
                        npv = TAILQ_NEXT(pv, pv_plist);
                        continue;
                }

                KKASSERT(pmap == pv->pv_pmap);

                if (iscurrentpmap)
                        pte = (unsigned *)vtopte(pv->pv_va);
                else
                        pte = pmap_pte_quick(pmap, pv->pv_va);
                if (pmap->pm_active)
                        pmap_inval_add(&info, pmap, pv->pv_va);

                /*
                 * We cannot remove wired pages from a process' mapping
                 * at this time.
                 */
                if (*pte & PG_W) {
                        npv = TAILQ_NEXT(pv, pv_plist);
                        continue;
                }
                tpte = loadandclear(pte);

                m = PHYS_TO_VM_PAGE(tpte);

                KASSERT(m < &vm_page_array[vm_page_array_size],
                        ("pmap_remove_pages: bad tpte %x", tpte));

                KKASSERT(pmap->pm_stats.resident_count > 0);
                --pmap->pm_stats.resident_count;

                /*
                 * Update the vm_page_t clean and reference bits.
                 */
                if (tpte & PG_M) {
                        vm_page_dirty(m);
                }

                npv = TAILQ_NEXT(pv, pv_plist);
                TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
                save_generation = ++pmap->pm_generation;

                m->md.pv_list_count--;
                TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
                if (TAILQ_EMPTY(&m->md.pv_list))
                        vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);

                pmap_unuse_pt(pmap, pv->pv_va, pv->pv_ptem, &info);
                free_pv_entry(pv);

                /*
                 * Restart the scan if we blocked during the unuse or free
                 * calls and other removals were made.
                 */
                if (save_generation != pmap->pm_generation) {
                        kprintf("Warning: pmap_remove_pages race-A avoided\n");
                        npv = TAILQ_FIRST(&pmap->pm_pvlist);
                }
        }
        pmap_inval_flush(&info);
        crit_exit();
}

/*
 * pmap_testbit tests bits in ptes.
 * Note that the testbit/clearbit routines are inline,
 * and a lot of things compile-time evaluate.
 */
static boolean_t
pmap_testbit(vm_page_t m, int bit)
{
        pv_entry_t pv;
        unsigned *pte;

        if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
                return FALSE;

        if (TAILQ_FIRST(&m->md.pv_list) == NULL)
                return FALSE;

        crit_enter();

        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                /*
                 * If the bit being tested is the modified bit, then
                 * mark clean_map and ptes as never modified.
                 */
                if (bit & (PG_A|PG_M)) {
                        if (!pmap_track_modified(pv->pv_va))
                                continue;
                }

#if defined(PMAP_DIAGNOSTIC)
                if (!pv->pv_pmap) {
                        kprintf("Null pmap (tb) at va: 0x%x\n", pv->pv_va);
                        continue;
                }
#endif
                pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
                if (*pte & bit) {
                        crit_exit();
                        return TRUE;
                }
        }
        crit_exit();
        return (FALSE);
}

/*
 * This routine is used to modify bits in ptes.
 */
static __inline void
pmap_clearbit(vm_page_t m, int bit)
{
        struct pmap_inval_info info;
        pv_entry_t pv;
        unsigned *pte;
        unsigned pbits;

        if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
                return;

        pmap_inval_init(&info);
        crit_enter();

        /*
         * Loop over all current mappings setting/clearing as appropriate.
         * If setting RO do we need to clear the VAC?
         */
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                /*
                 * don't write protect pager mappings
                 */
                if (bit == PG_RW) {
                        if (!pmap_track_modified(pv->pv_va))
                                continue;
                }

#if defined(PMAP_DIAGNOSTIC)
                if (!pv->pv_pmap) {
                        kprintf("Null pmap (cb) at va: 0x%x\n", pv->pv_va);
                        continue;
                }
#endif

                /*
                 * Careful here.  We can use a locked bus instruction to
                 * clear PG_A or PG_M safely but we need to synchronize
                 * with the target cpus when we mess with PG_RW.
                 *
                 * We do not have to force synchronization when clearing
                 * PG_M even for PTEs generated via virtual memory maps,
                 * because the virtual kernel will invalidate the pmap
                 * entry when/if it needs to resynchronize the Modify bit.
                 */
                if (bit & PG_RW)
                        pmap_inval_add(&info, pv->pv_pmap, pv->pv_va);
                pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
again:
                pbits = *pte;
                if (pbits & bit) {
                        if (bit == PG_RW) {
                                if (pbits & PG_M) {
                                        vm_page_dirty(m);
                                        atomic_clear_int(pte, PG_M|PG_RW);
                                } else {
                                        /*
                                         * The cpu may be trying to set PG_M
                                         * simultaneously with our clearing
                                         * of PG_RW.
                                         */
                                        if (!atomic_cmpset_int(pte, pbits,
                                                               pbits & ~PG_RW))
                                                goto again;
                                }
                        } else if (bit == PG_M) {
                                /*
                                 * We could also clear PG_RW here to force
                                 * a fault on write to redetect PG_M for
                                 * virtual kernels, but it isn't necessary
                                 * since virtual kernels invalidate the pte
                                 * when they clear the VPTE_M bit in their
                                 * virtual page tables.
                                 */
                                atomic_clear_int(pte, PG_M);
                        } else {
                                atomic_clear_int(pte, bit);
                        }
                }
        }
        pmap_inval_flush(&info);
        crit_exit();
}

/*
 * pmap_page_protect:
 *
 *      Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
        if ((prot & VM_PROT_WRITE) == 0) {
                if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
                        pmap_clearbit(m, PG_RW);
                        vm_page_flag_clear(m, PG_WRITEABLE);
                } else {
                        pmap_remove_all(m);
                }
        }
}

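/*
 * pmap_phys_address:
 *
 *      Convert a physical page index into a physical address.
 */
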
vm_paddr_t
pmap_phys_address(vm_pindex_t ppn)
{
        return (i386_ptob(ppn));
}

/*
 * pmap_ts_referenced:
 *
 *      Return a count of reference bits for a page, clearing those bits.
 *      It is not necessary for every reference bit to be cleared, but it
 *      is necessary that 0 only be returned when there are truly no
 *      reference bits set.
 *
 *      XXX: The exact number of bits to check and clear is a matter that
 *      should be tested and standardized at some point in the future for
 *      optimal aging of shared pages.
 */
int
pmap_ts_referenced(vm_page_t m)
{
        pv_entry_t pv, pvf, pvn;
        unsigned *pte;
        int rtval = 0;

        if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
                return (rtval);

        crit_enter();

        if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
                pvf = pv;

                do {
                        pvn = TAILQ_NEXT(pv, pv_list);

                        TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
                        TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);

                        if (!pmap_track_modified(pv->pv_va))
                                continue;

                        pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);

                        if (pte && (*pte & PG_A)) {
#ifdef SMP
                                atomic_clear_int(pte, PG_A);
#else
                                atomic_clear_int_nonlocked(pte, PG_A);
#endif
                                rtval++;
                                if (rtval > 4)
                                        break;
                        }
                } while ((pv = pvn) != NULL && pv != pvf);
        }
        crit_exit();

        return (rtval);
}

/*
 * pmap_is_modified:
 *
 *      Return whether or not the specified physical page was modified
 *      in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{
        return pmap_testbit(m, PG_M);
}

/*
 *      Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{
        pmap_clearbit(m, PG_M);
}

/*
 * pmap_clear_reference:
 *
 *      Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{
        pmap_clearbit(m, PG_A);
}

/*
 * Miscellaneous support routines follow
 */

static void
i386_protection_init(void)
{
        int *kp, prot;

        kp = protection_codes;
        for (prot = 0; prot < 8; prot++) {
                switch (prot) {
                case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
                        /*
                         * Read access is also 0.  There isn't any execute
                         * bit, so just make it readable.
                         */
                case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
                case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
                case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
                        *kp++ = 0;
                        break;
                case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
                case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
                case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
                case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
                        *kp++ = PG_RW;
                        break;
                }
        }
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 *
 * NOTE: we can't use pgeflag unless we invalidate the pages one at
 * a time.
 */
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{
        vm_offset_t va, tmpva, offset;
        unsigned *pte;

        offset = pa & PAGE_MASK;
        size = roundup(offset + size, PAGE_SIZE);

        va = kmem_alloc_nofault(&kernel_map, size);
        if (va == 0)
                panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

        pa = pa & PG_FRAME;
        for (tmpva = va; size > 0;) {
                pte = (unsigned *)vtopte(tmpva);
                *pte = pa | PG_RW | PG_V; /* | pgeflag; */
                size -= PAGE_SIZE;
                tmpva += PAGE_SIZE;
                pa += PAGE_SIZE;
        }
        cpu_invltlb();

        return ((void *)(va + offset));
}

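/*
 * pmap_unmapdev:
 *
 *      Tear down a device mapping previously established by pmap_mapdev()
 *      and release the kernel virtual memory backing it.
 */
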
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
        vm_offset_t base, offset;

        base = va & PG_FRAME;
        offset = va & PAGE_MASK;
        size = roundup(offset + size, PAGE_SIZE);
        pmap_qremove(va, size >> PAGE_SHIFT);
        kmem_free(&kernel_map, base, size);
}

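/*
 * Illustrative sketch (assumed usage, not part of the original file): a
 * driver maps a device register window and releases it when done.  The
 * physical address and size below are hypothetical.
 *
 *      void *regs = pmap_mapdev(0xfebf0000, 4096);
 *      ...
 *      pmap_unmapdev((vm_offset_t)regs, 4096);
 */
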
/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
        unsigned *ptep, pte;
        vm_page_t m;
        int val = 0;

        ptep = pmap_pte(pmap, addr);
        if (ptep == 0)
                return 0;

        if ((pte = *ptep) != 0) {
                vm_offset_t pa;

                val = MINCORE_INCORE;
                if ((pte & PG_MANAGED) == 0)
                        return val;

                pa = pte & PG_FRAME;

                m = PHYS_TO_VM_PAGE(pa);

                /*
                 * Modified by us
                 */
                if (pte & PG_M)
                        val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
                /*
                 * Modified by someone
                 */
                else if (m->dirty || pmap_is_modified(m))
                        val |= MINCORE_MODIFIED_OTHER;
                /*
                 * Referenced by us
                 */
                if (pte & PG_A)
                        val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
                /*
                 * Referenced by someone
                 */
                else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
                        val |= MINCORE_REFERENCED_OTHER;
                        vm_page_flag_set(m, PG_REFERENCED);
                }
        }
        return val;
}

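/*
 * The value returned above combines MINCORE_INCORE with the
 * MINCORE_MODIFIED/MINCORE_MODIFIED_OTHER and
 * MINCORE_REFERENCED/MINCORE_REFERENCED_OTHER bits consumed by the
 * mincore() system call layer.
 */
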
/*
 * Replace p->p_vmspace with a new one.  If adjrefs is non-zero the new
 * vmspace will be ref'd and the old one will be deref'd.
 *
 * The vmspace for all lwps associated with the process will be adjusted
 * and cr3 will be reloaded if any lwp is the current lwp.
 */
void
pmap_replacevm(struct proc *p, struct vmspace *newvm, int adjrefs)
{
        struct vmspace *oldvm;
        struct lwp *lp;

        crit_enter();
        oldvm = p->p_vmspace;
        if (oldvm != newvm) {
                p->p_vmspace = newvm;
                KKASSERT(p->p_nthreads == 1);
                lp = RB_ROOT(&p->p_lwp_tree);
                pmap_setlwpvm(lp, newvm);
                if (adjrefs) {
                        sysref_get(&newvm->vm_sysref);
                        sysref_put(&oldvm->vm_sysref);
                }
        }
        crit_exit();
}

/*
 * Set the vmspace for a LWP.  The vmspace is almost universally set the
 * same as the process vmspace, but virtual kernels need to swap out contexts
 * on a per-lwp basis.
 */
void
pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm)
{
        struct vmspace *oldvm;
        struct pmap *pmap;

        crit_enter();
        oldvm = lp->lwp_vmspace;

        if (oldvm != newvm) {
                lp->lwp_vmspace = newvm;
                if (curthread->td_lwp == lp) {
                        pmap = vmspace_pmap(newvm);
#if defined(SMP)
                        atomic_set_int(&pmap->pm_active, 1 << mycpu->gd_cpuid);
#else
                        pmap->pm_active |= 1;
#endif
#if defined(SWTCH_OPTIM_STATS)
                        tlb_flush_count++;
#endif
                        curthread->td_pcb->pcb_cr3 = vtophys(pmap->pm_pdir);
                        load_cr3(curthread->td_pcb->pcb_cr3);
                        pmap = vmspace_pmap(oldvm);
#if defined(SMP)
                        atomic_clear_int(&pmap->pm_active,
                                         1 << mycpu->gd_cpuid);
#else
                        pmap->pm_active &= ~1;
#endif
                }
        }
        crit_exit();
}

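/*
 * pmap_addr_hint:
 *
 *      Round the given address hint up to a 4MB (NBPDR) boundary for
 *      sufficiently large device objects, so the mapping can potentially
 *      use 4MB pages; otherwise return the hint unchanged.
 */
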
vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{
        if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
                return addr;
        }

        addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
        return addr;
}

#if defined(DEBUG)

static void     pads (pmap_t pm);
void            pmap_pvdump (vm_paddr_t pa);

/* print address space of pmap */
static void
pads(pmap_t pm)
{
        unsigned va, i, j;
        unsigned *ptep;

        if (pm == &kernel_pmap)
                return;
        for (i = 0; i < 1024; i++) {
                if (pm->pm_pdir[i]) {
                        for (j = 0; j < 1024; j++) {
                                va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
                                if (pm == &kernel_pmap && va < KERNBASE)
                                        continue;
                                if (pm != &kernel_pmap && va > UPT_MAX_ADDRESS)
                                        continue;
                                ptep = pmap_pte_quick(pm, va);
                                if (pmap_pte_v(ptep))
                                        kprintf("%x:%x ", va, *(int *) ptep);
                        }
                }
        }
}

void
pmap_pvdump(vm_paddr_t pa)
{
        pv_entry_t pv;
        vm_page_t m;

        kprintf("pa %08llx", (long long)pa);
        m = PHYS_TO_VM_PAGE(pa);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
#ifdef used_to_be
                kprintf(" -> pmap %p, va %x, flags %x",
                    (void *)pv->pv_pmap, pv->pv_va, pv->pv_flags);
#endif
                kprintf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);