/*
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1994 David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from:   @(#)pmap.c	7.7 (Berkeley)	5/12/91
 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $
 * $DragonFly: src/sys/platform/pc32/i386/pmap.c,v 1.87 2008/08/25 17:01:38 dillon Exp $
 */
/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

/*
 * PMAP_DEBUG - see platform/pc32/include/pmap.h
 */
#include "opt_disable_pse.h"
#include "opt_msgbuf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>

#include <vm/vm_param.h>
#include <sys/sysctl.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <sys/user.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/globaldata.h>
#include <machine/pmap.h>
#include <machine/pmap_inval.h>
#define PMAP_KEEP_PDIRS
#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#if defined(DIAGNOSTIC)
#define PMAP_DIAGNOSTIC
#endif

#if !defined(PMAP_DIAGNOSTIC)
#define PMAP_INLINE __inline
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define pdir_pde(m, v)	(m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

/*
 * Given a map and a machine independent protection code,
 * convert to a vax protection code.
 */
#define pte_prot(m, p)	\
	(protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
static int protection_codes[8];
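/*
 * For reference (32-bit non-PAE i386, as assumed throughout this file):
 * PDRSHIFT is 22, so pmap_pde() selects one of 1024 page directory
 * entries, each covering 4MB (NBPDR) of virtual address space; the
 * individual pte within that 4MB is then located with i386_btop(va).
 */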
struct pmap kernel_pmap;
static TAILQ_HEAD(,pmap) pmap_list = TAILQ_HEAD_INITIALIZER(pmap_list);

vm_paddr_t avail_start;		/* PA of first available physical page */
vm_paddr_t avail_end;		/* PA of last available physical page */
vm_offset_t virtual_start;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t KvaStart;		/* VA start of KVA space */
vm_offset_t KvaEnd;		/* VA end of KVA space (non-inclusive) */
vm_offset_t KvaSize;		/* max size of kernel virtual address space */
static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
static int pgeflag;		/* PG_G or-in */
static int pseflag;		/* PG_PS or-in */

static vm_object_t kptobj;

vm_offset_t kernel_vm_end;

/*
 * Data for the pv entry allocation mechanism
 */
static vm_zone_t pvzone;
static struct vm_zone pvzone_store;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static int pmap_pagedaemon_waken = 0;
static struct pv_entry *pvinit;

/*
 * All those kernel PT submaps that BSD is so fond of
 */
pt_entry_t *CMAP1 = 0, *ptmmap;
caddr_t CADDR1 = 0, ptvmmap = 0;
static pt_entry_t *msgbufmap;
struct msgbuf *msgbufp = 0;

static pt_entry_t *pt_crashdumpmap;
static caddr_t crashdumpmap;

extern pt_entry_t *SMPpt;
static PMAP_INLINE void	free_pv_entry (pv_entry_t pv);
static unsigned *get_ptbase (pmap_t pmap);
static pv_entry_t get_pv_entry (void);
static void	i386_protection_init (void);
static __inline void	pmap_clearbit (vm_page_t m, int bit);

static void	pmap_remove_all (vm_page_t m);
static void	pmap_enter_quick (pmap_t pmap, vm_offset_t va, vm_page_t m);
static int	pmap_remove_pte (struct pmap *pmap, unsigned *ptq,
				 vm_offset_t sva, pmap_inval_info_t info);
static void	pmap_remove_page (struct pmap *pmap,
				  vm_offset_t va, pmap_inval_info_t info);
static int	pmap_remove_entry (struct pmap *pmap, vm_page_t m,
				   vm_offset_t va, pmap_inval_info_t info);
static boolean_t pmap_testbit (vm_page_t m, int bit);
static void	pmap_insert_entry (pmap_t pmap, vm_offset_t va,
				   vm_page_t mpte, vm_page_t m);

static vm_page_t pmap_allocpte (pmap_t pmap, vm_offset_t va);

static int	pmap_release_free_page (pmap_t pmap, vm_page_t p);
static vm_page_t _pmap_allocpte (pmap_t pmap, unsigned ptepindex);
static unsigned *pmap_pte_quick (pmap_t pmap, vm_offset_t va);
static vm_page_t pmap_page_lookup (vm_object_t object, vm_pindex_t pindex);
static int	pmap_unuse_pt (pmap_t, vm_offset_t, vm_page_t, pmap_inval_info_t);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);

static unsigned pdir4mb;
/*
 * Move the kernel virtual free pointer to the next
 * 4MB.  This is used to help improve performance
 * by using a large (4MB) page for much of the kernel
 * (.text, .data, .bss)
 */
static vm_offset_t
pmap_kmem_choose(vm_offset_t addr)
{
	vm_offset_t newaddr = addr;

	if (cpu_feature & CPUID_PSE) {
		newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
	}
	return newaddr;
}
/*
 * Extract the page table entry associated with the given map/virtual
 * pair.
 *
 * This function may NOT be called from an interrupt.
 */
PMAP_INLINE unsigned *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	unsigned *pdeaddr;

	if (pmap) {
		pdeaddr = (unsigned *) pmap_pde(pmap, va);
		if (*pdeaddr & PG_PS)
			return pdeaddr;
		if (*pdeaddr)
			return get_ptbase(pmap) + i386_btop(va);
	}
	return (0);
}
/*
 * Super fast pmap_pte routine best used when scanning the pv lists.
 * This eliminates many coarse-grained invltlb calls.  Note that many of
 * the pv list scans are across different pmaps and it is very wasteful
 * to do an entire invltlb when checking a single mapping.
 *
 * Should only be called while in a critical section.
 *
 * Unlike get_ptbase(), this function MAY be called from an interrupt or
 * interrupt thread.
 */
static unsigned *
pmap_pte_quick(pmap_t pmap, vm_offset_t va)
{
	struct mdglobaldata *gd = mdcpu;
	unsigned pde, newpf;

	if ((pde = (unsigned) pmap->pm_pdir[va >> PDRSHIFT]) != 0) {
		unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
		unsigned index = i386_btop(va);
		/* are we current address space or kernel? */
		if ((pmap == &kernel_pmap) ||
		    (frame == (((unsigned) PTDpde) & PG_FRAME))) {
			return (unsigned *) PTmap + index;
		}
		newpf = pde & PG_FRAME;
		if (((*(unsigned *) gd->gd_PMAP1) & PG_FRAME) != newpf) {
			*(unsigned *) gd->gd_PMAP1 = newpf | PG_RW | PG_V;
			cpu_invlpg(gd->gd_PADDR1);
		}
		return gd->gd_PADDR1 + ((unsigned) index & (NPTEPG - 1));
	}
	return (0);
}
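/*
 * gd_PMAP1/gd_PADDR1 form a one-page, per-cpu window: gd_PMAP1 is the
 * pte that maps the VA gd_PADDR1, so pointing it at the foreign page
 * table page lets the ptes be read through gd_PADDR1 at the cost of a
 * single invlpg instead of a full invltlb.
 */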
/*
 * Bootstrap the system enough to run with virtual memory.
 *
 * On the i386 this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address "KERNBASE" to the actual
 * (physical) address starting relative to 0]
 */
316 pmap_bootstrap(vm_paddr_t firstaddr
, vm_paddr_t loadaddr
)
320 struct mdglobaldata
*gd
;
	KvaStart = (vm_offset_t)VADDR(PTDPTDI, 0);
	KvaSize = (vm_offset_t)VADDR(APTDPTDI, 0) - KvaStart;
	KvaEnd = KvaStart + KvaSize;

	avail_start = firstaddr;

	/*
	 * XXX The calculation of virtual_start is wrong.  It's NKPT*PAGE_SIZE
	 * too large.  It should instead be correctly calculated in locore.s and
	 * not based on 'first' (which is a physical address, not a virtual
	 * address, for the start of unused physical memory).  The kernel
	 * page tables are NOT double mapped and thus should not be included
	 * in this calculation.
	 */
	virtual_start = (vm_offset_t) KERNBASE + firstaddr;
	virtual_start = pmap_kmem_choose(virtual_start);
	virtual_end = VADDR(KPTDI+NKPDE-1, NPTEPG-1);
	/*
	 * Initialize protection array.
	 */
	i386_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't have to use
	 * pmap_create, which is unlikely to work correctly at this part of
	 * the boot sequence (XXX and which no longer exists).
	 */
	kernel_pmap.pm_pdir = (pd_entry_t *)(KERNBASE + (u_int)IdlePTD);
	kernel_pmap.pm_count = 1;
	kernel_pmap.pm_active = (cpumask_t)-1;	/* don't allow deactivation */
	TAILQ_INIT(&kernel_pmap.pm_pvlist);
	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
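	/*
	 * SYSMAP(c, p, v, n) carves n pages out of the KVA cursor 'va':
	 * it stores the current va in 'v' (cast to type 'c'), records the
	 * first corresponding kernel pte in 'p', and advances both the va
	 * and pte cursors.  The CMAP1/CADDR1, crashdumpmap, ptvmmap and
	 * msgbuf reservations below all use it.
	 */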
	pte = (pt_entry_t *) pmap_pte(&kernel_pmap, va);

	/*
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
	 */
	SYSMAP(caddr_t, CMAP1, CADDR1, 1)

	SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);

	/*
	 * ptvmmap is used for reading arbitrary physical pages via
	 */
	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)

	/*
	 * msgbufp is used to map the system message buffer.
	 * XXX msgbufmap is not used.
	 */
	SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
	       atop(round_page(MSGBUF_SIZE)))
394 for (i
= 0; i
< NKPT
; i
++)
398 * PG_G is terribly broken on SMP because we IPI invltlb's in some
399 * cases rather then invl1pg. Actually, I don't even know why it
400 * works under UP because self-referential page table mappings
405 if (cpu_feature
& CPUID_PGE
)
410 * Initialize the 4MB page size flag
414 * The 4MB page version of the initial
415 * kernel page mapping.
419 #if !defined(DISABLE_PSE)
420 if (cpu_feature
& CPUID_PSE
) {
423 * Note that we have enabled PSE mode
426 ptditmp
= *((unsigned *)PTmap
+ i386_btop(KERNBASE
));
427 ptditmp
&= ~(NBPDR
- 1);
428 ptditmp
|= PG_V
| PG_RW
| PG_PS
| PG_U
| pgeflag
;
433 * Enable the PSE mode. If we are SMP we can't do this
434 * now because the APs will not be able to use it when
437 load_cr4(rcr4() | CR4_PSE
);
440 * We can do the mapping here for the single processor
441 * case. We simply ignore the old page table page from
445 * For SMP, we still need 4K pages to bootstrap APs,
446 * PSE will be enabled as soon as all APs are up.
448 PTD
[KPTDI
] = (pd_entry_t
)ptditmp
;
449 kernel_pmap
.pm_pdir
[KPTDI
] = (pd_entry_t
)ptditmp
;
456 * We need to finish setting up the globaldata page for the BSP.
457 * locore has already populated the page table for the mdglobaldata
460 pg
= MDGLOBALDATA_BASEALLOC_PAGES
;
461 gd
= &CPU_prvspace
[0].mdglobaldata
;
462 gd
->gd_CMAP1
= &SMPpt
[pg
+ 0];
463 gd
->gd_CMAP2
= &SMPpt
[pg
+ 1];
464 gd
->gd_CMAP3
= &SMPpt
[pg
+ 2];
465 gd
->gd_PMAP1
= &SMPpt
[pg
+ 3];
466 gd
->gd_GDMAP1
= &PTD
[KGDTDI
];
467 gd
->gd_CADDR1
= CPU_prvspace
[0].CPAGE1
;
468 gd
->gd_CADDR2
= CPU_prvspace
[0].CPAGE2
;
469 gd
->gd_CADDR3
= CPU_prvspace
[0].CPAGE3
;
470 gd
->gd_PADDR1
= (unsigned *)CPU_prvspace
[0].PPAGE1
;
471 gd
->gd_GDADDR1
= (unsigned *)VADDR(KGDTDI
, 0);
478 * Set 4mb pdir for mp startup
483 if (pseflag
&& (cpu_feature
& CPUID_PSE
)) {
484 load_cr4(rcr4() | CR4_PSE
);
485 if (pdir4mb
&& mycpu
->gd_cpuid
== 0) { /* only on BSP */
486 kernel_pmap
.pm_pdir
[KPTDI
] =
487 PTD
[KPTDI
] = (pd_entry_t
)pdir4mb
;
495 * Initialize the pmap module.
496 * Called by vm_init, to initialize any structures that the pmap
497 * system needs to map virtual memory.
498 * pmap_init has been enhanced to support in a fairly consistant
499 * way, discontiguous physical memory.
508 * object for kernel page table pages
510 kptobj
= vm_object_allocate(OBJT_DEFAULT
, NKPDE
);
513 * Allocate memory for random pmap data structures. Includes the
517 for(i
= 0; i
< vm_page_array_size
; i
++) {
520 m
= &vm_page_array
[i
];
521 TAILQ_INIT(&m
->md
.pv_list
);
522 m
->md
.pv_list_count
= 0;
526 * init the pv free list
528 initial_pvs
= vm_page_array_size
;
529 if (initial_pvs
< MINPV
)
531 pvzone
= &pvzone_store
;
532 pvinit
= (struct pv_entry
*) kmem_alloc(&kernel_map
,
533 initial_pvs
* sizeof (struct pv_entry
));
534 zbootinit(pvzone
, "PV ENTRY", sizeof (struct pv_entry
), pvinit
,
538 * Now it is safe to enable pv_table recording.
540 pmap_initialized
= TRUE
;
544 * Initialize the address space (zone) for the pv_entries. Set a
545 * high water mark so that the system can recover from excessive
546 * numbers of pv entries.
551 int shpgperproc
= PMAP_SHPGPERPROC
;
553 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc
);
554 pv_entry_max
= shpgperproc
* maxproc
+ vm_page_array_size
;
555 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max
);
556 pv_entry_high_water
= 9 * (pv_entry_max
/ 10);
557 zinitna(pvzone
, &pvzone_obj
, NULL
, 0, pv_entry_max
, ZONE_INTERRUPT
, 1);
/***************************************************
 * Low level helper routines.....
 ***************************************************/
568 test_m_maps_pv(vm_page_t m
, pv_entry_t pv
)
574 KKASSERT(pv
->pv_m
== m
);
576 TAILQ_FOREACH(spv
, &m
->md
.pv_list
, pv_list
) {
583 panic("test_m_maps_pv: failed m %p pv %p\n", m
, pv
);
587 ptbase_assert(struct pmap
*pmap
)
589 unsigned frame
= (unsigned) pmap
->pm_pdir
[PTDPTDI
] & PG_FRAME
;
591 /* are we current address space or kernel? */
592 if (pmap
== &kernel_pmap
|| frame
== (((unsigned)PTDpde
) & PG_FRAME
)) {
595 KKASSERT(frame
== (*mycpu
->gd_GDMAP1
& PG_FRAME
));
#define test_m_maps_pv(m, pv)
#define ptbase_assert(pmap)
605 #if defined(PMAP_DIAGNOSTIC)
608 * This code checks for non-writeable/modified pages.
609 * This should be an invalid condition.
612 pmap_nw_modified(pt_entry_t ptea
)
618 if ((pte
& (PG_M
|PG_RW
)) == PG_M
)
627 * this routine defines the region(s) of memory that should
628 * not be tested for the modified bit.
630 static PMAP_INLINE
int
631 pmap_track_modified(vm_offset_t va
)
633 if ((va
< clean_sva
) || (va
>= clean_eva
))
/*
 * Retrieve the mapped page table base for a particular pmap.  Use our self
 * mapping for the kernel_pmap or our current pmap.
 *
 * For foreign pmaps we use the per-cpu page table map.  Since this involves
 * installing a ptd it's actually (per-process x per-cpu).  However, we
 * still cannot depend on our mapping to survive thread switches because
 * the process might be threaded and switching to another thread for the
 * same process on the same cpu will allow that other thread to make its
 * own mapping.
 *
 * This could be a bit confusing but the gist is for something like the
 * vkernel which uses foreign pmaps all the time this represents a pretty
 * good cache that avoids unnecessary invltlb()s.
 */
static unsigned *
get_ptbase(pmap_t pmap)
{
	unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
	struct mdglobaldata *gd = mdcpu;

	/*
	 * We can use PTmap if the pmap is our current address space or
	 * the kernel address space.
	 */
	if (pmap == &kernel_pmap ||
	    frame == (((unsigned) PTDpde) & PG_FRAME)) {
		return (unsigned *) PTmap;
	}

	/*
	 * Otherwise we use the per-cpu alternative page table map.  Each
	 * cpu gets its own map.  Because of this we cannot use this map
	 * from interrupts or threads which can preempt.
	 *
	 * Even if we already have the map cached we may still have to
	 * invalidate the TLB if another cpu modified a PDE in the map.
	 */
	KKASSERT(gd->mi.gd_intr_nesting_level == 0 &&
		 (gd->mi.gd_curthread->td_flags & TDF_INTTHREAD) == 0);

	if ((*gd->gd_GDMAP1 & PG_FRAME) != frame) {
		*gd->gd_GDMAP1 = frame | PG_RW | PG_V;
		pmap->pm_cached |= gd->mi.gd_cpumask;
		cpu_invltlb();
	} else if ((pmap->pm_cached & gd->mi.gd_cpumask) == 0) {
		pmap->pm_cached |= gd->mi.gd_cpumask;
		cpu_invltlb();
	}
	return ((unsigned *)gd->gd_GDADDR1);
}
/*
 * Extract the physical page address associated with the map/VA pair.
 *
 * This function may not be called from an interrupt if the pmap is
 * not the kernel_pmap.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	vm_offset_t rtval;
	vm_offset_t pdirindex;

	pdirindex = va >> PDRSHIFT;
	if (pmap && (rtval = (unsigned) pmap->pm_pdir[pdirindex])) {
		unsigned *pte;
		if ((rtval & PG_PS) != 0) {
			rtval &= ~(NBPDR - 1);
			rtval |= va & (NBPDR - 1);
			return rtval;
		}
		pte = get_ptbase(pmap) + i386_btop(va);
		rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
		return rtval;
	}
	return (0);
}
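/*
 * Worked example of the PG_PS branch above: with a 4MB page the pde
 * itself holds the physical frame, so the result is (pde & ~(NBPDR-1))
 * combined with the low 22 bits of the virtual address as the offset
 * within that 4MB page.
 */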
/***************************************************
 * Low level mapping routines.....
 ***************************************************/
/*
 * Routine: pmap_kenter
 *
 *	Add a wired page to the KVA
 *	NOTE! note that in order for the mapping to take effect -- you
 *	should do an invltlb after doing the pmap_kenter().
 */
void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{
	unsigned *pte;
	unsigned npte;
	pmap_inval_info info;

	pmap_inval_init(&info);
	npte = pa | PG_RW | PG_V | pgeflag;
	pte = (unsigned *)vtopte(va);
	pmap_inval_add(&info, &kernel_pmap, va);
	*pte = npte;
	pmap_inval_flush(&info);
}
/*
 * Routine: pmap_kenter_quick
 *
 *	Similar to pmap_kenter(), except we only invalidate the
 *	mapping on the current CPU.
 */
void
pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
{
	unsigned *pte;
	unsigned npte;

	npte = pa | PG_RW | PG_V | pgeflag;
	pte = (unsigned *)vtopte(va);
	*pte = npte;
	cpu_invlpg((void *)va);
}
void
pmap_kenter_sync(vm_offset_t va)
{
	pmap_inval_info info;

	pmap_inval_init(&info);
	pmap_inval_add(&info, &kernel_pmap, va);
	pmap_inval_flush(&info);
}

void
pmap_kenter_sync_quick(vm_offset_t va)
{
	cpu_invlpg((void *)va);
}
/*
 * remove a page from the kernel pagetables
 */
void
pmap_kremove(vm_offset_t va)
{
	unsigned *pte;
	pmap_inval_info info;

	pmap_inval_init(&info);
	pte = (unsigned *)vtopte(va);
	pmap_inval_add(&info, &kernel_pmap, va);
	*pte = 0;
	pmap_inval_flush(&info);
}
void
pmap_kremove_quick(vm_offset_t va)
{
	unsigned *pte;

	pte = (unsigned *)vtopte(va);
	*pte = 0;
	cpu_invlpg((void *)va);
}
/*
 * XXX these need to be recoded.  They are not used in any critical path.
 */
void
pmap_kmodify_rw(vm_offset_t va)
{
	*vtopte(va) |= PG_RW;
	cpu_invlpg((void *)va);
}

void
pmap_kmodify_nc(vm_offset_t va)
{
	*vtopte(va) |= PG_N;
	cpu_invlpg((void *)va);
}
822 * Used to map a range of physical addresses into kernel
823 * virtual address space.
825 * For now, VM is already on, we only need to map the
829 pmap_map(vm_offset_t
*virtp
, vm_paddr_t start
, vm_paddr_t end
, int prot
)
831 vm_offset_t sva
, virt
;
834 while (start
< end
) {
835 pmap_kenter(virt
, start
);
845 * Add a list of wired pages to the kva
846 * this routine is only used for temporary
847 * kernel mappings that do not need to have
848 * page modification or references recorded.
849 * Note that old mappings are simply written
850 * over. The page *must* be wired.
853 pmap_qenter(vm_offset_t va
, vm_page_t
*m
, int count
)
857 end_va
= va
+ count
* PAGE_SIZE
;
859 while (va
< end_va
) {
862 pte
= (unsigned *)vtopte(va
);
863 *pte
= VM_PAGE_TO_PHYS(*m
) | PG_RW
| PG_V
| pgeflag
;
864 cpu_invlpg((void *)va
);
869 smp_invltlb(); /* XXX */
874 pmap_qenter2(vm_offset_t va
, vm_page_t
*m
, int count
, cpumask_t
*mask
)
877 cpumask_t cmask
= mycpu
->gd_cpumask
;
879 end_va
= va
+ count
* PAGE_SIZE
;
881 while (va
< end_va
) {
886 * Install the new PTE. If the pte changed from the prior
887 * mapping we must reset the cpu mask and invalidate the page.
888 * If the pte is the same but we have not seen it on the
889 * current cpu, invlpg the existing mapping. Otherwise the
890 * entry is optimal and no invalidation is required.
892 pte
= (unsigned *)vtopte(va
);
893 pteval
= VM_PAGE_TO_PHYS(*m
) | PG_A
| PG_RW
| PG_V
| pgeflag
;
894 if (*pte
!= pteval
) {
897 cpu_invlpg((void *)va
);
898 } else if ((*mask
& cmask
) == 0) {
899 cpu_invlpg((void *)va
);
908 * This routine jerks page mappings from the
909 * kernel -- it is meant only for temporary mappings.
911 * MPSAFE, INTERRUPT SAFE (cluster callback)
914 pmap_qremove(vm_offset_t va
, int count
)
918 end_va
= va
+ count
*PAGE_SIZE
;
920 while (va
< end_va
) {
923 pte
= (unsigned *)vtopte(va
);
925 cpu_invlpg((void *)va
);
934 * This routine works like vm_page_lookup() but also blocks as long as the
935 * page is busy. This routine does not busy the page it returns.
937 * Unless the caller is managing objects whos pages are in a known state,
938 * the call should be made with a critical section held so the page's object
939 * association remains valid on return.
942 pmap_page_lookup(vm_object_t object
, vm_pindex_t pindex
)
947 m
= vm_page_lookup(object
, pindex
);
948 } while (m
&& vm_page_sleep_busy(m
, FALSE
, "pplookp"));
954 * Create a new thread and optionally associate it with a (new) process.
955 * NOTE! the new thread's cpu may not equal the current cpu.
958 pmap_init_thread(thread_t td
)
960 /* enforce pcb placement */
961 td
->td_pcb
= (struct pcb
*)(td
->td_kstack
+ td
->td_kstack_size
) - 1;
962 td
->td_savefpu
= &td
->td_pcb
->pcb_save
;
963 td
->td_sp
= (char *)td
->td_pcb
- 16;
967 * This routine directly affects the fork perf for a process.
970 pmap_init_proc(struct proc
*p
)
975 * Dispose the UPAGES for a process that has exited.
976 * This routine directly impacts the exit perf of a process.
979 pmap_dispose_proc(struct proc
*p
)
981 KASSERT(p
->p_lock
== 0, ("attempt to dispose referenced proc! %p", p
));
/***************************************************
 * Page table page management routines.....
 ***************************************************/
989 * This routine unholds page table pages, and if the hold count
990 * drops to zero, then it decrements the wire count.
993 _pmap_unwire_pte_hold(pmap_t pmap
, vm_page_t m
, pmap_inval_info_t info
)
996 * Wait until we can busy the page ourselves. We cannot have
997 * any active flushes if we block.
999 if (m
->flags
& PG_BUSY
) {
1000 pmap_inval_flush(info
);
1001 while (vm_page_sleep_busy(m
, FALSE
, "pmuwpt"))
1004 KASSERT(m
->queue
== PQ_NONE
,
1005 ("_pmap_unwire_pte_hold: %p->queue != PQ_NONE", m
));
1007 if (m
->hold_count
== 1) {
1009 * Unmap the page table page.
1011 * NOTE: We must clear pm_cached for all cpus, including
1012 * the current one, when clearing a page directory
1016 pmap_inval_add(info
, pmap
, -1);
1017 KKASSERT(pmap
->pm_pdir
[m
->pindex
]);
1018 pmap
->pm_pdir
[m
->pindex
] = 0;
1019 pmap
->pm_cached
= 0;
1021 KKASSERT(pmap
->pm_stats
.resident_count
> 0);
1022 --pmap
->pm_stats
.resident_count
;
1024 if (pmap
->pm_ptphint
== m
)
1025 pmap
->pm_ptphint
= NULL
;
1028 * This was our last hold, the page had better be unwired
1029 * after we decrement wire_count.
1031 * FUTURE NOTE: shared page directory page could result in
1032 * multiple wire counts.
1036 KKASSERT(m
->wire_count
== 0);
1037 --vmstats
.v_wire_count
;
1038 vm_page_flag_clear(m
, PG_MAPPED
| PG_WRITEABLE
);
1040 vm_page_free_zero(m
);
1043 KKASSERT(m
->hold_count
> 1);
1049 static PMAP_INLINE
int
1050 pmap_unwire_pte_hold(pmap_t pmap
, vm_page_t m
, pmap_inval_info_t info
)
1052 KKASSERT(m
->hold_count
> 0);
1053 if (m
->hold_count
> 1) {
1057 return _pmap_unwire_pte_hold(pmap
, m
, info
);
1062 * After removing a page table entry, this routine is used to
1063 * conditionally free the page, and manage the hold/wire counts.
1065 * WARNING: This function can block
1068 pmap_unuse_pt(pmap_t pmap
, vm_offset_t va
, vm_page_t mpte
,
1069 pmap_inval_info_t info
)
1072 if (va
>= UPT_MIN_ADDRESS
)
1076 ptepindex
= (va
>> PDRSHIFT
);
1077 if (pmap
->pm_ptphint
&&
1078 (pmap
->pm_ptphint
->pindex
== ptepindex
)) {
1079 mpte
= pmap
->pm_ptphint
;
1081 pmap_inval_flush(info
);
1082 mpte
= pmap_page_lookup( pmap
->pm_pteobj
, ptepindex
);
1083 pmap
->pm_ptphint
= mpte
;
1087 return pmap_unwire_pte_hold(pmap
, mpte
, info
);
1091 * Initialize pmap0/vmspace0. This pmap is not added to pmap_list because
1092 * it, and IdlePTD, represents the template used to update all other pmaps.
1094 * On architectures where the kernel pmap is not integrated into the user
1095 * process pmap, this pmap represents the process pmap, not the kernel pmap.
1096 * kernel_pmap should be used to directly access the kernel_pmap.
1099 pmap_pinit0(struct pmap
*pmap
)
1102 (pd_entry_t
*)kmem_alloc_pageable(&kernel_map
, PAGE_SIZE
);
1103 pmap_kenter((vm_offset_t
)pmap
->pm_pdir
, (vm_offset_t
) IdlePTD
);
1105 pmap
->pm_active
= 0;
1106 pmap
->pm_cached
= 0;
1107 pmap
->pm_ptphint
= NULL
;
1108 TAILQ_INIT(&pmap
->pm_pvlist
);
1109 bzero(&pmap
->pm_stats
, sizeof pmap
->pm_stats
);
1113 * Initialize a preallocated and zeroed pmap structure,
1114 * such as one in a vmspace structure.
1117 pmap_pinit(struct pmap
*pmap
)
1122 * No need to allocate page table space yet but we do need a valid
1123 * page directory table.
1125 if (pmap
->pm_pdir
== NULL
) {
1127 (pd_entry_t
*)kmem_alloc_pageable(&kernel_map
, PAGE_SIZE
);
1131 * Allocate an object for the ptes
1133 if (pmap
->pm_pteobj
== NULL
)
1134 pmap
->pm_pteobj
= vm_object_allocate(OBJT_DEFAULT
, PTDPTDI
+ 1);
1137 * Allocate the page directory page, unless we already have
1138 * one cached. If we used the cached page the wire_count will
1139 * already be set appropriately.
1141 if ((ptdpg
= pmap
->pm_pdirm
) == NULL
) {
1142 ptdpg
= vm_page_grab(pmap
->pm_pteobj
, PTDPTDI
,
1143 VM_ALLOC_NORMAL
| VM_ALLOC_RETRY
);
1144 pmap
->pm_pdirm
= ptdpg
;
1145 vm_page_flag_clear(ptdpg
, PG_MAPPED
| PG_BUSY
);
1146 ptdpg
->valid
= VM_PAGE_BITS_ALL
;
1147 ptdpg
->wire_count
= 1;
1148 ++vmstats
.v_wire_count
;
1149 pmap_kenter((vm_offset_t
)pmap
->pm_pdir
, VM_PAGE_TO_PHYS(ptdpg
));
1151 if ((ptdpg
->flags
& PG_ZERO
) == 0)
1152 bzero(pmap
->pm_pdir
, PAGE_SIZE
);
1155 pmap_page_assertzero(VM_PAGE_TO_PHYS(ptdpg
));
1158 pmap
->pm_pdir
[MPPTDI
] = PTD
[MPPTDI
];
	/* install self-referential address mapping entry */
	*(unsigned *) (pmap->pm_pdir + PTDPTDI) =
		VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;
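	/*
	 * Note on the self-referential entry: pointing pm_pdir[PTDPTDI]
	 * back at the page directory makes the directory double as a page
	 * table, so the pmap's ptes become addressable through the
	 * PTmap/PTD windows that pmap_pte() and friends rely on.
	 */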
1165 pmap
->pm_active
= 0;
1166 pmap
->pm_cached
= 0;
1167 pmap
->pm_ptphint
= NULL
;
1168 TAILQ_INIT(&pmap
->pm_pvlist
);
1169 bzero(&pmap
->pm_stats
, sizeof pmap
->pm_stats
);
1170 pmap
->pm_stats
.resident_count
= 1;
1174 * Clean up a pmap structure so it can be physically freed. This routine
1175 * is called by the vmspace dtor function. A great deal of pmap data is
1176 * left passively mapped to improve vmspace management so we have a bit
1177 * of cleanup work to do here.
1180 pmap_puninit(pmap_t pmap
)
1184 KKASSERT(pmap
->pm_active
== 0);
1185 if ((p
= pmap
->pm_pdirm
) != NULL
) {
1186 KKASSERT(pmap
->pm_pdir
!= NULL
);
1187 pmap_kremove((vm_offset_t
)pmap
->pm_pdir
);
1189 vmstats
.v_wire_count
--;
1190 KKASSERT((p
->flags
& PG_BUSY
) == 0);
1192 vm_page_free_zero(p
);
1193 pmap
->pm_pdirm
= NULL
;
1195 if (pmap
->pm_pdir
) {
1196 kmem_free(&kernel_map
, (vm_offset_t
)pmap
->pm_pdir
, PAGE_SIZE
);
1197 pmap
->pm_pdir
= NULL
;
1199 if (pmap
->pm_pteobj
) {
1200 vm_object_deallocate(pmap
->pm_pteobj
);
1201 pmap
->pm_pteobj
= NULL
;
1206 * Wire in kernel global address entries. To avoid a race condition
1207 * between pmap initialization and pmap_growkernel, this procedure
1208 * adds the pmap to the master list (which growkernel scans to update),
1209 * then copies the template.
1212 pmap_pinit2(struct pmap
*pmap
)
1215 TAILQ_INSERT_TAIL(&pmap_list
, pmap
, pm_pmnode
);
1216 /* XXX copies current process, does not fill in MPPTDI */
1217 bcopy(PTD
+ KPTDI
, pmap
->pm_pdir
+ KPTDI
, nkpt
* PTESIZE
);
1222 * Attempt to release and free a vm_page in a pmap. Returns 1 on success,
1223 * 0 on failure (if the procedure had to sleep).
1225 * When asked to remove the page directory page itself, we actually just
1226 * leave it cached so we do not have to incur the SMP inval overhead of
1227 * removing the kernel mapping. pmap_puninit() will take care of it.
1230 pmap_release_free_page(struct pmap
*pmap
, vm_page_t p
)
1232 unsigned *pde
= (unsigned *) pmap
->pm_pdir
;
1234 * This code optimizes the case of freeing non-busy
1235 * page-table pages. Those pages are zero now, and
1236 * might as well be placed directly into the zero queue.
1238 if (vm_page_sleep_busy(p
, FALSE
, "pmaprl"))
1244 * Remove the page table page from the processes address space.
1246 KKASSERT(pmap
->pm_stats
.resident_count
> 0);
1247 KKASSERT(pde
[p
->pindex
]);
1249 --pmap
->pm_stats
.resident_count
;
1251 if (p
->hold_count
) {
1252 panic("pmap_release: freeing held page table page");
1254 if (pmap
->pm_ptphint
&& (pmap
->pm_ptphint
->pindex
== p
->pindex
))
1255 pmap
->pm_ptphint
= NULL
;
1258 * We leave the page directory page cached, wired, and mapped in
1259 * the pmap until the dtor function (pmap_puninit()) gets called.
1260 * However, still clean it up so we can set PG_ZERO.
1262 * The pmap has already been removed from the pmap_list in the
1265 if (p
->pindex
== PTDPTDI
) {
1266 bzero(pde
+ KPTDI
, nkpt
* PTESIZE
);
1267 bzero(pde
+ KGDTDI
, (NPDEPG
- KGDTDI
) * PTESIZE
);
1268 vm_page_flag_set(p
, PG_ZERO
);
1272 vmstats
.v_wire_count
--;
1273 vm_page_free_zero(p
);
1279 * this routine is called if the page table page is not
1283 _pmap_allocpte(pmap_t pmap
, unsigned ptepindex
)
1285 vm_offset_t pteva
, ptepa
;
1289 * Find or fabricate a new pagetable page
1291 m
= vm_page_grab(pmap
->pm_pteobj
, ptepindex
,
1292 VM_ALLOC_NORMAL
| VM_ALLOC_ZERO
| VM_ALLOC_RETRY
);
1294 KASSERT(m
->queue
== PQ_NONE
,
1295 ("_pmap_allocpte: %p->queue != PQ_NONE", m
));
1298 * Increment the hold count for the page we will be returning to
1304 * It is possible that someone else got in and mapped by the page
1305 * directory page while we were blocked, if so just unbusy and
1306 * return the held page.
1308 if ((ptepa
= pmap
->pm_pdir
[ptepindex
]) != 0) {
1309 KKASSERT((ptepa
& PG_FRAME
) == VM_PAGE_TO_PHYS(m
));
1314 if (m
->wire_count
== 0)
1315 vmstats
.v_wire_count
++;
1320 * Map the pagetable page into the process address space, if
1321 * it isn't already there.
1323 * NOTE: For safety clear pm_cached for all cpus including the
1324 * current one when adding a PDE to the map.
1326 ++pmap
->pm_stats
.resident_count
;
1328 ptepa
= VM_PAGE_TO_PHYS(m
);
1329 pmap
->pm_pdir
[ptepindex
] =
1330 (pd_entry_t
) (ptepa
| PG_U
| PG_RW
| PG_V
| PG_A
| PG_M
);
1331 pmap
->pm_cached
= 0;
1334 * Set the page table hint
1336 pmap
->pm_ptphint
= m
;
1339 * Try to use the new mapping, but if we cannot, then
1340 * do it with the routine that maps the page explicitly.
1342 if ((m
->flags
& PG_ZERO
) == 0) {
1343 if ((((unsigned)pmap
->pm_pdir
[PTDPTDI
]) & PG_FRAME
) ==
1344 (((unsigned) PTDpde
) & PG_FRAME
)) {
1345 pteva
= UPT_MIN_ADDRESS
+ i386_ptob(ptepindex
);
1346 bzero((caddr_t
) pteva
, PAGE_SIZE
);
1348 pmap_zero_page(ptepa
);
1353 pmap_page_assertzero(VM_PAGE_TO_PHYS(m
));
1357 m
->valid
= VM_PAGE_BITS_ALL
;
1358 vm_page_flag_clear(m
, PG_ZERO
);
1359 vm_page_flag_set(m
, PG_MAPPED
);
1366 pmap_allocpte(pmap_t pmap
, vm_offset_t va
)
1373 * Calculate pagetable page index
1375 ptepindex
= va
>> PDRSHIFT
;
1378 * Get the page directory entry
1380 ptepa
= (vm_offset_t
) pmap
->pm_pdir
[ptepindex
];
1383 * This supports switching from a 4MB page to a
1386 if (ptepa
& PG_PS
) {
1387 pmap
->pm_pdir
[ptepindex
] = 0;
1394 * If the page table page is mapped, we just increment the
1395 * hold count, and activate it.
1399 * In order to get the page table page, try the
1402 if (pmap
->pm_ptphint
&&
1403 (pmap
->pm_ptphint
->pindex
== ptepindex
)) {
1404 m
= pmap
->pm_ptphint
;
1406 m
= pmap_page_lookup( pmap
->pm_pteobj
, ptepindex
);
1407 pmap
->pm_ptphint
= m
;
1413 * Here if the pte page isn't mapped, or if it has been deallocated.
1415 return _pmap_allocpte(pmap
, ptepindex
);
/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/
1424 * Release any resources held by the given physical map.
1425 * Called when a pmap initialized by pmap_pinit is being released.
1426 * Should only be called if the map contains no valid mappings.
1428 static int pmap_release_callback(struct vm_page
*p
, void *data
);
1431 pmap_release(struct pmap
*pmap
)
1433 vm_object_t object
= pmap
->pm_pteobj
;
1434 struct rb_vm_page_scan_info info
;
1436 KASSERT(pmap
->pm_active
== 0, ("pmap still active! %08x", pmap
->pm_active
));
1437 #if defined(DIAGNOSTIC)
1438 if (object
->ref_count
!= 1)
1439 panic("pmap_release: pteobj reference count != 1");
1443 info
.object
= object
;
1445 TAILQ_REMOVE(&pmap_list
, pmap
, pm_pmnode
);
1452 info
.limit
= object
->generation
;
1454 vm_page_rb_tree_RB_SCAN(&object
->rb_memq
, NULL
,
1455 pmap_release_callback
, &info
);
1456 if (info
.error
== 0 && info
.mpte
) {
1457 if (!pmap_release_free_page(pmap
, info
.mpte
))
1461 } while (info
.error
);
1465 pmap_release_callback(struct vm_page
*p
, void *data
)
1467 struct rb_vm_page_scan_info
*info
= data
;
1469 if (p
->pindex
== PTDPTDI
) {
1473 if (!pmap_release_free_page(info
->pmap
, p
)) {
1477 if (info
->object
->generation
!= info
->limit
) {
1485 * Grow the number of kernel page table entries, if needed.
1489 pmap_growkernel(vm_offset_t addr
)
1492 vm_offset_t ptppaddr
;
1497 if (kernel_vm_end
== 0) {
1498 kernel_vm_end
= KERNBASE
;
1500 while (pdir_pde(PTD
, kernel_vm_end
)) {
1501 kernel_vm_end
= (kernel_vm_end
+ PAGE_SIZE
* NPTEPG
) & ~(PAGE_SIZE
* NPTEPG
- 1);
	addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
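	/*
	 * PAGE_SIZE * NPTEPG is the amount of VA covered by one page
	 * directory entry (4MB here), so these roundings advance
	 * kernel_vm_end one page table page at a time.
	 */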
1506 while (kernel_vm_end
< addr
) {
1507 if (pdir_pde(PTD
, kernel_vm_end
)) {
1508 kernel_vm_end
= (kernel_vm_end
+ PAGE_SIZE
* NPTEPG
) & ~(PAGE_SIZE
* NPTEPG
- 1);
1513 * This index is bogus, but out of the way
1515 nkpg
= vm_page_alloc(kptobj
, nkpt
,
1516 VM_ALLOC_NORMAL
| VM_ALLOC_SYSTEM
| VM_ALLOC_INTERRUPT
);
1518 panic("pmap_growkernel: no memory to grow kernel");
1521 ptppaddr
= VM_PAGE_TO_PHYS(nkpg
);
1522 pmap_zero_page(ptppaddr
);
1523 newpdir
= (pd_entry_t
) (ptppaddr
| PG_V
| PG_RW
| PG_A
| PG_M
);
1524 pdir_pde(PTD
, kernel_vm_end
) = newpdir
;
1525 *pmap_pde(&kernel_pmap
, kernel_vm_end
) = newpdir
;
1529 * This update must be interlocked with pmap_pinit2.
1531 TAILQ_FOREACH(pmap
, &pmap_list
, pm_pmnode
) {
1532 *pmap_pde(pmap
, kernel_vm_end
) = newpdir
;
1534 kernel_vm_end
= (kernel_vm_end
+ PAGE_SIZE
* NPTEPG
) &
1535 ~(PAGE_SIZE
* NPTEPG
- 1);
1541 * Retire the given physical map from service.
1542 * Should only be called if the map contains
1543 * no valid mappings.
1546 pmap_destroy(pmap_t pmap
)
1553 count
= --pmap
->pm_count
;
1556 panic("destroying a pmap is not yet implemented");
1561 * Add a reference to the specified pmap.
1564 pmap_reference(pmap_t pmap
)
/***************************************************
 * page management routines.
 ***************************************************/
1576 * free the pv_entry back to the free list. This function may be
1577 * called from an interrupt.
1579 static PMAP_INLINE
void
1580 free_pv_entry(pv_entry_t pv
)
1583 KKASSERT(pv
->pv_m
!= NULL
);
1591 * get a new pv_entry, allocating a block from the system
1592 * when needed. This function may be called from an interrupt.
1598 if (pv_entry_high_water
&&
1599 (pv_entry_count
> pv_entry_high_water
) &&
1600 (pmap_pagedaemon_waken
== 0)) {
1601 pmap_pagedaemon_waken
= 1;
1602 wakeup (&vm_pages_needed
);
1604 return zalloc(pvzone
);
1608 * This routine is very drastic, but can save the system
1616 static int warningdone
=0;
1618 if (pmap_pagedaemon_waken
== 0)
1620 pmap_pagedaemon_waken
= 0;
1622 if (warningdone
< 5) {
1623 kprintf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
1627 for(i
= 0; i
< vm_page_array_size
; i
++) {
1628 m
= &vm_page_array
[i
];
1629 if (m
->wire_count
|| m
->hold_count
|| m
->busy
||
1630 (m
->flags
& PG_BUSY
))
1638 * If it is the first entry on the list, it is actually
1639 * in the header and we must copy the following entry up
1640 * to the header. Otherwise we must search the list for
1641 * the entry. In either case we free the now unused entry.
1644 pmap_remove_entry(struct pmap
*pmap
, vm_page_t m
,
1645 vm_offset_t va
, pmap_inval_info_t info
)
1651 if (m
->md
.pv_list_count
< pmap
->pm_stats
.resident_count
) {
1652 TAILQ_FOREACH(pv
, &m
->md
.pv_list
, pv_list
) {
1653 if (pmap
== pv
->pv_pmap
&& va
== pv
->pv_va
)
1657 TAILQ_FOREACH(pv
, &pmap
->pm_pvlist
, pv_plist
) {
1659 KKASSERT(pv
->pv_pmap
== pmap
);
1661 if (va
== pv
->pv_va
)
1668 test_m_maps_pv(m
, pv
);
1669 TAILQ_REMOVE(&m
->md
.pv_list
, pv
, pv_list
);
1670 m
->md
.pv_list_count
--;
1671 if (TAILQ_EMPTY(&m
->md
.pv_list
))
1672 vm_page_flag_clear(m
, PG_MAPPED
| PG_WRITEABLE
);
1673 TAILQ_REMOVE(&pmap
->pm_pvlist
, pv
, pv_plist
);
1674 ++pmap
->pm_generation
;
1675 rtval
= pmap_unuse_pt(pmap
, va
, pv
->pv_ptem
, info
);
1682 * Create a pv entry for page at pa for
1686 pmap_insert_entry(pmap_t pmap
, vm_offset_t va
, vm_page_t mpte
, vm_page_t m
)
1691 pv
= get_pv_entry();
1693 KKASSERT(pv
->pv_m
== NULL
);
1700 TAILQ_INSERT_TAIL(&pmap
->pm_pvlist
, pv
, pv_plist
);
1701 TAILQ_INSERT_TAIL(&m
->md
.pv_list
, pv
, pv_list
);
1702 ++pmap
->pm_generation
;
1703 m
->md
.pv_list_count
++;
1709 * pmap_remove_pte: do the things to unmap a page in a process.
1711 * WARNING: This function may block (via pmap_remove_entry/pmap_unuse_pt),
1712 * callers using temporary pmaps must reload them.
1715 pmap_remove_pte(struct pmap
*pmap
, unsigned *ptq
, vm_offset_t va
,
1716 pmap_inval_info_t info
)
1721 ptbase_assert(pmap
);
1722 pmap_inval_add(info
, pmap
, va
);
1723 ptbase_assert(pmap
);
1724 oldpte
= loadandclear(ptq
);
1727 pmap
->pm_stats
.wired_count
-= 1;
1729 * Machines that don't support invlpg, also don't support
1730 * PG_G. XXX PG_G is disabled for SMP so don't worry about
1734 cpu_invlpg((void *)va
);
1735 KKASSERT(pmap
->pm_stats
.resident_count
> 0);
1736 --pmap
->pm_stats
.resident_count
;
1737 if (oldpte
& PG_MANAGED
) {
1738 m
= PHYS_TO_VM_PAGE(oldpte
);
1739 if (oldpte
& PG_M
) {
1740 #if defined(PMAP_DIAGNOSTIC)
1741 if (pmap_nw_modified((pt_entry_t
) oldpte
)) {
1742 kprintf("pmap_remove: modified page not "
1743 "writable: va: %p, pte: 0x%lx\n",
1744 (void *)va
, (long)oldpte
);
1747 if (pmap_track_modified(va
))
1751 vm_page_flag_set(m
, PG_REFERENCED
);
1752 return pmap_remove_entry(pmap
, m
, va
, info
);
1754 return pmap_unuse_pt(pmap
, va
, NULL
, info
);
1763 * Remove a single page from a process address space.
1765 * This function may not be called from an interrupt if the pmap is
1769 pmap_remove_page(struct pmap
*pmap
, vm_offset_t va
, pmap_inval_info_t info
)
1774 * if there is no pte for this address, just skip it!!! Otherwise
1775 * get a local va for mappings for this pmap and remove the entry.
1777 if (*pmap_pde(pmap
, va
) != 0) {
1778 ptq
= get_ptbase(pmap
) + i386_btop(va
);
1780 pmap_remove_pte(pmap
, ptq
, va
, info
);
1789 * Remove the given range of addresses from the specified map.
1791 * It is assumed that the start and end are properly
1792 * rounded to the page size.
1794 * This function may not be called from an interrupt if the pmap is
1798 pmap_remove(struct pmap
*pmap
, vm_offset_t sva
, vm_offset_t eva
)
1802 vm_offset_t ptpaddr
;
1803 vm_offset_t sindex
, eindex
;
1804 struct pmap_inval_info info
;
1809 if (pmap
->pm_stats
.resident_count
== 0)
1812 pmap_inval_init(&info
);
1815 * special handling of removing one page. a very
1816 * common operation and easy to short circuit some
1819 if (((sva
+ PAGE_SIZE
) == eva
) &&
1820 (((unsigned) pmap
->pm_pdir
[(sva
>> PDRSHIFT
)] & PG_PS
) == 0)) {
1821 pmap_remove_page(pmap
, sva
, &info
);
1822 pmap_inval_flush(&info
);
1827 * Get a local virtual address for the mappings that are being
1830 sindex
= i386_btop(sva
);
1831 eindex
= i386_btop(eva
);
1833 for (; sindex
< eindex
; sindex
= pdnxt
) {
1837 * Calculate index for next page table.
1839 pdnxt
= ((sindex
+ NPTEPG
) & ~(NPTEPG
- 1));
1840 if (pmap
->pm_stats
.resident_count
== 0)
1843 pdirindex
= sindex
/ NPDEPG
;
1844 if (((ptpaddr
= (unsigned) pmap
->pm_pdir
[pdirindex
]) & PG_PS
) != 0) {
1845 pmap_inval_add(&info
, pmap
, -1);
1846 pmap
->pm_pdir
[pdirindex
] = 0;
1847 pmap
->pm_stats
.resident_count
-= NBPDR
/ PAGE_SIZE
;
1848 pmap
->pm_cached
= 0;
1853 * Weed out invalid mappings. Note: we assume that the page
1854 * directory table is always allocated, and in kernel virtual.
1860 * Limit our scan to either the end of the va represented
1861 * by the current page table page, or to the end of the
1862 * range being removed.
1864 if (pdnxt
> eindex
) {
1869 * NOTE: pmap_remove_pte() can block and wipe the temporary
1872 for (; sindex
!= pdnxt
; sindex
++) {
1875 ptbase
= get_ptbase(pmap
);
1876 if (ptbase
[sindex
] == 0)
1878 va
= i386_ptob(sindex
);
1879 if (pmap_remove_pte(pmap
, ptbase
+ sindex
, va
, &info
))
1883 pmap_inval_flush(&info
);
1889 * Removes this physical page from all physical maps in which it resides.
1890 * Reflects back modify bits to the pager.
1892 * This routine may not be called from an interrupt.
1896 pmap_remove_all(vm_page_t m
)
1898 struct pmap_inval_info info
;
1899 unsigned *pte
, tpte
;
1902 if (!pmap_initialized
|| (m
->flags
& PG_FICTITIOUS
))
1905 pmap_inval_init(&info
);
1907 while ((pv
= TAILQ_FIRST(&m
->md
.pv_list
)) != NULL
) {
1908 KKASSERT(pv
->pv_pmap
->pm_stats
.resident_count
> 0);
1909 --pv
->pv_pmap
->pm_stats
.resident_count
;
1911 pte
= pmap_pte_quick(pv
->pv_pmap
, pv
->pv_va
);
1912 pmap_inval_add(&info
, pv
->pv_pmap
, pv
->pv_va
);
1913 tpte
= loadandclear(pte
);
1915 KKASSERT(PHYS_TO_VM_PAGE(tpte
) == m
);
1918 pv
->pv_pmap
->pm_stats
.wired_count
--;
1921 vm_page_flag_set(m
, PG_REFERENCED
);
1924 * Update the vm_page_t clean and reference bits.
1927 #if defined(PMAP_DIAGNOSTIC)
1928 if (pmap_nw_modified((pt_entry_t
) tpte
)) {
1929 kprintf("pmap_remove_all: modified page "
1930 "not writable: va: %p, pte: 0x%lx\n",
1931 (void *)pv
->pv_va
, (long)tpte
);
1934 if (pmap_track_modified(pv
->pv_va
))
1938 KKASSERT(pv
->pv_m
== m
);
1940 TAILQ_REMOVE(&m
->md
.pv_list
, pv
, pv_list
);
1941 TAILQ_REMOVE(&pv
->pv_pmap
->pm_pvlist
, pv
, pv_plist
);
1942 ++pv
->pv_pmap
->pm_generation
;
1943 m
->md
.pv_list_count
--;
1944 if (TAILQ_EMPTY(&m
->md
.pv_list
))
1945 vm_page_flag_clear(m
, PG_MAPPED
| PG_WRITEABLE
);
1946 pmap_unuse_pt(pv
->pv_pmap
, pv
->pv_va
, pv
->pv_ptem
, &info
);
1950 KKASSERT((m
->flags
& (PG_MAPPED
|PG_WRITEABLE
)) == 0);
1951 pmap_inval_flush(&info
);
1957 * Set the physical protection on the specified range of this map
1960 * This function may not be called from an interrupt if the map is
1961 * not the kernel_pmap.
1964 pmap_protect(pmap_t pmap
, vm_offset_t sva
, vm_offset_t eva
, vm_prot_t prot
)
1967 vm_offset_t pdnxt
, ptpaddr
;
1968 vm_pindex_t sindex
, eindex
;
1969 pmap_inval_info info
;
1974 if ((prot
& VM_PROT_READ
) == VM_PROT_NONE
) {
1975 pmap_remove(pmap
, sva
, eva
);
1979 if (prot
& VM_PROT_WRITE
)
1982 pmap_inval_init(&info
);
1984 ptbase
= get_ptbase(pmap
);
1986 sindex
= i386_btop(sva
);
1987 eindex
= i386_btop(eva
);
1989 for (; sindex
< eindex
; sindex
= pdnxt
) {
1993 pdnxt
= ((sindex
+ NPTEPG
) & ~(NPTEPG
- 1));
1995 pdirindex
= sindex
/ NPDEPG
;
1996 if (((ptpaddr
= (unsigned) pmap
->pm_pdir
[pdirindex
]) & PG_PS
) != 0) {
1997 pmap_inval_add(&info
, pmap
, -1);
1998 pmap
->pm_pdir
[pdirindex
] &= ~(PG_M
|PG_RW
);
1999 pmap
->pm_stats
.resident_count
-= NBPDR
/ PAGE_SIZE
;
2004 * Weed out invalid mappings. Note: we assume that the page
2005 * directory table is always allocated, and in kernel virtual.
2010 if (pdnxt
> eindex
) {
2014 for (; sindex
!= pdnxt
; sindex
++) {
2020 * XXX non-optimal. Note also that there can be
2021 * no pmap_inval_flush() calls until after we modify
2022 * ptbase[sindex] (or otherwise we have to do another
2023 * pmap_inval_add() call).
2025 pmap_inval_add(&info
, pmap
, i386_ptob(sindex
));
2026 pbits
= ptbase
[sindex
];
2028 if (pbits
& PG_MANAGED
) {
2031 m
= PHYS_TO_VM_PAGE(pbits
);
2032 vm_page_flag_set(m
, PG_REFERENCED
);
2036 if (pmap_track_modified(i386_ptob(sindex
))) {
2038 m
= PHYS_TO_VM_PAGE(pbits
);
2047 if (pbits
!= ptbase
[sindex
]) {
2048 ptbase
[sindex
] = pbits
;
2052 pmap_inval_flush(&info
);
2056 * Insert the given physical page (p) at
2057 * the specified virtual address (v) in the
2058 * target physical map with the protection requested.
2060 * If specified, the page will be wired down, meaning
2061 * that the related pte can not be reclaimed.
2063 * NB: This is the only routine which MAY NOT lazy-evaluate
2064 * or lose information. That is, this routine must actually
2065 * insert this page into the given map NOW.
2068 pmap_enter(pmap_t pmap
, vm_offset_t va
, vm_page_t m
, vm_prot_t prot
,
2074 vm_offset_t origpte
, newpte
;
2076 pmap_inval_info info
;
2082 #ifdef PMAP_DIAGNOSTIC
2084 panic("pmap_enter: toobig");
2085 if ((va
>= UPT_MIN_ADDRESS
) && (va
< UPT_MAX_ADDRESS
)) {
2086 panic("pmap_enter: invalid to pmap_enter page "
2087 "table pages (va: %p)", (void *)va
);
2090 if (va
< UPT_MAX_ADDRESS
&& pmap
== &kernel_pmap
) {
2091 kprintf("Warning: pmap_enter called on UVA with kernel_pmap\n");
2094 if (va
>= UPT_MAX_ADDRESS
&& pmap
!= &kernel_pmap
) {
2095 kprintf("Warning: pmap_enter called on KVA without kernel_pmap\n");
2100 * In the case that a page table page is not
2101 * resident, we are creating it here.
2103 if (va
< UPT_MIN_ADDRESS
)
2104 mpte
= pmap_allocpte(pmap
, va
);
2108 pmap_inval_init(&info
);
2109 pte
= pmap_pte(pmap
, va
);
2112 * Page Directory table entry not valid, we need a new PT page
2115 panic("pmap_enter: invalid page directory pdir=0x%lx, va=%p\n",
2116 (long)pmap
->pm_pdir
[PTDPTDI
], (void *)va
);
2119 pa
= VM_PAGE_TO_PHYS(m
) & PG_FRAME
;
2120 origpte
= *(vm_offset_t
*)pte
;
2121 opa
= origpte
& PG_FRAME
;
2123 if (origpte
& PG_PS
)
2124 panic("pmap_enter: attempted pmap_enter on 4MB page");
2127 * Mapping has not changed, must be protection or wiring change.
2129 if (origpte
&& (opa
== pa
)) {
2131 * Wiring change, just update stats. We don't worry about
2132 * wiring PT pages as they remain resident as long as there
2133 * are valid mappings in them. Hence, if a user page is wired,
2134 * the PT page will be also.
2136 if (wired
&& ((origpte
& PG_W
) == 0))
2137 pmap
->pm_stats
.wired_count
++;
2138 else if (!wired
&& (origpte
& PG_W
))
2139 pmap
->pm_stats
.wired_count
--;
2141 #if defined(PMAP_DIAGNOSTIC)
2142 if (pmap_nw_modified((pt_entry_t
) origpte
)) {
2143 kprintf("pmap_enter: modified page not "
2144 "writable: va: %p, pte: 0x%lx\n",
2145 (void *)va
, (long )origpte
);
2150 * Remove the extra pte reference. Note that we cannot
2151 * optimize the RO->RW case because we have adjusted the
2152 * wiring count above and may need to adjust the wiring
2159 * We might be turning off write access to the page,
2160 * so we go ahead and sense modify status.
2162 if (origpte
& PG_MANAGED
) {
2163 if ((origpte
& PG_M
) && pmap_track_modified(va
)) {
2165 om
= PHYS_TO_VM_PAGE(opa
);
2169 KKASSERT(m
->flags
& PG_MAPPED
);
2174 * Mapping has changed, invalidate old range and fall through to
2175 * handle validating new mapping.
2177 * Since we have a ref on the page directory page pmap_pte()
2178 * will always return non-NULL.
2180 * NOTE: pmap_remove_pte() can block and cause the temporary ptbase
2181 * to get wiped. reload the ptbase. I'm not sure if it is
2182 * also possible to race another pmap_enter() but check for
2188 KKASSERT((origpte
& PG_FRAME
) ==
2189 (*(vm_offset_t
*)pte
& PG_FRAME
));
2190 err
= pmap_remove_pte(pmap
, pte
, va
, &info
);
2192 panic("pmap_enter: pte vanished, va: %p", (void *)va
);
2193 pte
= pmap_pte(pmap
, va
);
2194 origpte
= *(vm_offset_t
*)pte
;
2195 opa
= origpte
& PG_FRAME
;
2197 kprintf("pmap_enter: Warning, raced pmap %p va %p\n",
2203 * Enter on the PV list if part of our managed memory. Note that we
2204 * raise IPL while manipulating pv_table since pmap_enter can be
2205 * called at interrupt time.
2207 if (pmap_initialized
&&
2208 (m
->flags
& (PG_FICTITIOUS
|PG_UNMANAGED
)) == 0) {
2209 pmap_insert_entry(pmap
, va
, mpte
, m
);
2210 ptbase_assert(pmap
);
2212 vm_page_flag_set(m
, PG_MAPPED
);
2216 * Increment counters
2218 ++pmap
->pm_stats
.resident_count
;
2220 pmap
->pm_stats
.wired_count
++;
2221 KKASSERT(*pte
== 0);
2225 * Now validate mapping with desired protection/wiring.
2227 ptbase_assert(pmap
);
2228 newpte
= (vm_offset_t
) (pa
| pte_prot(pmap
, prot
) | PG_V
);
2232 if (va
< UPT_MIN_ADDRESS
)
2234 if (pmap
== &kernel_pmap
)
2238 * if the mapping or permission bits are different, we need
2239 * to update the pte.
2241 if ((origpte
& ~(PG_M
|PG_A
)) != newpte
) {
2242 pmap_inval_add(&info
, pmap
, va
);
2243 ptbase_assert(pmap
);
2244 KKASSERT(*pte
== 0 ||
2245 (*pte
& PG_FRAME
) == (newpte
& PG_FRAME
));
2246 *pte
= newpte
| PG_A
;
2248 vm_page_flag_set(m
, PG_WRITEABLE
);
2250 KKASSERT((newpte
& PG_MANAGED
) == 0 || (m
->flags
& PG_MAPPED
));
2251 pmap_inval_flush(&info
);
2255 * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired.
2256 * This code also assumes that the pmap has no pre-existing entry for this
2259 * This code currently may only be used on user pmaps, not kernel_pmap.
2262 pmap_enter_quick(pmap_t pmap
, vm_offset_t va
, vm_page_t m
)
2269 pmap_inval_info info
;
2271 pmap_inval_init(&info
);
2273 if (va
< UPT_MAX_ADDRESS
&& pmap
== &kernel_pmap
) {
2274 kprintf("Warning: pmap_enter_quick called on UVA with kernel_pmap\n");
2277 if (va
>= UPT_MAX_ADDRESS
&& pmap
!= &kernel_pmap
) {
2278 kprintf("Warning: pmap_enter_quick called on KVA without kernel_pmap\n");
2282 KKASSERT(va
< UPT_MIN_ADDRESS
); /* assert used on user pmaps only */
2285 * Calculate the page table page (mpte), allocating it if necessary.
2287 * A held page table page (mpte), or NULL, is passed onto the
2288 * section following.
2290 if (va
< UPT_MIN_ADDRESS
) {
2292 * Calculate pagetable page index
2294 ptepindex
= va
>> PDRSHIFT
;
2298 * Get the page directory entry
2300 ptepa
= (vm_offset_t
) pmap
->pm_pdir
[ptepindex
];
2303 * If the page table page is mapped, we just increment
2304 * the hold count, and activate it.
2308 panic("pmap_enter_quick: unexpected mapping into 4MB page");
2309 if (pmap
->pm_ptphint
&&
2310 (pmap
->pm_ptphint
->pindex
== ptepindex
)) {
2311 mpte
= pmap
->pm_ptphint
;
2313 mpte
= pmap_page_lookup( pmap
->pm_pteobj
, ptepindex
);
2314 pmap
->pm_ptphint
= mpte
;
2319 mpte
= _pmap_allocpte(pmap
, ptepindex
);
2321 } while (mpte
== NULL
);
2324 /* this code path is not yet used */
2328 * With a valid (and held) page directory page, we can just use
2329 * vtopte() to get to the pte. If the pte is already present
2330 * we do not disturb it.
2332 pte
= (unsigned *)vtopte(va
);
2335 pmap_unwire_pte_hold(pmap
, mpte
, &info
);
2336 pa
= VM_PAGE_TO_PHYS(m
);
2337 KKASSERT(((*pte
^ pa
) & PG_FRAME
) == 0);
2342 * Enter on the PV list if part of our managed memory
2344 if ((m
->flags
& (PG_FICTITIOUS
|PG_UNMANAGED
)) == 0) {
2345 pmap_insert_entry(pmap
, va
, mpte
, m
);
2346 vm_page_flag_set(m
, PG_MAPPED
);
2350 * Increment counters
2352 ++pmap
->pm_stats
.resident_count
;
2354 pa
= VM_PAGE_TO_PHYS(m
);
2357 * Now validate mapping with RO protection
2359 if (m
->flags
& (PG_FICTITIOUS
|PG_UNMANAGED
))
2360 *pte
= pa
| PG_V
| PG_U
;
2362 *pte
= pa
| PG_V
| PG_U
| PG_MANAGED
;
2363 /* pmap_inval_add(&info, pmap, va); shouldn't be needed inval->valid */
2364 pmap_inval_flush(&info
);
2368 * Make a temporary mapping for a physical address. This is only intended
2369 * to be used for panic dumps.
2372 pmap_kenter_temporary(vm_paddr_t pa
, int i
)
2374 pmap_kenter((vm_offset_t
)crashdumpmap
+ (i
* PAGE_SIZE
), pa
);
2375 return ((void *)crashdumpmap
);
2378 #define MAX_INIT_PT (96)
2381 * This routine preloads the ptes for a given object into the specified pmap.
2382 * This eliminates the blast of soft faults on process startup and
2383 * immediately after an mmap.
2385 static int pmap_object_init_pt_callback(vm_page_t p
, void *data
);
2388 pmap_object_init_pt(pmap_t pmap
, vm_offset_t addr
, vm_prot_t prot
,
2389 vm_object_t object
, vm_pindex_t pindex
,
2390 vm_size_t size
, int limit
)
2392 struct rb_vm_page_scan_info info
;
2397 * We can't preinit if read access isn't set or there is no pmap
2400 if ((prot
& VM_PROT_READ
) == 0 || pmap
== NULL
|| object
== NULL
)
2404 * We can't preinit if the pmap is not the current pmap
2406 lp
= curthread
->td_lwp
;
2407 if (lp
== NULL
|| pmap
!= vmspace_pmap(lp
->lwp_vmspace
))
2410 psize
= i386_btop(size
);
2412 if ((object
->type
!= OBJT_VNODE
) ||
2413 ((limit
& MAP_PREFAULT_PARTIAL
) && (psize
> MAX_INIT_PT
) &&
2414 (object
->resident_page_count
> MAX_INIT_PT
))) {
2418 if (psize
+ pindex
> object
->size
) {
2419 if (object
->size
< pindex
)
2421 psize
= object
->size
- pindex
;
2428 * Use a red-black scan to traverse the requested range and load
2429 * any valid pages found into the pmap.
2431 * We cannot safely scan the object's memq unless we are in a
2432 * critical section since interrupts can remove pages from objects.
2434 info
.start_pindex
= pindex
;
2435 info
.end_pindex
= pindex
+ psize
- 1;
2442 vm_page_rb_tree_RB_SCAN(&object
->rb_memq
, rb_vm_page_scancmp
,
2443 pmap_object_init_pt_callback
, &info
);
2449 pmap_object_init_pt_callback(vm_page_t p
, void *data
)
2451 struct rb_vm_page_scan_info
*info
= data
;
2452 vm_pindex_t rel_index
;
2454 * don't allow an madvise to blow away our really
2455 * free pages allocating pv entries.
2457 if ((info
->limit
& MAP_PREFAULT_MADVISE
) &&
2458 vmstats
.v_free_count
< vmstats
.v_free_reserved
) {
2461 if (((p
->valid
& VM_PAGE_BITS_ALL
) == VM_PAGE_BITS_ALL
) &&
2462 (p
->busy
== 0) && (p
->flags
& (PG_BUSY
| PG_FICTITIOUS
)) == 0) {
2463 if ((p
->queue
- p
->pc
) == PQ_CACHE
)
2464 vm_page_deactivate(p
);
2466 rel_index
= p
->pindex
- info
->start_pindex
;
2467 pmap_enter_quick(info
->pmap
,
2468 info
->addr
+ i386_ptob(rel_index
), p
);
/*
 * pmap_prefault provides a quick way of clustering pagefaults into a
 * process's address space.  It is a "cousin" of pmap_object_init_pt,
 * except it runs at page fault time instead of mmap time.
 */
#define PFBAK 4
#define PFFOR 4
#define PAGEORDER_SIZE (PFBAK+PFFOR)

static int pmap_prefault_pageorder[] = {
	-PAGE_SIZE, PAGE_SIZE,
	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
	-4 * PAGE_SIZE, 4 * PAGE_SIZE
};

void
pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{
	int i;
	vm_offset_t starta;
	vm_offset_t addr;
	vm_pindex_t pindex;
	vm_page_t m;
	vm_object_t object;
	struct lwp *lp;
	/*
	 * We do not currently prefault mappings that use virtual page
	 * tables.  We do not prefault foreign pmaps.
	 */
	if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
		return;
	lp = curthread->td_lwp;
	if (lp == NULL || (pmap != vmspace_pmap(lp->lwp_vmspace)))
		return;

	object = entry->object.vm_object;

	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start)
		starta = entry->start;
	else if (starta > addra)
		starta = 0;

	/*
	 * critical section protection is required to maintain the
	 * page/object association, interrupts can free pages and remove
	 * them from their objects.
	 */
	crit_enter();
	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t lobject;
		unsigned *pte;

		addr = addra + pmap_prefault_pageorder[i];
		if (addr > addra + (PFFOR * PAGE_SIZE))
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if ((*pmap_pde(pmap, addr)) == 0)
			continue;

		pte = (unsigned *) vtopte(addr);
		if (*pte)
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;

		for (m = vm_page_lookup(lobject, pindex);
		    (!m && (lobject->type == OBJT_DEFAULT) &&
		     (lobject->backing_object));
		    lobject = lobject->backing_object) {
			if (lobject->backing_object_offset & PAGE_MASK)
				break;
			pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
			m = vm_page_lookup(lobject->backing_object, pindex);
		}

		/*
		 * give-up when a page is not in memory
		 */
		if (m == NULL)
			break;

		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
			if ((m->queue - m->pc) == PQ_CACHE) {
				vm_page_deactivate(m);
			}
			pmap_enter_quick(pmap, addr, m);
		}
	}
	crit_exit();
}
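/*
 * Illustration (a sketch, not code from this file): pmap_prefault_pageorder
 * probes up to PFBAK pages behind and PFFOR pages ahead of the faulting
 * address, nearest pages first.  For a fault at addra the candidate
 * addresses are visited in the order
 *
 *	addra - PAGE_SIZE,   addra + PAGE_SIZE,
 *	addra - 2*PAGE_SIZE, addra + 2*PAGE_SIZE, ...
 *
 * up to 4 pages in each direction, and any candidate falling outside
 * [starta, entry->end) is simply skipped.
 */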
/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	unsigned *pte;

	pte = pmap_pte(pmap, va);

	if (wired && !pmap_pte_w(pte))
		pmap->pm_stats.wired_count++;
	else if (!wired && pmap_pte_w(pte))
		pmap->pm_stats.wired_count--;

	/*
	 * Wiring is not a hardware characteristic so there is no need to
	 * invalidate TLB.  However, in an SMP environment we must use
	 * a locked bus cycle to update the pte (if we are not using
	 * the pmap_inval_*() API that is)... it's ok to do this for simple
	 * wiring changes.
	 */
#ifdef SMP
	if (wired)
		atomic_set_int(pte, PG_W);
	else
		atomic_clear_int(pte, PG_W);
#else
	if (wired)
		atomic_set_int_nonlocked(pte, PG_W);
	else
		atomic_clear_int_nonlocked(pte, PG_W);
#endif
}
/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
	  vm_size_t len, vm_offset_t src_addr)
{
	pmap_inval_info info;
	vm_offset_t addr;
	vm_offset_t end_addr = src_addr + len;
	vm_offset_t pdnxt;
	unsigned src_frame, dst_frame;
	vm_page_t m;
	struct mdglobaldata *gd = mdcpu;

	if (dst_addr != src_addr)
		return;

	/*
	 * XXX BUGGY.  Among other things srcmpte is assumed to remain
	 * valid through blocking calls, and that's just not going to
	 * be the case.
	 */

	src_frame = ((unsigned) src_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
	if (src_frame != (((unsigned) PTDpde) & PG_FRAME)) {
		return;
	}

	dst_frame = ((unsigned) dst_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
	if ((*gd->gd_GDMAP1 & PG_FRAME) != dst_frame) {
		*gd->gd_GDMAP1 = dst_frame | PG_RW | PG_V;
	}

	pmap_inval_init(&info);
	pmap_inval_add(&info, dst_pmap, -1);
	pmap_inval_add(&info, src_pmap, -1);

	/*
	 * critical section protection is required to maintain the page/object
	 * association, interrupts can free pages and remove them from
	 * their objects.
	 */
	crit_enter();
	for (addr = src_addr; addr < end_addr; addr = pdnxt) {
		unsigned *src_pte, *dst_pte;
		vm_page_t dstmpte, srcmpte;
		vm_offset_t srcptepaddr;
		unsigned ptepindex;
		unsigned ptetemp;

		if (addr >= UPT_MIN_ADDRESS)
			panic("pmap_copy: invalid to pmap_copy page tables\n");

		/*
		 * Don't let optional prefaulting of pages make us go
		 * way below the low water mark of free pages or way
		 * above high water mark of used pv entries.
		 */
		if (vmstats.v_free_count < vmstats.v_free_reserved ||
		    pv_entry_count > pv_entry_high_water)
			break;

		pdnxt = ((addr + PAGE_SIZE*NPTEPG) & ~(PAGE_SIZE*NPTEPG - 1));
		ptepindex = addr >> PDRSHIFT;

		srcptepaddr = (vm_offset_t) src_pmap->pm_pdir[ptepindex];
		if (srcptepaddr == 0)
			continue;

		if (srcptepaddr & PG_PS) {
			if (dst_pmap->pm_pdir[ptepindex] == 0) {
				dst_pmap->pm_pdir[ptepindex] = (pd_entry_t) srcptepaddr;
				dst_pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
			}
			continue;
		}

		srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
		if ((srcmpte == NULL) || (srcmpte->hold_count == 0) ||
		    (srcmpte->flags & PG_BUSY)) {
			continue;
		}

		if (pdnxt > end_addr)
			pdnxt = end_addr;

		src_pte = (unsigned *) vtopte(addr);
		dst_pte = (unsigned *) avtopte(addr);
		while (addr < pdnxt) {
			ptetemp = *src_pte;

			/*
			 * we only virtual copy managed pages
			 */
			if ((ptetemp & PG_MANAGED) != 0) {
				/*
				 * We have to check after allocpte for the
				 * pte still being around...  allocpte can
				 * block.
				 *
				 * pmap_allocpte() can block.  If we lose
				 * our page directory mappings we stop.
				 */
				dstmpte = pmap_allocpte(dst_pmap, addr);

				if (src_frame != (((unsigned) PTDpde) & PG_FRAME) ||
				    XXX dst_frame != (((unsigned) xxx) & PG_FRAME)
				) {
					kprintf("WARNING: pmap_copy: detected and corrected race\n");
					pmap_unwire_pte_hold(dst_pmap, dstmpte, &info);
				} else if ((*dst_pte == 0) &&
					   (ptetemp = *src_pte) != 0 &&
					   (ptetemp & PG_MANAGED)) {
					/*
					 * Clear the modified and
					 * accessed (referenced) bits
					 * during the copy.
					 */
					m = PHYS_TO_VM_PAGE(ptetemp);
					*dst_pte = ptetemp & ~(PG_M | PG_A);
					++dst_pmap->pm_stats.resident_count;
					pmap_insert_entry(dst_pmap, addr,
							  dstmpte, m);
					KKASSERT(m->flags & PG_MAPPED);
				} else {
					kprintf("WARNING: pmap_copy: dst_pte race detected and corrected\n");
					pmap_unwire_pte_hold(dst_pmap, dstmpte, &info);
				}
				if (dstmpte->hold_count >= srcmpte->hold_count)
					break;
			}
			addr += PAGE_SIZE;
			src_pte++;
			dst_pte++;
		}
	}
	crit_exit();
	pmap_inval_flush(&info);
}
/*
 * Zero the specified PA by mapping the page into KVM and clearing its
 * contents.
 *
 * This function may be called from an interrupt and no locking is
 * required.
 */
void
pmap_zero_page(vm_paddr_t phys)
{
	struct mdglobaldata *gd = mdcpu;

	if (*(int *)gd->gd_CMAP3)
		panic("pmap_zero_page: CMAP3 busy");
	*(int *)gd->gd_CMAP3 =
		    PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
	cpu_invlpg(gd->gd_CADDR3);

#if defined(I686_CPU)
	if (cpu_class == CPUCLASS_686)
		i686_pagezero(gd->gd_CADDR3);
	else
#endif
		bzero(gd->gd_CADDR3, PAGE_SIZE);
	*(int *)gd->gd_CMAP3 = 0;
}
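/*
 * Usage sketch (illustrative only): a freshly allocated page that was not
 * obtained pre-zeroed can be cleared through this interface before being
 * exposed to user space.  The vm_page_t 'm' is an assumed local here.
 *
 *	pmap_zero_page(VM_PAGE_TO_PHYS(m));
 */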
/*
 * pmap_page_assertzero:
 *
 *	Assert that a page is empty, panic if it isn't.
 */
void
pmap_page_assertzero(vm_paddr_t phys)
{
	struct mdglobaldata *gd = mdcpu;
	int i;

	if (*(int *)gd->gd_CMAP3)
		panic("pmap_page_assertzero: CMAP3 busy");
	*(int *)gd->gd_CMAP3 =
		    PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
	cpu_invlpg(gd->gd_CADDR3);
	for (i = 0; i < PAGE_SIZE; i += 4) {
		if (*(int *)((char *)gd->gd_CADDR3 + i) != 0) {
			panic("pmap_page_assertzero() @ %p not zero!\n",
			      (void *)gd->gd_CADDR3);
		}
	}
	*(int *)gd->gd_CMAP3 = 0;
}
/*
 * Zero part of a physical page by mapping it into memory and clearing
 * its contents with bzero.
 *
 * off and size may not cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_paddr_t phys, int off, int size)
{
	struct mdglobaldata *gd = mdcpu;

	if (*(int *)gd->gd_CMAP3)
		panic("pmap_zero_page_area: CMAP3 busy");
	*(int *)gd->gd_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
	cpu_invlpg(gd->gd_CADDR3);

#if defined(I686_CPU)
	if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
		i686_pagezero(gd->gd_CADDR3);
	else
#endif
		bzero((char *)gd->gd_CADDR3 + off, size);
	*(int *)gd->gd_CMAP3 = 0;
}
/*
 * Copy the physical page from the source PA to the target PA.
 * This function may be called from an interrupt.  No locking
 * is required.
 */
void
pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
{
	struct mdglobaldata *gd = mdcpu;

	if (*(int *)gd->gd_CMAP1)
		panic("pmap_copy_page: CMAP1 busy");
	if (*(int *)gd->gd_CMAP2)
		panic("pmap_copy_page: CMAP2 busy");

	*(int *)gd->gd_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
	*(int *)gd->gd_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;

	cpu_invlpg(gd->gd_CADDR1);
	cpu_invlpg(gd->gd_CADDR2);

	bcopy(gd->gd_CADDR1, gd->gd_CADDR2, PAGE_SIZE);

	*(int *)gd->gd_CMAP1 = 0;
	*(int *)gd->gd_CMAP2 = 0;
}
/*
 * pmap_copy_page_frag:
 *
 *	Copy part of a physical page from the source PA to the target PA.
 *	This function may be called from an interrupt.  No locking
 *	is required.
 */
void
pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
{
	struct mdglobaldata *gd = mdcpu;

	if (*(int *)gd->gd_CMAP1)
		panic("pmap_copy_page_frag: CMAP1 busy");
	if (*(int *)gd->gd_CMAP2)
		panic("pmap_copy_page_frag: CMAP2 busy");

	*(int *)gd->gd_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
	*(int *)gd->gd_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;

	cpu_invlpg(gd->gd_CADDR1);
	cpu_invlpg(gd->gd_CADDR2);

	bcopy((char *)gd->gd_CADDR1 + (src & PAGE_MASK),
	      (char *)gd->gd_CADDR2 + (dst & PAGE_MASK),
	      bytes);

	*(int *)gd->gd_CMAP1 = 0;
	*(int *)gd->gd_CMAP2 = 0;
}
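/*
 * Usage sketch (illustrative only): only the page offsets of 'src' and 'dst'
 * select where the bytes land within the two temporary mappings, so a caller
 * can copy a sub-page region between arbitrary physical pages, e.g.
 *
 *	pmap_copy_page_frag(src_pa + boff, dst_pa + boff, bsize);
 *
 * where boff and bsize describe a buffer fragment and are assumed names.
 */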
/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops = 0;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	crit_enter();

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		if (pv->pv_pmap == pmap) {
			crit_exit();
			return TRUE;
		}
		loops++;
		if (loops >= 16)
			break;
	}
	crit_exit();
	return (FALSE);
}
/*
 * Remove all pages from specified address space; this aids process
 * exit speeds.  Also, this code is special cased for the current process
 * only, but can have the more generic (and slightly slower) mode enabled.
 * This is much faster than pmap_remove in the case of running down an
 * entire address space.
 */
void
pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	struct lwp *lp;
	unsigned *pte, tpte;
	pv_entry_t pv, npv;
	vm_page_t m;
	pmap_inval_info info;
	int iscurrentpmap;
	int32_t save_generation;

	lp = curthread->td_lwp;
	if (lp && pmap == vmspace_pmap(lp->lwp_vmspace))
		iscurrentpmap = 1;
	else
		iscurrentpmap = 0;

	pmap_inval_init(&info);
	crit_enter();
	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
		if (pv->pv_va >= eva || pv->pv_va < sva) {
			npv = TAILQ_NEXT(pv, pv_plist);
			continue;
		}

		KKASSERT(pmap == pv->pv_pmap);

		if (iscurrentpmap)
			pte = (unsigned *)vtopte(pv->pv_va);
		else
			pte = pmap_pte_quick(pmap, pv->pv_va);
		if (pmap->pm_active)
			pmap_inval_add(&info, pmap, pv->pv_va);

		/*
		 * We cannot remove wired pages from a process' mapping
		 * at this time
		 */
		if (*pte & PG_W) {
			npv = TAILQ_NEXT(pv, pv_plist);
			continue;
		}
		tpte = loadandclear(pte);

		m = PHYS_TO_VM_PAGE(tpte);
		test_m_maps_pv(m, pv);

		KASSERT(m < &vm_page_array[vm_page_array_size],
			("pmap_remove_pages: bad tpte %x", tpte));

		KKASSERT(pmap->pm_stats.resident_count > 0);
		--pmap->pm_stats.resident_count;

		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if (tpte & PG_M)
			vm_page_dirty(m);

		npv = TAILQ_NEXT(pv, pv_plist);

		KKASSERT(pv->pv_m == m);
		KKASSERT(pv->pv_pmap == pmap);

		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
		save_generation = ++pmap->pm_generation;

		m->md.pv_list_count--;
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		if (TAILQ_EMPTY(&m->md.pv_list))
			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);

		pmap_unuse_pt(pmap, pv->pv_va, pv->pv_ptem, &info);
		free_pv_entry(pv);

		/*
		 * Restart the scan if we blocked during the unuse or free
		 * calls and other removals were made.
		 */
		if (save_generation != pmap->pm_generation) {
			kprintf("Warning: pmap_remove_pages race-A avoided\n");
			npv = TAILQ_FIRST(&pmap->pm_pvlist);
		}
	}
	pmap_inval_flush(&info);
	crit_exit();
}
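/*
 * Usage sketch (hypothetical caller): exit/exec teardown can run the whole
 * user address range down in one call rather than using pmap_remove per
 * mapping.  The range symbols below are assumptions for illustration.
 *
 *	pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
 *			  VM_MAX_USER_ADDRESS);
 */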
/*
 * pmap_testbit tests bits in pte's
 * note that the testbit/clearbit routines are inline,
 * and a lot of things compile-time evaluate.
 */
static boolean_t
pmap_testbit(vm_page_t m, int bit)
{
	pv_entry_t pv;
	unsigned *pte;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	if (TAILQ_FIRST(&m->md.pv_list) == NULL)
		return FALSE;

	crit_enter();

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		/*
		 * if the bit being tested is the modified bit, then
		 * mark clean_map and ptes as never
		 * modified.
		 */
		if (bit & (PG_A|PG_M)) {
			if (!pmap_track_modified(pv->pv_va))
				continue;
		}

#if defined(PMAP_DIAGNOSTIC)
		if (!pv->pv_pmap) {
			kprintf("Null pmap (tb) at va: %p\n",
				(void *)pv->pv_va);
			continue;
		}
#endif
		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
		if (*pte & bit) {
			crit_exit();
			return TRUE;
		}
	}
	crit_exit();
	return (FALSE);
}
/*
 * this routine is used to modify bits in ptes
 */
static __inline
void
pmap_clearbit(vm_page_t m, int bit)
{
	struct pmap_inval_info info;
	pv_entry_t pv;
	unsigned *pte;
	unsigned pbits;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return;

	pmap_inval_init(&info);
	crit_enter();

	/*
	 * Loop over all current mappings setting/clearing as appropriate.
	 * If setting RO do we need to clear the VAC?
	 */
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		/*
		 * don't write protect pager mappings
		 */
		if (bit == PG_RW) {
			if (!pmap_track_modified(pv->pv_va))
				continue;
		}

#if defined(PMAP_DIAGNOSTIC)
		if (!pv->pv_pmap) {
			kprintf("Null pmap (cb) at va: %p\n",
				(void *)pv->pv_va);
			continue;
		}
#endif

		/*
		 * Careful here.  We can use a locked bus instruction to
		 * clear PG_A or PG_M safely but we need to synchronize
		 * with the target cpus when we mess with PG_RW.
		 *
		 * We do not have to force synchronization when clearing
		 * PG_M even for PTEs generated via virtual memory maps,
		 * because the virtual kernel will invalidate the pmap
		 * entry when/if it needs to resynchronize the Modify bit.
		 */
		pmap_inval_add(&info, pv->pv_pmap, pv->pv_va);
		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
again:
		pbits = *pte;
		if (pbits & bit) {
			if (bit == PG_RW) {
				if (pbits & PG_M) {
					vm_page_dirty(m);
					atomic_clear_int(pte, PG_M|PG_RW);
				} else {
					/*
					 * The cpu may be trying to set PG_M
					 * simultaneously with our clearing
					 * PG_RW.
					 */
					if (!atomic_cmpset_int(pte, pbits,
							       pbits & ~PG_RW))
						goto again;
				}
			} else if (bit == PG_M) {
				/*
				 * We could also clear PG_RW here to force
				 * a fault on write to redetect PG_M for
				 * virtual kernels, but it isn't necessary
				 * since virtual kernels invalidate the pte
				 * when they clear the VPTE_M bit in their
				 * virtual page tables.
				 */
				atomic_clear_int(pte, PG_M);
			} else {
				atomic_clear_int(pte, bit);
			}
		}
	}
	pmap_inval_flush(&info);
	crit_exit();
}
/*
 * pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
			pmap_clearbit(m, PG_RW);
			vm_page_flag_clear(m, PG_WRITEABLE);
		} else {
			pmap_remove_all(m);
		}
	}
}
vm_paddr_t
pmap_phys_address(vm_pindex_t ppn)
{
	return (i386_ptob(ppn));
}
/*
 * pmap_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
pmap_ts_referenced(vm_page_t m)
{
	pv_entry_t pv, pvf, pvn;
	unsigned *pte;
	int rtval = 0;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return (rtval);

	crit_enter();

	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		pvf = pv;

		do {
			pvn = TAILQ_NEXT(pv, pv_list);

			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);

			if (!pmap_track_modified(pv->pv_va))
				continue;

			pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);

			if (pte && (*pte & PG_A)) {
#ifdef SMP
				atomic_clear_int(pte, PG_A);
#else
				atomic_clear_int_nonlocked(pte, PG_A);
#endif
				rtval++;
				if (rtval > 4)
					break;
			}
		} while ((pv = pvn) != NULL && pv != pvf);
	}
	crit_exit();

	return (rtval);
}
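/*
 * Usage sketch (illustrative only, not from this file): a page-aging scan
 * can feed the cleared-reference count into an activity counter, e.g.
 *
 *	int refs = pmap_ts_referenced(m);
 *
 *	if (refs == 0)
 *		m->act_count -= min(m->act_count, ACT_DECLINE);
 *	else
 *		m->act_count += ACT_ADVANCE;
 *
 * ACT_DECLINE and ACT_ADVANCE are the pageout daemon's tunables and their
 * use here is only an assumption.
 */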
/*
 * Return whether or not the specified physical page was modified
 * in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{
	return pmap_testbit(m, PG_M);
}

/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{
	pmap_clearbit(m, PG_M);
}

/*
 * pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{
	pmap_clearbit(m, PG_A);
}
/*
 * Miscellaneous support routines follow
 */

static void
i386_protection_init(void)
{
	int *kp, prot;

	kp = protection_codes;
	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			/*
			 * Read access is also 0. There isn't any execute bit,
			 * so just make it readable.
			 */
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = 0;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = PG_RW;
			break;
		}
	}
}
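/*
 * Illustration (a sketch under the assumption that protection_codes[] is
 * indexed by the three VM_PROT_* bits as initialized above): when a new pte
 * is built, the table supplies the PG_RW bit for writable protections, e.g.
 *
 *	newpte = (pa & PG_FRAME) | protection_codes[prot & 7] | PG_V;
 *
 * The indexing expression is an assumed example, not a quote of pmap_enter().
 */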
/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 *
 * NOTE: we can't use pgeflag unless we invalidate the pages one at
 * a time.
 */
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, offset;
	unsigned *pte;

	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	va = kmem_alloc_nofault(&kernel_map, size);
	if (va == 0)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	pa = pa & PG_FRAME;
	for (tmpva = va; size > 0;) {
		pte = (unsigned *)vtopte(tmpva);
		*pte = pa | PG_RW | PG_V; /* | pgeflag; */
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		pa += PAGE_SIZE;
	}
	cpu_invltlb();

	return ((void *)(va + offset));
}
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = va & PG_FRAME;
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);
	pmap_qremove(va, size >> PAGE_SHIFT);
	kmem_free(&kernel_map, base, size);
}
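/*
 * Usage sketch (hypothetical driver code): mapping a device register window
 * into KVA and releasing it again.  regbase and regsize are assumed values.
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev(regbase, regsize);
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, regsize);
 */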
/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
	unsigned *ptep, pte;
	vm_page_t m;
	int val = 0;

	ptep = pmap_pte(pmap, addr);
	if (ptep == 0)
		return 0;

	if ((pte = *ptep) != 0) {
		vm_offset_t pa;

		val = MINCORE_INCORE;
		if ((pte & PG_MANAGED) == 0)
			return val;

		pa = pte & PG_FRAME;

		m = PHYS_TO_VM_PAGE(pa);

		/*
		 * Modified by us
		 */
		if (pte & PG_M)
			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
		/*
		 * Modified by someone
		 */
		else if (m->dirty || pmap_is_modified(m))
			val |= MINCORE_MODIFIED_OTHER;
		/*
		 * Referenced by us
		 */
		if (pte & PG_A)
			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
		/*
		 * Referenced by someone
		 */
		else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
			val |= MINCORE_REFERENCED_OTHER;
			vm_page_flag_set(m, PG_REFERENCED);
		}
	}
	return val;
}
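/*
 * Usage sketch (illustrative only): a mincore() syscall layer could copy the
 * returned bits out per page, where 'vec' is an assumed user buffer:
 *
 *	int val;
 *
 *	val = pmap_mincore(pmap, addr);
 *	subyte(vec + pindex, val);
 *
 * MINCORE_INCORE indicates residency; the MODIFIED/REFERENCED pairs
 * distinguish state seen through this pmap from state seen elsewhere.
 */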
/*
 * Replace p->p_vmspace with a new one.  If adjrefs is non-zero the new
 * vmspace will be ref'd and the old one will be deref'd.
 *
 * The vmspace for all lwps associated with the process will be adjusted
 * and cr3 will be reloaded if any lwp is the current lwp.
 */
void
pmap_replacevm(struct proc *p, struct vmspace *newvm, int adjrefs)
{
	struct vmspace *oldvm;
	struct lwp *lp;

	crit_enter();
	oldvm = p->p_vmspace;
	if (oldvm != newvm) {
		p->p_vmspace = newvm;
		KKASSERT(p->p_nthreads == 1);
		lp = RB_ROOT(&p->p_lwp_tree);
		pmap_setlwpvm(lp, newvm);
		if (adjrefs) {
			sysref_get(&newvm->vm_sysref);
			sysref_put(&oldvm->vm_sysref);
		}
	}
	crit_exit();
}
/*
 * Set the vmspace for a LWP.  The vmspace is almost universally set the
 * same as the process vmspace, but virtual kernels need to swap out contexts
 * on a per-lwp basis.
 */
void
pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm)
{
	struct vmspace *oldvm;
	struct pmap *pmap;

	crit_enter();
	oldvm = lp->lwp_vmspace;

	if (oldvm != newvm) {
		lp->lwp_vmspace = newvm;
		if (curthread->td_lwp == lp) {
			pmap = vmspace_pmap(newvm);
#if defined(SMP)
			atomic_set_int(&pmap->pm_active, 1 << mycpu->gd_cpuid);
#else
			pmap->pm_active |= 1;
#endif
#if defined(SWTCH_OPTIM_STATS)
			tlb_flush_count++;
#endif
			curthread->td_pcb->pcb_cr3 = vtophys(pmap->pm_pdir);
			load_cr3(curthread->td_pcb->pcb_cr3);
			pmap = vmspace_pmap(oldvm);
#if defined(SMP)
			atomic_clear_int(&pmap->pm_active,
					 1 << mycpu->gd_cpuid);
#else
			pmap->pm_active &= ~1;
#endif
		}
	}
	crit_exit();
}
/*
 * Return a page-directory aligned address hint for large device mappings;
 * other requests are passed through unchanged.
 */
vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{
	if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
		return addr;
	}

	addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
	return addr;
}
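/*
 * Illustration (a sketch, not from this file): rounding the hint up to an
 * NBPDR (4MB page directory) boundary lets large device mappings line up
 * with PG_PS superpages.  With NBPDR == 4MB an addr of 0x00501000 would be
 * advanced to 0x00800000 by the expression above.
 */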
int
pmap_get_pgeflag(void)
{
	return pgeflag;
}
static void	pads (pmap_t pm);
void		pmap_pvdump (vm_paddr_t pa);

/* print address space of pmap*/
static void
pads(pmap_t pm)
{
	unsigned va, i, j;
	unsigned *ptep;

	if (pm == &kernel_pmap)
		return;
	for (i = 0; i < 1024; i++) {
		if (pm->pm_pdir[i]) {
			for (j = 0; j < 1024; j++) {
				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
				if (pm == &kernel_pmap && va < KERNBASE)
					continue;
				if (pm != &kernel_pmap && va > UPT_MAX_ADDRESS)
					continue;
				ptep = pmap_pte_quick(pm, va);
				if (pmap_pte_v(ptep))
					kprintf("%x:%x ", va, *(int *)ptep);
			}
		}
	}
}
void
pmap_pvdump(vm_paddr_t pa)
{
	pv_entry_t pv;
	vm_page_t m;

	kprintf("pa %08llx", (long long)pa);
	m = PHYS_TO_VM_PAGE(pa);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
#ifdef PMAP_DEBUG
		kprintf(" -> pmap %p, va %p, flags %x",
			(void *)pv->pv_pmap, (void *)pv->pv_va, pv->pv_flags);
#else
		kprintf(" -> pmap %p, va %p",
			(void *)pv->pv_pmap, (void *)pv->pv_va);
#endif
	}
}