/*
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1994 David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from:   @(#)pmap.c	7.7 (Berkeley)	5/12/91
 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $
 * $DragonFly: src/sys/platform/pc32/i386/pmap.c,v 1.87 2008/08/25 17:01:38 dillon Exp $
 */
/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

/*
 * PMAP_DEBUG - see platform/pc32/include/pmap.h
 */
#include "opt_disable_pse.h"
#include "opt_msgbuf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>

#include <vm/vm_param.h>
#include <sys/sysctl.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <sys/user.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>
#include <machine_base/apic/apicreg.h>
#include <machine/globaldata.h>
#include <machine/pmap.h>
#include <machine/pmap_inval.h>
#define PMAP_KEEP_PDIRS
#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#if defined(DIAGNOSTIC)
#define PMAP_DIAGNOSTIC
#endif

#if !defined(PMAP_DIAGNOSTIC)
#define PMAP_INLINE __inline
#else
#define PMAP_INLINE
#endif
/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define pdir_pde(m, v)	(m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

/*
 * Given a map and a machine independent protection code,
 * convert to a vax protection code.
 */
#define pte_prot(m, p)	\
	(protection_codes[p & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)])
static int protection_codes[8];
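/*
 * Illustrative note (not part of the original source): pte_prot() simply
 * indexes the table filled in by i386_protection_init(), so a request such
 * as
 *
 *	pte_prot(pmap, VM_PROT_READ|VM_PROT_WRITE)
 *
 * reduces to protection_codes[VM_PROT_READ|VM_PROT_WRITE], which is
 * expected to contain PG_RW.  The i386 MMU has no separate execute bit,
 * so VM_PROT_EXECUTE maps to the same hardware bits as read access.
 */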
struct pmap kernel_pmap;
static TAILQ_HEAD(,pmap) pmap_list = TAILQ_HEAD_INITIALIZER(pmap_list);

vm_paddr_t avail_start;		/* PA of first available physical page */
vm_paddr_t avail_end;		/* PA of last available physical page */
vm_offset_t virtual_start;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t virtual2_start;
vm_offset_t virtual2_end;
vm_offset_t KvaStart;		/* VA start of KVA space */
vm_offset_t KvaEnd;		/* VA end of KVA space (non-inclusive) */
vm_offset_t KvaSize;		/* max size of kernel virtual address space */
static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
static int pgeflag;		/* PG_G or-in */
static int pseflag;		/* PG_PS or-in */

static vm_object_t kptobj;

vm_offset_t kernel_vm_end;

/*
 * Data for the pv entry allocation mechanism
 */
static vm_zone_t pvzone;
static struct vm_zone pvzone_store;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static int pmap_pagedaemon_waken = 0;
static struct pv_entry *pvinit;
/*
 * Considering all the issues I'm having with pmap caching, if breakage
 * continues to occur, and for debugging, I've added a sysctl that will
 * just do an unconditional invltlb.
 */
static int dreadful_invltlb;

SYSCTL_INT(_vm, OID_AUTO, dreadful_invltlb,
	   CTLFLAG_RW, &dreadful_invltlb, 0, "");
/*
 * All those kernel PT submaps that BSD is so fond of
 */
pt_entry_t *CMAP1 = 0, *ptmmap;
caddr_t CADDR1 = 0, ptvmmap = 0;
static pt_entry_t *msgbufmap;
struct msgbuf *msgbufp = 0;

static pt_entry_t *pt_crashdumpmap;
static caddr_t crashdumpmap;

extern pt_entry_t *SMPpt;
static PMAP_INLINE void	free_pv_entry (pv_entry_t pv);
static unsigned * get_ptbase (pmap_t pmap);
static pv_entry_t get_pv_entry (void);
static void	i386_protection_init (void);
static __inline void	pmap_clearbit (vm_page_t m, int bit);

static void	pmap_remove_all (vm_page_t m);
static int	pmap_remove_pte (struct pmap *pmap, unsigned *ptq,
				vm_offset_t sva, pmap_inval_info_t info);
static void	pmap_remove_page (struct pmap *pmap,
				vm_offset_t va, pmap_inval_info_t info);
static int	pmap_remove_entry (struct pmap *pmap, vm_page_t m,
				vm_offset_t va, pmap_inval_info_t info);
static boolean_t pmap_testbit (vm_page_t m, int bit);
static void	pmap_insert_entry (pmap_t pmap, vm_offset_t va,
				vm_page_t mpte, vm_page_t m);

static vm_page_t pmap_allocpte (pmap_t pmap, vm_offset_t va);

static int	pmap_release_free_page (pmap_t pmap, vm_page_t p);
static vm_page_t _pmap_allocpte (pmap_t pmap, unsigned ptepindex);
static unsigned * pmap_pte_quick (pmap_t pmap, vm_offset_t va);
static vm_page_t pmap_page_lookup (vm_object_t object, vm_pindex_t pindex);
static int	pmap_unuse_pt (pmap_t, vm_offset_t, vm_page_t, pmap_inval_info_t);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);

static unsigned pdir4mb;
/*
 * Move the kernel virtual free pointer to the next
 * 4MB.  This is used to help improve performance
 * by using a large (4MB) page for much of the kernel
 * (.text, .data, .bss)
 */
static vm_offset_t
pmap_kmem_choose(vm_offset_t addr)
{
	vm_offset_t newaddr = addr;

#ifndef DISABLE_PSE
	if (cpu_feature & CPUID_PSE) {
		newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
	}
#endif
	return newaddr;
}
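/*
 * Worked example (illustrative only, not from the original source): with
 * 4MB pages NBPDR is 0x400000, so an addr of 0xc0321000 rounds up as
 *
 *	(0xc0321000 + 0x3fffff) & ~0x3fffff == 0xc0400000
 *
 * i.e. the next 4MB boundary at or above addr.
 */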
/*
 * Extract the page table entry associated with the given map/virtual
 * address pair.
 *
 * This function may NOT be called from an interrupt.
 */
PMAP_INLINE unsigned *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	unsigned *pdeaddr;

	pdeaddr = (unsigned *) pmap_pde(pmap, va);
	if (*pdeaddr & PG_PS)
		return pdeaddr;
	return get_ptbase(pmap) + i386_btop(va);
}
/*
 * Super fast pmap_pte routine best used when scanning the pv lists.
 * This eliminates many coarse-grained invltlb calls.  Note that many of
 * the pv list scans are across different pmaps and it is very wasteful
 * to do an entire invltlb when checking a single mapping.
 *
 * Should only be called while in a critical section.
 *
 * Unlike get_ptbase(), this function MAY be called from an interrupt or
 * interrupt thread.
 */
static unsigned *
pmap_pte_quick(pmap_t pmap, vm_offset_t va)
{
	struct mdglobaldata *gd = mdcpu;
	unsigned pde, newpf;

	if ((pde = (unsigned) pmap->pm_pdir[va >> PDRSHIFT]) != 0) {
		unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
		unsigned index = i386_btop(va);
		/* are we current address space or kernel? */
		if ((pmap == &kernel_pmap) ||
		    (frame == (((unsigned) PTDpde) & PG_FRAME))) {
			return (unsigned *) PTmap + index;
		}
		newpf = pde & PG_FRAME;
		if (((*(unsigned *)gd->gd_PMAP1) & PG_FRAME) != newpf) {
			*(unsigned *)gd->gd_PMAP1 = newpf | PG_RW | PG_V;
			cpu_invlpg(gd->gd_PADDR1);
		}
		return gd->gd_PADDR1 + ((unsigned) index & (NPTEPG - 1));
	}
	return (0);
}
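/*
 * Sketch of the lookup above (illustrative only): for a foreign pmap the
 * PDE's physical frame is installed in the per-cpu PMAP1 pte, which makes
 * the foreign page table page visible at gd_PADDR1.  The pte for va is
 * then
 *
 *	gd->gd_PADDR1[i386_btop(va) & (NPTEPG - 1)]
 *
 * which is exactly what the final return statement computes via pointer
 * arithmetic, invalidating the one-page window only when it is repointed.
 */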
/*
 *	Bootstrap the system enough to run with virtual memory.
 *
 *	On the i386 this is called after mapping has already been enabled
 *	and just syncs the pmap module with what has already been done.
 *	[We can't call it easily with mapping off since the kernel is not
 *	mapped with PA == VA, hence we would have to relocate every address
 *	from the linked base (virtual) address "KERNBASE" to the actual
 *	(physical) address starting relative to 0]
 */
void
pmap_bootstrap(vm_paddr_t firstaddr, vm_paddr_t loadaddr)
{
	vm_offset_t va;
	pt_entry_t *pte;
	struct mdglobaldata *gd;
	int i;
	int pg;

	KvaStart = (vm_offset_t)VADDR(PTDPTDI, 0);
	KvaSize = (vm_offset_t)VADDR(APTDPTDI, 0) - KvaStart;
	KvaEnd = KvaStart + KvaSize;

	avail_start = firstaddr;

	/*
	 * XXX The calculation of virtual_start is wrong.  It's NKPT*PAGE_SIZE
	 * too large.  It should instead be correctly calculated in locore.s and
	 * not based on 'first' (which is a physical address, not a virtual
	 * address, for the start of unused physical memory).  The kernel
	 * page tables are NOT double mapped and thus should not be included
	 * in this calculation.
	 */
	virtual_start = (vm_offset_t) KERNBASE + firstaddr;
	virtual_start = pmap_kmem_choose(virtual_start);
	virtual_end = VADDR(KPTDI+NKPDE-1, NPTEPG-1);

	/*
	 * Initialize protection array.
	 */
	i386_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't have to use
	 * pmap_create, which is unlikely to work correctly at this part of
	 * the boot sequence (XXX and which no longer exists).
	 */
	kernel_pmap.pm_pdir = (pd_entry_t *)(KERNBASE + (u_int)IdlePTD);
	kernel_pmap.pm_count = 1;
	kernel_pmap.pm_active = (cpumask_t)-1 & ~CPUMASK_LOCK;
	TAILQ_INIT(&kernel_pmap.pm_pvlist);

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
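	/*
	 * Illustrative expansion (not in the original source): for example
	 *
	 *	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
	 *
	 * expands to
	 *
	 *	CADDR1 = (caddr_t)va; va += 1*PAGE_SIZE; CMAP1 = pte; pte += 1;
	 *
	 * reserving one page of KVA at CADDR1 and remembering its pte in
	 * CMAP1 so the mapping can later be pointed at an arbitrary
	 * physical page.
	 */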
	va = virtual_start;
	pte = (pt_entry_t *) pmap_pte(&kernel_pmap, va);

	/*
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
	 */
	SYSMAP(caddr_t, CMAP1, CADDR1, 1)

	/*
	 * Crashdump maps.
	 */
	SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);

	/*
	 * ptvmmap is used for reading arbitrary physical pages via
	 * /dev/mem.
	 */
	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)

	/*
	 * msgbufp is used to map the system message buffer.
	 * XXX msgbufmap is not used.
	 */
	SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
	       atop(round_page(MSGBUF_SIZE)))

	virtual_start = va;

	for (i = 0; i < NKPT; i++)
		PTD[i] = 0;
	/*
	 * PG_G is terribly broken on SMP because we IPI invltlb's in some
	 * cases rather than invl1pg.  Actually, I don't even know why it
	 * works under UP because self-referential page table mappings
	 */
#ifdef SMP
	pgeflag = 0;
#else
	if (cpu_feature & CPUID_PGE)
		pgeflag = PG_G;
#endif

	/*
	 * Initialize the 4MB page size flag
	 */
	pseflag = 0;
	/*
	 * The 4MB page version of the initial
	 * kernel page mapping.
	 */
	pdir4mb = 0;

#if !defined(DISABLE_PSE)
	if (cpu_feature & CPUID_PSE) {
		unsigned ptditmp;
		/*
		 * Note that we have enabled PSE mode
		 */
		pseflag = PG_PS;
		ptditmp = *((unsigned *)PTmap + i386_btop(KERNBASE));
		ptditmp &= ~(NBPDR - 1);
		ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
		pdir4mb = ptditmp;

#ifndef SMP
		/*
		 * Enable the PSE mode.  If we are SMP we can't do this
		 * now because the APs will not be able to use it when
		 * they boot up.
		 */
		load_cr4(rcr4() | CR4_PSE);

		/*
		 * We can do the mapping here for the single processor
		 * case.  We simply ignore the old page table page from
		 * now on.
		 *
		 * For SMP, we still need 4K pages to bootstrap APs,
		 * PSE will be enabled as soon as all APs are up.
		 */
		PTD[KPTDI] = (pd_entry_t)ptditmp;
		kernel_pmap.pm_pdir[KPTDI] = (pd_entry_t)ptditmp;
		cpu_invltlb();
#endif
	}
#endif
	/*
	 * We need to finish setting up the globaldata page for the BSP.
	 * locore has already populated the page table for the mdglobaldata
	 * portion.
	 */
	pg = MDGLOBALDATA_BASEALLOC_PAGES;
	gd = &CPU_prvspace[0].mdglobaldata;
	gd->gd_CMAP1 = &SMPpt[pg + 0];
	gd->gd_CMAP2 = &SMPpt[pg + 1];
	gd->gd_CMAP3 = &SMPpt[pg + 2];
	gd->gd_PMAP1 = &SMPpt[pg + 3];
	gd->gd_GDMAP1 = &PTD[KGDTDI];
	gd->gd_CADDR1 = CPU_prvspace[0].CPAGE1;
	gd->gd_CADDR2 = CPU_prvspace[0].CPAGE2;
	gd->gd_CADDR3 = CPU_prvspace[0].CPAGE3;
	gd->gd_PADDR1 = (unsigned *)CPU_prvspace[0].PPAGE1;
	gd->gd_GDADDR1 = (unsigned *)VADDR(KGDTDI, 0);

	cpu_invltlb();
}
/*
 * Set 4mb pdir for mp startup
 */
void
pmap_set_opt(void)
{
	if (pseflag && (cpu_feature & CPUID_PSE)) {
		load_cr4(rcr4() | CR4_PSE);
		if (pdir4mb && mycpu->gd_cpuid == 0) {	/* only on BSP */
			kernel_pmap.pm_pdir[KPTDI] =
			    PTD[KPTDI] = (pd_entry_t)pdir4mb;
			cpu_invltlb();
		}
	}
}
/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 *	pmap_init has been enhanced to support in a fairly consistent
 *	way, discontiguous physical memory.
 */
void
pmap_init(void)
{
	int i;
	int initial_pvs;
	vm_page_t m;

	/*
	 * object for kernel page table pages
	 */
	kptobj = vm_object_allocate(OBJT_DEFAULT, NKPDE);

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * pv_head_table.
	 */
	for (i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		TAILQ_INIT(&m->md.pv_list);
		m->md.pv_list_count = 0;
	}

	/*
	 * init the pv free list
	 */
	initial_pvs = vm_page_array_size;
	if (initial_pvs < MINPV)
		initial_pvs = MINPV;
	pvzone = &pvzone_store;
	pvinit = (struct pv_entry *) kmem_alloc(&kernel_map,
		initial_pvs * sizeof (struct pv_entry));
	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
		initial_pvs);

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	pmap_initialized = TRUE;
}
/*
 * Initialize the address space (zone) for the pv_entries.  Set a
 * high water mark so that the system can recover from excessive
 * numbers of pv entries.
 */
void
pmap_init2(void)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
}
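/*
 * Illustrative only (not part of the original source): both limits
 * consumed above are boot-time tunables and can be overridden from
 * /boot/loader.conf, e.g.
 *
 *	vm.pmap.shpgperproc="300"
 *	vm.pmap.pv_entries="2000000"
 *
 * pv_entry_high_water is then derived as 90% of the resulting
 * pv_entry_max.
 */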
/***************************************************
 * Low level helper routines.....
 ***************************************************/

#ifdef PMAP_DEBUG

static void
test_m_maps_pv(vm_page_t m, pv_entry_t pv)
{
	pv_entry_t spv;

	KKASSERT(pv->pv_m == m);
	TAILQ_FOREACH(spv, &m->md.pv_list, pv_list) {
		if (spv == pv)
			return;
	}
	panic("test_m_maps_pv: failed m %p pv %p\n", m, pv);
}

static void
ptbase_assert(struct pmap *pmap)
{
	unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;

	/* are we current address space or kernel? */
	if (pmap == &kernel_pmap || frame == (((unsigned)PTDpde) & PG_FRAME)) {
		return;
	}
	KKASSERT(frame == (*mycpu->gd_GDMAP1 & PG_FRAME));
}

#else

#define test_m_maps_pv(m, pv)
#define ptbase_assert(pmap)

#endif
#if defined(PMAP_DIAGNOSTIC)

/*
 * This code checks for non-writeable/modified pages.
 * This should be an invalid condition.
 */
static int
pmap_nw_modified(pt_entry_t ptea)
{
	int pte;

	pte = (int) ptea;

	if ((pte & (PG_M|PG_RW)) == PG_M)
		return 1;
	else
		return 0;
}
#endif
/*
 * this routine defines the region(s) of memory that should
 * not be tested for the modified bit.
 */
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va)
{
	if ((va < clean_sva) || (va >= clean_eva))
		return 1;
	else
		return 0;
}
/*
 * Retrieve the mapped page table base for a particular pmap.  Use our self
 * mapping for the kernel_pmap or our current pmap.
 *
 * For foreign pmaps we use the per-cpu page table map.  Since this involves
 * installing a ptd it's actually (per-process x per-cpu).  However, we
 * still cannot depend on our mapping to survive thread switches because
 * the process might be threaded and switching to another thread for the
 * same process on the same cpu will allow that other thread to make its
 * own map.
 *
 * This could be a bit confusing but the gist is for something like the
 * vkernel which uses foreign pmaps all the time this represents a pretty
 * good cache that avoids unnecessary invltlb()s.
 */
static unsigned *
get_ptbase(pmap_t pmap)
{
	unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
	struct mdglobaldata *gd = mdcpu;

	/*
	 * We can use PTmap if the pmap is our current address space or
	 * the kernel address space.
	 */
	if (pmap == &kernel_pmap || frame == (((unsigned) PTDpde) & PG_FRAME)) {
		return (unsigned *) PTmap;
	}

	/*
	 * Otherwise we use the per-cpu alternative page table map.  Each
	 * cpu gets its own map.  Because of this we cannot use this map
	 * from interrupts or threads which can preempt.
	 *
	 * Even if we already have the map cached we may still have to
	 * invalidate the TLB if another cpu modified a PDE in the map.
	 */
	KKASSERT(gd->mi.gd_intr_nesting_level == 0 &&
		 (gd->mi.gd_curthread->td_flags & TDF_INTTHREAD) == 0);

	if ((*gd->gd_GDMAP1 & PG_FRAME) != frame) {
		*gd->gd_GDMAP1 = frame | PG_RW | PG_V;
		pmap->pm_cached |= gd->mi.gd_cpumask;
		cpu_invltlb();
	} else if ((pmap->pm_cached & gd->mi.gd_cpumask) == 0) {
		pmap->pm_cached |= gd->mi.gd_cpumask;
		cpu_invltlb();
	} else if (dreadful_invltlb) {
		cpu_invltlb();
	}
	return ((unsigned *)gd->gd_GDADDR1);
}
/*
 * Extract the physical page address associated with the map/VA pair.
 *
 * This function may not be called from an interrupt if the pmap is
 * not kernel_pmap.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	vm_offset_t rtval;
	vm_offset_t pdirindex;
	unsigned *pte;

	pdirindex = va >> PDRSHIFT;
	if (pmap && (rtval = (unsigned) pmap->pm_pdir[pdirindex])) {
		if ((rtval & PG_PS) != 0) {
			rtval &= ~(NBPDR - 1);
			rtval |= va & (NBPDR - 1);
			return rtval;
		}
		pte = get_ptbase(pmap) + i386_btop(va);
		rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
		return rtval;
	}
	return (0);
}
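/*
 * Worked example (illustrative only, not from the original source): for a
 * 4MB mapping (PG_PS set) the PDE itself holds the physical base, so with
 * NBPDR == 0x400000 a PDE of 0x00800fe3 and va 0xc0123456 yield
 *
 *	(0x00800fe3 & ~0x3fffff) | (0xc0123456 & 0x3fffff) == 0x00923456
 *
 * For 4K mappings the pte's frame is instead combined with the low 12
 * bits of va.
 */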
/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Routine: pmap_kenter
 * Function:
 *	Add a wired page to the KVA
 *	NOTE! In order for the mapping to take effect you
 *	should do an invltlb after doing the pmap_kenter().
 */
void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{
	unsigned *pte;
	unsigned npte;
	pmap_inval_info info;

	pmap_inval_init(&info);
	npte = pa | PG_RW | PG_V | pgeflag;
	pte = (unsigned *)vtopte(va);
	pmap_inval_interlock(&info, &kernel_pmap, va);
	*pte = npte;
	pmap_inval_deinterlock(&info, &kernel_pmap);
	pmap_inval_done(&info);
}
/*
 * Routine: pmap_kenter_quick
 * Function:
 *	Similar to pmap_kenter(), except we only invalidate the
 *	mapping on the current CPU.
 */
void
pmap_kenter_quick(vm_offset_t va, vm_paddr_t pa)
{
	unsigned *pte;
	unsigned npte;

	npte = pa | PG_RW | PG_V | pgeflag;
	pte = (unsigned *)vtopte(va);
	*pte = npte;
	cpu_invlpg((void *)va);
}

void
pmap_kenter_sync(vm_offset_t va)
{
	pmap_inval_info info;

	pmap_inval_init(&info);
	pmap_inval_interlock(&info, &kernel_pmap, va);
	pmap_inval_deinterlock(&info, &kernel_pmap);
	pmap_inval_done(&info);
}

void
pmap_kenter_sync_quick(vm_offset_t va)
{
	cpu_invlpg((void *)va);
}

/*
 * remove a page from the kernel pagetables
 */
void
pmap_kremove(vm_offset_t va)
{
	unsigned *pte;
	pmap_inval_info info;

	pmap_inval_init(&info);
	pte = (unsigned *)vtopte(va);
	pmap_inval_interlock(&info, &kernel_pmap, va);
	*pte = 0;
	pmap_inval_deinterlock(&info, &kernel_pmap);
	pmap_inval_done(&info);
}

void
pmap_kremove_quick(vm_offset_t va)
{
	unsigned *pte;

	pte = (unsigned *)vtopte(va);
	*pte = 0;
	cpu_invlpg((void *)va);
}

/*
 * XXX these need to be recoded.  They are not used in any critical path.
 */
void
pmap_kmodify_rw(vm_offset_t va)
{
	*vtopte(va) |= PG_RW;
	cpu_invlpg((void *)va);
}

void
pmap_kmodify_nc(vm_offset_t va)
{
	*vtopte(va) |= PG_N;
	cpu_invlpg((void *)va);
}
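/*
 * Usage sketch (illustrative only, not from the original source): a
 * driver wanting a short-lived kernel mapping of a physical page might
 * do
 *
 *	pmap_kenter(va, pa);		(interlocked, all-cpu invalidation)
 *	... access the page through va ...
 *	pmap_kremove(va);
 *
 * using the _quick variants instead when only the current cpu will ever
 * touch the mapping.
 */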
/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	For now, VM is already on, we only need to map the
 *	specified memory.
 */
vm_offset_t
pmap_map(vm_offset_t *virtp, vm_paddr_t start, vm_paddr_t end, int prot)
{
	vm_offset_t sva, virt;

	sva = virt = *virtp;
	while (start < end) {
		pmap_kenter(virt, start);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	*virtp = virt;
	return (sva);
}
/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	vm_offset_t end_va;

	end_va = va + count * PAGE_SIZE;

	while (va < end_va) {
		unsigned *pte;

		pte = (unsigned *)vtopte(va);
		*pte = VM_PAGE_TO_PHYS(*m) | PG_RW | PG_V | pgeflag;
		cpu_invlpg((void *)va);
		va += PAGE_SIZE;
		m++;
	}
#ifdef SMP
	smp_invltlb();	/* XXX */
#endif
}
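/*
 * Usage sketch (illustrative only, not from the original source):
 * pmap_qenter()/pmap_qremove() pair up for short-lived bulk mappings,
 * e.g. mapping the pages backing an I/O buffer:
 *
 *	pmap_qenter(buffer_va, pages, npages);
 *	... copy in/out through buffer_va ...
 *	pmap_qremove(buffer_va, npages);
 *
 * No pv entries are created, so the pages must remain wired for the life
 * of the mapping.
 */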
void
pmap_qenter2(vm_offset_t va, vm_page_t *m, int count, cpumask_t *mask)
{
	vm_offset_t end_va;
	cpumask_t cmask = mycpu->gd_cpumask;

	end_va = va + count * PAGE_SIZE;

	while (va < end_va) {
		unsigned *pte;
		unsigned pteval;

		/*
		 * Install the new PTE.  If the pte changed from the prior
		 * mapping we must reset the cpu mask and invalidate the page.
		 * If the pte is the same but we have not seen it on the
		 * current cpu, invlpg the existing mapping.  Otherwise the
		 * entry is optimal and no invalidation is required.
		 */
		pte = (unsigned *)vtopte(va);
		pteval = VM_PAGE_TO_PHYS(*m) | PG_A | PG_RW | PG_V | pgeflag;
		if (*pte != pteval) {
			*mask = 0;
			*pte = pteval;
			cpu_invlpg((void *)va);
		} else if ((*mask & cmask) == 0) {
			cpu_invlpg((void *)va);
		}
		va += PAGE_SIZE;
		m++;
	}
	*mask |= cmask;
}
/*
 * This routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 *
 * MPSAFE, INTERRUPT SAFE (cluster callback)
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	vm_offset_t end_va;

	end_va = va + count*PAGE_SIZE;

	while (va < end_va) {
		unsigned *pte;

		pte = (unsigned *)vtopte(va);
		*pte = 0;
		cpu_invlpg((void *)va);
		va += PAGE_SIZE;
	}
#ifdef SMP
	smp_invltlb();
#endif
}
/*
 * This routine works like vm_page_lookup() but also blocks as long as the
 * page is busy.  This routine does not busy the page it returns.
 *
 * Unless the caller is managing objects whose pages are in a known state,
 * the call should be made with a critical section held so the page's object
 * association remains valid on return.
 */
static vm_page_t
pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	do {
		m = vm_page_lookup(object, pindex);
	} while (m && vm_page_sleep_busy(m, FALSE, "pplookp"));
	return(m);
}
/*
 * Create a new thread and optionally associate it with a (new) process.
 * NOTE! the new thread's cpu may not equal the current cpu.
 */
void
pmap_init_thread(thread_t td)
{
	/* enforce pcb placement */
	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_size) - 1;
	td->td_savefpu = &td->td_pcb->pcb_save;
	td->td_sp = (char *)td->td_pcb - 16;
}

/*
 * This routine directly affects the fork perf for a process.
 */
void
pmap_init_proc(struct proc *p)
{
}

/*
 * Dispose the UPAGES for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 */
void
pmap_dispose_proc(struct proc *p)
{
	KASSERT(p->p_lock == 0, ("attempt to dispose referenced proc! %p", p));
}
/***************************************************
 * Page table page management routines.....
 ***************************************************/

/*
 * This routine unholds page table pages, and if the hold count
 * drops to zero, then it decrements the wire count.
 */
static int
_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, pmap_inval_info_t info)
{
	/*
	 * Wait until we can busy the page ourselves.  We cannot have
	 * any active flushes if we block.
	 */
	if (m->flags & PG_BUSY) {
		pmap_inval_flush(info);
		while (vm_page_sleep_busy(m, FALSE, "pmuwpt"))
			;
	}
	KASSERT(m->queue == PQ_NONE,
		("_pmap_unwire_pte_hold: %p->queue != PQ_NONE", m));

	if (m->hold_count == 1) {
		/*
		 * Unmap the page table page.
		 *
		 * NOTE: We must clear pm_cached for all cpus, including
		 *	 the current one, when clearing a page directory
		 *	 entry.
		 */
		vm_page_busy(m);
		pmap_inval_interlock(info, pmap, -1);
		KKASSERT(pmap->pm_pdir[m->pindex]);
		pmap->pm_pdir[m->pindex] = 0;
		pmap->pm_cached = 0;
		pmap_inval_deinterlock(info, pmap);

		KKASSERT(pmap->pm_stats.resident_count > 0);
		--pmap->pm_stats.resident_count;

		if (pmap->pm_ptphint == m)
			pmap->pm_ptphint = NULL;

		/*
		 * This was our last hold, the page had better be unwired
		 * after we decrement wire_count.
		 *
		 * FUTURE NOTE: shared page directory page could result in
		 * multiple wire counts.
		 */
		vm_page_unhold(m);
		--m->wire_count;
		KKASSERT(m->wire_count == 0);
		--vmstats.v_wire_count;
		vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
		vm_page_free_zero(m);
		return 1;
	} else {
		KKASSERT(m->hold_count > 1);
		vm_page_unhold(m);
		return 0;
	}
}

static PMAP_INLINE int
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, pmap_inval_info_t info)
{
	KKASSERT(m->hold_count > 0);
	if (m->hold_count > 1) {
		vm_page_unhold(m);
		return 0;
	} else {
		return _pmap_unwire_pte_hold(pmap, m, info);
	}
}
/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 *
 * WARNING:  This function can block
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte,
	      pmap_inval_info_t info)
{
	unsigned ptepindex;

	if (va >= UPT_MIN_ADDRESS)
		return 0;

	if (mpte == NULL) {
		ptepindex = (va >> PDRSHIFT);
		if (pmap->pm_ptphint &&
		    (pmap->pm_ptphint->pindex == ptepindex)) {
			mpte = pmap->pm_ptphint;
		} else {
			pmap_inval_flush(info);
			mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
			pmap->pm_ptphint = mpte;
		}
	}
	return pmap_unwire_pte_hold(pmap, mpte, info);
}
/*
 * Initialize pmap0/vmspace0.  This pmap is not added to pmap_list because
 * it, and IdlePTD, represents the template used to update all other pmaps.
 *
 * On architectures where the kernel pmap is not integrated into the user
 * process pmap, this pmap represents the process pmap, not the kernel pmap.
 * kernel_pmap should be used to directly access the kernel_pmap.
 */
void
pmap_pinit0(struct pmap *pmap)
{
	pmap->pm_pdir =
		(pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
	pmap_kenter((vm_offset_t)pmap->pm_pdir, (vm_offset_t) IdlePTD);
	pmap->pm_count = 1;
	pmap->pm_active = 0;
	pmap->pm_cached = 0;
	pmap->pm_ptphint = NULL;
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}
/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(struct pmap *pmap)
{
	vm_page_t ptdpg;

	/*
	 * No need to allocate page table space yet but we do need a valid
	 * page directory table.
	 */
	if (pmap->pm_pdir == NULL) {
		pmap->pm_pdir =
			(pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
	}

	/*
	 * Allocate an object for the ptes
	 */
	if (pmap->pm_pteobj == NULL)
		pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1);

	/*
	 * Allocate the page directory page, unless we already have
	 * one cached.  If we used the cached page the wire_count will
	 * already be set appropriately.
	 */
	if ((ptdpg = pmap->pm_pdirm) == NULL) {
		ptdpg = vm_page_grab(pmap->pm_pteobj, PTDPTDI,
				     VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		pmap->pm_pdirm = ptdpg;
		vm_page_flag_clear(ptdpg, PG_MAPPED | PG_BUSY);
		ptdpg->valid = VM_PAGE_BITS_ALL;
		ptdpg->wire_count = 1;
		++vmstats.v_wire_count;
		pmap_kenter((vm_offset_t)pmap->pm_pdir, VM_PAGE_TO_PHYS(ptdpg));
	}
	if ((ptdpg->flags & PG_ZERO) == 0)
		bzero(pmap->pm_pdir, PAGE_SIZE);
#ifdef PMAP_DEBUG
	else
		pmap_page_assertzero(VM_PAGE_TO_PHYS(ptdpg));
#endif

	pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];

	/* install self-referential address mapping entry */
	*(unsigned *) (pmap->pm_pdir + PTDPTDI) =
		VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;

	pmap->pm_count = 1;
	pmap->pm_active = 0;
	pmap->pm_cached = 0;
	pmap->pm_ptphint = NULL;
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
	pmap->pm_stats.resident_count = 1;
}
/*
 * Clean up a pmap structure so it can be physically freed.  This routine
 * is called by the vmspace dtor function.  A great deal of pmap data is
 * left passively mapped to improve vmspace management so we have a bit
 * of cleanup work to do here.
 */
void
pmap_puninit(pmap_t pmap)
{
	vm_page_t p;

	KKASSERT(pmap->pm_active == 0);
	if ((p = pmap->pm_pdirm) != NULL) {
		KKASSERT(pmap->pm_pdir != NULL);
		pmap_kremove((vm_offset_t)pmap->pm_pdir);
		p->wire_count--;
		vmstats.v_wire_count--;
		KKASSERT((p->flags & PG_BUSY) == 0);
		vm_page_busy(p);
		vm_page_free_zero(p);
		pmap->pm_pdirm = NULL;
	}
	if (pmap->pm_pdir) {
		kmem_free(&kernel_map, (vm_offset_t)pmap->pm_pdir, PAGE_SIZE);
		pmap->pm_pdir = NULL;
	}
	if (pmap->pm_pteobj) {
		vm_object_deallocate(pmap->pm_pteobj);
		pmap->pm_pteobj = NULL;
	}
}
/*
 * Wire in kernel global address entries.  To avoid a race condition
 * between pmap initialization and pmap_growkernel, this procedure
 * adds the pmap to the master list (which growkernel scans to update),
 * then copies the template.
 */
void
pmap_pinit2(struct pmap *pmap)
{
	crit_enter();
	TAILQ_INSERT_TAIL(&pmap_list, pmap, pm_pmnode);
	/* XXX copies current process, does not fill in MPPTDI */
	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
	crit_exit();
}
/*
 * Attempt to release and free a vm_page in a pmap.  Returns 1 on success,
 * 0 on failure (if the procedure had to sleep).
 *
 * When asked to remove the page directory page itself, we actually just
 * leave it cached so we do not have to incur the SMP inval overhead of
 * removing the kernel mapping.  pmap_puninit() will take care of it.
 */
static int
pmap_release_free_page(struct pmap *pmap, vm_page_t p)
{
	unsigned *pde = (unsigned *) pmap->pm_pdir;

	/*
	 * This code optimizes the case of freeing non-busy
	 * page-table pages.  Those pages are zero now, and
	 * might as well be placed directly into the zero queue.
	 */
	if (vm_page_sleep_busy(p, FALSE, "pmaprl"))
		return 0;

	vm_page_busy(p);

	/*
	 * Remove the page table page from the processes address space.
	 */
	KKASSERT(pmap->pm_stats.resident_count > 0);
	KKASSERT(pde[p->pindex]);
	pde[p->pindex] = 0;
	--pmap->pm_stats.resident_count;
	pmap->pm_cached = 0;

	if (p->hold_count)  {
		panic("pmap_release: freeing held page table page");
	}
	if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
		pmap->pm_ptphint = NULL;

	/*
	 * We leave the page directory page cached, wired, and mapped in
	 * the pmap until the dtor function (pmap_puninit()) gets called.
	 * However, still clean it up so we can set PG_ZERO.
	 *
	 * The pmap has already been removed from the pmap_list in the
	 * PTDPTDI case.
	 */
	if (p->pindex == PTDPTDI) {
		bzero(pde + KPTDI, nkpt * PTESIZE);
		bzero(pde + KGDTDI, (NPDEPG - KGDTDI) * PTESIZE);
		vm_page_flag_set(p, PG_ZERO);
		vm_page_wakeup(p);
	} else {
		p->wire_count--;
		vmstats.v_wire_count--;
		vm_page_free_zero(p);
	}
	return 1;
}
/*
 * this routine is called if the page table page is not
 * mapped correctly.
 */
static vm_page_t
_pmap_allocpte(pmap_t pmap, unsigned ptepindex)
{
	vm_offset_t pteva, ptepa;
	vm_page_t m;

	/*
	 * Find or fabricate a new pagetable page
	 */
	m = vm_page_grab(pmap->pm_pteobj, ptepindex,
			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);

	KASSERT(m->queue == PQ_NONE,
		("_pmap_allocpte: %p->queue != PQ_NONE", m));

	/*
	 * Increment the hold count for the page we will be returning to
	 * the caller.
	 */
	m->hold_count++;

	/*
	 * It is possible that someone else got in and mapped the page
	 * directory page while we were blocked, if so just unbusy and
	 * return the held page.
	 */
	if ((ptepa = pmap->pm_pdir[ptepindex]) != 0) {
		KKASSERT((ptepa & PG_FRAME) == VM_PAGE_TO_PHYS(m));
		vm_page_wakeup(m);
		return(m);
	}

	if (m->wire_count == 0)
		vmstats.v_wire_count++;
	m->wire_count++;

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 *
	 * NOTE: For safety clear pm_cached for all cpus including the
	 *	 current one when adding a PDE to the map.
	 */
	++pmap->pm_stats.resident_count;

	ptepa = VM_PAGE_TO_PHYS(m);
	pmap->pm_pdir[ptepindex] =
		(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
	pmap->pm_cached = 0;

	/*
	 * Set the page table hint
	 */
	pmap->pm_ptphint = m;

	/*
	 * Try to use the new mapping, but if we cannot, then
	 * do it with the routine that maps the page explicitly.
	 */
	if ((m->flags & PG_ZERO) == 0) {
		if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) ==
		    (((unsigned) PTDpde) & PG_FRAME)) {
			pteva = UPT_MIN_ADDRESS + i386_ptob(ptepindex);
			bzero((caddr_t) pteva, PAGE_SIZE);
		} else {
			pmap_zero_page(ptepa);
		}
	}
#ifdef PMAP_DEBUG
	else {
		pmap_page_assertzero(VM_PAGE_TO_PHYS(m));
	}
#endif

	m->valid = VM_PAGE_BITS_ALL;
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_flag_set(m, PG_MAPPED);
	vm_page_wakeup(m);

	return (m);
}
static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va)
{
	unsigned ptepindex;
	vm_offset_t ptepa;
	vm_page_t m;

	/*
	 * Calculate pagetable page index
	 */
	ptepindex = va >> PDRSHIFT;

	/*
	 * Get the page directory entry
	 */
	ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];

	/*
	 * This supports switching from a 4MB page to a
	 * normal 4K page.
	 */
	if (ptepa & PG_PS) {
		pmap->pm_pdir[ptepindex] = 0;
		ptepa = 0;
		cpu_invltlb();
		smp_invltlb();
	}

	/*
	 * If the page table page is mapped, we just increment the
	 * hold count, and activate it.
	 */
	if (ptepa) {
		/*
		 * In order to get the page table page, try the
		 * hint first.
		 */
		if (pmap->pm_ptphint &&
		    (pmap->pm_ptphint->pindex == ptepindex)) {
			m = pmap->pm_ptphint;
		} else {
			m = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
			pmap->pm_ptphint = m;
		}
		m->hold_count++;
		return m;
	}
	/*
	 * Here if the pte page isn't mapped, or if it has been deallocated.
	 */
	return _pmap_allocpte(pmap, ptepindex);
}
/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static int pmap_release_callback(struct vm_page *p, void *data);

void
pmap_release(struct pmap *pmap)
{
	vm_object_t object = pmap->pm_pteobj;
	struct rb_vm_page_scan_info info;

	KASSERT(pmap->pm_active == 0, ("pmap still active! %08x", pmap->pm_active));
#if defined(DIAGNOSTIC)
	if (object->ref_count != 1)
		panic("pmap_release: pteobj reference count != 1");
#endif

	info.pmap = pmap;
	info.object = object;
	crit_enter();
	TAILQ_REMOVE(&pmap_list, pmap, pm_pmnode);
	crit_exit();

	do {
		crit_enter();
		info.error = 0;
		info.mpte = NULL;
		info.limit = object->generation;

		vm_page_rb_tree_RB_SCAN(&object->rb_memq, NULL,
					pmap_release_callback, &info);
		if (info.error == 0 && info.mpte) {
			if (!pmap_release_free_page(pmap, info.mpte))
				info.error = 1;
		}
		crit_exit();
	} while (info.error);
	pmap->pm_cached = 0;
}

static int
pmap_release_callback(struct vm_page *p, void *data)
{
	struct rb_vm_page_scan_info *info = data;

	if (p->pindex == PTDPTDI) {
		info->mpte = p;
		return(0);
	}
	if (!pmap_release_free_page(info->pmap, p)) {
		info->error = 1;
		return(-1);
	}
	if (info->object->generation != info->limit) {
		info->error = 1;
		return(-1);
	}
	return(0);
}
/*
 * Grow the number of kernel page table entries, if needed.
 */
void
pmap_growkernel(vm_offset_t addr)
{
	struct pmap *pmap;
	vm_offset_t ptppaddr;
	vm_page_t nkpg;
	pd_entry_t newpdir;

	crit_enter();
	if (kernel_vm_end == 0) {
		kernel_vm_end = KERNBASE;
		nkpt = 0;
		while (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
					~(PAGE_SIZE * NPTEPG - 1);
			nkpt++;
		}
	}
	addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
	while (kernel_vm_end < addr) {
		if (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
					~(PAGE_SIZE * NPTEPG - 1);
			continue;
		}

		/*
		 * This index is bogus, but out of the way
		 */
		nkpg = vm_page_alloc(kptobj, nkpt,
				     VM_ALLOC_NORMAL |
				     VM_ALLOC_SYSTEM |
				     VM_ALLOC_INTERRUPT);
		if (nkpg == NULL)
			panic("pmap_growkernel: no memory to grow kernel");

		vm_page_wire(nkpg);
		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
		pmap_zero_page(ptppaddr);
		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
		pdir_pde(PTD, kernel_vm_end) = newpdir;
		*pmap_pde(&kernel_pmap, kernel_vm_end) = newpdir;
		nkpt++;

		/*
		 * This update must be interlocked with pmap_pinit2.
		 */
		TAILQ_FOREACH(pmap, &pmap_list, pm_pmnode) {
			*pmap_pde(pmap, kernel_vm_end) = newpdir;
		}
		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) &
				~(PAGE_SIZE * NPTEPG - 1);
	}
	crit_exit();
}
/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
	int count;

	if (pmap == NULL)
		return;

	count = --pmap->pm_count;
	if (count == 0) {
		pmap_release(pmap);
		panic("destroying a pmap is not yet implemented");
	}
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{
	if (pmap != NULL) {
		pmap->pm_count++;
	}
}
/***************************************************
 * page management routines.
 ***************************************************/

/*
 * free the pv_entry back to the free list.  This function may be
 * called from an interrupt.
 */
static PMAP_INLINE void
free_pv_entry(pv_entry_t pv)
{
#ifdef PMAP_DEBUG
	KKASSERT(pv->pv_m != NULL);
	pv->pv_m = NULL;
#endif
	pv_entry_count--;
	zfree(pvzone, pv);
}

/*
 * get a new pv_entry, allocating a block from the system
 * when needed.  This function may be called from an interrupt.
 */
static pv_entry_t
get_pv_entry(void)
{
	pv_entry_count++;
	if (pv_entry_high_water &&
	    (pv_entry_count > pv_entry_high_water) &&
	    (pmap_pagedaemon_waken == 0)) {
		pmap_pagedaemon_waken = 1;
		wakeup(&vm_pages_needed);
	}
	return zalloc(pvzone);
}

/*
 * This routine is very drastic, but can save the system
 * in a pinch.
 */
void
pmap_collect(void)
{
	int i;
	vm_page_t m;
	static int warningdone = 0;

	if (pmap_pagedaemon_waken == 0)
		return;
	pmap_pagedaemon_waken = 0;

	if (warningdone < 5) {
		kprintf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}

	for (i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & PG_BUSY))
			continue;
		pmap_remove_all(m);
	}
}
/*
 * If it is the first entry on the list, it is actually
 * in the header and we must copy the following entry up
 * to the header.  Otherwise we must search the list for
 * the entry.  In either case we free the now unused entry.
 */
static int
pmap_remove_entry(struct pmap *pmap, vm_page_t m,
		  vm_offset_t va, pmap_inval_info_t info)
{
	pv_entry_t pv;
	int rtval;

	crit_enter();
	if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
			if (pmap == pv->pv_pmap && va == pv->pv_va)
				break;
		}
	} else {
		TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
			KKASSERT(pv->pv_pmap == pmap);
			if (va == pv->pv_va)
				break;
		}
	}
	KKASSERT(pv);

	rtval = 0;
	test_m_maps_pv(m, pv);
	TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
	m->md.pv_list_count--;
	if (TAILQ_EMPTY(&m->md.pv_list))
		vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
	TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
	++pmap->pm_generation;
	rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem, info);
	free_pv_entry(pv);

	crit_exit();
	return rtval;
}
/*
 * Create a pv entry for page at pa for
 * (pmap, va).
 */
static void
pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m)
{
	pv_entry_t pv;

	crit_enter();
	pv = get_pv_entry();
#ifdef PMAP_DEBUG
	KKASSERT(pv->pv_m == NULL);
	pv->pv_m = m;
#endif
	pv->pv_va = va;
	pv->pv_pmap = pmap;
	pv->pv_ptem = mpte;

	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
	++pmap->pm_generation;
	m->md.pv_list_count++;

	crit_exit();
}
/*
 * pmap_remove_pte: do the things to unmap a page in a process.
 *
 * WARNING: This function may block (via pmap_remove_entry/pmap_unuse_pt),
 *	    callers using temporary pmaps must reload them.
 */
static int
pmap_remove_pte(struct pmap *pmap, unsigned *ptq, vm_offset_t va,
		pmap_inval_info_t info)
{
	unsigned oldpte;
	vm_page_t m;

	ptbase_assert(pmap);
	pmap_inval_interlock(info, pmap, va);
	ptbase_assert(pmap);
	oldpte = loadandclear(ptq);
	if (oldpte & PG_W)
		pmap->pm_stats.wired_count -= 1;
	pmap_inval_deinterlock(info, pmap);

	/*
	 * Machines that don't support invlpg, also don't support
	 * PG_G.  XXX PG_G is disabled for SMP so don't worry about
	 * the SMP case.
	 */
	if (oldpte & PG_G)
		cpu_invlpg((void *)va);
	KKASSERT(pmap->pm_stats.resident_count > 0);
	--pmap->pm_stats.resident_count;
	if (oldpte & PG_MANAGED) {
		m = PHYS_TO_VM_PAGE(oldpte);
		if (oldpte & PG_M) {
#if defined(PMAP_DIAGNOSTIC)
			if (pmap_nw_modified((pt_entry_t) oldpte)) {
				kprintf("pmap_remove: modified page not "
					"writable: va: %p, pte: 0x%lx\n",
					(void *)va, (long)oldpte);
			}
#endif
			if (pmap_track_modified(va))
				vm_page_dirty(m);
		}
		if (oldpte & PG_A)
			vm_page_flag_set(m, PG_REFERENCED);
		return pmap_remove_entry(pmap, m, va, info);
	} else {
		return pmap_unuse_pt(pmap, va, NULL, info);
	}
}
/*
 * Remove a single page from a process address space.
 *
 * This function may not be called from an interrupt if the pmap is
 * not kernel_pmap.
 */
static void
pmap_remove_page(struct pmap *pmap, vm_offset_t va, pmap_inval_info_t info)
{
	unsigned *ptq;

	/*
	 * if there is no pte for this address, just skip it!!!  Otherwise
	 * get a local va for mappings for this pmap and remove the entry.
	 */
	if (*pmap_pde(pmap, va) != 0) {
		ptq = get_ptbase(pmap) + i386_btop(va);
		if (*ptq) {
			pmap_remove_pte(pmap, ptq, va, info);
			/* ptq invalid */
		}
	}
}
/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 *
 * This function may not be called from an interrupt if the pmap is
 * not kernel_pmap.
 */
void
pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
{
	unsigned *ptbase;
	vm_offset_t pdnxt;
	vm_offset_t ptpaddr;
	vm_offset_t sindex, eindex;
	struct pmap_inval_info info;

	if (pmap == NULL)
		return;

	if (pmap->pm_stats.resident_count == 0)
		return;

	pmap_inval_init(&info);

	/*
	 * special handling of removing one page.  a very
	 * common operation and easy to short circuit some
	 * code.
	 */
	if (((sva + PAGE_SIZE) == eva) &&
	    (((unsigned) pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
		pmap_remove_page(pmap, sva, &info);
		pmap_inval_done(&info);
		return;
	}

	/*
	 * Get a local virtual address for the mappings that are being
	 * worked with.
	 */
	sindex = i386_btop(sva);
	eindex = i386_btop(eva);

	for (; sindex < eindex; sindex = pdnxt) {
		unsigned pdirindex;

		/*
		 * Calculate index for next page table.
		 */
		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
		if (pmap->pm_stats.resident_count == 0)
			break;

		pdirindex = sindex / NPDEPG;
		if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
			pmap_inval_interlock(&info, pmap, -1);
			pmap->pm_pdir[pdirindex] = 0;
			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
			pmap->pm_cached = 0;
			pmap_inval_deinterlock(&info, pmap);
			continue;
		}

		/*
		 * Weed out invalid mappings. Note: we assume that the page
		 * directory table is always allocated, and in kernel virtual.
		 */
		if (ptpaddr == 0)
			continue;

		/*
		 * Limit our scan to either the end of the va represented
		 * by the current page table page, or to the end of the
		 * range being removed.
		 */
		if (pdnxt > eindex) {
			pdnxt = eindex;
		}

		/*
		 * NOTE: pmap_remove_pte() can block and wipe the temporary
		 *	 ptbase.
		 */
		for (; sindex != pdnxt; sindex++) {
			vm_offset_t va;

			ptbase = get_ptbase(pmap);
			if (ptbase[sindex] == 0)
				continue;
			va = i386_ptob(sindex);
			if (pmap_remove_pte(pmap, ptbase + sindex, va, &info))
				break;
		}
	}
	pmap_inval_done(&info);
}
/*
 * Removes this physical page from all physical maps in which it resides.
 * Reflects back modify bits to the pager.
 *
 * This routine may not be called from an interrupt.
 */
static void
pmap_remove_all(vm_page_t m)
{
	struct pmap_inval_info info;
	unsigned *pte, tpte;
	pv_entry_t pv;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return;

	pmap_inval_init(&info);
	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		KKASSERT(pv->pv_pmap->pm_stats.resident_count > 0);
		--pv->pv_pmap->pm_stats.resident_count;

		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
		pmap_inval_interlock(&info, pv->pv_pmap, pv->pv_va);
		tpte = loadandclear(pte);
		if (tpte & PG_W)
			pv->pv_pmap->pm_stats.wired_count--;
		pmap_inval_deinterlock(&info, pv->pv_pmap);
		if (tpte & PG_A)
			vm_page_flag_set(m, PG_REFERENCED);
		KKASSERT(PHYS_TO_VM_PAGE(tpte) == m);

		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if (tpte & PG_M) {
#if defined(PMAP_DIAGNOSTIC)
			if (pmap_nw_modified((pt_entry_t) tpte)) {
				kprintf("pmap_remove_all: modified page "
					"not writable: va: %p, pte: 0x%lx\n",
					(void *)pv->pv_va, (long)tpte);
			}
#endif
			if (pmap_track_modified(pv->pv_va))
				vm_page_dirty(m);
		}
#ifdef PMAP_DEBUG
		KKASSERT(pv->pv_m == m);
#endif
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
		++pv->pv_pmap->pm_generation;
		m->md.pv_list_count--;
		if (TAILQ_EMPTY(&m->md.pv_list))
			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem, &info);
		free_pv_entry(pv);
	}
	KKASSERT((m->flags & (PG_MAPPED|PG_WRITEABLE)) == 0);
	pmap_inval_done(&info);
}
/*
 * Set the physical protection on the specified range of this map
 * as requested.
 *
 * This function may not be called from an interrupt if the map is
 * not the kernel_pmap.
 */
void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	unsigned *ptbase;
	vm_offset_t pdnxt, ptpaddr;
	vm_pindex_t sindex, eindex;
	pmap_inval_info info;

	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}

	if (prot & VM_PROT_WRITE)
		return;

	pmap_inval_init(&info);

	ptbase = get_ptbase(pmap);

	sindex = i386_btop(sva);
	eindex = i386_btop(eva);

	for (; sindex < eindex; sindex = pdnxt) {
		unsigned pdirindex;

		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));

		pdirindex = sindex / NPDEPG;
		if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
			pmap_inval_interlock(&info, pmap, -1);
			pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
			pmap_inval_deinterlock(&info, pmap);
			continue;
		}

		/*
		 * Weed out invalid mappings. Note: we assume that the page
		 * directory table is always allocated, and in kernel virtual.
		 */
		if (ptpaddr == 0)
			continue;

		if (pdnxt > eindex) {
			pdnxt = eindex;
		}

		for (; sindex != pdnxt; sindex++) {
			unsigned pbits;
			unsigned cbits;
			vm_page_t m;

			/*
			 * XXX non-optimal.  Note also that there can be
			 * no pmap_inval_flush() calls until after we modify
			 * ptbase[sindex] (or otherwise we have to do another
			 * pmap_inval_interlock() call).
			 */
			pmap_inval_interlock(&info, pmap, i386_ptob(sindex));
again:
			pbits = ptbase[sindex];
			cbits = pbits;

			if (pbits & PG_MANAGED) {
				m = NULL;
				if (pbits & PG_A) {
					m = PHYS_TO_VM_PAGE(pbits);
					vm_page_flag_set(m, PG_REFERENCED);
					cbits &= ~PG_A;
				}
				if (pbits & PG_M) {
					if (pmap_track_modified(i386_ptob(sindex))) {
						if (m == NULL)
							m = PHYS_TO_VM_PAGE(pbits);
						vm_page_dirty(m);
						cbits &= ~PG_M;
					}
				}
			}
			cbits &= ~PG_RW;
			if (pbits != cbits &&
			    !atomic_cmpset_int(ptbase + sindex, pbits, cbits)) {
				goto again;
			}
			pmap_inval_deinterlock(&info, pmap);
		}
	}
	pmap_inval_done(&info);
}
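/*
 * Note on the update loop above (illustrative only, not from the original
 * source): clearing PG_RW while harvesting PG_A/PG_M must not lose a
 * concurrent hardware update of those bits, hence the compare-and-set
 * idiom:
 *
 *	pbits = ptbase[sindex];
 *	cbits = pbits with PG_RW (and harvested PG_A/PG_M) cleared;
 *	if (!atomic_cmpset_int(ptbase + sindex, pbits, cbits))
 *		goto again;
 *
 * If another cpu sets PG_M between the read and the cmpset, the cmpset
 * fails and the bits are re-sampled.
 */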
/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
	vm_paddr_t pa;
	unsigned *pte;
	vm_offset_t opa;
	vm_offset_t origpte, newpte;
	vm_page_t mpte;
	pmap_inval_info info;

	if (pmap == NULL)
		return;

	va &= PG_FRAME;
#ifdef PMAP_DIAGNOSTIC
	if (va >= KvaEnd)
		panic("pmap_enter: toobig");
	if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS)) {
		panic("pmap_enter: invalid to pmap_enter page "
		      "table pages (va: %p)", (void *)va);
	}
#endif
	if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
		kprintf("Warning: pmap_enter called on UVA with kernel_pmap\n");
	}
	if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
		kprintf("Warning: pmap_enter called on KVA without kernel_pmap\n");
	}

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
	if (va < UPT_MIN_ADDRESS)
		mpte = pmap_allocpte(pmap, va);
	else
		mpte = NULL;

	pmap_inval_init(&info);
	pte = pmap_pte(pmap, va);

	/*
	 * Page Directory table entry not valid, we need a new PT page
	 */
	if (pte == NULL) {
		panic("pmap_enter: invalid page directory pdir=0x%lx, va=%p\n",
		      (long)pmap->pm_pdir[PTDPTDI], (void *)va);
	}

	pa = VM_PAGE_TO_PHYS(m) & PG_FRAME;
	origpte = *(vm_offset_t *)pte;
	opa = origpte & PG_FRAME;

	if (origpte & PG_PS)
		panic("pmap_enter: attempted pmap_enter on 4MB page");

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (origpte && (opa == pa)) {
		/*
		 * Wiring change, just update stats. We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them. Hence, if a user page is wired,
		 * the PT page will be also.
		 */
		if (wired && ((origpte & PG_W) == 0))
			pmap->pm_stats.wired_count++;
		else if (!wired && (origpte & PG_W))
			pmap->pm_stats.wired_count--;

#if defined(PMAP_DIAGNOSTIC)
		if (pmap_nw_modified((pt_entry_t) origpte)) {
			kprintf("pmap_enter: modified page not "
				"writable: va: %p, pte: 0x%lx\n",
				(void *)va, (long )origpte);
		}
#endif

		/*
		 * Remove the extra pte reference.  Note that we cannot
		 * optimize the RO->RW case because we have adjusted the
		 * wiring count above and may need to adjust the wiring
		 * bits below.
		 */
		if (mpte)
			mpte->hold_count--;

		/*
		 * We might be turning off write access to the page,
		 * so we go ahead and sense modify status.
		 */
		if (origpte & PG_MANAGED) {
			if ((origpte & PG_M) && pmap_track_modified(va)) {
				vm_page_t om;
				om = PHYS_TO_VM_PAGE(opa);
				vm_page_dirty(om);
			}
			pa |= PG_MANAGED;
			KKASSERT(m->flags & PG_MAPPED);
		}
		goto validate;
	}

	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 *
	 * Since we have a ref on the page directory page pmap_pte()
	 * will always return non-NULL.
	 *
	 * NOTE: pmap_remove_pte() can block and cause the temporary ptbase
	 *	 to get wiped.  reload the ptbase.  I'm not sure if it is
	 *	 also possible to race another pmap_enter() but check for
	 *	 it anyway.
	 */
	while (opa) {
		int err;

		KKASSERT((origpte & PG_FRAME) ==
			 (*(vm_offset_t *)pte & PG_FRAME));
		err = pmap_remove_pte(pmap, pte, va, &info);
		if (err)
			panic("pmap_enter: pte vanished, va: %p", (void *)va);
		pte = pmap_pte(pmap, va);
		origpte = *(vm_offset_t *)pte;
		opa = origpte & PG_FRAME;
		if (opa) {
			kprintf("pmap_enter: Warning, raced pmap %p va %p\n",
				pmap, (void *)va);
		}
	}

	/*
	 * Enter on the PV list if part of our managed memory. Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
	if (pmap_initialized &&
	    (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
		pmap_insert_entry(pmap, va, mpte, m);
		ptbase_assert(pmap);
		pa |= PG_MANAGED;
		vm_page_flag_set(m, PG_MAPPED);
	}

	/*
	 * Increment counters
	 */
	++pmap->pm_stats.resident_count;
	if (wired)
		pmap->pm_stats.wired_count++;
	KKASSERT(*pte == 0);

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	ptbase_assert(pmap);
	newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V);

	if (wired)
		newpte |= PG_W;
	if (va < UPT_MIN_ADDRESS)
		newpte |= PG_U;
	if (pmap == &kernel_pmap)
		newpte |= pgeflag;

	/*
	 * if the mapping or permission bits are different, we need
	 * to update the pte.
	 */
	if ((origpte & ~(PG_M|PG_A)) != newpte) {
		pmap_inval_interlock(&info, pmap, va);
		ptbase_assert(pmap);
		KKASSERT(*pte == 0 ||
			 (*pte & PG_FRAME) == (newpte & PG_FRAME));
		*pte = newpte | PG_A;
		pmap_inval_deinterlock(&info, pmap);
		if (newpte & PG_RW)
			vm_page_flag_set(m, PG_WRITEABLE);
	}
	KKASSERT((newpte & PG_MANAGED) == 0 || (m->flags & PG_MAPPED));
	pmap_inval_done(&info);
}
/*
 * This code works like pmap_enter() but assumes VM_PROT_READ and not-wired.
 * This code also assumes that the pmap has no pre-existing entry for this
 * VA.
 *
 * This code currently may only be used on user pmaps, not kernel_pmap.
 */
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	unsigned *pte;
	vm_paddr_t pa;
	vm_page_t mpte;
	unsigned ptepindex;
	vm_offset_t ptepa;
	pmap_inval_info info;

	pmap_inval_init(&info);

	if (va < UPT_MAX_ADDRESS && pmap == &kernel_pmap) {
		kprintf("Warning: pmap_enter_quick called on UVA with kernel_pmap\n");
	}
	if (va >= UPT_MAX_ADDRESS && pmap != &kernel_pmap) {
		kprintf("Warning: pmap_enter_quick called on KVA without kernel_pmap\n");
	}

	KKASSERT(va < UPT_MIN_ADDRESS);	/* assert used on user pmaps only */

	/*
	 * Calculate the page table page (mpte), allocating it if necessary.
	 *
	 * A held page table page (mpte), or NULL, is passed onto the
	 * section following.
	 */
	if (va < UPT_MIN_ADDRESS) {
		/*
		 * Calculate pagetable page index
		 */
		ptepindex = va >> PDRSHIFT;

		do {
			/*
			 * Get the page directory entry
			 */
			ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];

			/*
			 * If the page table page is mapped, we just increment
			 * the hold count, and activate it.
			 */
			if (ptepa) {
				if (ptepa & PG_PS)
					panic("pmap_enter_quick: unexpected mapping into 4MB page");
				if (pmap->pm_ptphint &&
				    (pmap->pm_ptphint->pindex == ptepindex)) {
					mpte = pmap->pm_ptphint;
				} else {
					mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
					pmap->pm_ptphint = mpte;
				}
				if (mpte)
					mpte->hold_count++;
			} else {
				mpte = _pmap_allocpte(pmap, ptepindex);
			}
		} while (mpte == NULL);
	} else {
		mpte = NULL;
		/* this code path is not yet used */
	}

	/*
	 * With a valid (and held) page directory page, we can just use
	 * vtopte() to get to the pte.  If the pte is already present
	 * we do not disturb it.
	 */
	pte = (unsigned *)vtopte(va);
	if (*pte & PG_V) {
		if (mpte)
			pmap_unwire_pte_hold(pmap, mpte, &info);
		pa = VM_PAGE_TO_PHYS(m);
		KKASSERT(((*pte ^ pa) & PG_FRAME) == 0);
		pmap_inval_done(&info);
		return;
	}

	/*
	 * Enter on the PV list if part of our managed memory
	 */
	if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
		pmap_insert_entry(pmap, va, mpte, m);
		vm_page_flag_set(m, PG_MAPPED);
	}

	/*
	 * Increment counters
	 */
	++pmap->pm_stats.resident_count;

	pa = VM_PAGE_TO_PHYS(m);

	/*
	 * Now validate mapping with RO protection
	 */
	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
		*pte = pa | PG_V | PG_U;
	else
		*pte = pa | PG_V | PG_U | PG_MANAGED;
/*	pmap_inval_add(&info, pmap, va); shouldn't be needed inval->valid */
	pmap_inval_done(&info);
}
/*
 * Make a temporary mapping for a physical address.  This is only intended
 * to be used for panic dumps.
 */
void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{
	pmap_kenter((vm_offset_t)crashdumpmap + (i * PAGE_SIZE), pa);
	return ((void *)crashdumpmap);
}
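/*
 * Usage sketch (illustrative only, not from the original source): dump
 * code walks physical memory a page at a time, e.g.
 *
 *	va = pmap_kenter_temporary(pa, 0);
 *	... write PAGE_SIZE bytes from va to the dump device ...
 *
 * The crashdumpmap KVA is recycled on each call, which is acceptable
 * because panic dumps are strictly single-threaded.
 */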
#define MAX_INIT_PT (96)

/*
 * This routine preloads the ptes for a given object into the specified pmap.
 * This eliminates the blast of soft faults on process startup and
 * immediately after an mmap.
 */
static int pmap_object_init_pt_callback(vm_page_t p, void *data);

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_prot_t prot,
		    vm_object_t object, vm_pindex_t pindex,
		    vm_size_t size, int limit)
{
	struct rb_vm_page_scan_info info;
	struct lwp *lp;
	int psize;

	/*
	 * We can't preinit if read access isn't set or there is no pmap
	 * or object.
	 */
	if ((prot & VM_PROT_READ) == 0 || pmap == NULL || object == NULL)
		return;

	/*
	 * We can't preinit if the pmap is not the current pmap
	 */
	lp = curthread->td_lwp;
	if (lp == NULL || pmap != vmspace_pmap(lp->lwp_vmspace))
		return;

	psize = i386_btop(size);

	if ((object->type != OBJT_VNODE) ||
		((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
			(object->resident_page_count > MAX_INIT_PT))) {
		return;
	}

	if (psize + pindex > object->size) {
		if (object->size < pindex)
			return;
		psize = object->size - pindex;
	}

	if (psize == 0)
		return;

	/*
	 * Use a red-black scan to traverse the requested range and load
	 * any valid pages found into the pmap.
	 *
	 * We cannot safely scan the object's memq unless we are in a
	 * critical section since interrupts can remove pages from objects.
	 */
	info.start_pindex = pindex;
	info.end_pindex = pindex + psize - 1;
	info.limit = limit;
	info.mpte = NULL;
	info.addr = addr;
	info.pmap = pmap;

	crit_enter();
	vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
				pmap_object_init_pt_callback, &info);
	crit_exit();
}
static int
pmap_object_init_pt_callback(vm_page_t p, void *data)
{
	struct rb_vm_page_scan_info *info = data;
	vm_pindex_t rel_index;

	/*
	 * don't allow an madvise to blow away our really
	 * free pages allocating pv entries.
	 */
	if ((info->limit & MAP_PREFAULT_MADVISE) &&
	    vmstats.v_free_count < vmstats.v_free_reserved) {
		return(-1);
	}
	if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
	    (p->busy == 0) && (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
		if ((p->queue - p->pc) == PQ_CACHE)
			vm_page_deactivate(p);
		rel_index = p->pindex - info->start_pindex;
		pmap_enter_quick(info->pmap,
				 info->addr + i386_ptob(rel_index), p);
	}
	return(0);
}

/*
 * Return TRUE if the pmap is in shape to trivially
 * pre-fault the specified address.
 *
 * Returns FALSE if it would be non-trivial or if a
 * pte is already loaded into the slot.
 */
int
pmap_prefault_ok(pmap_t pmap, vm_offset_t addr)
{
	unsigned *pte;

	if ((*pmap_pde(pmap, addr)) == 0)
		return(0);
	pte = (unsigned *) vtopte(addr);
	if (*pte)
		return(0);
	return(1);
}
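
/*
 * Illustrative sketch (not part of the original source): a prefault loop
 * can use this check to skip addresses whose page table page is absent
 * or whose pte slot is already populated:
 *
 *	if (pmap_prefault_ok(pmap, addr) == 0)
 *		continue;
 */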

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	unsigned *pte;

	if (pmap == NULL)
		return;

	pte = pmap_pte(pmap, va);

	if (wired && !pmap_pte_w(pte))
		pmap->pm_stats.wired_count++;
	else if (!wired && pmap_pte_w(pte))
		pmap->pm_stats.wired_count--;

	/*
	 * Wiring is not a hardware characteristic so there is no need to
	 * invalidate TLB.  However, in an SMP environment we must use
	 * a locked bus cycle to update the pte (if we are not using
	 * the pmap_inval_*() API that is)... it's ok to do this for simple
	 * wiring changes.
	 */
#ifdef SMP
	if (wired)
		atomic_set_int(pte, PG_W);
	else
		atomic_clear_int(pte, PG_W);
#else
	if (wired)
		atomic_set_int_nonlocked(pte, PG_W);
	else
		atomic_clear_int_nonlocked(pte, PG_W);
#endif
}

/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
	  vm_size_t len, vm_offset_t src_addr)
{
#if 0
	pmap_inval_info info;
	vm_offset_t addr;
	vm_offset_t end_addr = src_addr + len;
	vm_offset_t pdnxt;
	unsigned src_frame, dst_frame;
	vm_page_t m;
#endif

	if (dst_addr != src_addr)
		return;
	/*
	 * XXX BUGGY.  Among other things srcmpte is assumed to remain
	 * valid through blocking calls, and that's just not going to
	 * be the case.
	 *
	 * FIXME!
	 */
	return;
#if 0
	src_frame = ((unsigned) src_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
	if (src_frame != (((unsigned) PTDpde) & PG_FRAME)) {
		return;
	}

	dst_frame = ((unsigned) dst_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
	if ((*gd->gd_GDMAP1 & PG_FRAME) != dst_frame) {
		*gd->gd_GDMAP1 = dst_frame | PG_RW | PG_V;
		cpu_invltlb();
	}

	pmap_inval_init(&info);
	pmap_inval_add(&info, dst_pmap, -1);
	pmap_inval_add(&info, src_pmap, -1);

	/*
	 * critical section protection is required to maintain the page/object
	 * association, interrupts can free pages and remove them from
	 * their objects.
	 */
	crit_enter();
	for (addr = src_addr; addr < end_addr; addr = pdnxt) {
		unsigned *src_pte, *dst_pte;
		vm_page_t dstmpte, srcmpte;
		vm_offset_t srcptepaddr;
		unsigned ptepindex;

		if (addr >= UPT_MIN_ADDRESS)
			panic("pmap_copy: invalid to pmap_copy page tables\n");

		/*
		 * Don't let optional prefaulting of pages make us go
		 * way below the low water mark of free pages or way
		 * above high water mark of used pv entries.
		 */
		if (vmstats.v_free_count < vmstats.v_free_reserved ||
		    pv_entry_count > pv_entry_high_water)
			break;

		pdnxt = ((addr + PAGE_SIZE*NPTEPG) & ~(PAGE_SIZE*NPTEPG - 1));
		ptepindex = addr >> PDRSHIFT;

		srcptepaddr = (vm_offset_t) src_pmap->pm_pdir[ptepindex];
		if (srcptepaddr == 0)
			continue;

		if (srcptepaddr & PG_PS) {
			if (dst_pmap->pm_pdir[ptepindex] == 0) {
				dst_pmap->pm_pdir[ptepindex] = (pd_entry_t) srcptepaddr;
				dst_pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
			}
			continue;
		}

		srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
		if ((srcmpte == NULL) || (srcmpte->hold_count == 0) ||
		    (srcmpte->flags & PG_BUSY)) {
			continue;
		}

		if (pdnxt > end_addr)
			pdnxt = end_addr;

		src_pte = (unsigned *) vtopte(addr);
		dst_pte = (unsigned *) avtopte(addr);
		while (addr < pdnxt) {
			unsigned ptetemp;

			ptetemp = *src_pte;
			/*
			 * we only virtual copy managed pages
			 */
			if ((ptetemp & PG_MANAGED) != 0) {
				/*
				 * We have to check after allocpte for the
				 * pte still being around...  allocpte can
				 * block.
				 *
				 * pmap_allocpte() can block.  If we lose
				 * our page directory mappings we stop.
				 */
				dstmpte = pmap_allocpte(dst_pmap, addr);

				if (src_frame != (((unsigned) PTDpde) & PG_FRAME) ||
				    XXX dst_frame != (((unsigned) xxx) & PG_FRAME)
				) {
					kprintf("WARNING: pmap_copy: detected and corrected race\n");
					pmap_unwire_pte_hold(dst_pmap, dstmpte, &info);
					goto failed;
				} else if ((*dst_pte == 0) &&
					   (ptetemp = *src_pte) != 0 &&
					   (ptetemp & PG_MANAGED)) {
					/*
					 * Clear the modified and
					 * accessed (referenced) bits
					 * during the copy.
					 */
					m = PHYS_TO_VM_PAGE(ptetemp);
					*dst_pte = ptetemp & ~(PG_M | PG_A);
					++dst_pmap->pm_stats.resident_count;
					pmap_insert_entry(dst_pmap, addr,
							  dstmpte, m);
					KKASSERT(m->flags & PG_MAPPED);
				} else {
					kprintf("WARNING: pmap_copy: dst_pte race detected and corrected\n");
					pmap_unwire_pte_hold(dst_pmap, dstmpte, &info);
					goto failed;
				}
				if (dstmpte->hold_count >= srcmpte->hold_count)
					break;
			}
			addr += PAGE_SIZE;
			src_pte++;
			dst_pte++;
		}
	}
failed:
	crit_exit();
	pmap_inval_done(&info);
#endif
}

/*
 * Zero the specified PA by mapping the page into KVM and clearing its
 * contents.
 *
 * This function may be called from an interrupt and no locking is
 * required.
 */
void
pmap_zero_page(vm_paddr_t phys)
{
	struct mdglobaldata *gd = mdcpu;

	crit_enter();
	if (*(int *)gd->gd_CMAP3)
		panic("pmap_zero_page: CMAP3 busy");
	*(int *)gd->gd_CMAP3 =
		PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
	cpu_invlpg(gd->gd_CADDR3);

#if defined(I686_CPU)
	if (cpu_class == CPUCLASS_686)
		i686_pagezero(gd->gd_CADDR3);
	else
#endif
		bzero(gd->gd_CADDR3, PAGE_SIZE);
	*(int *)gd->gd_CMAP3 = 0;
	crit_exit();
}
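
/*
 * Illustrative sketch (not part of the original source): callers pass a
 * physical address, typically taken from a vm_page_t:
 *
 *	pmap_zero_page(VM_PAGE_TO_PHYS(m));
 *
 * The per-cpu CMAP3/CADDR3 slot is what makes this safe from interrupt
 * context without a global lock.
 */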

/*
 * pmap_page_assertzero:
 *
 *	Assert that a page is empty, panic if it isn't.
 */
void
pmap_page_assertzero(vm_paddr_t phys)
{
	struct mdglobaldata *gd = mdcpu;
	int i;

	crit_enter();
	if (*(int *)gd->gd_CMAP3)
		panic("pmap_page_assertzero: CMAP3 busy");
	*(int *)gd->gd_CMAP3 =
		PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
	cpu_invlpg(gd->gd_CADDR3);
	for (i = 0; i < PAGE_SIZE; i += 4) {
		if (*(int *)((char *)gd->gd_CADDR3 + i) != 0) {
			panic("pmap_page_assertzero() @ %p not zero!\n",
			      (void *)gd->gd_CADDR3);
		}
	}
	*(int *)gd->gd_CMAP3 = 0;
	crit_exit();
}

/*
 * Zero part of a physical page by mapping it into memory and clearing
 * its contents with bzero.
 *
 * off and size may not cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_paddr_t phys, int off, int size)
{
	struct mdglobaldata *gd = mdcpu;

	crit_enter();
	if (*(int *) gd->gd_CMAP3)
		panic("pmap_zero_page_area: CMAP3 busy");
	*(int *) gd->gd_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
	cpu_invlpg(gd->gd_CADDR3);

#if defined(I686_CPU)
	if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
		i686_pagezero(gd->gd_CADDR3);
	else
#endif
		bzero((char *)gd->gd_CADDR3 + off, size);
	*(int *) gd->gd_CMAP3 = 0;
	crit_exit();
}

/*
 * Copy the physical page from the source PA to the target PA.
 * This function may be called from an interrupt.  No locking
 * is required.
 */
void
pmap_copy_page(vm_paddr_t src, vm_paddr_t dst)
{
	struct mdglobaldata *gd = mdcpu;

	crit_enter();
	if (*(int *) gd->gd_CMAP1)
		panic("pmap_copy_page: CMAP1 busy");
	if (*(int *) gd->gd_CMAP2)
		panic("pmap_copy_page: CMAP2 busy");

	*(int *) gd->gd_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
	*(int *) gd->gd_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;

	cpu_invlpg(gd->gd_CADDR1);
	cpu_invlpg(gd->gd_CADDR2);

	bcopy(gd->gd_CADDR1, gd->gd_CADDR2, PAGE_SIZE);

	*(int *) gd->gd_CMAP1 = 0;
	*(int *) gd->gd_CMAP2 = 0;
	crit_exit();
}
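
/*
 * Illustrative sketch (not part of the original source): a copy-on-write
 * style fault resolves by copying the original page into a freshly
 * allocated one:
 *
 *	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dst_m));
 */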

/*
 * pmap_copy_page_frag:
 *
 *	Copy the physical page from the source PA to the target PA.
 *	This function may be called from an interrupt.  No locking
 *	is required.
 */
void
pmap_copy_page_frag(vm_paddr_t src, vm_paddr_t dst, size_t bytes)
{
	struct mdglobaldata *gd = mdcpu;

	crit_enter();
	if (*(int *) gd->gd_CMAP1)
		panic("pmap_copy_page_frag: CMAP1 busy");
	if (*(int *) gd->gd_CMAP2)
		panic("pmap_copy_page_frag: CMAP2 busy");

	*(int *) gd->gd_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
	*(int *) gd->gd_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;

	cpu_invlpg(gd->gd_CADDR1);
	cpu_invlpg(gd->gd_CADDR2);

	bcopy((char *)gd->gd_CADDR1 + (src & PAGE_MASK),
	      (char *)gd->gd_CADDR2 + (dst & PAGE_MASK),
	      bytes);

	*(int *) gd->gd_CMAP1 = 0;
	*(int *) gd->gd_CMAP2 = 0;
	crit_exit();
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops = 0;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	crit_enter();

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		if (pv->pv_pmap == pmap) {
			crit_exit();
			return TRUE;
		}
		loops++;
		if (loops >= 16)
			break;
	}
	crit_exit();
	return (FALSE);
}

/*
 * Remove all pages from specified address space
 * this aids process exit speeds.  Also, this code
 * is special cased for current process only, but
 * can have the more generic (and slightly slower)
 * mode enabled.  This is much faster than pmap_remove
 * in the case of running down an entire address space.
 */
void
pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	struct lwp *lp;
	unsigned *pte, tpte;
	pv_entry_t pv, npv;
	vm_page_t m;
	pmap_inval_info info;
	int iscurrentpmap;
	int32_t save_generation;

	lp = curthread->td_lwp;
	if (lp && pmap == vmspace_pmap(lp->lwp_vmspace))
		iscurrentpmap = 1;
	else
		iscurrentpmap = 0;

	pmap_inval_init(&info);
	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
		if (pv->pv_va >= eva || pv->pv_va < sva) {
			npv = TAILQ_NEXT(pv, pv_plist);
			continue;
		}

		KKASSERT(pmap == pv->pv_pmap);

		if (iscurrentpmap)
			pte = (unsigned *)vtopte(pv->pv_va);
		else
			pte = pmap_pte_quick(pmap, pv->pv_va);
		pmap_inval_interlock(&info, pmap, pv->pv_va);

		/*
		 * We cannot remove wired pages from a process' mapping
		 * at this time
		 */
		if (*pte & PG_W) {
			pmap_inval_deinterlock(&info, pmap);
			npv = TAILQ_NEXT(pv, pv_plist);
			continue;
		}
		tpte = loadandclear(pte);
		pmap_inval_deinterlock(&info, pmap);

		m = PHYS_TO_VM_PAGE(tpte);
		test_m_maps_pv(m, pv);

		KASSERT(m < &vm_page_array[vm_page_array_size],
			("pmap_remove_pages: bad tpte %x", tpte));

		KKASSERT(pmap->pm_stats.resident_count > 0);
		--pmap->pm_stats.resident_count;

		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if (tpte & PG_M)
			vm_page_dirty(m);

		npv = TAILQ_NEXT(pv, pv_plist);

		KKASSERT(pv->pv_m == m);
		KKASSERT(pv->pv_pmap == pmap);

		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
		save_generation = ++pmap->pm_generation;

		m->md.pv_list_count--;
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		if (TAILQ_EMPTY(&m->md.pv_list))
			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);

		pmap_unuse_pt(pmap, pv->pv_va, pv->pv_ptem, &info);
		free_pv_entry(pv);

		/*
		 * Restart the scan if we blocked during the unuse or free
		 * calls and other removals were made.
		 */
		if (save_generation != pmap->pm_generation) {
			kprintf("Warning: pmap_remove_pages race-A avoided\n");
			npv = TAILQ_FIRST(&pmap->pm_pvlist);
		}
	}
	pmap_inval_done(&info);
}

/*
 * pmap_testbit tests bits in pte's
 * note that the testbit/clearbit routines are inline,
 * and a lot of things compile-time evaluate.
 */
static boolean_t
pmap_testbit(vm_page_t m, int bit)
{
	pv_entry_t pv;
	unsigned *pte;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	if (TAILQ_FIRST(&m->md.pv_list) == NULL)
		return FALSE;

	crit_enter();

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		/*
		 * if the bit being tested is the modified bit, then
		 * mark clean_map and ptes as never
		 * modified.
		 */
		if (bit & (PG_A|PG_M)) {
			if (!pmap_track_modified(pv->pv_va))
				continue;
		}

#if defined(PMAP_DIAGNOSTIC)
		if (!pv->pv_pmap) {
			kprintf("Null pmap (tb) at va: %p\n",
				(void *)pv->pv_va);
			continue;
		}
#endif
		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
		if (*pte & bit) {
			crit_exit();
			return TRUE;
		}
	}
	crit_exit();
	return (FALSE);
}

/*
 * This routine is used to modify bits in ptes.
 */
static __inline
void
pmap_clearbit(vm_page_t m, int bit)
{
	struct pmap_inval_info info;
	pv_entry_t pv;
	unsigned *pte;
	unsigned pbits;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return;

	pmap_inval_init(&info);

	/*
	 * Loop over all current mappings, setting/clearing as appropriate.
	 * If setting RO do we need to clear the VAC?
	 */
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		/*
		 * don't write protect pager mappings
		 */
		if (bit == PG_RW) {
			if (!pmap_track_modified(pv->pv_va))
				continue;
		}

#if defined(PMAP_DIAGNOSTIC)
		if (!pv->pv_pmap) {
			kprintf("Null pmap (cb) at va: %p\n",
				(void *)pv->pv_va);
			continue;
		}
#endif

		/*
		 * Careful here.  We can use a locked bus instruction to
		 * clear PG_A or PG_M safely but we need to synchronize
		 * with the target cpus when we mess with PG_RW.
		 *
		 * We do not have to force synchronization when clearing
		 * PG_M even for PTEs generated via virtual memory maps,
		 * because the virtual kernel will invalidate the pmap
		 * entry when/if it needs to resynchronize the Modify bit.
		 */
		if (bit & PG_RW)
			pmap_inval_interlock(&info, pv->pv_pmap, pv->pv_va);
		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
again:
		pbits = *pte;
		if (pbits & bit) {
			if (bit == PG_RW) {
				if (pbits & PG_M) {
					vm_page_dirty(m);
					atomic_clear_int(pte, PG_M|PG_RW);
				} else {
					/*
					 * The cpu may be trying to set PG_M
					 * simultaneously with our clearing
					 * of PG_RW.
					 */
					if (!atomic_cmpset_int(pte, pbits,
							       pbits & ~PG_RW))
						goto again;
				}
			} else if (bit == PG_M) {
				/*
				 * We could also clear PG_RW here to force
				 * a fault on write to redetect PG_M for
				 * virtual kernels, but it isn't necessary
				 * since virtual kernels invalidate the pte
				 * when they clear the VPTE_M bit in their
				 * virtual page tables.
				 */
				atomic_clear_int(pte, PG_M);
			} else {
				atomic_clear_int(pte, bit);
			}
		}
		if (bit & PG_RW)
			pmap_inval_deinterlock(&info, pv->pv_pmap);
	}
	pmap_inval_done(&info);
}

/*
 * pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
			pmap_clearbit(m, PG_RW);
			vm_page_flag_clear(m, PG_WRITEABLE);
		} else {
			pmap_remove_all(m);
		}
	}
}

vm_paddr_t
pmap_phys_address(vm_pindex_t ppn)
{
	return (i386_ptob(ppn));
}
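
/*
 * Illustrative sketch (not part of the original source): the two
 * interesting cases for pmap_page_protect() above are downgrading a
 * page to read-only versus removing it entirely:
 *
 *	pmap_page_protect(m, VM_PROT_READ);	(clears PG_RW everywhere)
 *	pmap_page_protect(m, VM_PROT_NONE);	(removes all mappings)
 */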

/*
 * pmap_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
pmap_ts_referenced(vm_page_t m)
{
	pv_entry_t pv, pvf, pvn;
	unsigned *pte;
	int rtval = 0;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return (rtval);

	crit_enter();

	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		pvf = pv;
		do {
			pvn = TAILQ_NEXT(pv, pv_list);

			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);

			if (!pmap_track_modified(pv->pv_va))
				continue;

			pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);

			if (pte && (*pte & PG_A)) {
#ifdef SMP
				atomic_clear_int(pte, PG_A);
#else
				atomic_clear_int_nonlocked(pte, PG_A);
#endif
				rtval++;
				if (rtval > 4)
					break;
			}
		} while ((pv = pvn) != NULL && pv != pvf);
	}
	crit_exit();

	return (rtval);
}
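
/*
 * Illustrative sketch (not part of the original source): the pageout
 * daemon typically feeds the returned count into its page aging, along
 * the lines of:
 *
 *	actcount = pmap_ts_referenced(m);
 *	if (actcount)
 *		m->act_count += ACT_ADVANCE + actcount;
 */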

/*
 * Return whether or not the specified physical page was modified
 * in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{
	return pmap_testbit(m, PG_M);
}

/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{
	pmap_clearbit(m, PG_M);
}

/*
 * pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{
	pmap_clearbit(m, PG_A);
}

/*
 * Miscellaneous support routines follow
 */

static void
i386_protection_init(void)
{
	int *kp, prot;

	kp = protection_codes;
	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			/*
			 * Read access is also 0. There isn't any execute bit,
			 * so just make it readable.
			 */
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = 0;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = PG_RW;
			break;
		}
	}
}
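
/*
 * Illustrative sketch (not part of the original source): the table built
 * above is indexed by the 3-bit VM_PROT_{READ,WRITE,EXECUTE} mask, so
 * pte-construction code can fetch the hardware bits with:
 *
 *	newpte |= protection_codes[prot & 7];
 */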

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 *
 * NOTE: we can't use pgeflag unless we invalidate the pages one at
 * a time.
 */
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, offset;
	unsigned *pte;

	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	va = kmem_alloc_nofault(&kernel_map, size);
	if (va == 0)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	pa = pa & PG_FRAME;
	for (tmpva = va; size > 0;) {
		pte = (unsigned *)vtopte(tmpva);
		*pte = pa | PG_RW | PG_V; /* | pgeflag; */
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		pa += PAGE_SIZE;
	}
	cpu_invltlb();
	smp_invltlb();

	return ((void *)(va + offset));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = va & PG_FRAME;
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);
	pmap_qremove(va, size >> PAGE_SHIFT);
	kmem_free(&kernel_map, base, size);
}
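
/*
 * Illustrative sketch (not part of the original source): a driver maps a
 * device register window, uses it, then releases the KVA:
 *
 *	void *regs = pmap_mapdev(pa, size);
 *	(access the registers through "regs")
 *	pmap_unmapdev((vm_offset_t)regs, size);
 */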

/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
	unsigned *ptep, pte;
	vm_page_t m;
	int val = 0;

	ptep = pmap_pte(pmap, addr);
	if (ptep == NULL)
		return 0;

	if ((pte = *ptep) != 0) {
		vm_offset_t pa;

		val = MINCORE_INCORE;
		if ((pte & PG_MANAGED) == 0)
			return val;

		pa = pte & PG_FRAME;

		m = PHYS_TO_VM_PAGE(pa);

		/*
		 * Modified by us
		 */
		if (pte & PG_M)
			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
		/*
		 * Modified by someone
		 */
		else if (m->dirty || pmap_is_modified(m))
			val |= MINCORE_MODIFIED_OTHER;
		/*
		 * Referenced by us
		 */
		if (pte & PG_A)
			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
		/*
		 * Referenced by someone
		 */
		else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
			val |= MINCORE_REFERENCED_OTHER;
			vm_page_flag_set(m, PG_REFERENCED);
		}
	}
	return val;
}
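
/*
 * Illustrative sketch (not part of the original source): the mincore()
 * system call walks the user range and stores these bits into its
 * per-page result vector, roughly:
 *
 *	mincoreinfo = pmap_mincore(pmap, addr);
 *	vec[(addr - first_addr) >> PAGE_SHIFT] = mincoreinfo;
 *
 * "vec" and "first_addr" are illustrative names only.
 */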

/*
 * Replace p->p_vmspace with a new one.  If adjrefs is non-zero the new
 * vmspace will be ref'd and the old one will be deref'd.
 *
 * The vmspace for all lwps associated with the process will be adjusted
 * and cr3 will be reloaded if any lwp is the current lwp.
 */
void
pmap_replacevm(struct proc *p, struct vmspace *newvm, int adjrefs)
{
	struct vmspace *oldvm;
	struct lwp *lp;

	crit_enter();
	oldvm = p->p_vmspace;
	if (oldvm != newvm) {
		p->p_vmspace = newvm;
		KKASSERT(p->p_nthreads == 1);
		lp = RB_ROOT(&p->p_lwp_tree);
		pmap_setlwpvm(lp, newvm);
		if (adjrefs) {
			sysref_get(&newvm->vm_sysref);
			sysref_put(&oldvm->vm_sysref);
		}
	}
	crit_exit();
}

/*
 * Set the vmspace for a LWP.  The vmspace is almost universally set the
 * same as the process vmspace, but virtual kernels need to swap out contexts
 * on a per-lwp basis.
 */
void
pmap_setlwpvm(struct lwp *lp, struct vmspace *newvm)
{
	struct vmspace *oldvm;
	struct pmap *pmap;

	crit_enter();
	oldvm = lp->lwp_vmspace;

	if (oldvm != newvm) {
		lp->lwp_vmspace = newvm;
		if (curthread->td_lwp == lp) {
			pmap = vmspace_pmap(newvm);
#if defined(SMP)
			atomic_set_int(&pmap->pm_active, mycpu->gd_cpumask);
			if (pmap->pm_active & CPUMASK_LOCK)
				pmap_interlock_wait(newvm);
#else
			pmap->pm_active |= 1;
#endif
#if defined(SWTCH_OPTIM_STATS)
			tlb_flush_count++;
#endif
			curthread->td_pcb->pcb_cr3 = vtophys(pmap->pm_pdir);
			load_cr3(curthread->td_pcb->pcb_cr3);
			pmap = vmspace_pmap(oldvm);
#if defined(SMP)
			atomic_clear_int(&pmap->pm_active, mycpu->gd_cpumask);
#else
			pmap->pm_active &= ~1;
#endif
		}
	}
	crit_exit();
}

#ifdef SMP
/*
 * Called when switching to a locked pmap
 */
void
pmap_interlock_wait(struct vmspace *vm)
{
	struct pmap *pmap = &vm->vm_pmap;

	if (pmap->pm_active & CPUMASK_LOCK) {
		kprintf("Warning: pmap_interlock %08x\n", pmap->pm_active);
		while (pmap->pm_active & CPUMASK_LOCK) {
			cpu_ccfence();
			lwkt_process_ipiq();
		}
	}
}
#endif

vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{
	if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
		return addr;
	}

	addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
	return addr;
}

int
pmap_get_pgeflag(void)
{
	return pgeflag;
}

#if defined(DEBUG)

static void	pads (pmap_t pm);
void		pmap_pvdump (vm_paddr_t pa);

/* print address space of pmap*/
static void
pads(pmap_t pm)
{
	unsigned va, i, j;
	unsigned *ptep;

	if (pm == &kernel_pmap)
		return;
	crit_enter();
	for (i = 0; i < 1024; i++) {
		if (pm->pm_pdir[i]) {
			for (j = 0; j < 1024; j++) {
				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
				if (pm == &kernel_pmap && va < KERNBASE)
					continue;
				if (pm != &kernel_pmap && va > UPT_MAX_ADDRESS)
					continue;
				ptep = pmap_pte_quick(pm, va);
				if (pmap_pte_v(ptep))
					kprintf("%x:%x ", va, *(int *) ptep);
			}
		}
	}
	crit_exit();
}

void
pmap_pvdump(vm_paddr_t pa)
{
	pv_entry_t pv;
	vm_page_t m;

	kprintf("pa %08llx", (long long)pa);
	m = PHYS_TO_VM_PAGE(pa);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
#ifdef used_to_be
		kprintf(" -> pmap %p, va %p, flags %x",
		    (void *)pv->pv_pmap, (void *)pv->pv_va, pv->pv_flags);
#endif
		kprintf(" -> pmap %p, va %p",
		    (void *)pv->pv_pmap, (void *)pv->pv_va);
		pads(pv->pv_pmap);
	}
	kprintf(" ");
}

#endif