/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2003-2019 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/serialize.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/objcache.h>
#include <sys/kern_syscall.h>

#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>

#include <sys/random.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
/*
 * Virtual memory maps provide for the mapping, protection, and sharing
 * of virtual memory objects.  In addition, this module provides for an
 * efficient virtual copy of memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple entries.
 * A hint and an RB tree are used to speed up lookups.
 *
 * Callers looking to modify maps specify start/end addresses which cause
 * the related map entry to be clipped if necessary, and then later
 * recombined if the pieces remain compatible.
 *
 * Virtual copy operations are performed by copying VM object references
 * from one map to another, and then marking both regions as copy-on-write.
 */
static boolean_t vmspace_ctor(void *obj, void *privdata, int ocflags);
static void vmspace_dtor(void *obj, void *privdata);
static void vmspace_terminate(struct vmspace *vm, int final);

MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore");
MALLOC_DEFINE(M_MAP_BACKING, "map_backing", "vm_map_backing to entry");
static struct objcache *vmspace_cache;
/*
 * per-cpu page table cross mappings are initialized in early boot
 * and might require a considerable number of vm_map_entry structures.
 */
#define MAPENTRYBSP_CACHE	(MAXCPU+1)
#define MAPENTRYAP_CACHE	8
/*
 * Partitioning threaded programs with large anonymous memory areas can
 * improve concurrent fault performance.
 */
#define MAP_ENTRY_PARTITION_SIZE	((vm_offset_t)(32 * 1024 * 1024))
#define MAP_ENTRY_PARTITION_MASK	(MAP_ENTRY_PARTITION_SIZE - 1)

#define VM_MAP_ENTRY_WITHIN_PARTITION(entry)	\
	((((entry)->ba.start ^ (entry)->ba.end) & ~MAP_ENTRY_PARTITION_MASK) == 0)
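/*
 * Worked example (illustrative only, not from the original source): with a
 * 32MB partition size, an entry spanning 0x10000000-0x10004000 stays within
 * one partition (start ^ end == 0x4000, below the 32MB bit), while an entry
 * spanning 0x11ff0000-0x12010000 crosses a 32MB boundary (start ^ end ==
 * 0x03fe0000) and fails the VM_MAP_ENTRY_WITHIN_PARTITION() test.
 */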
static struct vm_zone mapentzone_store;
__read_mostly static vm_zone_t mapentzone;

static struct vm_map_entry map_entry_init[MAX_MAPENT];
static struct vm_map_entry cpu_map_entry_init_bsp[MAPENTRYBSP_CACHE];
static struct vm_map_entry cpu_map_entry_init_ap[MAXCPU][MAPENTRYAP_CACHE];
static int randomize_mmap;
SYSCTL_INT(_vm, OID_AUTO, randomize_mmap, CTLFLAG_RW, &randomize_mmap, 0,
    "Randomize mmap offsets");
static int vm_map_relock_enable = 1;
SYSCTL_INT(_vm, OID_AUTO, map_relock_enable, CTLFLAG_RW,
	   &vm_map_relock_enable, 0, "insert pop pgtable optimization");
static int vm_map_partition_enable = 1;
SYSCTL_INT(_vm, OID_AUTO, map_partition_enable, CTLFLAG_RW,
	   &vm_map_partition_enable, 0, "Break up larger vm_map_entry's");
static int vm_map_backing_limit = 5;
SYSCTL_INT(_vm, OID_AUTO, map_backing_limit, CTLFLAG_RW,
	   &vm_map_backing_limit, 0, "ba.backing_ba link depth");
static int vm_map_backing_shadow_test = 1;
SYSCTL_INT(_vm, OID_AUTO, map_backing_shadow_test, CTLFLAG_RW,
	   &vm_map_backing_shadow_test, 0, "ba.object shadow test");
static void vmspace_drop_notoken(struct vmspace *vm);
static void vm_map_entry_shadow(vm_map_entry_t entry);
static vm_map_entry_t vm_map_entry_create(int *);
static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
static void vm_map_entry_dispose_ba (vm_map_backing_t ba);
static void vm_map_backing_replicated(vm_map_t map,
			vm_map_entry_t entry, int flags);
static void vm_map_backing_adjust_start(vm_map_entry_t entry,
			vm_offset_t start);
static void vm_map_backing_adjust_end(vm_map_entry_t entry,
			vm_offset_t end);
static void vm_map_backing_attach (vm_map_backing_t ba);
static void vm_map_backing_detach (vm_map_backing_t ba);
static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
			vm_map_entry_t);
static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry,
			vm_offset_t start, vm_offset_t end,
			int *countp, int flags);
static void vm_map_entry_partition(vm_map_t map, vm_map_entry_t entry,
			vm_offset_t vaddr, int *countp);
#define MAP_BACK_CLIPPED	0x0001
#define MAP_BACK_BASEOBJREFD	0x0002
/*
 * Initialize the vm_map module.  Must be called before any other vm_map
 * routines.
 *
 * Map and entry structures are allocated from the general purpose
 * memory pool with some exceptions:
 *
 *	- The kernel map is allocated statically.
 *	- Initial kernel map entries are allocated out of a static pool.
 *	- We must set ZONE_SPECIAL here or the early boot code can get
 *	  stuck if there are >63 cores.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 *
 * Called from the low level boot code only.
 */
void
vm_map_startup(void)
{
	mapentzone = &mapentzone_store;
	zbootinit(mapentzone, "MAP ENTRY", sizeof(struct vm_map_entry),
		  map_entry_init, MAX_MAPENT);
	mapentzone_store.zflags |= ZONE_SPECIAL;
}
/*
 * Called prior to any vmspace allocations.
 *
 * Called from the low level boot code only.
 */
void
vm_init2(void)
{
	vmspace_cache = objcache_create_mbacked(M_VMSPACE,
						sizeof(struct vmspace),
						0, ncpus * 4,
						vmspace_ctor, vmspace_dtor,
						NULL);
	zinitna(mapentzone, NULL, 0, 0, ZONE_USE_RESERVE | ZONE_SPECIAL);
}
/*
 * objcache support.  We leave the pmap root cached as long as possible
 * for performance reasons.
 */
static boolean_t
vmspace_ctor(void *obj, void *privdata, int ocflags)
{
	struct vmspace *vm = obj;

	bzero(vm, sizeof(*vm));
	vm->vm_refcnt = VM_REF_DELETED;

	return (TRUE);
}
static void
vmspace_dtor(void *obj, void *privdata)
{
	struct vmspace *vm = obj;

	KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
	pmap_puninit(vmspace_pmap(vm));
}
/*
 * Red black tree functions
 *
 * The caller must hold the related map lock.
 */
static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);

/* a->ba.start is address, and the only field which must be initialized */
static int
rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
{
	if (a->ba.start < b->ba.start)
		return(-1);
	else if (a->ba.start > b->ba.start)
		return(1);
	return(0);
}
/*
 * Initialize vmspace ref/hold counts for vmspace0.  There is a holdcnt for
 * every refcnt.
 */
static void
vmspace_initrefs(struct vmspace *vm)
{
	vm->vm_refcnt = 1;
	vm->vm_holdcnt = 1;
}
/*
 * Allocate a vmspace structure, including a vm_map and pmap.
 * Initialize numerous fields.  While the initial allocation is zeroed,
 * subsequent reuse from the objcache leaves elements of the structure
 * intact (particularly the pmap), so portions must be zeroed.
 *
 * Returns a referenced vmspace.
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
	struct vmspace *vm;

	vm = objcache_get(vmspace_cache, M_WAITOK);

	bzero(&vm->vm_startcopy,
	      (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy);
	vm_map_init(&vm->vm_map, min, max, NULL);	/* initializes token */

	/*
	 * NOTE: hold acquires the token for safety.
	 *
	 * On return vmspace is referenced (refs=1, hold=1).  That is,
	 * each refcnt also has a holdcnt.  There can be additional holds
	 * (holdcnt) above and beyond the refcnt.  Finalization is handled in
	 * two stages, one on refs 1->0, and the second on hold 1->0.
	 */
	KKASSERT(vm->vm_holdcnt == 0);
	KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
	vmspace_initrefs(vm);
	vmspace_hold(vm);
	pmap_pinit(vmspace_pmap(vm));		/* (some fields reused) */
	vm->vm_map.pmap = vmspace_pmap(vm);	/* XXX */

	cpu_vmspace_alloc(vm);
	vmspace_drop(vm);

	return (vm);
}
/*
 * NOTE: Can return 0 if the vmspace is exiting.
 */
int
vmspace_getrefs(struct vmspace *vm)
{
	int32_t n;

	n = vm->vm_refcnt;
	if (n & VM_REF_DELETED)
		n = 0;
	return n;
}
void
vmspace_hold(struct vmspace *vm)
{
	atomic_add_int(&vm->vm_holdcnt, 1);
	lwkt_gettoken(&vm->vm_map.token);
}
/*
 * Drop with final termination interlock.
 */
void
vmspace_drop(struct vmspace *vm)
{
	lwkt_reltoken(&vm->vm_map.token);
	vmspace_drop_notoken(vm);
}
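/*
 * Illustrative (hypothetical) foreign-accessor pattern sketching how the
 * hold/drop pair above is intended to be used.  'p' stands in for a process
 * the caller already holds and is not part of this file:
 *
 *	struct vmspace *vm = p->p_vmspace;
 *
 *	vmspace_hold(vm);		// holdcnt++, acquire vm_map token
 *	... inspect or walk vm->vm_map ...
 *	vmspace_drop(vm);		// release token, drop the hold
 */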
static void
vmspace_drop_notoken(struct vmspace *vm)
{
	if (atomic_fetchadd_int(&vm->vm_holdcnt, -1) == 1) {
		if (vm->vm_refcnt & VM_REF_DELETED)
			vmspace_terminate(vm, 1);
	}
}
/*
 * A vmspace object must not be in a terminated state to be able to obtain
 * additional refs on it.
 *
 * These are official references to the vmspace, the count is used to check
 * for vmspace sharing.  Foreign accessors should use 'hold' and not 'ref'.
 *
 * XXX we need to combine hold & ref together into one 64-bit field to allow
 * holds to prevent stage-1 termination.
 */
void
vmspace_ref(struct vmspace *vm)
{
	uint32_t n;

	atomic_add_int(&vm->vm_holdcnt, 1);
	n = atomic_fetchadd_int(&vm->vm_refcnt, 1);
	KKASSERT((n & VM_REF_DELETED) == 0);
}
/*
 * Release a ref on the vmspace.  On the 1->0 transition we do stage-1
 * termination of the vmspace.  Then, on the final drop of the hold we
 * will do stage-2 final termination.
 */
void
vmspace_rel(struct vmspace *vm)
{
	uint32_t n;

	/*
	 * Drop refs.  Each ref also has a hold which is also dropped.
	 *
	 * When refs hits 0 compete to get the VM_REF_DELETED flag (holds
	 * prevent finalization) to start termination processing.
	 * Finalization occurs when the last hold count drops to 0.
	 */
	n = atomic_fetchadd_int(&vm->vm_refcnt, -1) - 1;
	if (n == 0) {
		if (atomic_cmpset_int(&vm->vm_refcnt, 0, VM_REF_DELETED)) {
			vmspace_terminate(vm, 0);
		}
	}
	vmspace_drop_notoken(vm);
}
/*
 * This is called during exit indicating that the vmspace is no
 * longer in use by an exiting process, but the process has not yet
 * been reaped.
 *
 * We drop refs, allowing for stage-1 termination, but maintain a holdcnt
 * to prevent stage-2 until the process is reaped.  Note the order of
 * operation, we must hold first.
 */
void
vmspace_relexit(struct vmspace *vm)
{
	atomic_add_int(&vm->vm_holdcnt, 1);
	vmspace_rel(vm);
}

/*
 * Called during reap to disconnect the remainder of the vmspace from
 * the process.  On the hold drop the vmspace termination is finalized.
 */
void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	vmspace_drop_notoken(vm);
}
/*
 * Called in two cases:
 *
 * (1) When the last refcnt is dropped and the vmspace becomes inactive,
 *     called with final == 0.  refcnt will be (u_int)-1 at this point,
 *     and holdcnt will still be non-zero.
 *
 * (2) When holdcnt becomes 0, called with final == 1.  There should no
 *     longer be anyone with access to the vmspace.
 *
 * VMSPACE_EXIT1 flags the primary deactivation
 * VMSPACE_EXIT2 flags the last reap
 */
static void
vmspace_terminate(struct vmspace *vm, int final)
{
	int count;

	lwkt_gettoken(&vm->vm_map.token);
	if (final == 0) {
		KKASSERT((vm->vm_flags & VMSPACE_EXIT1) == 0);
		vm->vm_flags |= VMSPACE_EXIT1;

		/*
		 * Get rid of most of the resources.  Leave the kernel pmap
		 * intact.
		 *
		 * If the pmap does not contain wired pages we can bulk-delete
		 * the pmap as a performance optimization before removing the
		 * related mappings.
		 *
		 * If the pmap contains wired pages we cannot do this
		 * pre-optimization because currently vm_fault_unwire()
		 * expects the pmap pages to exist and will not decrement
		 * p->wire_count if they do not.
		 */
		if (vmspace_pmap(vm)->pm_stats.wired_count) {
			vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
				      VM_MAX_USER_ADDRESS);
			pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
					  VM_MAX_USER_ADDRESS);
		} else {
			pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
					  VM_MAX_USER_ADDRESS);
			vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
				      VM_MAX_USER_ADDRESS);
		}
		lwkt_reltoken(&vm->vm_map.token);
	} else {
		KKASSERT((vm->vm_flags & VMSPACE_EXIT1) != 0);
		KKASSERT((vm->vm_flags & VMSPACE_EXIT2) == 0);

		/*
		 * Get rid of remaining basic resources.
		 */
		vm->vm_flags |= VMSPACE_EXIT2;

		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
		vm_map_lock(&vm->vm_map);
		cpu_vmspace_free(vm);

		/*
		 * Lock the map, to wait out all other references to it.
		 * Delete all of the mappings and pages they hold, then call
		 * the pmap module to reclaim anything left.
		 */
		vm_map_delete(&vm->vm_map,
			      vm_map_min(&vm->vm_map),
			      vm_map_max(&vm->vm_map),
			      &count);
		vm_map_unlock(&vm->vm_map);
		vm_map_entry_release(count);

		pmap_release(vmspace_pmap(vm));
		lwkt_reltoken(&vm->vm_map.token);
		objcache_put(vmspace_cache, vm);
	}
}
/*
 * Swap usage is determined by taking the proportional swap used by
 * VM objects backing the VM map.  To make up for fractional losses,
 * if the VM object has any swap use at all the associated map entries
 * count for at least 1 swap page.
 */
vm_offset_t
vmspace_swap_count(struct vmspace *vm)
{
	vm_map_t map = &vm->vm_map;
	vm_map_entry_t cur;
	vm_object_t object;
	vm_offset_t count = 0;
	vm_offset_t n;

	vmspace_hold(vm);

	RB_FOREACH(cur, vm_map_rb_tree, &map->rb_root) {
		switch(cur->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			if ((object = cur->ba.object) == NULL)
				break;
			if (object->swblock_count) {
				n = (cur->ba.end - cur->ba.start) / PAGE_SIZE;
				count += object->swblock_count *
					 SWAP_META_PAGES * n / object->size + 1;
			}
			break;
		default:
			break;
		}
	}

	vmspace_drop(vm);

	return(count);
}
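/*
 * Worked example (illustrative only): for a 4MB entry (n = 1024 pages)
 * backed by a 16384-page object holding 8 swblocks, the charge is
 * 8 * SWAP_META_PAGES * 1024 / 16384 + 1 swap pages; assuming
 * SWAP_META_PAGES is 32 that is 16 + 1 = 17 pages.  Any object with
 * swap use at all is charged at least 1 page.
 */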
/*
 * Calculate the approximate number of anonymous pages in use by
 * this vmspace.  To make up for fractional losses, we count each
 * VM object as having at least 1 anonymous page.
 */
vm_offset_t
vmspace_anonymous_count(struct vmspace *vm)
{
	vm_map_t map = &vm->vm_map;
	vm_map_entry_t cur;
	vm_object_t object;
	vm_offset_t count = 0;

	vmspace_hold(vm);

	RB_FOREACH(cur, vm_map_rb_tree, &map->rb_root) {
		switch(cur->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			if ((object = cur->ba.object) == NULL)
				break;
			if (object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP) {
				break;
			}
			count += object->resident_page_count;
			break;
		default:
			break;
		}
	}

	vmspace_drop(vm);

	return(count);
}
/*
 * Initialize an existing vm_map structure such as that in the vmspace
 * structure.  The pmap is initialized elsewhere.
 */
void
vm_map_init(struct vm_map *map, vm_offset_t min_addr, vm_offset_t max_addr,
	    pmap_t pmap)
{
	RB_INIT(&map->rb_root);
	spin_init(&map->ilock_spin, "ilock");
	map->ilock_base = NULL;
	map->pmap = pmap;
	vm_map_min(map) = min_addr;
	vm_map_max(map) = max_addr;
	bzero(&map->freehint, sizeof(map->freehint));
	lwkt_token_init(&map->token, "vm_map");
	lockinit(&map->lock, "vm_maplk", (hz + 9) / 10, 0);
}
/*
 * Find the first possible free address for the specified request length.
 * Returns 0 if we don't have one cached.
 */
static vm_offset_t
vm_map_freehint_find(vm_map_t map, vm_size_t length, vm_size_t align)
{
	vm_map_freehint_t *scan;

	scan = &map->freehint[0];
	while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
		if (scan->length == length && scan->align == align)
			return(scan->start);
		++scan;
	}
	return 0;
}
/*
 * Unconditionally set the freehint.  Called by vm_map_findspace() after
 * it finds an address.  This will help us iterate optimally on the next
 * search.
 */
static void
vm_map_freehint_update(vm_map_t map, vm_offset_t start,
		       vm_size_t length, vm_size_t align)
{
	vm_map_freehint_t *scan;

	scan = &map->freehint[0];
	while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
		if (scan->length == length && scan->align == align) {
			scan->start = start;
			return;
		}
		++scan;
	}
	scan = &map->freehint[map->freehint_newindex & VM_MAP_FFMASK];
	scan->start = start;
	scan->align = align;
	scan->length = length;
	++map->freehint_newindex;
}
/*
 * Update any existing freehints (for any alignment), for the hole we just
 * made.
 */
static void
vm_map_freehint_hole(vm_map_t map, vm_offset_t start, vm_size_t length)
{
	vm_map_freehint_t *scan;

	scan = &map->freehint[0];
	while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
		if (scan->length <= length && scan->start > start)
			scan->start = start;
		++scan;
	}
}
/*
 * This function handles MAP_ENTRY_NEEDS_COPY by inserting a fronting
 * object in the entry for COW faults.
 *
 * The entire chain including entry->ba (prior to inserting the fronting
 * object) essentially becomes set in stone... elements of it can be paged
 * in or out, but cannot be further modified.
 *
 * NOTE: If we do not optimize the backing chain then a unique copy is not
 *	 needed.  Note, however, that because portions of the chain are
 *	 shared across pmaps we cannot make any changes to the vm_map_backing
 *	 elements themselves.
 *
 * If the map segment is governed by a virtual page table then it is
 * possible to address offsets beyond the mapped area.  Just allocate
 * a maximally sized object for this case.
 *
 * If addref is non-zero an additional reference is added to the returned
 * entry.  This mechanic exists because the additional reference might have
 * to be added atomically and not after return to prevent a premature
 * collapse.  XXX currently there is no collapse code.
 *
 * The vm_map must be exclusively locked.
 * No other requirements.
 */
static void
vm_map_entry_shadow(vm_map_entry_t entry)
{
	vm_map_backing_t ba;
	vm_size_t length;
	vm_object_t source;
	vm_object_t result;

	if (entry->maptype == VM_MAPTYPE_VPAGETABLE)
		length = 0x7FFFFFFF;
	else
		length = atop(entry->ba.end - entry->ba.start);

	/*
	 * Don't create the new object if the old object isn't shared.
	 * This case occurs quite often when programs fork/exec/wait.
	 *
	 * Caller ensures source exists (all backing_ba's must have objects),
	 * typically indirectly by virtue of the NEEDS_COPY flag being set.
	 * We have a ref on source by virtue of the entry and do not need
	 * to lock it to do this test.
	 */
	source = entry->ba.object;
	KKASSERT(source);

	if (source->type != OBJT_VNODE) {
		if (source->ref_count == 1 &&
		    source->handle == NULL &&
		    (source->type == OBJT_DEFAULT ||
		     source->type == OBJT_SWAP)) {
			goto done;
		}
	}
	ba = kmalloc(sizeof(*ba), M_MAP_BACKING, M_INTWAIT); /* copied later */
	vm_object_hold_shared(source);

	/*
	 * Once it becomes part of a backing_ba chain it can wind up anywhere,
	 * drop the ONEMAPPING flag now.
	 */
	vm_object_clear_flag(source, OBJ_ONEMAPPING);

	/*
	 * Allocate a new object with the given length.  The new object
	 * is returned referenced but we may have to add another one.
	 * If we are adding a second reference we must clear OBJ_ONEMAPPING.
	 * (typically because the caller is about to clone a vm_map_entry).
	 *
	 * The source object currently has an extra reference to prevent
	 * collapses into it while we mess with its shadow list, which
	 * we will remove later in this routine.
	 *
	 * The target object may require a second reference if asked for one
	 * by the caller.
	 */
	result = vm_object_allocate_hold(OBJT_DEFAULT, length);
	if (result == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 * The new object shadows the source object.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 *
	 * The source object is moved to ba, retaining its existing ref-count.
	 * No additional ref is needed.
	 *
	 * SHADOWING IS NOT APPLICABLE TO OBJT_VNODE OBJECTS
	 */
	vm_map_backing_detach(&entry->ba);
	*ba = entry->ba;		/* previous ba */
	entry->ba.object = result;	/* new ba (at head of entry) */
	entry->ba.backing_ba = ba;
	entry->ba.backing_count = ba->backing_count + 1;
	entry->ba.offset = 0;

	/* cpu localization twist */
	result->pg_color = vm_quickcolor();

	vm_map_backing_attach(&entry->ba);
	vm_map_backing_attach(ba);

	/*
	 * Adjust the return storage.  Drop the ref on source before
	 * returning.
	 */
	vm_object_drop(result);
	vm_object_drop(source);

done:
	entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
}
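/*
 * Illustrative sketch (not part of the original source) of what the shadow
 * operation above does to an entry's backing chain.  Before:
 *
 *	entry->ba -> shared source object (NEEDS_COPY)
 *
 * After:
 *
 *	entry->ba -> new OBJT_DEFAULT fronting object
 *	entry->ba.backing_ba -> old ba -> source object (read-only backing)
 *
 * New anonymous pages are created in the fronting object on write faults,
 * while unmodified pages continue to be paged in from the backing chain.
 */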
/*
 * Allocate an object for a vm_map_entry.
 *
 * Object allocation for anonymous mappings is deferred as long as possible.
 * This function is called when we can defer no longer, generally when a map
 * entry might be split or forked or takes a page fault.
 *
 * If the map segment is governed by a virtual page table then it is
 * possible to address offsets beyond the mapped area.  Just allocate
 * a maximally sized object for this case.
 *
 * The vm_map must be exclusively locked.
 * No other requirements.
 */
void
vm_map_entry_allocate_object(vm_map_entry_t entry)
{
	vm_object_t obj;

	/*
	 * ba.offset is NOT cumulatively added in the backing_ba scan like
	 * it was in the old object chain, so we can assign whatever offset
	 * we like to the new object.
	 *
	 * For now assign a value of 0 to make debugging object sizes
	 * easier.
	 */
	entry->ba.offset = 0;

	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF);
	} else {
		obj = vm_object_allocate(OBJT_DEFAULT,
					 atop(entry->ba.end - entry->ba.start) +
					 entry->ba.offset);
	}
	entry->ba.object = obj;
	vm_map_backing_attach(&entry->ba);
}
/*
 * Set an initial negative count so the first attempt to reserve
 * space preloads a bunch of vm_map_entry's for this cpu.  Also
 * pre-allocate 2 vm_map_entries which will be needed by zalloc() to
 * map a new page for vm_map_entry structures.  SMP systems are
 * particularly sensitive.
 *
 * This routine is called in early boot so we cannot just call
 * vm_map_entry_reserve().
 *
 * Called from the low level boot code only (for each cpu)
 *
 * WARNING! Take care not to have too-big a static/BSS structure here
 *	    as MAXCPU can be 256+, otherwise the loader's 64MB heap
 *	    can get blown out by the kernel plus the initrd image.
 */
void
vm_map_entry_reserve_cpu_init(globaldata_t gd)
{
	vm_map_entry_t entry;
	int count;
	int i;

	atomic_add_int(&gd->gd_vme_avail, -MAP_RESERVE_COUNT * 2);
	if (gd->gd_cpuid == 0) {
		entry = &cpu_map_entry_init_bsp[0];
		count = MAPENTRYBSP_CACHE;
	} else {
		entry = &cpu_map_entry_init_ap[gd->gd_cpuid][0];
		count = MAPENTRYAP_CACHE;
	}
	for (i = 0; i < count; ++i, ++entry) {
		MAPENT_FREELIST(entry) = gd->gd_vme_base;
		gd->gd_vme_base = entry;
	}
}
/*
 * Reserves vm_map_entry structures so code later-on can manipulate
 * map_entry structures within a locked map without blocking trying
 * to allocate a new vm_map_entry.
 *
 * WARNING!  We must not decrement gd_vme_avail until after we have
 *	     ensured that sufficient entries exist, otherwise we can
 *	     get into an endless call recursion in the zalloc code
 *	     itself.
 */
int
vm_map_entry_reserve(int count)
{
	struct globaldata *gd = mycpu;
	vm_map_entry_t entry;

	/*
	 * Make sure we have enough structures in gd_vme_base to handle
	 * the reservation request.
	 *
	 * Use a critical section to protect against VM faults.  It might
	 * not be needed, but we have to be careful here.
	 */
	if (gd->gd_vme_avail < count) {
		crit_enter();
		while (gd->gd_vme_avail < count) {
			entry = zalloc(mapentzone);
			MAPENT_FREELIST(entry) = gd->gd_vme_base;
			gd->gd_vme_base = entry;
			atomic_add_int(&gd->gd_vme_avail, 1);
		}
		crit_exit();
	}
	atomic_add_int(&gd->gd_vme_avail, -count);

	return(count);
}
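/*
 * Illustrative (hypothetical) caller pattern for the reservation API:
 * entries are reserved before the map is locked so later clips never block
 * inside the locked section, and the unused portion is returned afterwards.
 * 'map', 'start', and 'end' are assumed to be supplied by the caller.
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	... vm_map_clip_start()/vm_map_clip_end()/modify entries ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */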
/*
 * Releases previously reserved vm_map_entry structures that were not
 * used.  If we have too much junk in our per-cpu cache clean some of
 * it out.
 */
void
vm_map_entry_release(int count)
{
	struct globaldata *gd = mycpu;
	vm_map_entry_t entry;
	vm_map_entry_t efree;

	count = atomic_fetchadd_int(&gd->gd_vme_avail, count) + count;
	if (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
		efree = NULL;
		crit_enter();
		while (gd->gd_vme_avail > MAP_RESERVE_HYST) {
			entry = gd->gd_vme_base;
			KKASSERT(entry != NULL);
			gd->gd_vme_base = MAPENT_FREELIST(entry);
			atomic_add_int(&gd->gd_vme_avail, -1);
			MAPENT_FREELIST(entry) = efree;
			efree = entry;
		}
		crit_exit();
		while ((entry = efree) != NULL) {
			efree = MAPENT_FREELIST(efree);
			zfree(mapentzone, entry);
		}
	}
}
/*
 * Reserve map entry structures for use in kernel_map itself.  These
 * entries have *ALREADY* been reserved on a per-cpu basis when the map
 * was inited.  This function is used by zalloc() to avoid a recursion
 * when zalloc() itself needs to allocate additional kernel memory.
 *
 * This function works like the normal reserve but does not load the
 * vm_map_entry cache (because that would result in an infinite
 * recursion).  Note that gd_vme_avail may go negative.  This is expected.
 *
 * Any caller of this function must be sure to renormalize after
 * potentially eating entries to ensure that the reserve supply
 * remains intact.
 */
int
vm_map_entry_kreserve(int count)
{
	struct globaldata *gd = mycpu;

	atomic_add_int(&gd->gd_vme_avail, -count);
	KASSERT(gd->gd_vme_base != NULL,
		("no reserved entries left, gd_vme_avail = %d",
		 gd->gd_vme_avail));
	return(count);
}

/*
 * Release previously reserved map entries for kernel_map.  We do not
 * attempt to clean up like the normal release function as this would
 * cause an unnecessary (but probably not fatal) deep procedure call.
 */
void
vm_map_entry_krelease(int count)
{
	struct globaldata *gd = mycpu;

	atomic_add_int(&gd->gd_vme_avail, count);
}
/*
 * Allocates a VM map entry for insertion.  No entry fields are filled in.
 *
 * The entries should have previously been reserved.  The reservation count
 * is tracked in (*countp).
 */
static vm_map_entry_t
vm_map_entry_create(int *countp)
{
	struct globaldata *gd = mycpu;
	vm_map_entry_t entry;

	KKASSERT(*countp > 0);
	--*countp;
	crit_enter();
	entry = gd->gd_vme_base;
	KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
	gd->gd_vme_base = MAPENT_FREELIST(entry);
	crit_exit();

	return(entry);
}
static void
vm_map_backing_attach(vm_map_backing_t ba)
{
	vm_object_t obj = ba->object;

	lockmgr(&obj->backing_lk, LK_EXCLUSIVE);
	TAILQ_INSERT_TAIL(&obj->backing_list, ba, entry);
	lockmgr(&obj->backing_lk, LK_RELEASE);
}

static void
vm_map_backing_detach(vm_map_backing_t ba)
{
	vm_object_t obj = ba->object;

	lockmgr(&obj->backing_lk, LK_EXCLUSIVE);
	TAILQ_REMOVE(&obj->backing_list, ba, entry);
	lockmgr(&obj->backing_lk, LK_RELEASE);
}
/*
 * Dispose of the dynamically allocated backing_ba chain associated
 * with a vm_map_entry.
 *
 * We decrement the (possibly shared) element and kfree() on the
 * 1->0 transition.  We only iterate to the next backing_ba when
 * the previous one went through a 1->0 transition.
 */
static void
vm_map_entry_dispose_ba(vm_map_backing_t ba)
{
	vm_map_backing_t next;

	while (ba) {
		vm_map_backing_detach(ba);
		vm_object_deallocate(ba->object);
		next = ba->backing_ba;
		kfree(ba, M_MAP_BACKING);
		ba = next;
	}
}
/*
 * Dispose of a vm_map_entry that is no longer being referenced.
 *
 * The map must be exclusively locked.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
{
	struct globaldata *gd = mycpu;

	/*
	 * Dispose of the base object and the backing link.
	 */
	switch(entry->maptype) {
	case VM_MAPTYPE_NORMAL:
	case VM_MAPTYPE_VPAGETABLE:
		if (entry->ba.object) {
			vm_map_backing_detach(&entry->ba);
			vm_object_deallocate(entry->ba.object);
		}
		break;
	case VM_MAPTYPE_SUBMAP:
	case VM_MAPTYPE_UKSMAP:
		break;
	default:
		break;
	}
	vm_map_entry_dispose_ba(entry->ba.backing_ba);

	/*
	 * Cleanup for safety.
	 */
	entry->ba.backing_ba = NULL;
	entry->ba.object = NULL;
	entry->ba.offset = 0;

	++*countp;
	crit_enter();
	MAPENT_FREELIST(entry) = gd->gd_vme_base;
	gd->gd_vme_base = entry;
	crit_exit();
}
/*
 * Insert/remove entries from maps.
 *
 * The related map must be exclusively locked.
 * The caller must hold map->token
 * No other requirements.
 */
static __inline void
vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
{
	ASSERT_VM_MAP_LOCKED(map);

	if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry))
		panic("vm_map_entry_link: dup addr map %p ent %p", map, entry);
}

static __inline void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	ASSERT_VM_MAP_LOCKED(map);

	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
		panic("vm_map_entry_unlink: attempt to mess with "
		      "locked entry! %p", entry);
	}
	vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry);
}
/*
 * Finds the map entry containing (or immediately preceding) the specified
 * address in the given map.  The entry is returned in (*entry).
 *
 * The boolean result indicates whether the address is actually contained
 * in the map.
 *
 * The related map must be locked.
 * No other requirements.
 */
boolean_t
vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry)
{
	vm_map_entry_t tmp;
	vm_map_entry_t last;

	ASSERT_VM_MAP_LOCKED(map);

	/*
	 * Locate the record from the top of the tree.  'last' tracks the
	 * closest prior record and is returned if no match is found, which
	 * in binary tree terms means tracking the most recent right-branch
	 * taken.  If there is no prior record, *entry is set to NULL.
	 */
	last = NULL;
	tmp = RB_ROOT(&map->rb_root);

	while (tmp) {
		if (address >= tmp->ba.start) {
			if (address < tmp->ba.end) {
				*entry = tmp;
				return(TRUE);
			}
			last = tmp;
			tmp = RB_RIGHT(tmp, rb_entry);
		} else {
			tmp = RB_LEFT(tmp, rb_entry);
		}
	}
	*entry = last;
	return (FALSE);
}
/*
 * Inserts the given whole VM object into the target map at the specified
 * address range.  The object's size should match that of the address range.
 *
 * The map must be exclusively locked.
 * The object must be held.
 * The caller must have reserved sufficient vm_map_entry structures.
 *
 * If object is non-NULL, ref count must be bumped by caller prior to
 * making call to account for the new entry.  XXX API is a bit messy.
 */
int
vm_map_insert(vm_map_t map, int *countp, void *map_object, void *map_aux,
	      vm_ooffset_t offset, vm_offset_t start, vm_offset_t end,
	      vm_maptype_t maptype, vm_subsys_t id,
	      vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t new_entry;
	vm_map_entry_t prev_entry;
	vm_map_entry_t next;
	vm_map_entry_t temp_entry;
	vm_eflags_t protoeflags;
	vm_object_t object;
	int must_drop = 0;

	if (maptype == VM_MAPTYPE_UKSMAP)
		object = NULL;
	else
		object = map_object;

	ASSERT_VM_MAP_LOCKED(map);
	if (object)
		ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	/*
	 * Check that the start and end points are not bogus.
	 */
	if ((start < vm_map_min(map)) || (end > vm_map_max(map)) ||
	    (start >= end)) {
		return (KERN_INVALID_ADDRESS);
	}

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);
	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if (prev_entry)
		next = vm_map_rb_tree_RB_NEXT(prev_entry);
	else
		next = RB_MIN(vm_map_rb_tree, &map->rb_root);
	if (next && next->ba.start < end)
		return (KERN_NO_SPACE);
	protoeflags = 0;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_NOFAULT) {
		protoeflags |= MAP_ENTRY_NOFAULT;

		KASSERT(object == NULL,
			("vm_map_insert: paradoxical MAP_NOFAULT request"));
	}
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;
	if (cow & MAP_IS_STACK)
		protoeflags |= MAP_ENTRY_STACK;
	if (cow & MAP_IS_KSTACK)
		protoeflags |= MAP_ENTRY_KSTACK;

	lwkt_gettoken(&map->token);
	if (maptype == VM_MAPTYPE_UKSMAP) {
		;
	} else if (prev_entry &&
		   (prev_entry->eflags == protoeflags) &&
		   (prev_entry->ba.end == start) &&
		   (prev_entry->wired_count == 0) &&
		   (prev_entry->id == id) &&
		   prev_entry->maptype == maptype &&
		   maptype == VM_MAPTYPE_NORMAL &&
		   prev_entry->ba.backing_ba == NULL &&	/* not backed */
		   ((prev_entry->ba.object == NULL) ||
		    vm_object_coalesce(prev_entry->ba.object,
				OFF_TO_IDX(prev_entry->ba.offset),
				(vm_size_t)(prev_entry->ba.end - prev_entry->ba.start),
				(vm_size_t)(end - prev_entry->ba.end)))) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
		    (prev_entry->protection == prot) &&
		    (prev_entry->max_protection == max)) {
			map->size += (end - prev_entry->ba.end);
			vm_map_backing_adjust_end(prev_entry, end);
			vm_map_simplify_entry(map, prev_entry, countp);
			lwkt_reltoken(&map->token);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->ba.object;
		offset = prev_entry->ba.offset +
			 (prev_entry->ba.end - prev_entry->ba.start);
		if (object) {
			vm_object_hold(object);
			vm_object_lock_swap();	/* map->token order */
			vm_object_reference_locked(object);
			map_object = object;
			must_drop = 1;
		}
	}
	/*
	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
	 * in things like the buffer map where we manage kva but do not manage
	 * the backing objects.
	 */

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(countp);
	new_entry->ba.pmap = map->pmap;
	new_entry->ba.start = start;
	new_entry->ba.end = end;

	new_entry->maptype = maptype;
	new_entry->eflags = protoeflags;
	new_entry->aux.master_pde = 0;		/* in case size is different */
	new_entry->aux.map_aux = map_aux;
	new_entry->ba.map_object = map_object;
	new_entry->ba.backing_ba = NULL;
	new_entry->ba.backing_count = 0;
	new_entry->ba.offset = offset;
	new_entry->ba.flags = 0;
	new_entry->ba.pmap = map->pmap;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;
	new_entry->id = id;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_backing_replicated(map, new_entry, MAP_BACK_BASEOBJREFD);
	vm_map_entry_link(map, new_entry);
	map->size += new_entry->ba.end - new_entry->ba.start;
	/*
	 * Don't worry about updating freehint[] when inserting, allow
	 * addresses to be lower than the actual first free spot.
	 */
#if 0
	/*
	 * Temporarily removed to avoid MAP_STACK panic, due to
	 * MAP_STACK being a huge hack.  Will be added back in
	 * when MAP_STACK (and the user stack mapping) is fixed.
	 */
	/*
	 * It may be possible to simplify the entry
	 */
	vm_map_simplify_entry(map, new_entry, countp);
#endif

	/*
	 * Try to pre-populate the page table.  Mappings governed by virtual
	 * page tables cannot be prepopulated without a lot of work, so
	 * don't try.
	 */
	if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) &&
	    maptype != VM_MAPTYPE_VPAGETABLE &&
	    maptype != VM_MAPTYPE_UKSMAP) {
		if (vm_map_relock_enable && (cow & MAP_PREFAULT_RELOCK)) {
			vm_object_lock_swap();
			vm_object_drop(object);
		}
		pmap_object_init_pt(map->pmap, new_entry,
				    new_entry->ba.start,
				    new_entry->ba.end - new_entry->ba.start,
				    cow & MAP_PREFAULT_PARTIAL);
		if (vm_map_relock_enable && (cow & MAP_PREFAULT_RELOCK)) {
			vm_object_hold(object);
			vm_object_lock_swap();
		}
	}
	lwkt_reltoken(&map->token);
	if (must_drop)
		vm_object_drop(object);

	return (KERN_SUCCESS);
}
/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  Returns 0 on success, 1 on no space.
 *
 * This function will return an arbitrarily aligned pointer.  If no
 * particular alignment is required you should pass align as 1.  Note that
 * the map may return PAGE_SIZE aligned pointers if all the lengths used in
 * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
 * value.
 *
 * 'align' should be a power of 2 but is not required to be.
 *
 * The map must be exclusively locked.
 * No other requirements.
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
		 vm_size_t align, int flags, vm_offset_t *addr)
{
	vm_map_entry_t entry;
	vm_map_entry_t tmp;
	vm_offset_t hole_start;
	vm_offset_t end;
	vm_offset_t align_mask;

	if (start < vm_map_min(map))
		start = vm_map_min(map);
	if (start > vm_map_max(map))
		return (1);

	/*
	 * If the alignment is not a power of 2 we will have to use
	 * a mod/division, set align_mask to a special value.
	 */
	if ((align | (align - 1)) + 1 != (align << 1))
		align_mask = (vm_offset_t)-1;
	else
		align_mask = align - 1;

	/*
	 * Use freehint to adjust the start point, hopefully reducing
	 * the iteration to O(1).
	 */
	hole_start = vm_map_freehint_find(map, length, align);
	if (start < hole_start)
		start = hole_start;
	if (vm_map_lookup_entry(map, start, &tmp))
		start = tmp->ba.end;
	entry = tmp;	/* may be NULL */

	/*
	 * Look through the rest of the map, trying to fit a new region in the
	 * gap between existing regions, or after the very last region.
	 */
	for (;;) {
		/*
		 * Adjust the proposed start by the requested alignment,
		 * be sure that we didn't wrap the address.
		 */
		if (align_mask == (vm_offset_t)-1)
			end = roundup(start, align);
		else
			end = (start + align_mask) & ~align_mask;
		if (end < start)
			return (1);
		start = end;

		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address.
		 * Then check to see if this is the last entry or if the
		 * proposed end fits in the gap between this and the next
		 * entry.
		 */
		end = start + length;
		if (end > vm_map_max(map) || end < start)
			return (1);

		/*
		 * Locate the next entry, we can stop if this is the
		 * last entry (we know we are in-bounds so that would
		 * be a success).
		 */
		if (entry)
			entry = vm_map_rb_tree_RB_NEXT(entry);
		else
			entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
		if (entry == NULL)
			break;

		/*
		 * Determine if the proposed area would overlap the
		 * next entry.
		 *
		 * When matching against a STACK entry, only allow the
		 * memory map to intrude on the ungrown portion of the
		 * STACK entry when MAP_TRYFIXED is set.
		 */
		if (entry->ba.start >= end) {
			if ((entry->eflags & MAP_ENTRY_STACK) == 0)
				break;
			if (flags & MAP_TRYFIXED)
				break;
			if (entry->ba.start - entry->aux.avail_ssize >= end)
				break;
		}
		start = entry->ba.end;
	}

	/*
	 * Update the freehint
	 */
	vm_map_freehint_update(map, start, length, align);

	/*
	 * Grow the kernel_map if necessary.  pmap_growkernel() will panic
	 * if it fails.  The kernel_map is locked and nothing can steal
	 * our address space if pmap_growkernel() blocks.
	 *
	 * NOTE: This may be unconditionally called for kldload areas on
	 *	 x86_64 because these do not bump kernel_vm_end (which would
	 *	 fill 128G worth of page tables!).  Therefore we must not
	 *	 retry.
	 */
	if (map == &kernel_map) {
		vm_offset_t kstop;

		kstop = round_page(start + length);
		if (kstop > kernel_vm_end)
			pmap_growkernel(start, kstop);
	}
	*addr = start;
	return (0);
}
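/*
 * Worked example (illustrative only): for align = 0x10000 (a power of 2)
 * align_mask is 0xffff, so a proposed start of 0x7f0012345000 rounds up to
 * (0x7f0012345000 + 0xffff) & ~0xffff = 0x7f0012350000.  For a
 * non-power-of-2 alignment the special align_mask of (vm_offset_t)-1
 * selects the roundup() path instead.
 */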
/*
 * vm_map_find finds an unallocated region in the target address map with
 * the given length and allocates it.  The search is defined to be first-fit
 * from the specified address; the region found is returned in the same
 * structure.
 *
 * If object is non-NULL, ref count must be bumped by caller
 * prior to making call to account for the new entry.
 *
 * No requirements.  This function will lock the map temporarily.
 */
int
vm_map_find(vm_map_t map, void *map_object, void *map_aux,
	    vm_ooffset_t offset, vm_offset_t *addr,
	    vm_size_t length, vm_size_t align, boolean_t fitit,
	    vm_maptype_t maptype, vm_subsys_t id,
	    vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_offset_t start;
	vm_object_t object;
	int result;
	int count;

	if (maptype == VM_MAPTYPE_UKSMAP)
		object = NULL;
	else
		object = map_object;

	start = *addr;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	if (object)
		vm_object_hold_shared(object);

	vm_map_lock(map);
	if (fitit) {
		if (vm_map_findspace(map, start, length, align, 0, addr)) {
			vm_map_unlock(map);
			if (object)
				vm_object_drop(object);
			vm_map_entry_release(count);
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, &count, map_object, map_aux,
			       offset, start, start + length,
			       maptype, id, prot, max, cow);
	if (object)
		vm_object_drop(object);
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (result);
}
/*
 * Simplify the given map entry by merging with either neighbor.  This
 * routine also has the ability to merge with both neighbors.
 *
 * This routine guarantees that the passed entry remains valid (though
 * possibly extended).  When merging, this routine may delete one or
 * both neighbors.  No action is taken on entries which have their
 * in-transition flag set.
 *
 * The map must be exclusively locked.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
		++mycpu->gd_cnt.v_intrans_coll;
		return;
	}

	if (entry->maptype == VM_MAPTYPE_SUBMAP)
		return;
	if (entry->maptype == VM_MAPTYPE_UKSMAP)
		return;

	prev = vm_map_rb_tree_RB_PREV(entry);
	if (prev) {
		prevsize = prev->ba.end - prev->ba.start;
		if ( (prev->ba.end == entry->ba.start) &&
		     (prev->maptype == entry->maptype) &&
		     (prev->ba.object == entry->ba.object) &&
		     (prev->ba.backing_ba == entry->ba.backing_ba) &&
		     (!prev->ba.object ||
			(prev->ba.offset + prevsize == entry->ba.offset)) &&
		     (prev->eflags == entry->eflags) &&
		     (prev->protection == entry->protection) &&
		     (prev->max_protection == entry->max_protection) &&
		     (prev->inheritance == entry->inheritance) &&
		     (prev->id == entry->id) &&
		     (prev->wired_count == entry->wired_count)) {
			/*
			 * NOTE: order important.  Unlink before gumming up
			 *	 the RBTREE w/adjust, adjust before disposal
			 *	 of prior entry, to avoid pmap snafus.
			 */
			vm_map_entry_unlink(map, prev);
			vm_map_backing_adjust_start(entry, prev->ba.start);
			if (entry->ba.object == NULL)
				entry->ba.offset = 0;
			vm_map_entry_dispose(map, prev, countp);
		}
	}

	next = vm_map_rb_tree_RB_NEXT(entry);
	if (next) {
		esize = entry->ba.end - entry->ba.start;
		if ((entry->ba.end == next->ba.start) &&
		    (next->maptype == entry->maptype) &&
		    (next->ba.object == entry->ba.object) &&
		    (next->ba.backing_ba == entry->ba.backing_ba) &&
		     (!entry->ba.object ||
			(entry->ba.offset + esize == next->ba.offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->id == entry->id) &&
		    (next->wired_count == entry->wired_count)) {
			/*
			 * NOTE: order important.  Unlink before gumming up
			 *	 the RBTREE w/adjust, adjust before disposal
			 *	 of prior entry, to avoid pmap snafus.
			 */
			vm_map_entry_unlink(map, next);
			vm_map_backing_adjust_end(entry, next->ba.end);
			vm_map_entry_dispose(map, next, countp);
		}
	}
}
/*
 * Asserts that the given entry begins at or after the specified address.
 * If necessary, it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr, countp)		\
{									\
	if (startaddr > entry->ba.start)				\
		_vm_map_clip_start(map, entry, startaddr, countp);	\
}

/*
 * This routine is called only when it is known that the entry must be split.
 *
 * The map must be exclusively locked.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start,
		   int *countp)
{
	vm_map_entry_t new_entry;

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */

	vm_map_simplify_entry(map, entry, countp);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->ba.object == NULL && !map->system_map &&
	    VM_MAP_ENTRY_WITHIN_PARTITION(entry)) {
		vm_map_entry_allocate_object(entry);
	}

	/*
	 * NOTE: The replicated function will adjust start, end, and offset
	 *	 for the remainder of the backing_ba linkages.  We must fixup
	 *	 the top-level entry ourselves.
	 */
	new_entry = vm_map_entry_create(countp);
	*new_entry = *entry;
	new_entry->ba.end = start;

	/*
	 * Ordering is important, make sure the new entry is replicated
	 * before we cut the existing entry.
	 */
	vm_map_backing_replicated(map, new_entry, MAP_BACK_CLIPPED);
	vm_map_backing_adjust_start(entry, start);
	vm_map_entry_link(map, new_entry);
}
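/*
 * Worked example (illustrative only): clipping an entry covering
 * [0x1000, 0x4000) at start address 0x2000 produces a new front entry
 * [0x1000, 0x2000) linked before the original, while the original entry is
 * adjusted to [0x2000, 0x4000) and keeps its place in the RB tree.
 */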
/*
 * Asserts that the given entry ends at or before the specified address.
 * If necessary, it splits the entry into two.
 *
 * The map must be exclusively locked.
 */
#define vm_map_clip_end(map, entry, endaddr, countp)		\
{								\
	if (endaddr < entry->ba.end)				\
		_vm_map_clip_end(map, entry, endaddr, countp);	\
}

/*
 * This routine is called only when it is known that the entry must be split.
 *
 * The map must be exclusively locked.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end,
		 int *countp)
{
	vm_map_entry_t new_entry;

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->ba.object == NULL && !map->system_map &&
	    VM_MAP_ENTRY_WITHIN_PARTITION(entry)) {
		vm_map_entry_allocate_object(entry);
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry.
	 *
	 * NOTE: The replicated function will adjust start, end, and offset
	 *	 for the remainder of the backing_ba linkages.  We must fixup
	 *	 the top-level entry ourselves.
	 */
	new_entry = vm_map_entry_create(countp);
	*new_entry = *entry;
	new_entry->ba.start = end;
	new_entry->ba.offset += (new_entry->ba.start - entry->ba.start);

	/*
	 * Ordering is important, make sure the new entry is replicated
	 * before we cut the existing entry.
	 */
	vm_map_backing_replicated(map, new_entry, MAP_BACK_CLIPPED);
	vm_map_backing_adjust_end(entry, end);
	vm_map_entry_link(map, new_entry);
}
/*
 * Asserts that the starting and ending region addresses fall within the
 * valid range for the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end)	\
{						\
	if (start < vm_map_min(map))		\
		start = vm_map_min(map);	\
	if (end > vm_map_max(map))		\
		end = vm_map_max(map);		\
	if (start > end)			\
		start = end;			\
}
/*
 * Used to block when an in-transition collision occurs.  The map
 * is unlocked for the sleep and relocked before the return.
 */
void
vm_map_transition_wait(vm_map_t map, int relock)
{
	tsleep_interlock(map, 0);
	vm_map_unlock(map);
	tsleep(map, PINTERLOCKED, "vment", 0);
	if (relock)
		vm_map_lock(map);
}
/*
 * When we do blocking operations with the map lock held it is
 * possible that a clip might have occurred on our in-transit entry,
 * requiring an adjustment to the entry in our loop.  These macros
 * help the pageable and clip_range code deal with the case.  The
 * conditional costs virtually nothing if no clipping has occurred.
 */

#define CLIP_CHECK_BACK(entry, save_start)			\
    do {							\
	    while (entry->ba.start != save_start) {		\
		    entry = vm_map_rb_tree_RB_PREV(entry);	\
		    KASSERT(entry, ("bad entry clip"));		\
	    }							\
    } while(0)

#define CLIP_CHECK_FWD(entry, save_end)				\
    do {							\
	    while (entry->ba.end != save_end) {			\
		    entry = vm_map_rb_tree_RB_NEXT(entry);	\
		    KASSERT(entry, ("bad entry clip"));		\
	    }							\
    } while(0)
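/*
 * Illustrative (hypothetical) usage of the macros above around a blocking
 * operation performed while iterating a clipped range:
 *
 *	vm_offset_t save_start = entry->ba.start;
 *	vm_offset_t save_end = entry->ba.end;
 *
 *	... some operation that may temporarily block ...
 *
 *	CLIP_CHECK_BACK(entry, save_start);	// re-find the clipped head
 *	CLIP_CHECK_FWD(entry, save_end);	// or re-find the clipped tail
 */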
/*
 * Clip the specified range and return the base entry.  The
 * range may cover several entries starting at the returned base
 * and the first and last entry in the covering sequence will be
 * properly clipped to the requested start and end address.
 *
 * If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
 * flag.
 *
 * The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
 * covered by the requested range.
 *
 * The map must be exclusively locked on entry and will remain locked
 * on return.  If no range exists or the range contains holes and you
 * specified that no holes were allowed, NULL will be returned.  This
 * routine may temporarily unlock the map in order to avoid a deadlock when
 * sleeping.
 */
static
vm_map_entry_t
vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end,
		  int *countp, int flags)
{
	vm_map_entry_t start_entry;
	vm_map_entry_t entry;
	vm_map_entry_t next;

	/*
	 * Locate the entry and effect initial clipping.  The in-transition
	 * case does not occur very often so do not try to optimize it.
	 */
again:
	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
		return (NULL);
	entry = start_entry;
	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
		entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
		++mycpu->gd_cnt.v_intrans_coll;
		++mycpu->gd_cnt.v_intrans_wait;
		vm_map_transition_wait(map, 1);
		/*
		 * entry and/or start_entry may have been clipped while
		 * we slept, or may have gone away entirely.  We have
		 * to restart from the lookup.
		 */
		goto again;
	}

	/*
	 * Since we hold an exclusive map lock we do not have to restart
	 * after clipping, even though clipping may block in zalloc.
	 */
	vm_map_clip_start(map, entry, start, countp);
	vm_map_clip_end(map, entry, end, countp);
	entry->eflags |= MAP_ENTRY_IN_TRANSITION;

	/*
	 * Scan entries covered by the range.  When working on the next
	 * entry a restart need only re-loop on the current entry which
	 * we have already locked, since 'next' may have changed.  Also,
	 * even though entry is safe, it may have been clipped so we
	 * have to iterate forwards through the clip after sleeping.
	 */
	for (;;) {
		next = vm_map_rb_tree_RB_NEXT(entry);
		if (next == NULL || next->ba.start >= end)
			break;
		if (flags & MAP_CLIP_NO_HOLES) {
			if (next->ba.start > entry->ba.end) {
				vm_map_unclip_range(map, start_entry,
					start, entry->ba.end, countp, flags);
				return (NULL);
			}
		}

		if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
			vm_offset_t save_end = entry->ba.end;

			next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			++mycpu->gd_cnt.v_intrans_coll;
			++mycpu->gd_cnt.v_intrans_wait;
			vm_map_transition_wait(map, 1);

			/*
			 * clips might have occurred while we blocked.
			 */
			CLIP_CHECK_FWD(entry, save_end);
			CLIP_CHECK_BACK(start_entry, start);
			continue;
		}

		/*
		 * No restart necessary even though clip_end may block, we
		 * are holding the map lock.
		 */
		vm_map_clip_end(map, next, end, countp);
		next->eflags |= MAP_ENTRY_IN_TRANSITION;
		entry = next;
	}
	if (flags & MAP_CLIP_NO_HOLES) {
		if (entry->ba.end != end) {
			vm_map_unclip_range(map, start_entry,
				start, entry->ba.end, countp, flags);
			return (NULL);
		}
	}
	return(start_entry);
}
/*
 * Undo the effect of vm_map_clip_range().  You should pass the same
 * flags and the same range that you passed to vm_map_clip_range().
 * This code will clear the in-transition flag on the entries and
 * wake up anyone waiting.  This code will also simplify the sequence
 * and attempt to merge it with entries before and after the sequence.
 *
 * The map must be locked on entry and will remain locked on return.
 *
 * Note that you should also pass the start_entry returned by
 * vm_map_clip_range().  However, if you block between the two calls
 * with the map unlocked please be aware that the start_entry may
 * have been clipped and you may need to scan it backwards to find
 * the entry corresponding with the original start address.  You are
 * responsible for this, vm_map_unclip_range() expects the correct
 * start_entry to be passed to it and will KASSERT otherwise.
 */
static
void
vm_map_unclip_range(vm_map_t map, vm_map_entry_t start_entry,
		    vm_offset_t start, vm_offset_t end,
		    int *countp, int flags)
{
	vm_map_entry_t entry;

	entry = start_entry;

	KASSERT(entry->ba.start == start, ("unclip_range: illegal base entry"));
	while (entry && entry->ba.start < end) {
		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
			("in-transition flag not set during unclip on: %p",
			entry));
		KASSERT(entry->ba.end <= end,
			("unclip_range: tail wasn't clipped"));
		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
			wakeup(map);
		}
		entry = vm_map_rb_tree_RB_NEXT(entry);
	}

	/*
	 * Simplification does not block so there is no restart case.
	 */
	entry = start_entry;
	while (entry && entry->ba.start < end) {
		vm_map_simplify_entry(map, entry, countp);
		entry = vm_map_rb_tree_RB_NEXT(entry);
	}
}
/*
 * Mark the given range as handled by a subordinate map.
 *
 * This range must have been created with vm_map_find(), and no other
 * operations may have been performed on this range prior to calling
 * vm_map_submap().
 *
 * Submappings cannot be removed.
 */
int
vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
{
	vm_map_entry_t entry;
	int result = KERN_INVALID_ARGUMENT;
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start, &count);
	} else if (entry) {
		entry = vm_map_rb_tree_RB_NEXT(entry);
	} else {
		entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
	}

	vm_map_clip_end(map, entry, end, &count);

	if ((entry->ba.start == start) && (entry->ba.end == end) &&
	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
	    (entry->ba.object == NULL)) {
		entry->ba.sub_map = submap;
		entry->maptype = VM_MAPTYPE_SUBMAP;
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (result);
}
/*
 * Sets the protection of the specified address region in the target map.
 * If "set_max" is specified, the maximum protection is to be set;
 * otherwise, only the current protection is affected.
 *
 * The protection is not applicable to submaps, but is applicable to normal
 * maps and maps governed by virtual page tables.  For example, when operating
 * on a virtual page table our protection basically controls how COW occurs
 * on the backing object, whereas the virtual page table abstraction itself
 * is an abstraction for userland.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
               vm_prot_t new_prot, boolean_t set_max)
{
        vm_map_entry_t current;
        vm_map_entry_t entry;
        int count;

        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        vm_map_lock(map);

        VM_MAP_RANGE_CHECK(map, start, end);

        if (vm_map_lookup_entry(map, start, &entry)) {
                vm_map_clip_start(map, entry, start, &count);
        } else if (entry) {
                entry = vm_map_rb_tree_RB_NEXT(entry);
        } else {
                entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
        }

        /*
         * Make a first pass to check for protection violations.
         */
        current = entry;
        while (current && current->ba.start < end) {
                if (current->maptype == VM_MAPTYPE_SUBMAP) {
                        vm_map_unlock(map);
                        vm_map_entry_release(count);
                        return (KERN_INVALID_ARGUMENT);
                }
                if ((new_prot & current->max_protection) != new_prot) {
                        vm_map_unlock(map);
                        vm_map_entry_release(count);
                        return (KERN_PROTECTION_FAILURE);
                }

                /*
                 * When making a SHARED+RW file mmap writable, update
                 * the vnode's v_lastwrite_ts.
                 */
                if (new_prot & PROT_WRITE &&
                    (current->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
                    (current->maptype == VM_MAPTYPE_NORMAL ||
                     current->maptype == VM_MAPTYPE_VPAGETABLE) &&
                    current->ba.object &&
                    current->ba.object->type == OBJT_VNODE) {
                        struct vnode *vp;

                        vp = current->ba.object->handle;
                        if (vp && vn_lock(vp, LK_EXCLUSIVE | LK_RETRY |
                                              LK_NOWAIT) == 0) {
                                vfs_timestamp(&vp->v_lastwrite_ts);
                                vsetflags(vp, VLASTWRITETS);
                                vn_unlock(vp);
                        }
                }
                current = vm_map_rb_tree_RB_NEXT(current);
        }

        /*
         * Go back and fix up protections. [Note that clipping is not
         * necessary the second time.]
         */
        current = entry;
        while (current && current->ba.start < end) {
                vm_prot_t old_prot;

                vm_map_clip_end(map, current, end, &count);

                old_prot = current->protection;
                if (set_max) {
                        current->max_protection = new_prot;
                        current->protection = new_prot & old_prot;
                } else {
                        current->protection = new_prot;
                }

                /*
                 * Update physical map if necessary. Worry about copy-on-write
                 * here -- CHECK THIS XXX
                 */
                if (current->protection != old_prot) {
#define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
                                                         VM_PROT_ALL)
                        pmap_protect(map->pmap, current->ba.start,
                                     current->ba.end,
                                     current->protection & MASK(current));
#undef MASK
                }

                vm_map_simplify_entry(map, current, &count);

                current = vm_map_rb_tree_RB_NEXT(current);
        }
        vm_map_unlock(map);
        vm_map_entry_release(count);
        return (KERN_SUCCESS);
}
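
/*
 * Worked example of the set_max distinction implemented above
 * (illustrative only):
 *
 *      vm_map_protect(map, start, end, VM_PROT_READ, TRUE);
 *              -> max_protection becomes READ; protection becomes READ & old
 *      vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *              -> only the current protection changes
 *
 *      In both forms new_prot must be a subset of the existing
 *      max_protection or KERN_PROTECTION_FAILURE is returned.
 */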
2187 * This routine traverses a processes map handling the madvise
2188 * system call. Advisories are classified as either those effecting
2189 * the vm_map_entry structure, or those effecting the underlying
2192 * The <value> argument is used for extended madvise calls.
2197 vm_map_madvise(vm_map_t map
, vm_offset_t start
, vm_offset_t end
,
2198 int behav
, off_t value
)
2200 vm_map_entry_t current
, entry
;
2206 * Some madvise calls directly modify the vm_map_entry, in which case
2207 * we need to use an exclusive lock on the map and we need to perform
2208 * various clipping operations. Otherwise we only need a read-lock
2211 count
= vm_map_entry_reserve(MAP_RESERVE_COUNT
);
2215 case MADV_SEQUENTIAL
:
2229 vm_map_lock_read(map
);
2232 vm_map_entry_release(count
);
2237 * Locate starting entry and clip if necessary.
2240 VM_MAP_RANGE_CHECK(map
, start
, end
);
2242 if (vm_map_lookup_entry(map
, start
, &entry
)) {
2244 vm_map_clip_start(map
, entry
, start
, &count
);
2246 entry
= vm_map_rb_tree_RB_NEXT(entry
);
2248 entry
= RB_MIN(vm_map_rb_tree
, &map
->rb_root
);
2253 * madvise behaviors that are implemented in the vm_map_entry.
2255 * We clip the vm_map_entry so that behavioral changes are
2256 * limited to the specified address range.
2258 for (current
= entry
;
2259 current
&& current
->ba
.start
< end
;
2260 current
= vm_map_rb_tree_RB_NEXT(current
)) {
2264 if (current
->maptype
== VM_MAPTYPE_SUBMAP
)
2267 vm_map_clip_end(map
, current
, end
, &count
);
2271 vm_map_entry_set_behavior(current
, MAP_ENTRY_BEHAV_NORMAL
);
2273 case MADV_SEQUENTIAL
:
2274 vm_map_entry_set_behavior(current
, MAP_ENTRY_BEHAV_SEQUENTIAL
);
2277 vm_map_entry_set_behavior(current
, MAP_ENTRY_BEHAV_RANDOM
);
2280 current
->eflags
|= MAP_ENTRY_NOSYNC
;
2283 current
->eflags
&= ~MAP_ENTRY_NOSYNC
;
2286 current
->eflags
|= MAP_ENTRY_NOCOREDUMP
;
2289 current
->eflags
&= ~MAP_ENTRY_NOCOREDUMP
;
2293 * Set the page directory page for a map
2294 * governed by a virtual page table. Mark
2295 * the entry as being governed by a virtual
2296 * page table if it is not.
2298 * XXX the page directory page is stored
2299 * in the avail_ssize field if the map_entry.
2301 * XXX the map simplification code does not
2302 * compare this field so weird things may
2303 * happen if you do not apply this function
2304 * to the entire mapping governed by the
2305 * virtual page table.
2307 if (current
->maptype
!= VM_MAPTYPE_VPAGETABLE
) {
2311 current
->aux
.master_pde
= value
;
2312 pmap_remove(map
->pmap
,
2313 current
->ba
.start
, current
->ba
.end
);
2317 * Invalidate the related pmap entries, used
2318 * to flush portions of the real kernel's
2319 * pmap when the caller has removed or
2320 * modified existing mappings in a virtual
2323 * (exclusive locked map version does not
2324 * need the range interlock).
2326 pmap_remove(map
->pmap
,
2327 current
->ba
.start
, current
->ba
.end
);
2333 vm_map_simplify_entry(map
, current
, &count
);
2341 * madvise behaviors that are implemented in the underlying
2344 * Since we don't clip the vm_map_entry, we have to clip
2345 * the vm_object pindex and count.
2347 * NOTE! These functions are only supported on normal maps,
2348 * except MADV_INVAL which is also supported on
2349 * virtual page tables.
2351 * NOTE! These functions only apply to the top-most object.
2352 * It is not applicable to backing objects.
2354 for (current
= entry
;
2355 current
&& current
->ba
.start
< end
;
2356 current
= vm_map_rb_tree_RB_NEXT(current
)) {
2357 vm_offset_t useStart
;
2359 if (current
->maptype
!= VM_MAPTYPE_NORMAL
&&
2360 (current
->maptype
!= VM_MAPTYPE_VPAGETABLE
||
2361 behav
!= MADV_INVAL
)) {
2365 pindex
= OFF_TO_IDX(current
->ba
.offset
);
2366 delta
= atop(current
->ba
.end
- current
->ba
.start
);
2367 useStart
= current
->ba
.start
;
2369 if (current
->ba
.start
< start
) {
2370 pindex
+= atop(start
- current
->ba
.start
);
2371 delta
-= atop(start
- current
->ba
.start
);
2374 if (current
->ba
.end
> end
)
2375 delta
-= atop(current
->ba
.end
- end
);
2377 if ((vm_spindex_t
)delta
<= 0)
2380 if (behav
== MADV_INVAL
) {
2382 * Invalidate the related pmap entries, used
2383 * to flush portions of the real kernel's
2384 * pmap when the caller has removed or
2385 * modified existing mappings in a virtual
2388 * (shared locked map version needs the
2389 * interlock, see vm_fault()).
2391 struct vm_map_ilock ilock
;
2393 KASSERT(useStart
>= VM_MIN_USER_ADDRESS
&&
2394 useStart
+ ptoa(delta
) <=
2395 VM_MAX_USER_ADDRESS
,
2396 ("Bad range %016jx-%016jx (%016jx)",
2397 useStart
, useStart
+ ptoa(delta
),
2399 vm_map_interlock(map
, &ilock
,
2401 useStart
+ ptoa(delta
));
2402 pmap_remove(map
->pmap
,
2404 useStart
+ ptoa(delta
));
2405 vm_map_deinterlock(map
, &ilock
);
2407 vm_object_madvise(current
->ba
.object
,
2408 pindex
, delta
, behav
);
2412 * Try to populate the page table. Mappings governed
2413 * by virtual page tables cannot be pre-populated
2414 * without a lot of work so don't try.
2416 if (behav
== MADV_WILLNEED
&&
2417 current
->maptype
!= VM_MAPTYPE_VPAGETABLE
) {
2418 pmap_object_init_pt(
2421 (delta
<< PAGE_SHIFT
),
2422 MAP_PREFAULT_MADVISE
2426 vm_map_unlock_read(map
);
2428 vm_map_entry_release(count
);
/*
 * Sets the inheritance of the specified address range in the target map.
 * Inheritance affects how the map will be shared with child maps at the
 * time of vm_map_fork.
 */
int
vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
               vm_inherit_t new_inheritance)
{
        vm_map_entry_t entry;
        vm_map_entry_t temp_entry;
        int count;

        switch (new_inheritance) {
        case VM_INHERIT_NONE:
        case VM_INHERIT_COPY:
        case VM_INHERIT_SHARE:
                break;
        default:
                return (KERN_INVALID_ARGUMENT);
        }

        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        vm_map_lock(map);

        VM_MAP_RANGE_CHECK(map, start, end);

        if (vm_map_lookup_entry(map, start, &temp_entry)) {
                entry = temp_entry;
                vm_map_clip_start(map, entry, start, &count);
        } else if (temp_entry) {
                entry = vm_map_rb_tree_RB_NEXT(temp_entry);
        } else {
                entry = RB_MIN(vm_map_rb_tree, &map->rb_root);
        }

        while (entry && entry->ba.start < end) {
                vm_map_clip_end(map, entry, end, &count);

                entry->inheritance = new_inheritance;

                vm_map_simplify_entry(map, entry, &count);

                entry = vm_map_rb_tree_RB_NEXT(entry);
        }
        vm_map_unlock(map);
        vm_map_entry_release(count);
        return (KERN_SUCCESS);
}
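
/*
 * Example (hypothetical caller, for illustration only): minherit(2)
 * style usage that keeps a region out of child processes at fork time:
 *
 *      vm_map_inherit(map, start, end, VM_INHERIT_NONE);
 */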
2484 * Implement the semantics of mlock
2487 vm_map_unwire(vm_map_t map
, vm_offset_t start
, vm_offset_t real_end
,
2488 boolean_t new_pageable
)
2490 vm_map_entry_t entry
;
2491 vm_map_entry_t start_entry
;
2493 int rv
= KERN_SUCCESS
;
2496 count
= vm_map_entry_reserve(MAP_RESERVE_COUNT
);
2498 VM_MAP_RANGE_CHECK(map
, start
, real_end
);
2501 start_entry
= vm_map_clip_range(map
, start
, end
, &count
,
2503 if (start_entry
== NULL
) {
2505 vm_map_entry_release(count
);
2506 return (KERN_INVALID_ADDRESS
);
2509 if (new_pageable
== 0) {
2510 entry
= start_entry
;
2511 while (entry
&& entry
->ba
.start
< end
) {
2512 vm_offset_t save_start
;
2513 vm_offset_t save_end
;
2516 * Already user wired or hard wired (trivial cases)
2518 if (entry
->eflags
& MAP_ENTRY_USER_WIRED
) {
2519 entry
= vm_map_rb_tree_RB_NEXT(entry
);
2522 if (entry
->wired_count
!= 0) {
2523 entry
->wired_count
++;
2524 entry
->eflags
|= MAP_ENTRY_USER_WIRED
;
2525 entry
= vm_map_rb_tree_RB_NEXT(entry
);
2530 * A new wiring requires instantiation of appropriate
2531 * management structures and the faulting in of the
2534 if (entry
->maptype
== VM_MAPTYPE_NORMAL
||
2535 entry
->maptype
== VM_MAPTYPE_VPAGETABLE
) {
2536 int copyflag
= entry
->eflags
&
2537 MAP_ENTRY_NEEDS_COPY
;
2538 if (copyflag
&& ((entry
->protection
&
2539 VM_PROT_WRITE
) != 0)) {
2540 vm_map_entry_shadow(entry
);
2541 } else if (entry
->ba
.object
== NULL
&&
2543 vm_map_entry_allocate_object(entry
);
2546 entry
->wired_count
++;
2547 entry
->eflags
|= MAP_ENTRY_USER_WIRED
;
2550 * Now fault in the area. Note that vm_fault_wire()
2551 * may release the map lock temporarily, it will be
2552 * relocked on return. The in-transition
2553 * flag protects the entries.
2555 save_start
= entry
->ba
.start
;
2556 save_end
= entry
->ba
.end
;
2557 rv
= vm_fault_wire(map
, entry
, TRUE
, 0);
2559 CLIP_CHECK_BACK(entry
, save_start
);
2561 KASSERT(entry
->wired_count
== 1, ("bad wired_count on entry"));
2562 entry
->eflags
&= ~MAP_ENTRY_USER_WIRED
;
2563 entry
->wired_count
= 0;
2564 if (entry
->ba
.end
== save_end
)
2566 entry
= vm_map_rb_tree_RB_NEXT(entry
);
2568 ("bad entry clip during backout"));
2570 end
= save_start
; /* unwire the rest */
2574 * note that even though the entry might have been
2575 * clipped, the USER_WIRED flag we set prevents
2576 * duplication so we do not have to do a
2579 entry
= vm_map_rb_tree_RB_NEXT(entry
);
2583 * If we failed fall through to the unwiring section to
2584 * unwire what we had wired so far. 'end' has already
2591 * start_entry might have been clipped if we unlocked the
2592 * map and blocked. No matter how clipped it has gotten
2593 * there should be a fragment that is on our start boundary.
2595 CLIP_CHECK_BACK(start_entry
, start
);
2599 * Deal with the unwiring case.
2603 * This is the unwiring case. We must first ensure that the
2604 * range to be unwired is really wired down. We know there
2607 entry
= start_entry
;
2608 while (entry
&& entry
->ba
.start
< end
) {
2609 if ((entry
->eflags
& MAP_ENTRY_USER_WIRED
) == 0) {
2610 rv
= KERN_INVALID_ARGUMENT
;
2613 KASSERT(entry
->wired_count
!= 0,
2614 ("wired count was 0 with USER_WIRED set! %p",
2616 entry
= vm_map_rb_tree_RB_NEXT(entry
);
2620 * Now decrement the wiring count for each region. If a region
2621 * becomes completely unwired, unwire its physical pages and
2625 * The map entries are processed in a loop, checking to
2626 * make sure the entry is wired and asserting it has a wired
2627 * count. However, another loop was inserted more-or-less in
2628 * the middle of the unwiring path. This loop picks up the
2629 * "entry" loop variable from the first loop without first
2630 * setting it to start_entry. Naturally, the secound loop
2631 * is never entered and the pages backing the entries are
2632 * never unwired. This can lead to a leak of wired pages.
2634 entry
= start_entry
;
2635 while (entry
&& entry
->ba
.start
< end
) {
2636 KASSERT(entry
->eflags
& MAP_ENTRY_USER_WIRED
,
2637 ("expected USER_WIRED on entry %p", entry
));
2638 entry
->eflags
&= ~MAP_ENTRY_USER_WIRED
;
2639 entry
->wired_count
--;
2640 if (entry
->wired_count
== 0)
2641 vm_fault_unwire(map
, entry
);
2642 entry
= vm_map_rb_tree_RB_NEXT(entry
);
2646 vm_map_unclip_range(map
, start_entry
, start
, real_end
, &count
,
2649 vm_map_entry_release(count
);
2655 * Sets the pageability of the specified address range in the target map.
2656 * Regions specified as not pageable require locked-down physical
2657 * memory and physical page maps.
2659 * The map must not be locked, but a reference must remain to the map
2660 * throughout the call.
2662 * This function may be called via the zalloc path and must properly
2663 * reserve map entries for kernel_map.
2668 vm_map_wire(vm_map_t map
, vm_offset_t start
, vm_offset_t real_end
, int kmflags
)
2670 vm_map_entry_t entry
;
2671 vm_map_entry_t start_entry
;
2673 int rv
= KERN_SUCCESS
;
2676 if (kmflags
& KM_KRESERVE
)
2677 count
= vm_map_entry_kreserve(MAP_RESERVE_COUNT
);
2679 count
= vm_map_entry_reserve(MAP_RESERVE_COUNT
);
2681 VM_MAP_RANGE_CHECK(map
, start
, real_end
);
2684 start_entry
= vm_map_clip_range(map
, start
, end
, &count
,
2686 if (start_entry
== NULL
) {
2688 rv
= KERN_INVALID_ADDRESS
;
2691 if ((kmflags
& KM_PAGEABLE
) == 0) {
2695 * 1. Holding the write lock, we create any shadow or zero-fill
2696 * objects that need to be created. Then we clip each map
2697 * entry to the region to be wired and increment its wiring
2698 * count. We create objects before clipping the map entries
2699 * to avoid object proliferation.
2701 * 2. We downgrade to a read lock, and call vm_fault_wire to
2702 * fault in the pages for any newly wired area (wired_count is
2705 * Downgrading to a read lock for vm_fault_wire avoids a
2706 * possible deadlock with another process that may have faulted
2707 * on one of the pages to be wired (it would mark the page busy,
2708 * blocking us, then in turn block on the map lock that we
2709 * hold). Because of problems in the recursive lock package,
2710 * we cannot upgrade to a write lock in vm_map_lookup. Thus,
2711 * any actions that require the write lock must be done
2712 * beforehand. Because we keep the read lock on the map, the
2713 * copy-on-write status of the entries we modify here cannot
2716 entry
= start_entry
;
2717 while (entry
&& entry
->ba
.start
< end
) {
2719 * Trivial case if the entry is already wired
2721 if (entry
->wired_count
) {
2722 entry
->wired_count
++;
2723 entry
= vm_map_rb_tree_RB_NEXT(entry
);
2728 * The entry is being newly wired, we have to setup
2729 * appropriate management structures. A shadow
2730 * object is required for a copy-on-write region,
2731 * or a normal object for a zero-fill region. We
2732 * do not have to do this for entries that point to sub
2733 * maps because we won't hold the lock on the sub map.
2735 if (entry
->maptype
== VM_MAPTYPE_NORMAL
||
2736 entry
->maptype
== VM_MAPTYPE_VPAGETABLE
) {
2737 int copyflag
= entry
->eflags
&
2738 MAP_ENTRY_NEEDS_COPY
;
2739 if (copyflag
&& ((entry
->protection
&
2740 VM_PROT_WRITE
) != 0)) {
2741 vm_map_entry_shadow(entry
);
2742 } else if (entry
->ba
.object
== NULL
&&
2744 vm_map_entry_allocate_object(entry
);
2747 entry
->wired_count
++;
2748 entry
= vm_map_rb_tree_RB_NEXT(entry
);
2756 * HACK HACK HACK HACK
2758 * vm_fault_wire() temporarily unlocks the map to avoid
2759 * deadlocks. The in-transition flag from vm_map_clip_range
2760 * call should protect us from changes while the map is
2763 * NOTE: Previously this comment stated that clipping might
2764 * still occur while the entry is unlocked, but from
2765 * what I can tell it actually cannot.
2767 * It is unclear whether the CLIP_CHECK_*() calls
2768 * are still needed but we keep them in anyway.
2770 * HACK HACK HACK HACK
2773 entry
= start_entry
;
2774 while (entry
&& entry
->ba
.start
< end
) {
2776 * If vm_fault_wire fails for any page we need to undo
2777 * what has been done. We decrement the wiring count
2778 * for those pages which have not yet been wired (now)
2779 * and unwire those that have (later).
2781 vm_offset_t save_start
= entry
->ba
.start
;
2782 vm_offset_t save_end
= entry
->ba
.end
;
2784 if (entry
->wired_count
== 1)
2785 rv
= vm_fault_wire(map
, entry
, FALSE
, kmflags
);
2787 CLIP_CHECK_BACK(entry
, save_start
);
2789 KASSERT(entry
->wired_count
== 1,
2790 ("wired_count changed unexpectedly"));
2791 entry
->wired_count
= 0;
2792 if (entry
->ba
.end
== save_end
)
2794 entry
= vm_map_rb_tree_RB_NEXT(entry
);
2796 ("bad entry clip during backout"));
2801 CLIP_CHECK_FWD(entry
, save_end
);
2802 entry
= vm_map_rb_tree_RB_NEXT(entry
);
2806 * If a failure occured undo everything by falling through
2807 * to the unwiring code. 'end' has already been adjusted
2811 kmflags
|= KM_PAGEABLE
;
2814 * start_entry is still IN_TRANSITION but may have been
2815 * clipped since vm_fault_wire() unlocks and relocks the
2816 * map. No matter how clipped it has gotten there should
2817 * be a fragment that is on our start boundary.
2819 CLIP_CHECK_BACK(start_entry
, start
);
2822 if (kmflags
& KM_PAGEABLE
) {
2824 * This is the unwiring case. We must first ensure that the
2825 * range to be unwired is really wired down. We know there
2828 entry
= start_entry
;
2829 while (entry
&& entry
->ba
.start
< end
) {
2830 if (entry
->wired_count
== 0) {
2831 rv
= KERN_INVALID_ARGUMENT
;
2834 entry
= vm_map_rb_tree_RB_NEXT(entry
);
2838 * Now decrement the wiring count for each region. If a region
2839 * becomes completely unwired, unwire its physical pages and
2842 entry
= start_entry
;
2843 while (entry
&& entry
->ba
.start
< end
) {
2844 entry
->wired_count
--;
2845 if (entry
->wired_count
== 0)
2846 vm_fault_unwire(map
, entry
);
2847 entry
= vm_map_rb_tree_RB_NEXT(entry
);
2851 vm_map_unclip_range(map
, start_entry
, start
, real_end
,
2852 &count
, MAP_CLIP_NO_HOLES
);
2855 if (kmflags
& KM_KRESERVE
)
2856 vm_map_entry_krelease(count
);
2858 vm_map_entry_release(count
);
/*
 * Mark a newly allocated address range as wired but do not fault in
 * the pages.  The caller is expected to load the pages into the object.
 *
 * The map must be locked on entry and will remain locked on return.
 * No other requirements.
 */
void
vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size,
                       int *countp)
{
        vm_map_entry_t scan;
        vm_map_entry_t entry;

        entry = vm_map_clip_range(map, addr, addr + size,
                                  countp, MAP_CLIP_NO_HOLES);
        scan = entry;
        while (scan && scan->ba.start < addr + size) {
                KKASSERT(scan->wired_count == 0);
                scan->wired_count = 1;
                scan = vm_map_rb_tree_RB_NEXT(scan);
        }
        vm_map_unclip_range(map, entry, addr, addr + size,
                            countp, MAP_CLIP_NO_HOLES);
}
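
/*
 * Example usage sketch (hypothetical, for illustration only): a kernel
 * allocator that wires a freshly created range and then populates the
 * backing object itself might do:
 *
 *      vm_map_set_wired_quick(map, addr, size, &count);
 *      ... load pages into the object covering [addr, addr + size) ...
 */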
2889 * Push any dirty cached pages in the address range to their pager.
2890 * If syncio is TRUE, dirty pages are written synchronously.
2891 * If invalidate is TRUE, any cached pages are freed as well.
2893 * This routine is called by sys_msync()
2895 * Returns an error if any part of the specified range is not mapped.
2900 vm_map_clean(vm_map_t map
, vm_offset_t start
, vm_offset_t end
,
2901 boolean_t syncio
, boolean_t invalidate
)
2903 vm_map_entry_t current
;
2904 vm_map_entry_t next
;
2905 vm_map_entry_t entry
;
2906 vm_map_backing_t ba
;
2909 vm_ooffset_t offset
;
2911 vm_map_lock_read(map
);
2912 VM_MAP_RANGE_CHECK(map
, start
, end
);
2913 if (!vm_map_lookup_entry(map
, start
, &entry
)) {
2914 vm_map_unlock_read(map
);
2915 return (KERN_INVALID_ADDRESS
);
2917 lwkt_gettoken(&map
->token
);
2920 * Make a first pass to check for holes.
2923 while (current
&& current
->ba
.start
< end
) {
2924 if (current
->maptype
== VM_MAPTYPE_SUBMAP
) {
2925 lwkt_reltoken(&map
->token
);
2926 vm_map_unlock_read(map
);
2927 return (KERN_INVALID_ARGUMENT
);
2929 next
= vm_map_rb_tree_RB_NEXT(current
);
2930 if (end
> current
->ba
.end
&&
2932 current
->ba
.end
!= next
->ba
.start
)) {
2933 lwkt_reltoken(&map
->token
);
2934 vm_map_unlock_read(map
);
2935 return (KERN_INVALID_ADDRESS
);
2941 pmap_remove(vm_map_pmap(map
), start
, end
);
2944 * Make a second pass, cleaning/uncaching pages from the indicated
2948 while (current
&& current
->ba
.start
< end
) {
2949 offset
= current
->ba
.offset
+ (start
- current
->ba
.start
);
2950 size
= (end
<= current
->ba
.end
? end
: current
->ba
.end
) - start
;
2952 switch(current
->maptype
) {
2953 case VM_MAPTYPE_SUBMAP
:
2956 vm_map_entry_t tentry
;
2959 smap
= current
->ba
.sub_map
;
2960 vm_map_lock_read(smap
);
2961 vm_map_lookup_entry(smap
, offset
, &tentry
);
2962 if (tentry
== NULL
) {
2963 tsize
= vm_map_max(smap
) - offset
;
2965 offset
= 0 + (offset
- vm_map_min(smap
));
2967 tsize
= tentry
->ba
.end
- offset
;
2969 offset
= tentry
->ba
.offset
+
2970 (offset
- tentry
->ba
.start
);
2972 vm_map_unlock_read(smap
);
2977 case VM_MAPTYPE_NORMAL
:
2978 case VM_MAPTYPE_VPAGETABLE
:
2986 object
= ba
->object
;
2988 vm_object_hold(object
);
2994 * Note that there is absolutely no sense in writing out
2995 * anonymous objects, so we track down the vnode object
2997 * We invalidate (remove) all pages from the address space
2998 * anyway, for semantic correctness.
3000 * note: certain anonymous maps, such as MAP_NOSYNC maps,
3001 * may start out with a NULL object.
3003 * XXX do we really want to stop at the first backing store
3004 * here if there are more? XXX
3010 while (ba
->backing_ba
!= NULL
) {
3011 offset
-= ba
->offset
;
3012 ba
= ba
->backing_ba
;
3013 offset
+= ba
->offset
;
3015 if (tobj
->size
< OFF_TO_IDX(offset
+ size
))
3016 size
= IDX_TO_OFF(tobj
->size
) - offset
;
3017 break; /* XXX this break is not correct */
3019 if (object
!= tobj
) {
3021 vm_object_drop(object
);
3023 vm_object_hold(object
);
3027 if (object
&& (object
->type
== OBJT_VNODE
) &&
3028 (current
->protection
& VM_PROT_WRITE
) &&
3029 (object
->flags
& OBJ_NOMSYNC
) == 0) {
3031 * Flush pages if writing is allowed, invalidate them
3032 * if invalidation requested. Pages undergoing I/O
3033 * will be ignored by vm_object_page_remove().
3035 * We cannot lock the vnode and then wait for paging
3036 * to complete without deadlocking against vm_fault.
3037 * Instead we simply call vm_object_page_remove() and
3038 * allow it to block internally on a page-by-page
3039 * basis when it encounters pages undergoing async
3044 /* no chain wait needed for vnode objects */
3045 vm_object_reference_locked(object
);
3046 vn_lock(object
->handle
, LK_EXCLUSIVE
| LK_RETRY
);
3047 flags
= (syncio
|| invalidate
) ? OBJPC_SYNC
: 0;
3048 flags
|= invalidate
? OBJPC_INVAL
: 0;
3051 * When operating on a virtual page table just
3052 * flush the whole object. XXX we probably ought
3055 switch(current
->maptype
) {
3056 case VM_MAPTYPE_NORMAL
:
3057 vm_object_page_clean(object
,
3059 OFF_TO_IDX(offset
+ size
+ PAGE_MASK
),
3062 case VM_MAPTYPE_VPAGETABLE
:
3063 vm_object_page_clean(object
, 0, 0, flags
);
3066 vn_unlock(((struct vnode
*)object
->handle
));
3067 vm_object_deallocate_locked(object
);
3069 if (object
&& invalidate
&&
3070 ((object
->type
== OBJT_VNODE
) ||
3071 (object
->type
== OBJT_DEVICE
) ||
3072 (object
->type
== OBJT_MGTDEVICE
))) {
3074 ((object
->type
== OBJT_DEVICE
) ||
3075 (object
->type
== OBJT_MGTDEVICE
)) ? FALSE
: TRUE
;
3076 /* no chain wait needed for vnode/device objects */
3077 vm_object_reference_locked(object
);
3078 switch(current
->maptype
) {
3079 case VM_MAPTYPE_NORMAL
:
3080 vm_object_page_remove(object
,
3082 OFF_TO_IDX(offset
+ size
+ PAGE_MASK
),
3085 case VM_MAPTYPE_VPAGETABLE
:
3086 vm_object_page_remove(object
, 0, 0, clean_only
);
3089 vm_object_deallocate_locked(object
);
3093 vm_object_drop(object
);
3094 current
= vm_map_rb_tree_RB_NEXT(current
);
3097 lwkt_reltoken(&map
->token
);
3098 vm_map_unlock_read(map
);
3100 return (KERN_SUCCESS
);
/*
 * Make the region specified by this entry pageable.
 *
 * The vm_map must be exclusively locked.
 */
static
void
vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
{
        entry->eflags &= ~MAP_ENTRY_USER_WIRED;
        entry->wired_count = 0;
        vm_fault_unwire(map, entry);
}

/*
 * Deallocate the given entry from the target map.
 *
 * The vm_map must be exclusively locked.
 */
static
void
vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
{
        vm_map_entry_unlink(map, entry);
        map->size -= entry->ba.end - entry->ba.start;
        vm_map_entry_dispose(map, entry, countp);
}
3130 * Deallocates the given address range from the target map.
3132 * The vm_map must be exclusively locked.
3135 vm_map_delete(vm_map_t map
, vm_offset_t start
, vm_offset_t end
, int *countp
)
3138 vm_map_entry_t entry
;
3139 vm_map_entry_t first_entry
;
3140 vm_offset_t hole_start
;
3142 ASSERT_VM_MAP_LOCKED(map
);
3143 lwkt_gettoken(&map
->token
);
3146 * Find the start of the region, and clip it. Set entry to point
3147 * at the first record containing the requested address or, if no
3148 * such record exists, the next record with a greater address. The
3149 * loop will run from this point until a record beyond the termination
3150 * address is encountered.
3152 * Adjust freehint[] for either the clip case or the extension case.
3154 * GGG see other GGG comment.
3156 if (vm_map_lookup_entry(map
, start
, &first_entry
)) {
3157 entry
= first_entry
;
3158 vm_map_clip_start(map
, entry
, start
, countp
);
3162 entry
= vm_map_rb_tree_RB_NEXT(first_entry
);
3164 hole_start
= first_entry
->ba
.start
;
3166 hole_start
= first_entry
->ba
.end
;
3168 entry
= RB_MIN(vm_map_rb_tree
, &map
->rb_root
);
3170 hole_start
= vm_map_min(map
);
3172 hole_start
= vm_map_max(map
);
3177 * Step through all entries in this region
3179 while (entry
&& entry
->ba
.start
< end
) {
3180 vm_map_entry_t next
;
3182 vm_pindex_t offidxstart
, offidxend
, count
;
3185 * If we hit an in-transition entry we have to sleep and
3186 * retry. It's easier (and not really slower) to just retry
3187 * since this case occurs so rarely and the hint is already
3188 * pointing at the right place. We have to reset the
3189 * start offset so as not to accidently delete an entry
3190 * another process just created in vacated space.
3192 if (entry
->eflags
& MAP_ENTRY_IN_TRANSITION
) {
3193 entry
->eflags
|= MAP_ENTRY_NEEDS_WAKEUP
;
3194 start
= entry
->ba
.start
;
3195 ++mycpu
->gd_cnt
.v_intrans_coll
;
3196 ++mycpu
->gd_cnt
.v_intrans_wait
;
3197 vm_map_transition_wait(map
, 1);
3200 vm_map_clip_end(map
, entry
, end
, countp
);
3202 s
= entry
->ba
.start
;
3204 next
= vm_map_rb_tree_RB_NEXT(entry
);
3206 offidxstart
= OFF_TO_IDX(entry
->ba
.offset
);
3207 count
= OFF_TO_IDX(e
- s
);
3209 switch(entry
->maptype
) {
3210 case VM_MAPTYPE_NORMAL
:
3211 case VM_MAPTYPE_VPAGETABLE
:
3212 case VM_MAPTYPE_SUBMAP
:
3213 object
= entry
->ba
.object
;
3221 * Unwire before removing addresses from the pmap; otherwise,
3222 * unwiring will put the entries back in the pmap.
3224 * Generally speaking, doing a bulk pmap_remove() before
3225 * removing the pages from the VM object is better at
3226 * reducing unnecessary IPIs. The pmap code is now optimized
3227 * to not blindly iterate the range when pt and pd pages
3230 if (entry
->wired_count
!= 0)
3231 vm_map_entry_unwire(map
, entry
);
3233 offidxend
= offidxstart
+ count
;
3235 if (object
== &kernel_object
) {
3236 pmap_remove(map
->pmap
, s
, e
);
3237 vm_object_hold(object
);
3238 vm_object_page_remove(object
, offidxstart
,
3240 vm_object_drop(object
);
3241 } else if (object
&& object
->type
!= OBJT_DEFAULT
&&
3242 object
->type
!= OBJT_SWAP
) {
3244 * vnode object routines cannot be chain-locked,
3245 * but since we aren't removing pages from the
3246 * object here we can use a shared hold.
3248 vm_object_hold_shared(object
);
3249 pmap_remove(map
->pmap
, s
, e
);
3250 vm_object_drop(object
);
3251 } else if (object
) {
3252 vm_object_hold(object
);
3253 pmap_remove(map
->pmap
, s
, e
);
3255 if (object
!= NULL
&&
3256 object
->ref_count
!= 1 &&
3257 (object
->flags
& (OBJ_NOSPLIT
|OBJ_ONEMAPPING
)) ==
3259 (object
->type
== OBJT_DEFAULT
||
3260 object
->type
== OBJT_SWAP
)) {
3262 * When ONEMAPPING is set we can destroy the
3263 * pages underlying the entry's range.
3265 vm_object_page_remove(object
, offidxstart
,
3267 if (object
->type
== OBJT_SWAP
) {
3268 swap_pager_freespace(object
,
3272 if (offidxend
>= object
->size
&&
3273 offidxstart
< object
->size
) {
3274 object
->size
= offidxstart
;
3277 vm_object_drop(object
);
3278 } else if (entry
->maptype
== VM_MAPTYPE_UKSMAP
) {
3279 pmap_remove(map
->pmap
, s
, e
);
3283 * Delete the entry (which may delete the object) only after
3284 * removing all pmap entries pointing to its pages.
3285 * (Otherwise, its page frames may be reallocated, and any
3286 * modify bits will be set in the wrong object!)
3288 vm_map_entry_delete(map
, entry
, countp
);
3293 * We either reached the end and use vm_map_max as the end
3294 * address, or we didn't and we use the next entry as the
3297 if (entry
== NULL
) {
3298 vm_map_freehint_hole(map
, hole_start
,
3299 vm_map_max(map
) - hole_start
);
3301 vm_map_freehint_hole(map
, hole_start
,
3302 entry
->ba
.start
- hole_start
);
3305 lwkt_reltoken(&map
->token
);
3307 return (KERN_SUCCESS
);
/*
 * Remove the given address range from the target map.
 * This is the exported form of vm_map_delete.
 */
int
vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
        int result;
        int count;

        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        vm_map_lock(map);
        VM_MAP_RANGE_CHECK(map, start, end);
        result = vm_map_delete(map, start, end, &count);
        vm_map_unlock(map);
        vm_map_entry_release(count);

        return (result);
}
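
/*
 * Example (illustrative only): callers normally remove page-aligned
 * ranges, e.g. a kmem-style free path of the assumed form:
 *
 *      vm_map_remove(map, trunc_page(addr), round_page(addr + size));
 */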
/*
 * Assert that the target map allows the specified privilege on the
 * entire address region given.  The entire region must be allocated.
 *
 * The caller must specify whether the vm_map is already locked or not.
 */
boolean_t
vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
                        vm_prot_t protection, boolean_t have_lock)
{
        vm_map_entry_t entry;
        vm_map_entry_t tmp_entry;
        boolean_t result = TRUE;

        if (have_lock == FALSE)
                vm_map_lock_read(map);

        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
                if (have_lock == FALSE)
                        vm_map_unlock_read(map);
                return (FALSE);
        }
        entry = tmp_entry;

        while (start < end) {
                if (entry == NULL) {
                        result = FALSE;
                        break;
                }

                /*
                 * No holes allowed!
                 */
                if (start < entry->ba.start) {
                        result = FALSE;
                        break;
                }

                /*
                 * Check protection associated with entry.
                 */
                if ((entry->protection & protection) != protection) {
                        result = FALSE;
                        break;
                }

                /* go to next entry */
                start = entry->ba.end;
                entry = vm_map_rb_tree_RB_NEXT(entry);
        }
        if (have_lock == FALSE)
                vm_map_unlock_read(map);
        return (result);
}
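
/*
 * Example (hypothetical caller, for illustration only): verifying that
 * an unlocked map allows reads across a range before acting on it:
 *
 *      if (!vm_map_check_protection(map, start, end, VM_PROT_READ, FALSE))
 *              return (EFAULT);
 */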
/*
 * vm_map_backing structures are not shared across forks and must be
 * replicated.
 *
 * Generally speaking we must reallocate the backing_ba sequence and
 * also adjust it for any changes made to the base entry->ba.start and
 * entry->ba.end.  The first ba in the chain is of course &entry->ba,
 * so we only need to adjust subsequent ba's start, end, and offset.
 *
 * MAP_BACK_CLIPPED     - Called as part of a clipping replication.
 *                        Do not clear OBJ_ONEMAPPING.
 *
 * MAP_BACK_BASEOBJREFD - Called from vm_map_insert().  The base object
 *                        has already been referenced.
 */
static
void
vm_map_backing_replicated(vm_map_t map, vm_map_entry_t entry, int flags)
{
        vm_map_backing_t ba;
        vm_map_backing_t nba;
        vm_object_t object;

        ba = &entry->ba;
        for (;;) {
                object = ba->object;
                ba->pmap = map->pmap;
                if (object &&
                    (entry->maptype == VM_MAPTYPE_VPAGETABLE ||
                     entry->maptype == VM_MAPTYPE_NORMAL)) {
                        if (ba != &entry->ba ||
                            (flags & MAP_BACK_BASEOBJREFD) == 0) {
                                vm_object_reference_quick(object);
                        }
                        vm_map_backing_attach(ba);
                        if ((flags & MAP_BACK_CLIPPED) == 0 &&
                            object->ref_count > 1) {
                                vm_object_clear_flag(object, OBJ_ONEMAPPING);
                        }
                }
                if (ba->backing_ba == NULL)
                        break;
                nba = kmalloc(sizeof(*nba), M_MAP_BACKING, M_INTWAIT);
                *nba = *ba->backing_ba;
                nba->offset += (ba->start - nba->start);  /* += (new - old) */
                nba->start = ba->start;
                nba->end = ba->end;
                ba->backing_ba = nba;
                ba = nba;
                /* pmap is replaced at the top of the loop */
        }
}
static
void
vm_map_backing_adjust_start(vm_map_entry_t entry, vm_ooffset_t start)
{
        vm_map_backing_t ba;

        if (entry->maptype == VM_MAPTYPE_VPAGETABLE ||
            entry->maptype == VM_MAPTYPE_NORMAL) {
                for (ba = &entry->ba; ba; ba = ba->backing_ba) {
                        if (ba->object) {
                                lockmgr(&ba->object->backing_lk, LK_EXCLUSIVE);
                                ba->offset += (start - ba->start);
                                ba->start = start;
                                lockmgr(&ba->object->backing_lk, LK_RELEASE);
                        } else {
                                ba->offset += (start - ba->start);
                                ba->start = start;
                        }
                }
        } else {
                /* not an object and can't be shadowed */
        }
}

static
void
vm_map_backing_adjust_end(vm_map_entry_t entry, vm_ooffset_t end)
{
        vm_map_backing_t ba;

        if (entry->maptype == VM_MAPTYPE_VPAGETABLE ||
            entry->maptype == VM_MAPTYPE_NORMAL) {
                for (ba = &entry->ba; ba; ba = ba->backing_ba) {
                        if (ba->object) {
                                lockmgr(&ba->object->backing_lk, LK_EXCLUSIVE);
                                ba->end = end;
                                lockmgr(&ba->object->backing_lk, LK_RELEASE);
                        } else {
                                ba->end = end;
                        }
                }
        } else {
                /* not an object and can't be shadowed */
        }
}
3488 * Handles the dirty work of making src_entry and dst_entry copy-on-write
3489 * after src_entry has been cloned to dst_entry. For normal entries only.
3491 * The vm_maps must be exclusively locked.
3492 * The vm_map's token must be held.
3494 * Because the maps are locked no faults can be in progress during the
3498 vm_map_copy_entry(vm_map_t src_map
, vm_map_t dst_map
,
3499 vm_map_entry_t src_entry
, vm_map_entry_t dst_entry
)
3503 KKASSERT(dst_entry
->maptype
== VM_MAPTYPE_NORMAL
||
3504 dst_entry
->maptype
== VM_MAPTYPE_VPAGETABLE
);
3506 if (src_entry
->wired_count
&&
3507 src_entry
->maptype
!= VM_MAPTYPE_VPAGETABLE
) {
3509 * Of course, wired down pages can't be set copy-on-write.
3510 * Cause wired pages to be copied into the new map by
3511 * simulating faults (the new pages are pageable)
3513 * Scrap ba.object (its ref-count has not yet been adjusted
3514 * so we can just NULL out the field). Remove the backing
3517 * Then call vm_fault_copy_entry() to create a new object
3518 * in dst_entry and copy the wired pages from src to dst.
3520 * The fault-copy code doesn't work with virtual page
3523 if ((obj
= dst_entry
->ba
.object
) != NULL
) {
3524 vm_map_backing_detach(&dst_entry
->ba
);
3525 dst_entry
->ba
.object
= NULL
;
3526 vm_map_entry_dispose_ba(dst_entry
->ba
.backing_ba
);
3527 dst_entry
->ba
.backing_ba
= NULL
;
3528 dst_entry
->ba
.backing_count
= 0;
3530 vm_fault_copy_entry(dst_map
, src_map
, dst_entry
, src_entry
);
3532 if ((src_entry
->eflags
& MAP_ENTRY_NEEDS_COPY
) == 0) {
3534 * If the source entry is not already marked NEEDS_COPY
3535 * we need to write-protect the PTEs.
3537 pmap_protect(src_map
->pmap
,
3538 src_entry
->ba
.start
,
3540 src_entry
->protection
& ~VM_PROT_WRITE
);
3544 * dst_entry.ba_object might be stale. Update it (its
3545 * ref-count has not yet been updated so just overwrite
3548 * If there is no object then we are golden. Also, in
3549 * this situation if there are no backing_ba linkages then
3550 * we can set ba.offset to whatever we want. For now we
3551 * set the offset for 0 for make debugging object sizes
3554 obj
= src_entry
->ba
.object
;
3557 src_entry
->eflags
|= (MAP_ENTRY_COW
|
3558 MAP_ENTRY_NEEDS_COPY
);
3559 dst_entry
->eflags
|= (MAP_ENTRY_COW
|
3560 MAP_ENTRY_NEEDS_COPY
);
3561 KKASSERT(dst_entry
->ba
.offset
== src_entry
->ba
.offset
);
3563 dst_entry
->ba
.offset
= 0;
3567 * Normal, allow the backing_ba link depth to
3570 pmap_copy(dst_map
->pmap
, src_map
->pmap
,
3571 dst_entry
->ba
.start
,
3572 dst_entry
->ba
.end
- dst_entry
->ba
.start
,
3573 src_entry
->ba
.start
);
/*
 * Create a vmspace for a new process and its related vm_map based on an
 * existing vmspace.  The new map inherits information from the old map
 * according to inheritance settings.
 *
 * The source map must not be locked.
 */
static void vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map,
                          vm_map_entry_t old_entry, int *countp);
static void vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map,
                          vm_map_entry_t old_entry, int *countp);

struct vmspace *
vmspace_fork(struct vmspace *vm1)
{
        struct vmspace *vm2;
        vm_map_t old_map = &vm1->vm_map;
        vm_map_t new_map;
        vm_map_entry_t old_entry;
        int count;

        lwkt_gettoken(&vm1->vm_map.token);
        vm_map_lock(old_map);

        vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map));
        lwkt_gettoken(&vm2->vm_map.token);

        /*
         * We must bump the timestamp to force any concurrent fault
         * to retry.
         */
        bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
              (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy);
        new_map = &vm2->vm_map;  /* XXX */
        new_map->timestamp = 1;

        vm_map_lock(new_map);

        count = old_map->nentries;
        count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);

        RB_FOREACH(old_entry, vm_map_rb_tree, &old_map->rb_root) {
                switch(old_entry->maptype) {
                case VM_MAPTYPE_SUBMAP:
                        panic("vm_map_fork: encountered a submap");
                        break;
                case VM_MAPTYPE_UKSMAP:
                        vmspace_fork_uksmap_entry(old_map, new_map,
                                                  old_entry, &count);
                        break;
                case VM_MAPTYPE_NORMAL:
                case VM_MAPTYPE_VPAGETABLE:
                        vmspace_fork_normal_entry(old_map, new_map,
                                                  old_entry, &count);
                        break;
                }
        }

        new_map->size = old_map->size;
        vm_map_unlock(new_map);
        vm_map_unlock(old_map);
        vm_map_entry_release(count);

        lwkt_reltoken(&vm2->vm_map.token);
        lwkt_reltoken(&vm1->vm_map.token);

        return (vm2);
}
3649 vmspace_fork_normal_entry(vm_map_t old_map
, vm_map_t new_map
,
3650 vm_map_entry_t old_entry
, int *countp
)
3652 vm_map_entry_t new_entry
;
3653 vm_map_backing_t ba
;
3657 * If the backing_ba link list gets too long then fault it
3658 * all into the head object and dispose of the list. We do
3659 * this in old_entry prior to cloning in order to benefit both
3662 * We can test our fronting object's size against its
3663 * resident_page_count for a really cheap (but probably not perfect)
3664 * all-shadowed test, allowing us to disconnect the backing_ba
3667 * XXX Currently doesn't work for VPAGETABLEs (the entire object
3668 * would have to be copied).
3670 object
= old_entry
->ba
.object
;
3671 if (old_entry
->ba
.backing_ba
&&
3672 old_entry
->maptype
!= VM_MAPTYPE_VPAGETABLE
&&
3673 (old_entry
->ba
.backing_count
>= vm_map_backing_limit
||
3674 (vm_map_backing_shadow_test
&& object
&&
3675 object
->size
== object
->resident_page_count
))) {
3677 * If there are too many backing_ba linkages we
3678 * collapse everything into the head
3680 * This will also remove all the pte's.
3682 if (old_entry
->eflags
& MAP_ENTRY_NEEDS_COPY
)
3683 vm_map_entry_shadow(old_entry
);
3685 vm_map_entry_allocate_object(old_entry
);
3686 if (vm_fault_collapse(old_map
, old_entry
) == KERN_SUCCESS
) {
3687 ba
= old_entry
->ba
.backing_ba
;
3688 old_entry
->ba
.backing_ba
= NULL
;
3689 old_entry
->ba
.backing_count
= 0;
3690 vm_map_entry_dispose_ba(ba
);
3693 object
= NULL
; /* object variable is now invalid */
3698 switch (old_entry
->inheritance
) {
3699 case VM_INHERIT_NONE
:
3701 case VM_INHERIT_SHARE
:
3703 * Clone the entry as a shared entry. This will look like
3704 * shared memory across the old and the new process. We must
3705 * ensure that the object is allocated.
3707 if (old_entry
->ba
.object
== NULL
)
3708 vm_map_entry_allocate_object(old_entry
);
3710 if (old_entry
->eflags
& MAP_ENTRY_NEEDS_COPY
) {
3712 * Create the fronting vm_map_backing for
3713 * an entry which needs a copy, plus an extra
3714 * ref because we are going to duplicate it
3717 * The call to vm_map_entry_shadow() will also clear
3720 * XXX no more collapse. Still need extra ref
3723 vm_map_entry_shadow(old_entry
);
3724 } else if (old_entry
->ba
.object
) {
3725 object
= old_entry
->ba
.object
;
3729 * Clone the entry. We've already bumped the ref on
3730 * the vm_object for our new entry.
3732 new_entry
= vm_map_entry_create(countp
);
3733 *new_entry
= *old_entry
;
3735 new_entry
->eflags
&= ~MAP_ENTRY_USER_WIRED
;
3736 new_entry
->wired_count
= 0;
3739 * Replicate and index the vm_map_backing. Don't share
3740 * the vm_map_backing across vm_map's (only across clips).
3742 * Insert the entry into the new map -- we know we're
3743 * inserting at the end of the new map.
3745 vm_map_backing_replicated(new_map
, new_entry
, 0);
3746 vm_map_entry_link(new_map
, new_entry
);
3749 * Update the physical map
3751 pmap_copy(new_map
->pmap
, old_map
->pmap
,
3752 new_entry
->ba
.start
,
3753 (old_entry
->ba
.end
- old_entry
->ba
.start
),
3754 old_entry
->ba
.start
);
3756 case VM_INHERIT_COPY
:
3758 * Clone the entry and link the copy into the new map.
3760 * Note that ref-counting adjustment for old_entry->ba.object
3761 * (if it isn't a special map that is) is handled by
3762 * vm_map_copy_entry().
3764 new_entry
= vm_map_entry_create(countp
);
3765 *new_entry
= *old_entry
;
3767 new_entry
->eflags
&= ~MAP_ENTRY_USER_WIRED
;
3768 new_entry
->wired_count
= 0;
3770 vm_map_backing_replicated(new_map
, new_entry
, 0);
3771 vm_map_entry_link(new_map
, new_entry
);
3774 * This does the actual dirty work of making both entries
3775 * copy-on-write, and will also handle the fronting object.
3777 vm_map_copy_entry(old_map
, new_map
, old_entry
, new_entry
);
/*
 * When forking user-kernel shared maps, the map might change in the
 * child so do not try to copy the underlying pmap entries.
 */
static
void
vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map,
                          vm_map_entry_t old_entry, int *countp)
{
        vm_map_entry_t new_entry;

        new_entry = vm_map_entry_create(countp);
        *new_entry = *old_entry;

        new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
        new_entry->wired_count = 0;
        KKASSERT(new_entry->ba.backing_ba == NULL);
        vm_map_backing_replicated(new_map, new_entry, 0);

        vm_map_entry_link(new_map, new_entry);
}
3805 * Create an auto-grow stack entry
3810 vm_map_stack (vm_map_t map
, vm_offset_t
*addrbos
, vm_size_t max_ssize
,
3811 int flags
, vm_prot_t prot
, vm_prot_t max
, int cow
)
3813 vm_map_entry_t prev_entry
;
3814 vm_map_entry_t next
;
3815 vm_size_t init_ssize
;
3818 vm_offset_t tmpaddr
;
3820 cow
|= MAP_IS_STACK
;
3822 if (max_ssize
< sgrowsiz
)
3823 init_ssize
= max_ssize
;
3825 init_ssize
= sgrowsiz
;
3827 count
= vm_map_entry_reserve(MAP_RESERVE_COUNT
);
3831 * Find space for the mapping
3833 if ((flags
& (MAP_FIXED
| MAP_TRYFIXED
)) == 0) {
3834 if (vm_map_findspace(map
, *addrbos
, max_ssize
, 1,
3837 vm_map_entry_release(count
);
3838 return (KERN_NO_SPACE
);
3843 /* If addr is already mapped, no go */
3844 if (vm_map_lookup_entry(map
, *addrbos
, &prev_entry
)) {
3846 vm_map_entry_release(count
);
3847 return (KERN_NO_SPACE
);
3851 /* XXX already handled by kern_mmap() */
3852 /* If we would blow our VMEM resource limit, no go */
3853 if (map
->size
+ init_ssize
>
3854 curproc
->p_rlimit
[RLIMIT_VMEM
].rlim_cur
) {
3856 vm_map_entry_release(count
);
3857 return (KERN_NO_SPACE
);
3862 * If we can't accomodate max_ssize in the current mapping,
3863 * no go. However, we need to be aware that subsequent user
3864 * mappings might map into the space we have reserved for
3865 * stack, and currently this space is not protected.
3867 * Hopefully we will at least detect this condition
3868 * when we try to grow the stack.
3871 next
= vm_map_rb_tree_RB_NEXT(prev_entry
);
3873 next
= RB_MIN(vm_map_rb_tree
, &map
->rb_root
);
3875 if (next
&& next
->ba
.start
< *addrbos
+ max_ssize
) {
3877 vm_map_entry_release(count
);
3878 return (KERN_NO_SPACE
);
3882 * We initially map a stack of only init_ssize. We will
3883 * grow as needed later. Since this is to be a grow
3884 * down stack, we map at the top of the range.
3886 * Note: we would normally expect prot and max to be
3887 * VM_PROT_ALL, and cow to be 0. Possibly we should
3888 * eliminate these as input parameters, and just
3889 * pass these values here in the insert call.
3891 rv
= vm_map_insert(map
, &count
, NULL
, NULL
,
3892 0, *addrbos
+ max_ssize
- init_ssize
,
3893 *addrbos
+ max_ssize
,
3895 VM_SUBSYS_STACK
, prot
, max
, cow
);
3897 /* Now set the avail_ssize amount */
3898 if (rv
== KERN_SUCCESS
) {
3900 next
= vm_map_rb_tree_RB_NEXT(prev_entry
);
3902 next
= RB_MIN(vm_map_rb_tree
, &map
->rb_root
);
3903 if (prev_entry
!= NULL
) {
3904 vm_map_clip_end(map
,
3906 *addrbos
+ max_ssize
- init_ssize
,
3909 if (next
->ba
.end
!= *addrbos
+ max_ssize
||
3910 next
->ba
.start
!= *addrbos
+ max_ssize
- init_ssize
){
3911 panic ("Bad entry start/end for new stack entry");
3913 next
->aux
.avail_ssize
= max_ssize
- init_ssize
;
3918 vm_map_entry_release(count
);
3923 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the
3924 * desired address is already mapped, or if we successfully grow
3925 * the stack. Also returns KERN_SUCCESS if addr is outside the
3926 * stack range (this is strange, but preserves compatibility with
3927 * the grow function in vm_machdep.c).
3932 vm_map_growstack (vm_map_t map
, vm_offset_t addr
)
3934 vm_map_entry_t prev_entry
;
3935 vm_map_entry_t stack_entry
;
3936 vm_map_entry_t next
;
3942 int rv
= KERN_SUCCESS
;
3944 int use_read_lock
= 1;
3950 lp
= curthread
->td_lwp
;
3951 p
= curthread
->td_proc
;
3952 KKASSERT(lp
!= NULL
);
3953 vm
= lp
->lwp_vmspace
;
3956 * Growstack is only allowed on the current process. We disallow
3957 * other use cases, e.g. trying to access memory via procfs that
3958 * the stack hasn't grown into.
3960 if (map
!= &vm
->vm_map
) {
3961 return KERN_FAILURE
;
3964 count
= vm_map_entry_reserve(MAP_RESERVE_COUNT
);
3967 vm_map_lock_read(map
);
3972 * If addr is already in the entry range, no need to grow.
3973 * prev_entry returns NULL if addr is at the head.
3975 if (vm_map_lookup_entry(map
, addr
, &prev_entry
))
3978 stack_entry
= vm_map_rb_tree_RB_NEXT(prev_entry
);
3980 stack_entry
= RB_MIN(vm_map_rb_tree
, &map
->rb_root
);
3982 if (stack_entry
== NULL
)
3984 if (prev_entry
== NULL
)
3985 end
= stack_entry
->ba
.start
- stack_entry
->aux
.avail_ssize
;
3987 end
= prev_entry
->ba
.end
;
3990 * This next test mimics the old grow function in vm_machdep.c.
3991 * It really doesn't quite make sense, but we do it anyway
3992 * for compatibility.
3994 * If not growable stack, return success. This signals the
3995 * caller to proceed as he would normally with normal vm.
3997 if (stack_entry
->aux
.avail_ssize
< 1 ||
3998 addr
>= stack_entry
->ba
.start
||
3999 addr
< stack_entry
->ba
.start
- stack_entry
->aux
.avail_ssize
) {
4003 /* Find the minimum grow amount */
4004 grow_amount
= roundup (stack_entry
->ba
.start
- addr
, PAGE_SIZE
);
4005 if (grow_amount
> stack_entry
->aux
.avail_ssize
) {
4011 * If there is no longer enough space between the entries
4012 * nogo, and adjust the available space. Note: this
4013 * should only happen if the user has mapped into the
4014 * stack area after the stack was created, and is
4015 * probably an error.
4017 * This also effectively destroys any guard page the user
4018 * might have intended by limiting the stack size.
4020 if (grow_amount
> stack_entry
->ba
.start
- end
) {
4021 if (use_read_lock
&& vm_map_lock_upgrade(map
)) {
4027 stack_entry
->aux
.avail_ssize
= stack_entry
->ba
.start
- end
;
4032 is_procstack
= addr
>= (vm_offset_t
)vm
->vm_maxsaddr
;
4034 /* If this is the main process stack, see if we're over the
4037 if (is_procstack
&& (vm
->vm_ssize
+ grow_amount
>
4038 p
->p_rlimit
[RLIMIT_STACK
].rlim_cur
)) {
4043 /* Round up the grow amount modulo SGROWSIZ */
4044 grow_amount
= roundup (grow_amount
, sgrowsiz
);
4045 if (grow_amount
> stack_entry
->aux
.avail_ssize
) {
4046 grow_amount
= stack_entry
->aux
.avail_ssize
;
4048 if (is_procstack
&& (vm
->vm_ssize
+ grow_amount
>
4049 p
->p_rlimit
[RLIMIT_STACK
].rlim_cur
)) {
4050 grow_amount
= p
->p_rlimit
[RLIMIT_STACK
].rlim_cur
- vm
->vm_ssize
;
4053 /* If we would blow our VMEM resource limit, no go */
4054 if (map
->size
+ grow_amount
> p
->p_rlimit
[RLIMIT_VMEM
].rlim_cur
) {
4059 if (use_read_lock
&& vm_map_lock_upgrade(map
)) {
4066 /* Get the preliminary new entry start value */
4067 addr
= stack_entry
->ba
.start
- grow_amount
;
4069 /* If this puts us into the previous entry, cut back our growth
4070 * to the available space. Also, see the note above.
4073 stack_entry
->aux
.avail_ssize
= stack_entry
->ba
.start
- end
;
4077 rv
= vm_map_insert(map
, &count
, NULL
, NULL
,
4078 0, addr
, stack_entry
->ba
.start
,
4080 VM_SUBSYS_STACK
, VM_PROT_ALL
, VM_PROT_ALL
, 0);
4082 /* Adjust the available stack space by the amount we grew. */
4083 if (rv
== KERN_SUCCESS
) {
4085 vm_map_clip_end(map
, prev_entry
, addr
, &count
);
4086 next
= vm_map_rb_tree_RB_NEXT(prev_entry
);
4088 next
= RB_MIN(vm_map_rb_tree
, &map
->rb_root
);
4090 if (next
->ba
.end
!= stack_entry
->ba
.start
||
4091 next
->ba
.start
!= addr
) {
4092 panic ("Bad stack grow start/end in new stack entry");
4094 next
->aux
.avail_ssize
=
4095 stack_entry
->aux
.avail_ssize
-
4096 (next
->ba
.end
- next
->ba
.start
);
4098 vm
->vm_ssize
+= next
->ba
.end
-
4103 if (map
->flags
& MAP_WIREFUTURE
)
4104 vm_map_unwire(map
, next
->ba
.start
, next
->ba
.end
, FALSE
);
4109 vm_map_unlock_read(map
);
4112 vm_map_entry_release(count
);
/*
 * Unshare the specified VM space for exec.  If other processes are
 * mapped to it, then create a new one.  The new vmspace is null.
 */
void
vmspace_exec(struct proc *p, struct vmspace *vmcopy)
{
        struct vmspace *oldvmspace = p->p_vmspace;
        struct vmspace *newvmspace;
        vm_map_t map = &p->p_vmspace->vm_map;

        /*
         * If we are execing a resident vmspace we fork it, otherwise
         * we create a new vmspace.  Note that exitingcnt is not
         * copied to the new vmspace.
         */
        lwkt_gettoken(&oldvmspace->vm_map.token);
        if (vmcopy) {
                newvmspace = vmspace_fork(vmcopy);
                lwkt_gettoken(&newvmspace->vm_map.token);
        } else {
                newvmspace = vmspace_alloc(vm_map_min(map), vm_map_max(map));
                lwkt_gettoken(&newvmspace->vm_map.token);
                bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
                      (caddr_t)&oldvmspace->vm_endcopy -
                      (caddr_t)&oldvmspace->vm_startcopy);
        }

        /*
         * Finish initializing the vmspace before assigning it
         * to the process.  The vmspace will become the current vmspace
         * if p == curproc.
         */
        pmap_pinit2(vmspace_pmap(newvmspace));
        pmap_replacevm(p, newvmspace, 0);
        lwkt_reltoken(&newvmspace->vm_map.token);
        lwkt_reltoken(&oldvmspace->vm_map.token);
        vmspace_rel(oldvmspace);
}
/*
 * Unshare the specified VM space for forcing COW.  This
 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
 */
void
vmspace_unshare(struct proc *p)
{
        struct vmspace *oldvmspace = p->p_vmspace;
        struct vmspace *newvmspace;

        lwkt_gettoken(&oldvmspace->vm_map.token);
        if (vmspace_getrefs(oldvmspace) == 1) {
                lwkt_reltoken(&oldvmspace->vm_map.token);
                return;
        }
        newvmspace = vmspace_fork(oldvmspace);
        lwkt_gettoken(&newvmspace->vm_map.token);
        pmap_pinit2(vmspace_pmap(newvmspace));
        pmap_replacevm(p, newvmspace, 0);
        lwkt_reltoken(&newvmspace->vm_map.token);
        lwkt_reltoken(&oldvmspace->vm_map.token);
        vmspace_rel(oldvmspace);
}
/*
 * vm_map_hint: return the beginning of the best area suitable for
 * creating a new mapping with "prot" protection.
 */
vm_offset_t
vm_map_hint(struct proc *p, vm_offset_t addr, vm_prot_t prot)
{
        struct vmspace *vms = p->p_vmspace;
        struct rlimit limit;
        rlim_t dsiz;

        /*
         * Acquire datasize limit for mmap() operation,
         * calculate nearest power of 2.
         */
        if (kern_getrlimit(RLIMIT_DATA, &limit))
                limit.rlim_cur = maxdsiz;
        dsiz = limit.rlim_cur;

        if (!randomize_mmap || addr != 0) {
                /*
                 * Set a reasonable start point for the hint if it was
                 * not specified or if it falls within the heap space.
                 * Hinted mmap()s do not allocate out of the heap space.
                 */
                if (addr == 0 ||
                    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
                     addr < round_page((vm_offset_t)vms->vm_daddr + dsiz))) {
                        addr = round_page((vm_offset_t)vms->vm_daddr + dsiz);
                }

                return addr;
        }

        /*
         * randomize_mmap && addr == 0.  For now randomize the
         * address within a dsiz range beyond the data limit.
         */
        addr = (vm_offset_t)vms->vm_daddr + dsiz;
        if (dsiz)
                addr += (karc4random64() & 0x7FFFFFFFFFFFFFFFLU) % dsiz;
        return (round_page(addr));
}
4229 * Finds the VM object, offset, and protection for a given virtual address
4230 * in the specified map, assuming a page fault of the type specified.
4232 * Leaves the map in question locked for read; return values are guaranteed
4233 * until a vm_map_lookup_done call is performed. Note that the map argument
4234 * is in/out; the returned map must be used in the call to vm_map_lookup_done.
4236 * A handle (out_entry) is returned for use in vm_map_lookup_done, to make
4239 * If a lookup is requested with "write protection" specified, the map may
4240 * be changed to perform virtual copying operations, although the data
4241 * referenced will remain the same.
4246 vm_map_lookup(vm_map_t
*var_map
, /* IN/OUT */
4248 vm_prot_t fault_typea
,
4249 vm_map_entry_t
*out_entry
, /* OUT */
4250 struct vm_map_backing
**bap
, /* OUT */
4251 vm_pindex_t
*pindex
, /* OUT */
4252 vm_prot_t
*out_prot
, /* OUT */
4253 int *wflags
) /* OUT */
4255 vm_map_entry_t entry
;
4256 vm_map_t map
= *var_map
;
4258 vm_prot_t fault_type
= fault_typea
;
4259 int use_read_lock
= 1;
4260 int rv
= KERN_SUCCESS
;
4262 thread_t td
= curthread
;
4265 * vm_map_entry_reserve() implements an important mitigation
4266 * against mmap() span running the kernel out of vm_map_entry
4267 * structures, but it can also cause an infinite call recursion.
4268 * Use td_nest_count to prevent an infinite recursion (allows
4269 * the vm_map code to dig into the pcpu vm_map_entry reserve).
4272 if (td
->td_nest_count
== 0) {
4273 ++td
->td_nest_count
;
4274 count
= vm_map_entry_reserve(MAP_RESERVE_COUNT
);
4275 --td
->td_nest_count
;
4279 vm_map_lock_read(map
);
4284 * Always do a full lookup. The hint doesn't get us much anymore
4285 * now that the map is RB'd.
4292 vm_map_entry_t tmp_entry
;
4294 if (!vm_map_lookup_entry(map
, vaddr
, &tmp_entry
)) {
4295 rv
= KERN_INVALID_ADDRESS
;
4305 if (entry
->maptype
== VM_MAPTYPE_SUBMAP
) {
4306 vm_map_t old_map
= map
;
4308 *var_map
= map
= entry
->ba
.sub_map
;
4310 vm_map_unlock_read(old_map
);
4312 vm_map_unlock(old_map
);
4318 * Check whether this task is allowed to have this page.
4319 * Note the special case for MAP_ENTRY_COW pages with an override.
4320 * This is to implement a forced COW for debuggers.
4322 if (fault_type
& VM_PROT_OVERRIDE_WRITE
)
4323 prot
= entry
->max_protection
;
4325 prot
= entry
->protection
;
4327 fault_type
&= (VM_PROT_READ
|VM_PROT_WRITE
|VM_PROT_EXECUTE
);
4328 if ((fault_type
& prot
) != fault_type
) {
4329 rv
= KERN_PROTECTION_FAILURE
;
4333 if ((entry
->eflags
& MAP_ENTRY_USER_WIRED
) &&
4334 (entry
->eflags
& MAP_ENTRY_COW
) &&
4335 (fault_type
& VM_PROT_WRITE
) &&
4336 (fault_typea
& VM_PROT_OVERRIDE_WRITE
) == 0) {
4337 rv
= KERN_PROTECTION_FAILURE
;
4342 * If this page is not pageable, we have to get it for all possible
4346 if (entry
->wired_count
) {
4347 *wflags
|= FW_WIRED
;
4348 prot
= fault_type
= entry
->protection
;
4352 * Virtual page tables may need to update the accessed (A) bit
4353 * in a page table entry. Upgrade the fault to a write fault for
4354 * that case if the map will support it. If the map does not support
4355 * it the page table entry simply will not be updated.
4357 if (entry
->maptype
== VM_MAPTYPE_VPAGETABLE
) {
4358 if (prot
& VM_PROT_WRITE
)
4359 fault_type
|= VM_PROT_WRITE
;
4362 if (curthread
->td_lwp
&& curthread
->td_lwp
->lwp_vmspace
&&
4363 pmap_emulate_ad_bits(&curthread
->td_lwp
->lwp_vmspace
->vm_pmap
)) {
4364 if ((prot
& VM_PROT_WRITE
) == 0)
4365 fault_type
|= VM_PROT_WRITE
;
4369 * Only NORMAL and VPAGETABLE maps are object-based. UKSMAPs are not.
4371 if (entry
->maptype
!= VM_MAPTYPE_NORMAL
&&
4372 entry
->maptype
!= VM_MAPTYPE_VPAGETABLE
) {

	/*
	 * If the entry was copy-on-write, we either ...
	 */
	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
		/*
		 * If we want to write the page, we may as well handle that
		 * now since we've got the map locked.
		 *
		 * If we don't need to write the page, we just demote the
		 * permissions allowed.
		 */
		if (fault_type & VM_PROT_WRITE) {
			/*
			 * Not allowed if TDF_NOFAULT is set as the shadowing
			 * operation can deadlock against the faulting
			 * function due to the copy-on-write.
			 */
			if (curthread->td_flags & TDF_NOFAULT) {
				rv = KERN_FAILURE_NOFAULT;
				goto done;
			}

			/*
			 * Make a new vm_map_backing + object, and place it
			 * in the object chain.  Note that no new references
			 * have appeared -- one just moved from the map to
			 * the new object.
			 */
			if (use_read_lock && vm_map_lock_upgrade(map)) {
				/* lost lock */
				use_read_lock = 0;
				goto RetryLookup;
			}
			use_read_lock = 0;
			vm_map_entry_shadow(entry);
			*wflags |= FW_DIDCOW;
		} else {
			/*
			 * We're attempting to read a copy-on-write page --
			 * don't allow writes.
			 */
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * Create an object if necessary.  This code also handles
	 * partitioning large entries to improve vm_fault performance.
	 */
	if (entry->ba.object == NULL && !map->system_map) {
		if (use_read_lock && vm_map_lock_upgrade(map)) {
			/* lost lock */
			use_read_lock = 0;
			goto RetryLookup;
		}
		use_read_lock = 0;

		/*
		 * Partition large entries, giving each its own VM object,
		 * to improve concurrent fault performance.  This is only
		 * applicable to userspace.
		 */
		if (map != &kernel_map &&
		    entry->maptype == VM_MAPTYPE_NORMAL &&
		    ((entry->ba.start ^ entry->ba.end) &
		     ~MAP_ENTRY_PARTITION_MASK) &&
		    vm_map_partition_enable) {
			if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
				entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
				++mycpu->gd_cnt.v_intrans_coll;
				++mycpu->gd_cnt.v_intrans_wait;
				vm_map_transition_wait(map, 0);
				goto RetryLookup;
			}
			vm_map_entry_partition(map, entry, vaddr, &count);
		}
		vm_map_entry_allocate_object(entry);
	}

	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */
	*pindex = OFF_TO_IDX((vaddr - entry->ba.start) + entry->ba.offset);
	*bap = &entry->ba;

skip:
	/*
	 * Return whether this is the only map sharing this data.  On
	 * success we return with a read lock held on the map.  On failure
	 * we return with the map unlocked.
	 */
	*out_prot = prot;

done:
	if (rv == KERN_SUCCESS) {
		if (use_read_lock == 0)
			vm_map_lock_downgrade(map);
	} else if (use_read_lock) {
		vm_map_unlock_read(map);
	} else {
		vm_map_unlock(map);
	}
	if (count > 0)
		vm_map_entry_release(count);

	return (rv);
}
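
/*
 * Illustrative usage sketch (not part of the original source): a caller
 * such as a fault handler pairs vm_map_lookup() with vm_map_lookup_done().
 * The local names below are hypothetical.
 *
 *	vm_map_t lmap = map;
 *	vm_map_entry_t lentry;
 *	struct vm_map_backing *lba;
 *	vm_pindex_t lpindex;
 *	vm_prot_t lprot;
 *	int lwflags;
 *	int rv;
 *
 *	rv = vm_map_lookup(&lmap, vaddr, VM_PROT_READ, &lentry, &lba,
 *			   &lpindex, &lprot, &lwflags);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);		(the map is returned unlocked)
 *	... resolve the page via lba and lpindex ...
 *	vm_map_lookup_done(lmap, lentry, 0);	(drops the read lock;
 *						 0 = no entries reserved
 *						 by this caller)
 */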

/*
 * Releases locks acquired by a vm_map_lookup()
 * (according to the handle returned by that lookup).
 *
 * No other requirements.
 */
void
vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
{
	/*
	 * Unlock the main-level map
	 */
	vm_map_unlock_read(map);
	if (count)
		vm_map_entry_release(count);
}

static void
vm_map_entry_partition(vm_map_t map, vm_map_entry_t entry,
		       vm_offset_t vaddr, int *countp)
{
	vaddr &= ~MAP_ENTRY_PARTITION_MASK;
	vm_map_clip_start(map, entry, vaddr, countp);
	vaddr += MAP_ENTRY_PARTITION_SIZE;
	vm_map_clip_end(map, entry, vaddr, countp);
}
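
/*
 * Worked example (illustrative; the actual partition size is whatever
 * MAP_ENTRY_PARTITION_SIZE expands to, with MAP_ENTRY_PARTITION_MASK
 * assumed to be that size minus one).  For a hypothetical 16MB partition
 * size and a fault at vaddr 0x12345000, the clip above yields:
 *
 *	start = 0x12345000 & ~(16MB - 1) = 0x12000000
 *	end   = 0x12000000 + 16MB        = 0x13000000
 *
 * so the faulting entry is clipped to [0x12000000, 0x13000000) and that
 * window gets its own VM object, reducing contention between concurrent
 * faults on very large mappings.
 */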

/*
 * Quick hack, needs some help to make it more SMP friendly.
 */
void
vm_map_interlock(vm_map_t map, struct vm_map_ilock *ilock,
		 vm_offset_t ran_beg, vm_offset_t ran_end)
{
	struct vm_map_ilock *scan;

	ilock->ran_beg = ran_beg;
	ilock->ran_end = ran_end;
	ilock->flags = 0;

	spin_lock(&map->ilock_spin);
restart:
	for (scan = map->ilock_base; scan; scan = scan->next) {
		if (ran_end > scan->ran_beg && ran_beg < scan->ran_end) {
			scan->flags |= ILOCK_WAITING;
			ssleep(scan, &map->ilock_spin, 0, "ilock", 0);
			goto restart;
		}
	}
	ilock->next = map->ilock_base;
	map->ilock_base = ilock;
	spin_unlock(&map->ilock_spin);
}
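
/*
 * Note (added commentary): the overlap test above is the usual half-open
 * interval check.  For example, [0x1000, 0x3000) vs [0x2000, 0x4000)
 * overlaps because 0x3000 > 0x2000 and 0x1000 < 0x4000, while
 * [0x1000, 0x2000) vs [0x2000, 0x3000) does not because 0x2000 > 0x2000
 * is false, so back-to-back ranges do not block each other.
 */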

void
vm_map_deinterlock(vm_map_t map, struct vm_map_ilock *ilock)
{
	struct vm_map_ilock *scan;
	struct vm_map_ilock **scanp;

	spin_lock(&map->ilock_spin);
	scanp = &map->ilock_base;
	while ((scan = *scanp) != NULL) {
		if (scan == ilock) {
			*scanp = ilock->next;
			spin_unlock(&map->ilock_spin);
			if (ilock->flags & ILOCK_WAITING)
				wakeup(ilock);
			return;
		}
		scanp = &scan->next;
	}
	spin_unlock(&map->ilock_spin);
	panic("vm_map_deinterlock: missing ilock!");
}
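
/*
 * Illustrative usage sketch (not part of the original source): callers
 * bracket an operation on an address range with an on-stack ilock.  The
 * code between the two calls is hypothetical.
 *
 *	struct vm_map_ilock ilock;
 *
 *	vm_map_interlock(map, &ilock, start, end);
 *	... operate on [start, end), excluding other interlock holders ...
 *	vm_map_deinterlock(map, &ilock);
 *
 * The interlock only serializes against other vm_map_interlock() callers
 * on overlapping ranges; it does not replace the map lock itself.
 */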

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * vm_map_print: [ debug ]
 */
DB_SHOW_COMMAND(map, vm_map_print)
{
	/* XXX convert args. */
	vm_map_t map = (vm_map_t)addr;
	boolean_t full = have_addr;

	vm_map_entry_t entry;

	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
		   (void *)map,
		   (void *)map->pmap, map->nentries, map->timestamp);

	if (!full && db_indent)
		return;

	db_indent += 2;
	RB_FOREACH(entry, vm_map_rb_tree, &map->rb_root) {
		db_iprintf("map entry %p: start=%p, end=%p\n",
			   (void *)entry,
			   (void *)entry->ba.start, (void *)entry->ba.end);
		{
			static char *inheritance_name[4] =
			    {"share", "copy", "none", "donate_copy"};

			db_iprintf(" prot=%x/%x/%s",
				   entry->protection,
				   entry->max_protection,
				   inheritance_name[(int)(unsigned char)
						    entry->inheritance]);
			if (entry->wired_count != 0)
				db_printf(", wired");
		}
		switch(entry->maptype) {
		case VM_MAPTYPE_SUBMAP:
			/* XXX no %qd in kernel.  Truncate entry->ba.offset. */
			db_printf(", share=%p, offset=0x%lx\n",
				  (void *)entry->ba.sub_map,
				  (long)entry->ba.offset);
			db_indent += 2;
			vm_map_print((db_expr_t)(intptr_t)entry->ba.sub_map,
				     full, 0, NULL);
			db_indent -= 2;
			break;
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			/* XXX no %qd in kernel.  Truncate entry->ba.offset. */
			db_printf(", object=%p, offset=0x%lx",
				  (void *)entry->ba.object,
				  (long)entry->ba.offset);
			if (entry->eflags & MAP_ENTRY_COW)
				db_printf(", copy (%s)",
					  (entry->eflags & MAP_ENTRY_NEEDS_COPY) ?
					  "needed" : "done");
			db_printf("\n");
			if (entry->ba.object) {
				db_indent += 2;
				vm_object_print((db_expr_t)(intptr_t)
						entry->ba.object,
						full, 0, NULL);
				db_indent -= 2;
			}
			break;
		case VM_MAPTYPE_UKSMAP:
			db_printf(", uksmap=%p, offset=0x%lx",
				  (void *)entry->ba.uksmap,
				  (long)entry->ba.offset);
			if (entry->eflags & MAP_ENTRY_COW)
				db_printf(", copy (%s)",
					  (entry->eflags & MAP_ENTRY_NEEDS_COPY) ?
					  "needed" : "done");
			db_printf("\n");
			break;
		default:
			break;
		}
	}
	db_indent -= 2;
}

/*
 * Debugging only
 */
DB_SHOW_COMMAND(procvm, procvm)
{
	struct proc *p;

	if (have_addr) {
		p = (struct proc *) addr;
	} else {
		p = curproc;
	}

	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
		  (void *)p, (void *)p->p_vmspace,
		  (void *)&p->p_vmspace->vm_map,
		  (void *)vmspace_pmap(p->p_vmspace));

	vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
}

#endif /* DDB */