/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 *	Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>
/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a self-adjusting binary search tree of these
 *	entries is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */
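/*
 * A typical ranged operation follows this pattern (an illustrative
 * sketch only; the individual routines below differ in detail):
 *
 *	vm_map_lock(map);
 *	VM_MAP_RANGE_CHECK(map, start, end);
 *	if (vm_map_lookup_entry(map, start, &entry))
 *		vm_map_clip_start(map, entry, start);
 *	else
 *		entry = entry->next;
 *	while (entry != &map->header && entry->start < end) {
 *		vm_map_clip_end(map, entry, end);
 *		... operate on [entry->start, entry->end) ...
 *		entry = entry->next;
 *	}
 *	vm_map_unlock(map);
 */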
static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static int vm_map_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
static void vm_map_zdtor(void *mem, int size, void *arg);
static void vmspace_zdtor(void *mem, int size, void *arg);
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);
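/*
 * An entry is "charged" for swap accounting when a credential has been
 * billed for it, either directly or through its backing object (the
 * object's charge does not count if the entry still needs its own copy).
 */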
#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
	((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
	!((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define	PROC_VMSPACE_LOCK(p) do { } while (0)
#define	PROC_VMSPACE_UNLOCK(p) do { } while (0)
/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}
/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from the general
 *	purpose memory pool with some exceptions:
 *
 *	- The kernel map and kmem submap are allocated statically.
 *	- Kernel map entries are allocated out of a static pool.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 */

void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
	mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
#ifdef INVARIANTS
	    vm_map_zdtor,
#else
	    NULL,
#endif
	    vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_prealloc(mapzone, MAX_KMAP);
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}
static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm->vm_map.pmap = NULL;
	(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
	PMAP_LOCK_INIT(vmspace_pmap(vm));
	return (0);
}
static int
vm_map_zinit(void *mem, int size, int flags)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	memset(map, 0, sizeof(*map));
	mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
	return (0);
}
#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;

	vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
}
static void
vm_map_zdtor(void *mem, int size, void *arg)
{
	vm_map_t map;

	map = (vm_map_t)mem;
	KASSERT(map->nentries == 0,
	    ("map %p nentries == %d on free.",
	    map, map->nentries));
	KASSERT(map->size == 0,
	    ("map %p size == %lu on free.",
	    map, (unsigned long)map->size));
}
#endif	/* INVARIANTS */
/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 *
 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);

	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));

	if (pinit == NULL)
		pinit = &pmap_pinit;

	if (!pinit(vmspace_pmap(vm))) {
		uma_zfree(vmspace_zone, vm);
		return (NULL);
	}
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
	vm->vm_refcnt = 1;
	vm->vm_shm = NULL;
	vm->vm_swrss = 0;
	vm->vm_tsize = 0;
	vm->vm_dsize = 0;
	vm->vm_ssize = 0;
	vm->vm_taddr = 0;
	vm->vm_daddr = 0;
	vm->vm_maxsaddr = 0;
	return (vm);
}
#ifdef RACCT
static void
vmspace_container_reset(struct proc *p)
{

	PROC_LOCK(p);
	racct_set(p, RACCT_DATA, 0);
	racct_set(p, RACCT_STACK, 0);
	racct_set(p, RACCT_RSS, 0);
	racct_set(p, RACCT_MEMLOCK, 0);
	racct_set(p, RACCT_VMEM, 0);
	PROC_UNLOCK(p);
}
#endif
static inline void
vmspace_dofree(struct vmspace *vm)
{

	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
	    vm->vm_map.max_offset);

	pmap_release(vmspace_pmap(vm));
	vm->vm_map.pmap = NULL;
	uma_zfree(vmspace_zone, vm);
}
void
vmspace_free(struct vmspace *vm)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "vmspace_free() called");

	if (vm->vm_refcnt == 0)
		panic("vmspace_free: attempt to free already freed vmspace");

	if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
		vmspace_dofree(vm);
}
void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	PROC_VMSPACE_UNLOCK(p);
	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
	vmspace_free(vm);
}
void
vmspace_exit(struct thread *td)
{
	int refcnt;
	struct vmspace *vm;
	struct proc *p;

	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 *
	 * The last exiting process to reach this point releases as
	 * much of the environment as it can.  vmspace_dofree() is the
	 * slower fallback in case another process had a temporary
	 * reference to the vmspace.
	 */

	p = td->td_proc;
	vm = p->p_vmspace;
	atomic_add_int(&vmspace0.vm_refcnt, 1);
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt > 1 && p->p_vmspace != &vmspace0) {
			/* Switch now since other proc might free vmspace */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = &vmspace0;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
	if (refcnt == 1) {
		if (p->p_vmspace != vm) {
			/* vmspace not yet freed, switch back */
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = vm;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		pmap_remove_pages(vmspace_pmap(vm));
		/* Switch now since this proc will free vmspace */
		PROC_VMSPACE_LOCK(p);
		p->p_vmspace = &vmspace0;
		PROC_VMSPACE_UNLOCK(p);
		pmap_activate(td);
		vmspace_dofree(vm);
	}
#ifdef RACCT
	if (racct_enable)
		vmspace_container_reset(p);
#endif
}
/* Acquire reference to vmspace owned by another process. */

struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
	struct vmspace *vm;
	int refcnt;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	if (vm == NULL) {
		PROC_VMSPACE_UNLOCK(p);
		return (NULL);
	}
	do {
		refcnt = vm->vm_refcnt;
		if (refcnt <= 0) {	/* Avoid 0->1 transition */
			PROC_VMSPACE_UNLOCK(p);
			return (NULL);
		}
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
	if (vm != p->p_vmspace) {
		PROC_VMSPACE_UNLOCK(p);
		vmspace_free(vm);
		return (NULL);
	}
	PROC_VMSPACE_UNLOCK(p);
	return (vm);
}
/*
 * Switch between vmspaces in an AIO kernel process.
 *
 * The AIO kernel processes switch to and from a user process's
 * vmspace while performing an I/O operation on behalf of a user
 * process.  The new vmspace is either the vmspace of a user process
 * obtained from an active AIO request or the initial vmspace of the
 * AIO kernel process (when it is idling).  Because user processes
 * will block to drain any active AIO requests before proceeding in
 * exit() or execve(), the vmspace reference count for these vmspaces
 * can never be 0.  This allows for a much simpler implementation than
 * the loop in vmspace_acquire_ref() above.  Similarly, AIO kernel
 * processes hold an extra reference on their initial vmspace for the
 * life of the process so that this guarantee is true for any vmspace
 * passed as 'newvm'.
 */
void
vmspace_switch_aio(struct vmspace *newvm)
{
	struct vmspace *oldvm;

	/* XXX: Need some way to assert that this is an aio daemon. */

	KASSERT(newvm->vm_refcnt > 0,
	    ("vmspace_switch_aio: newvm unreferenced"));

	oldvm = curproc->p_vmspace;
	if (oldvm == newvm)
		return;

	/*
	 * Point to the new address space and refer to it.
	 */
	curproc->p_vmspace = newvm;
	atomic_add_int(&newvm->vm_refcnt, 1);

	/* Activate the new mapping. */
	pmap_activate(curthread);

	/* Remove the daemon's reference to the old address space. */
	KASSERT(oldvm->vm_refcnt > 1,
	    ("vmspace_switch_aio: oldvm dropping last reference"));
	vmspace_free(oldvm);
}
void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xlock_(&map->lock, file, line);
	map->timestamp++;
}
static void
vm_map_process_deferred(void)
{
	struct thread *td;
	vm_map_entry_t entry, next;
	vm_object_t object;

	td = curthread;
	entry = td->td_map_def_user;
	td->td_map_def_user = NULL;
	while (entry != NULL) {
		next = entry->next;
		if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
			/*
			 * Decrement the object's writemappings and
			 * possibly the vnode's v_writecount.
			 */
			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
			    ("Submap with writecount"));
			object = entry->object.vm_object;
			KASSERT(object != NULL, ("No object for writecount"));
			vnode_pager_release_writecount(object, entry->start,
			    entry->end);
		}
		vm_map_entry_deallocate(entry, FALSE);
		entry = next;
	}
}
void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_xunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}
void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_slock_(&map->lock, file, line);
}
void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else {
		sx_sunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}
int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_xlock_(&map->lock, file, line);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}
int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_slock_(&map->lock, file, line);
	return (error == 0);
}
/*
 *	_vm_map_lock_upgrade:	[ internal use only ]
 *
 *	Tries to upgrade a read (shared) lock on the specified map to a write
 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *	returned without a read or write lock held.
 *
 *	Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
	unsigned int last_timestamp;

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		if (!sx_try_upgrade_(&map->lock, file, line)) {
			last_timestamp = map->timestamp;
			sx_sunlock_(&map->lock, file, line);
			vm_map_process_deferred();
			/*
			 * If the map's timestamp does not change while the
			 * map is unlocked, then the upgrade succeeds.
			 */
			sx_xlock_(&map->lock, file, line);
			if (last_timestamp != map->timestamp) {
				sx_xunlock_(&map->lock, file, line);
				return (1);
			}
		}
	}
	map->timestamp++;
	return (0);
}
void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else
		sx_downgrade_(&map->lock, file, line);
}
/*
 *	vm_map_locked:
 *
 *	Returns a non-zero value if the caller holds a write (exclusive) lock
 *	on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

	if (map->system_map)
		return (mtx_owned(&map->system_mtx));
	else
		return (sx_xlocked(&map->lock));
}
#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	else
		sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define	VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
#else
#define	VM_MAP_ASSERT_LOCKED(map)
#endif
/*
 *	_vm_map_unlock_and_wait:
 *
 *	Atomically releases the lock on the specified map and puts the calling
 *	thread to sleep.  The calling thread will remain asleep until either
 *	vm_map_wakeup() is performed on the map or the specified timeout is
 *	exceeded.
 *
 *	WARNING!  This function does not perform deferred deallocations of
 *	objects and map entries.  Therefore, the calling thread is expected to
 *	reacquire the map lock after reawakening and later perform an ordinary
 *	unlock operation, such as vm_map_unlock(), before completing its
 *	operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

	mtx_lock(&map_sleep_mtx);
	if (map->system_map)
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xunlock_(&map->lock, file, line);
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
	    timo));
}
/*
 *	vm_map_wakeup:
 *
 *	Awaken any threads that have slept on the map using
 *	vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the map unlock
	 * and the msleep() in _vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}
void
vm_map_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	map->busy++;
}

void
vm_map_unbusy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
		wakeup(&map->busy);
	}
}

void
vm_map_wait_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	while (map->busy) {
		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
		if (map->system_map)
			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
		else
			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
	}
	map->timestamp++;
}
long
vmspace_resident_count(struct vmspace *vmspace)
{
	return pmap_resident_count(vmspace_pmap(vmspace));
}
/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */
vm_map_t
vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
{
	vm_map_t result;

	result = uma_zalloc(mapzone, M_WAITOK);
	CTR1(KTR_VM, "vm_map_create: %p", result);
	_vm_map_init(result, pmap, min, max);
	return (result);
}
/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	map->header.next = map->header.prev = &map->header;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->pmap = pmap;
	map->min_offset = min;
	map->max_offset = max;
	map->flags = 0;
	map->root = NULL;
	map->timestamp = 0;
	map->busy = 0;
}
void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	_vm_map_init(map, pmap, min, max);
	mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "user map");
}
/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}
/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

	if (map->system_map)
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	else
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	if (new_entry == NULL)
		panic("vm_map_entry_create: kernel resources exhausted");
	return (new_entry);
}
/*
 *	vm_map_entry_set_behavior:
 *
 *	Set the expected access behavior, either normal, random, or
 *	sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}
/*
 *	vm_map_entry_set_max_free:
 *
 *	Set the max_free field in a vm_map_entry.
 */
static inline void
vm_map_entry_set_max_free(vm_map_entry_t entry)
{

	entry->max_free = entry->adj_free;
	if (entry->left != NULL && entry->left->max_free > entry->max_free)
		entry->max_free = entry->left->max_free;
	if (entry->right != NULL && entry->right->max_free > entry->max_free)
		entry->max_free = entry->right->max_free;
}
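/*
 * For example (a sketch): with an adj_free gap of 4K at the entry
 * itself, and subtree max_free values of 64K on the left and 16K on
 * the right, the entry's max_free becomes 64K, the largest free gap
 * reachable anywhere in its subtree.
 */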
/*
 *	vm_map_entry_splay:
 *
 *	The Sleator and Tarjan top-down splay algorithm with the
 *	following variation.  Max_free must be computed bottom-up, so
 *	on the downward pass, maintain the left and right spines in
 *	reverse order.  Then, make a second pass up each side to fix
 *	the pointers and compute max_free.  The time bound is O(log n)
 *	amortized.
 *
 *	The new root is the vm_map_entry containing "addr", or else an
 *	adjacent entry (lower or higher) if addr is not in the tree.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: the new root.
 */
static vm_map_entry_t
vm_map_entry_splay(vm_offset_t addr, vm_map_entry_t root)
{
	vm_map_entry_t llist, rlist;
	vm_map_entry_t ltree, rtree;
	vm_map_entry_t y;

	/* Special case of empty tree. */
	if (root == NULL)
		return (root);

	/*
	 * Pass One: Splay down the tree until we find addr or a NULL
	 * pointer where addr would go.  llist and rlist are the two
	 * sides in reverse order (bottom-up), with llist linked by
	 * the right pointer and rlist linked by the left pointer in
	 * the vm_map_entry.  Wait until Pass Two to set max_free on
	 * the two spines.
	 */
	llist = NULL;
	rlist = NULL;
	for (;;) {
		/* root is never NULL in here. */
		if (addr < root->start) {
			y = root->left;
			if (y == NULL)
				break;
			if (addr < y->start && y->left != NULL) {
				/* Rotate right and put y on rlist. */
				root->left = y->right;
				y->right = root;
				vm_map_entry_set_max_free(root);
				root = y->left;
				y->left = rlist;
				rlist = y;
			} else {
				/* Put root on rlist. */
				root->left = rlist;
				rlist = root;
				root = y;
			}
		} else if (addr >= root->end) {
			y = root->right;
			if (y == NULL)
				break;
			if (addr >= y->end && y->right != NULL) {
				/* Rotate left and put y on llist. */
				root->right = y->left;
				y->left = root;
				vm_map_entry_set_max_free(root);
				root = y->right;
				y->right = llist;
				llist = y;
			} else {
				/* Put root on llist. */
				root->right = llist;
				llist = root;
				root = y;
			}
		} else
			break;
	}

	/*
	 * Pass Two: Walk back up the two spines, flip the pointers
	 * and set max_free.  The subtrees of the root go at the
	 * bottom of llist and rlist.
	 */
	ltree = root->left;
	while (llist != NULL) {
		y = llist->right;
		llist->right = ltree;
		vm_map_entry_set_max_free(llist);
		ltree = llist;
		llist = y;
	}
	rtree = root->right;
	while (rlist != NULL) {
		y = rlist->left;
		rlist->left = rtree;
		vm_map_entry_set_max_free(rlist);
		rtree = rlist;
		rlist = y;
	}

	/*
	 * Final assembly: add ltree and rtree as subtrees of root.
	 */
	root->left = ltree;
	root->right = rtree;
	vm_map_entry_set_max_free(root);

	return (root);
}
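/*
 * Callers rely on the adjacency guarantee: after splaying at "addr",
 * either the root contains addr (root->start <= addr < root->end), or
 * the root is the nearest entry on one side of addr, so a single
 * comparison against root->start or root->end resolves the lookup.
 */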
/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.
 */
static void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{

	CTR4(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p, after %p", map,
	    map->nentries, entry, after_where);
	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(after_where == &map->header ||
	    after_where->end <= entry->start,
	    ("vm_map_entry_link: prev end %jx new start %jx overlap",
	    (uintmax_t)after_where->end, (uintmax_t)entry->start));
	KASSERT(after_where->next == &map->header ||
	    entry->end <= after_where->next->start,
	    ("vm_map_entry_link: new end %jx next start %jx overlap",
	    (uintmax_t)entry->end, (uintmax_t)after_where->next->start));

	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;

	if (after_where != &map->header) {
		if (after_where != map->root)
			vm_map_entry_splay(after_where->start, map->root);
		entry->right = after_where->right;
		entry->left = after_where;
		after_where->right = NULL;
		after_where->adj_free = entry->start - after_where->end;
		vm_map_entry_set_max_free(after_where);
	} else {
		entry->right = map->root;
		entry->left = NULL;
	}
	entry->adj_free = (entry->next == &map->header ? map->max_offset :
	    entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
	map->root = entry;
}
static void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t next, prev, root;

	VM_MAP_ASSERT_LOCKED(map);
	if (entry != map->root)
		vm_map_entry_splay(entry->start, map->root);
	if (entry->left == NULL)
		root = entry->right;
	else {
		root = vm_map_entry_splay(entry->start, entry->left);
		root->right = entry->right;
		root->adj_free = (entry->next == &map->header ? map->max_offset :
		    entry->next->start) - root->end;
		vm_map_entry_set_max_free(root);
	}
	map->root = root;

	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}
/*
 *	vm_map_entry_resize_free:
 *
 *	Recompute the amount of free space following a vm_map_entry
 *	and propagate that value up the tree.  Call this function after
 *	resizing a map entry in-place, that is, without a call to
 *	vm_map_entry_link() or _unlink().
 *
 *	The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
{

	/*
	 * Using splay trees without parent pointers, propagating
	 * max_free up the tree is done by moving the entry to the
	 * root and making the change there.
	 */
	if (entry != map->root)
		map->root = vm_map_entry_splay(entry->start, map->root);

	entry->adj_free = (entry->next == &map->header ? map->max_offset :
	    entry->next->start) - entry->end;
	vm_map_entry_set_max_free(entry);
}
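/*
 * For example, the coalescing path in vm_map_insert() grows an entry
 * in place and then restores the free-space invariants:
 *
 *	prev_entry->end = end;
 *	vm_map_entry_resize_free(map, prev_entry);
 */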
/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur;
	boolean_t locked;

	/*
	 * If the map is empty, then the map entry immediately preceding
	 * "address" is the map's header.
	 */
	cur = map->root;
	if (cur == NULL)
		*entry = &map->header;
	else if (address >= cur->start && cur->end > address) {
		*entry = cur;
		return (TRUE);
	} else if ((locked = vm_map_locked(map)) ||
	    sx_try_upgrade(&map->lock)) {
		/*
		 * Splay requires a write lock on the map.  However, it only
		 * restructures the binary search tree; it does not otherwise
		 * change the map.  Thus, the map's timestamp need not change
		 * on a temporary upgrade.
		 */
		map->root = cur = vm_map_entry_splay(address, cur);
		if (!locked)
			sx_downgrade(&map->lock);

		/*
		 * If "address" is contained within a map entry, the new root
		 * is that map entry.  Otherwise, the new root is a map entry
		 * immediately before or after "address".
		 */
		if (address >= cur->start) {
			*entry = cur;
			if (cur->end > address)
				return (TRUE);
		} else
			*entry = cur->prev;
	} else {
		/*
		 * Since the map is only locked for read access, perform a
		 * standard binary search tree lookup for "address".
		 */
		for (;;) {
			if (address < cur->start) {
				if (cur->left == NULL) {
					*entry = cur->prev;
					break;
				}
				cur = cur->left;
			} else if (cur->end > address) {
				*entry = cur;
				return (TRUE);
			} else {
				if (cur->right == NULL) {
					*entry = cur;
					break;
				}
				cur = cur->right;
			}
		}
	}
	return (FALSE);
}
/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t new_entry, prev_entry, temp_entry;
	vm_eflags_t protoeflags;
	struct ucred *cred;
	vm_inherit_t inheritance;

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT((object != kmem_object && object != kernel_object) ||
	    (cow & MAP_COPY_ON_WRITE) == 0,
	    ("vm_map_insert: kmem or kernel object and COW"));
	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
	    ("vm_map_insert: paradoxical MAP_NOFAULT request"));

	/*
	 * Check that the start and end points are not bogus.
	 */
	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);

	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;
	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
	if (cow & MAP_NOFAULT)
		protoeflags |= MAP_ENTRY_NOFAULT;
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;
	if (cow & MAP_STACK_GROWS_DOWN)
		protoeflags |= MAP_ENTRY_GROWS_DOWN;
	if (cow & MAP_STACK_GROWS_UP)
		protoeflags |= MAP_ENTRY_GROWS_UP;
	if (cow & MAP_VN_WRITECOUNT)
		protoeflags |= MAP_ENTRY_VN_WRITECNT;
	if (cow & MAP_INHERIT_SHARE)
		inheritance = VM_INHERIT_SHARE;
	else
		inheritance = VM_INHERIT_DEFAULT;

	cred = NULL;
	if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
		goto charged;
	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
			return (KERN_RESOURCE_SHORTAGE);
		KASSERT(object == NULL || (protoeflags & MAP_ENTRY_NEEDS_COPY) ||
		    object->cred == NULL,
		    ("OVERCOMMIT: vm_map_insert o %p", object));
		cred = curthread->td_ucred;
	}

charged:
	/* Expand the kernel pmap, if necessary. */
	if (map == kernel_map && end > kernel_vm_end)
		pmap_growkernel(end);
	if (object != NULL) {
		/*
		 * OBJ_ONEMAPPING must be cleared unless this mapping
		 * is trivially proven to be the only mapping for any
		 * of the object's pages.  (Object granularity
		 * reference counting is insufficient to recognize
		 * aliases with precision.)
		 */
		VM_OBJECT_WLOCK(object);
		if (object->ref_count > 1 || object->shadow_count != 0)
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		VM_OBJECT_WUNLOCK(object);
	}
	else if ((prev_entry != &map->header) &&
	    (prev_entry->eflags == protoeflags) &&
	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 &&
	    (prev_entry->end == start) &&
	    (prev_entry->wired_count == 0) &&
	    (prev_entry->cred == cred ||
	    (prev_entry->object.vm_object != NULL &&
	    (prev_entry->object.vm_object->cred == cred))) &&
	    vm_object_coalesce(prev_entry->object.vm_object,
	    prev_entry->offset,
	    (vm_size_t)(prev_entry->end - prev_entry->start),
	    (vm_size_t)(end - prev_entry->end), cred != NULL &&
	    (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if ((prev_entry->inheritance == inheritance) &&
		    (prev_entry->protection == prot) &&
		    (prev_entry->max_protection == max)) {
			map->size += (end - prev_entry->end);
			prev_entry->end = end;
			vm_map_entry_resize_free(map, prev_entry);
			vm_map_simplify_entry(map, prev_entry);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
		    (prev_entry->end - prev_entry->start);
		vm_object_reference(object);
		if (cred != NULL && object != NULL && object->cred != NULL &&
		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
			/* Object already accounts for this uid. */
			cred = NULL;
		}
	}
	if (cred != NULL)
		crhold(cred);

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;
	new_entry->cred = NULL;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->avail_ssize = 0;

	new_entry->inheritance = inheritance;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;
	new_entry->wiring_thread = NULL;
	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
	new_entry->next_read = start;

	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
	    ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
	new_entry->cred = cred;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * Try to coalesce the new entry with both the previous and next
	 * entries in the list.  Previously, we only attempted to coalesce
	 * with the previous entry when object is NULL.  Here, we handle the
	 * other cases, which are less common.
	 */
	vm_map_simplify_entry(map, new_entry);

	if (cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) {
		vm_map_pmap_enter(map, start, prot,
		    object, OFF_TO_IDX(offset), end - start,
		    cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}
/*
 *	vm_map_findspace:
 *
 *	Find the first fit (lowest VM address) for "length" free bytes
 *	beginning at address >= start in the given map.
 *
 *	In a vm_map_entry, "adj_free" is the amount of free space
 *	adjacent (higher address) to this entry, and "max_free" is the
 *	maximum amount of contiguous free space in its subtree.  This
 *	allows finding a free region in one path down the tree, so
 *	O(log n) amortized with splay trees.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: 0 on success, and starting address in *addr,
 *		 1 if insufficient space.
 */
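/*
 * For example (a sketch): a 64K request descends from the root of the
 * right subtree, going left while the left subtree's max_free is
 * >= 64K, taking the first entry whose own adj_free gap fits, and
 * otherwise descending right; max_free pruning confines the search to
 * a single root-to-leaf path.
 */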
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
    vm_offset_t *addr)	/* OUT */
{
	vm_map_entry_t entry;
	vm_offset_t st;

	/*
	 * Request must fit within min/max VM address and must avoid
	 * address wrap.
	 */
	if (start < map->min_offset)
		start = map->min_offset;
	if (start + length > map->max_offset || start + length < start)
		return (1);

	/* Empty tree means wide open address space. */
	if (map->root == NULL) {
		*addr = start;
		return (0);
	}

	/*
	 * After splay, if start comes before root node, then there
	 * must be a gap from start to the root.
	 */
	map->root = vm_map_entry_splay(start, map->root);
	if (start + length <= map->root->start) {
		*addr = start;
		return (0);
	}

	/*
	 * Root is the last node that might begin its gap before
	 * start, and this is the last comparison where address
	 * wrap might be a problem.
	 */
	st = (start > map->root->end) ? start : map->root->end;
	if (length <= map->root->end + map->root->adj_free - st) {
		*addr = st;
		return (0);
	}

	/* With max_free, can immediately tell if no solution. */
	entry = map->root->right;
	if (entry == NULL || length > entry->max_free)
		return (1);

	/*
	 * Search the right subtree in the order: left subtree, root,
	 * right subtree (first fit).  The previous splay implies that
	 * all regions in the right subtree have addresses > start.
	 */
	while (entry != NULL) {
		if (entry->left != NULL && entry->left->max_free >= length)
			entry = entry->left;
		else if (entry->adj_free >= length) {
			*addr = entry->end;
			return (0);
		} else
			entry = entry->right;
	}

	/* Can't get here, so panic if we do. */
	panic("vm_map_findspace: max_free corrupt");
}
int
vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_size_t length, vm_prot_t prot,
    vm_prot_t max, int cow)
{
	vm_offset_t end;
	int result;

	end = start + length;
	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_fixed: non-NULL backing object for stack"));
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if ((cow & MAP_CHECK_EXCL) == 0)
		vm_map_delete(map, start, end);
	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
		result = vm_map_stack_locked(map, start, length, sgrowsiz,
		    prot, max, cow);
	} else {
		result = vm_map_insert(map, object, offset, start, end,
		    prot, max, cow);
	}
	vm_map_unlock(map);
	return (result);
}
/*
 *	vm_map_find finds an unallocated region in the target address
 *	map with the given length.  The search is defined to be
 *	first-fit from the specified address; the region found is
 *	returned in the same parameter.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr,	/* IN/OUT */
    vm_size_t length, vm_offset_t max_addr, int find_space,
    vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_offset_t alignment, initial_addr, start;
	int result;

	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
	    object == NULL,
	    ("vm_map_find: non-NULL backing object for stack"));
	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
	    (object->flags & OBJ_COLORED) == 0))
		find_space = VMFS_ANY_SPACE;
	if (find_space >> 8 != 0) {
		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
		alignment = (vm_offset_t)1 << (find_space >> 8);
	} else
		alignment = 0;
	initial_addr = *addr;
again:
	start = initial_addr;
	vm_map_lock(map);
	do {
		if (find_space != VMFS_NO_SPACE) {
			if (vm_map_findspace(map, start, length, addr) ||
			    (max_addr != 0 && *addr + length > max_addr)) {
				vm_map_unlock(map);
				if (find_space == VMFS_OPTIMAL_SPACE) {
					find_space = VMFS_ANY_SPACE;
					goto again;
				}
				return (KERN_NO_SPACE);
			}
			switch (find_space) {
			case VMFS_SUPER_SPACE:
			case VMFS_OPTIMAL_SPACE:
				pmap_align_superpage(object, offset, addr,
				    length);
				break;
			case VMFS_ANY_SPACE:
				break;
			default:
				if ((*addr & (alignment - 1)) != 0) {
					*addr &= ~(alignment - 1);
					*addr += alignment;
				}
				break;
			}

			start = *addr;
		}
		if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
			result = vm_map_stack_locked(map, start, length,
			    sgrowsiz, prot, max, cow);
		} else {
			result = vm_map_insert(map, object, offset, start,
			    start + length, prot, max, cow);
		}
	} while (result == KERN_NO_SPACE && find_space != VMFS_NO_SPACE &&
	    find_space != VMFS_ANY_SPACE);
	vm_map_unlock(map);
	return (result);
}
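/*
 * Usage sketch (an assumed caller): reserve an anonymous, pageable
 * region wherever it fits, letting vm_map_find() pick the address at
 * or above the hint:
 *
 *	vm_offset_t addr = vm_map_min(map);
 *	rv = vm_map_find(map, NULL, 0, &addr, size, 0, VMFS_ANY_SPACE,
 *	    VM_PROT_ALL, VM_PROT_ALL, 0);
 */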
/*
 *	vm_map_simplify_entry:
 *
 *	Simplify the given map entry by merging with either neighbor.  This
 *	routine also has the ability to merge with both neighbors.
 *
 *	The map must be locked.
 *
 *	This routine guarantees that the passed entry remains valid (though
 *	possibly extended).  When merging, this routine may delete one or
 *	both neighbors.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if ((entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP |
	    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP)) != 0)
		return;

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ( (prev->end == entry->start) &&
		     (prev->object.vm_object == entry->object.vm_object) &&
		     (!prev->object.vm_object ||
			(prev->offset + prevsize == entry->offset)) &&
		     (prev->eflags == entry->eflags) &&
		     (prev->protection == entry->protection) &&
		     (prev->max_protection == entry->max_protection) &&
		     (prev->inheritance == entry->inheritance) &&
		     (prev->wired_count == entry->wired_count) &&
		     (prev->cred == entry->cred)) {
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (entry->prev != &map->header)
				vm_map_entry_resize_free(map, entry->prev);

			/*
			 * If the backing object is a vnode object,
			 * vm_object_deallocate() calls vrele().
			 * However, vrele() does not lock the vnode
			 * because the vnode has additional
			 * references.  Thus, the map lock can be kept
			 * without causing a lock-order reversal with
			 * the vnode lock.
			 *
			 * Since we count the number of virtual page
			 * mappings in object->un_pager.vnp.writemappings,
			 * the writemappings value should not be adjusted
			 * when the entry is disposed of.
			 */
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			if (prev->cred != NULL)
				crfree(prev->cred);
			vm_map_entry_dispose(map, prev);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		     (!entry->object.vm_object ||
			(entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->wired_count == entry->wired_count) &&
		    (next->cred == entry->cred)) {
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			vm_map_entry_resize_free(map, entry);

			/*
			 * See comment above.
			 */
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			if (next->cred != NULL)
				crfree(next->cred);
			vm_map_entry_dispose(map, next);
		}
	}
}
/*
 *	vm_map_clip_start:	[ internal use only ]
 *
 *	Asserts that the given entry begins at or after
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr) \
{ \
	if (startaddr > entry->start) \
		_vm_map_clip_start(map, entry, startaddr); \
}
/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
{
	vm_map_entry_t new_entry;

	VM_MAP_ASSERT_LOCKED(map);

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
		if (entry->cred != NULL) {
			object->cred = entry->cred;
			object->charge = entry->end - entry->start;
			entry->cred = NULL;
		}
	} else if (entry->object.vm_object != NULL &&
	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
	    entry->cred != NULL) {
		VM_OBJECT_WLOCK(entry->object.vm_object);
		KASSERT(entry->object.vm_object->cred == NULL,
		    ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
		entry->object.vm_object->cred = entry->cred;
		entry->object.vm_object->charge = entry->end - entry->start;
		VM_OBJECT_WUNLOCK(entry->object.vm_object);
		entry->cred = NULL;
	}

	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;
	if (new_entry->cred != NULL)
		crhold(entry->cred);

	vm_map_entry_link(map, entry->prev, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
		/*
		 * The object->un_pager.vnp.writemappings for the
		 * object of MAP_ENTRY_VN_WRITECNT type entry shall be
		 * kept as is here.  The virtual pages are
		 * re-distributed among the clipped entries, so the sum is
		 * left the same.
		 */
	}
}
/*
 *	vm_map_clip_end:	[ internal use only ]
 *
 *	Asserts that the given entry ends at or before
 *	the specified address; if necessary,
 *	it splits the entry into two.
 */
#define vm_map_clip_end(map, entry, endaddr) \
{ \
	if ((endaddr) < (entry->end)) \
		_vm_map_clip_end((map), (entry), (endaddr)); \
}
/*
 *	This routine is called only when it is known that
 *	the entry must be split.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
{
	vm_map_entry_t new_entry;

	VM_MAP_ASSERT_LOCKED(map);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_object_t object;
		object = vm_object_allocate(OBJT_DEFAULT,
		    atop(entry->end - entry->start));
		entry->object.vm_object = object;
		entry->offset = 0;
		if (entry->cred != NULL) {
			object->cred = entry->cred;
			object->charge = entry->end - entry->start;
			entry->cred = NULL;
		}
	} else if (entry->object.vm_object != NULL &&
	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
	    entry->cred != NULL) {
		VM_OBJECT_WLOCK(entry->object.vm_object);
		KASSERT(entry->object.vm_object->cred == NULL,
		    ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
		entry->object.vm_object->cred = entry->cred;
		entry->object.vm_object->charge = entry->end - entry->start;
		VM_OBJECT_WUNLOCK(entry->object.vm_object);
		entry->cred = NULL;
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */
	new_entry = vm_map_entry_create(map);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);
	if (new_entry->cred != NULL)
		crhold(entry->cred);

	vm_map_entry_link(map, entry, new_entry);

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
		vm_object_reference(new_entry->object.vm_object);
	}
}
/*
 *	vm_map_submap:		[ kernel use only ]
 *
 *	Mark the given range as handled by a subordinate map.
 *
 *	This range must have been created with vm_map_find,
 *	and no other operations may have been performed on this
 *	range prior to calling vm_map_submap.
 *
 *	Only a limited number of operations can be performed
 *	within this range after calling vm_map_submap:
 *		vm_fault
 *	[Don't try vm_map_copy!]
 *
 *	To remove a submapping, one must first remove the
 *	range from the superior map, and then destroy the
 *	submap (if desired).  [Better yet, don't try it.]
 */
int
vm_map_submap(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	vm_map_t submap)
{
	vm_map_entry_t entry;
	int result = KERN_INVALID_ARGUMENT;

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else
		entry = entry->next;

	vm_map_clip_end(map, entry, end);

	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->object.sub_map = submap;
		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);

	return (result);
}
/*
 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
 */
#define	MAX_INIT_PT	96
/*
 *	vm_map_pmap_enter:
 *
 *	Preload the specified map's pmap with mappings to the specified
 *	object's memory-resident pages.  No further physical pages are
 *	allocated, and no further virtual pages are retrieved from secondary
 *	storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
 *	limited number of page mappings are created at the low-end of the
 *	specified address range.  (For this purpose, a superpage mapping
 *	counts as one page mapping.)  Otherwise, all resident pages within
 *	the specified address range are mapped.  Because these mappings are
 *	being created speculatively, cached pages are not reactivated and
 *	mapped.
 */
static void
vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
{
	vm_offset_t start;
	vm_page_t p, p_start;
	vm_pindex_t mask, psize, threshold, tmpidx;

	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
		return;
	VM_OBJECT_RLOCK(object);
	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
		VM_OBJECT_RUNLOCK(object);
		VM_OBJECT_WLOCK(object);
		if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
			pmap_object_init_pt(map->pmap, addr, object, pindex,
			    size);
			VM_OBJECT_WUNLOCK(object);
			return;
		}
		VM_OBJECT_LOCK_DOWNGRADE(object);
	}

	psize = atop(size);
	if (psize + pindex > object->size) {
		if (object->size < pindex) {
			VM_OBJECT_RUNLOCK(object);
			return;
		}
		psize = object->size - pindex;
	}

	start = 0;
	p_start = NULL;
	threshold = MAX_INIT_PT;

	p = vm_page_find_least(object, pindex);
	/*
	 * Assert: the variable p is either (1) the page with the
	 * least pindex greater than or equal to the parameter pindex
	 * or (2) NULL.
	 */
	for (;
	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
	     p = TAILQ_NEXT(p, listq)) {
		/*
		 * don't allow an madvise to blow away our really
		 * free pages allocating pv entries.
		 */
		if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
		    vm_cnt.v_free_count < vm_cnt.v_free_reserved) ||
		    ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
		    tmpidx >= threshold)) {
			psize = tmpidx;
			break;
		}
		if (p->valid == VM_PAGE_BITS_ALL) {
			if (p_start == NULL) {
				start = addr + ptoa(tmpidx);
				p_start = p;
			}
			/* Jump ahead if a superpage mapping is possible. */
			if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
			    (pagesizes[p->psind] - 1)) == 0) {
				mask = atop(pagesizes[p->psind]) - 1;
				if (tmpidx + mask < psize &&
				    vm_page_ps_is_valid(p)) {
					p += mask;
					threshold += mask;
				}
			}
		} else if (p_start != NULL) {
			pmap_enter_object(map->pmap, start, addr +
			    ptoa(tmpidx), p_start, prot);
			p_start = NULL;
		}
	}
	if (p_start != NULL)
		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
		    p_start, prot);
	VM_OBJECT_RUNLOCK(object);
}
/*
 *	vm_map_protect:
 *
 *	Sets the protection of the specified address
 *	region in the target map.  If "set_max" is
 *	specified, the maximum protection is to be set;
 *	otherwise, only the current protection is affected.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_prot_t new_prot, boolean_t set_max)
{
	vm_map_entry_t current, entry;
	vm_object_t obj;
	struct ucred *cred;
	vm_prot_t old_prot;

	if (start == end)
		return (KERN_SUCCESS);

	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start);
	} else {
		entry = entry->next;
	}

	/*
	 * Make a first pass to check for protection violations.
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_unlock(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			return (KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/*
	 * Do an accounting pass for private read-only mappings that
	 * now will do cow due to allowed write (e.g. debugger sets
	 * breakpoint on text segment)
	 */
	for (current = entry; (current != &map->header) &&
	    (current->start < end); current = current->next) {

		vm_map_clip_end(map, current, end);

		if (set_max ||
		    ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
		    ENTRY_CHARGED(current)) {
			continue;
		}

		cred = curthread->td_ucred;
		obj = current->object.vm_object;

		if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
			if (!swap_reserve(current->end - current->start)) {
				vm_map_unlock(map);
				return (KERN_RESOURCE_SHORTAGE);
			}
			crhold(cred);
			current->cred = cred;
			continue;
		}

		VM_OBJECT_WLOCK(obj);
		if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
			VM_OBJECT_WUNLOCK(obj);
			continue;
		}

		/*
		 * Charge for the whole object allocation now, since
		 * we cannot distinguish between non-charged and
		 * charged clipped mapping of the same object later.
		 */
		KASSERT(obj->charge == 0,
		    ("vm_map_protect: object %p overcharged (entry %p)",
		    obj, current));
		if (!swap_reserve(ptoa(obj->size))) {
			VM_OBJECT_WUNLOCK(obj);
			vm_map_unlock(map);
			return (KERN_RESOURCE_SHORTAGE);
		}

		crhold(cred);
		obj->cred = cred;
		obj->charge = ptoa(obj->size);
		VM_OBJECT_WUNLOCK(obj);
	}

	/*
	 * Go back and fix up protections. [Note that clipping is not
	 * necessary the second time.]
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		old_prot = current->protection;

		if (set_max)
			current->protection =
			    (current->max_protection = new_prot) &
			    old_prot;
		else
			current->protection = new_prot;

		/*
		 * For user wired map entries, the normal lazy evaluation of
		 * write access upgrades through soft page faults is
		 * undesirable.  Instead, immediately copy any pages that are
		 * copy-on-write and enable write access in the physical map.
		 */
		if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
		    (current->protection & VM_PROT_WRITE) != 0 &&
		    (old_prot & VM_PROT_WRITE) == 0)
			vm_fault_copy_entry(map, map, current, current, NULL);

		/*
		 * When restricting access, update the physical map.  Worry
		 * about copy-on-write here.
		 */
		if ((old_prot & ~current->protection) != 0) {
#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
							VM_PROT_ALL)
			pmap_protect(map->pmap, current->start,
			    current->end,
			    current->protection & MASK(current));
#undef	MASK
		}
		vm_map_simplify_entry(map, current);
		current = current->next;
	}
	vm_map_unlock(map);
	return (KERN_SUCCESS);
}
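/*
 * Usage sketch (an assumed caller): an mprotect(2)-style request that
 * only narrows the current protection, leaving max_protection alone:
 *
 *	rv = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 */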
/*
 *	vm_map_madvise:
 *
 *	This routine traverses a process's map handling the madvise
 *	system call.  Advisories are classified as either those affecting
 *	the vm_map_entry structure, or those affecting the underlying
 *	objects.
 */
int
vm_map_madvise(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	int behav)
{
	vm_map_entry_t current, entry;
	int modify_map = 0;

	/*
	 * Some madvise calls directly modify the vm_map_entry, in which case
	 * we need to use an exclusive lock on the map and we need to perform
	 * various clipping operations.  Otherwise we only need a read-lock
	 * on the map.
	 */
	switch(behav) {
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_NOSYNC:
	case MADV_AUTOSYNC:
	case MADV_NOCORE:
	case MADV_CORE:
		if (start == end)
			return (KERN_SUCCESS);
		modify_map = 1;
		vm_map_lock(map);
		break;
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		if (start == end)
			return (KERN_SUCCESS);
		vm_map_lock_read(map);
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	/*
	 * Locate starting entry and clip if necessary.
	 */
	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		if (modify_map)
			vm_map_clip_start(map, entry, start);
	} else {
		entry = entry->next;
	}

	if (modify_map) {
		/*
		 * madvise behaviors that are implemented in the vm_map_entry.
		 *
		 * We clip the vm_map_entry so that behavioral changes are
		 * limited to the specified address range.
		 */
		for (current = entry;
		     (current != &map->header) && (current->start < end);
		     current = current->next
		) {
			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
				continue;

			vm_map_clip_end(map, current, end);

			switch (behav) {
			case MADV_NORMAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
				break;
			case MADV_SEQUENTIAL:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
				break;
			case MADV_RANDOM:
				vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
				break;
			case MADV_NOSYNC:
				current->eflags |= MAP_ENTRY_NOSYNC;
				break;
			case MADV_AUTOSYNC:
				current->eflags &= ~MAP_ENTRY_NOSYNC;
				break;
			case MADV_NOCORE:
				current->eflags |= MAP_ENTRY_NOCOREDUMP;
				break;
			case MADV_CORE:
				current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
				break;
			default:
				break;
			}
			vm_map_simplify_entry(map, current);
		}
		vm_map_unlock(map);
	} else {
		vm_pindex_t pstart, pend;

		/*
		 * madvise behaviors that are implemented in the underlying
		 * vm_object.
		 *
		 * Since we don't clip the vm_map_entry, we have to clip
		 * the vm_object pindex and count.
		 */
		for (current = entry;
		     (current != &map->header) && (current->start < end);
		     current = current->next
		) {
			vm_offset_t useEnd, useStart;

			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
				continue;

			pstart = OFF_TO_IDX(current->offset);
			pend = pstart + atop(current->end - current->start);
			useStart = current->start;
			useEnd = current->end;

			if (current->start < start) {
				pstart += atop(start - current->start);
				useStart = start;
			}
			if (current->end > end) {
				pend -= atop(current->end - end);
				useEnd = end;
			}

			if (pstart >= pend)
				continue;

			/*
			 * Perform the pmap_advise() before clearing
			 * PGA_REFERENCED in vm_page_advise().  Otherwise, a
			 * concurrent pmap operation, such as pmap_remove(),
			 * could clear a reference in the pmap and set
			 * PGA_REFERENCED on the page before the pmap_advise()
			 * had completed.  Consequently, the page would appear
			 * referenced based upon an old reference that
			 * occurred before this pmap_advise() ran.
			 */
			if (behav == MADV_DONTNEED || behav == MADV_FREE)
				pmap_advise(map->pmap, useStart, useEnd,
				    behav);

			vm_object_madvise(current->object.vm_object, pstart,
			    pend, behav);

			/*
			 * Pre-populate paging structures in the
			 * WILLNEED case.  For wired entries, the
			 * paging structures are already populated.
			 */
			if (behav == MADV_WILLNEED &&
			    current->wired_count == 0) {
				vm_map_pmap_enter(map,
				    useStart,
				    current->protection,
				    current->object.vm_object,
				    pstart,
				    ptoa(pend - pstart),
				    MAP_PREFAULT_MADVISE
				);
			}
		}
		vm_map_unlock_read(map);
	}
	return (0);
}
/*
 *	vm_map_inherit:
 *
 *	Sets the inheritance of the specified address
 *	range in the target map.  Inheritance
 *	affects how the map will be shared with
 *	child maps at the time of vmspace_fork.
 */
2284 vm_map_inherit(vm_map_t map
, vm_offset_t start
, vm_offset_t end
,
2285 vm_inherit_t new_inheritance
)
2287 vm_map_entry_t entry
;
2288 vm_map_entry_t temp_entry
;
2290 switch (new_inheritance
) {
2291 case VM_INHERIT_NONE
:
2292 case VM_INHERIT_COPY
:
2293 case VM_INHERIT_SHARE
:
2296 return (KERN_INVALID_ARGUMENT
);
2299 return (KERN_SUCCESS
);
2301 VM_MAP_RANGE_CHECK(map
, start
, end
);
2302 if (vm_map_lookup_entry(map
, start
, &temp_entry
)) {
2304 vm_map_clip_start(map
, entry
, start
);
2306 entry
= temp_entry
->next
;
2307 while ((entry
!= &map
->header
) && (entry
->start
< end
)) {
2308 vm_map_clip_end(map
, entry
, end
);
2309 entry
->inheritance
= new_inheritance
;
2310 vm_map_simplify_entry(map
, entry
);
2311 entry
= entry
->next
;
2314 return (KERN_SUCCESS
);
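/*
 * Illustrative sketch (not part of the original file): userland controls
 * these inheritance values through minherit(2), e.g. to have a region
 * shared with future children rather than copied:
 *
 *	#include <sys/mman.h>
 *
 *	minherit(p, len, INHERIT_SHARE);	// VM_INHERIT_SHARE
 *	minherit(p, len, INHERIT_NONE);		// unmapped in the child
 */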
/*
 *	vm_map_unwire:
 *
 *	Implements both kernel and user unwiring.
 */
int
vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    int flags)
{
	vm_map_entry_t entry, first_entry, tmp_entry;
	vm_offset_t saved_start;
	unsigned int last_timestamp;
	int rv;
	boolean_t need_wakeup, result, user_unwire;

	if (start == end)
		return (KERN_SUCCESS);
	user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &first_entry)) {
		if (flags & VM_MAP_WIRE_HOLESOK)
			first_entry = first_entry->next;
		else {
			vm_map_unlock(map);
			return (KERN_INVALID_ADDRESS);
		}
	}
	last_timestamp = map->timestamp;
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			/*
			 * We have not yet clipped the entry.
			 */
			saved_start = (start >= entry->start) ? start :
			    entry->start;
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			if (vm_map_unlock_and_wait(map, 0)) {
				/*
				 * Allow interruption of user unwiring?
				 */
			}
			vm_map_lock(map);
			if (last_timestamp+1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.
				 * Specifically, the entry may have been
				 * clipped, merged, or deleted.
				 */
				if (!vm_map_lookup_entry(map, saved_start,
				    &tmp_entry)) {
					if (flags & VM_MAP_WIRE_HOLESOK)
						tmp_entry = tmp_entry->next;
					else {
						if (saved_start == start) {
							/*
							 * First_entry has been deleted.
							 */
							vm_map_unlock(map);
							return (KERN_INVALID_ADDRESS);
						}
						end = saved_start;
						rv = KERN_INVALID_ADDRESS;
						goto done;
					}
				}
				if (entry == first_entry)
					first_entry = tmp_entry;
				else
					first_entry = NULL;
				entry = tmp_entry;
			}
			last_timestamp = map->timestamp;
			continue;
		}
		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);
		/*
		 * Mark the entry in case the map lock is released.  (See
		 * above.)
		 */
		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
		    entry->wiring_thread == NULL,
		    ("owned map entry %p", entry));
		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
		entry->wiring_thread = curthread;
		/*
		 * Check the map for holes in the specified region.
		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
		 */
		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
		    (entry->end < end && (entry->next == &map->header ||
		    entry->next->start > entry->end))) {
			end = entry->end;
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		/*
		 * If system unwiring, require that the entry is system wired.
		 */
		if (!user_unwire &&
		    vm_map_entry_system_wired_count(entry) == 0) {
			end = entry->end;
			rv = KERN_INVALID_ARGUMENT;
			goto done;
		}
		entry = entry->next;
	}
	rv = KERN_SUCCESS;
done:
	need_wakeup = FALSE;
	if (first_entry == NULL) {
		result = vm_map_lookup_entry(map, start, &first_entry);
		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
			first_entry = first_entry->next;
		else
			KASSERT(result, ("vm_map_unwire: lookup failed"));
	}
	for (entry = first_entry; entry != &map->header && entry->start < end;
	    entry = entry->next) {
		/*
		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
		 * space in the unwired region could have been mapped
		 * while the map lock was dropped for draining
		 * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
		 * could be simultaneously wiring this new mapping
		 * entry.  Detect these cases and skip any entries
		 * marked as in transition by us.
		 */
		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
		    entry->wiring_thread != curthread) {
			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
			    ("vm_map_unwire: !HOLESOK and new/changed entry"));
			continue;
		}

		if (rv == KERN_SUCCESS && (!user_unwire ||
		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
			if (user_unwire)
				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
			if (entry->wired_count == 1)
				vm_map_entry_unwire(map, entry);
			else
				entry->wired_count--;
		}
		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
		    ("vm_map_unwire: in-transition flag missing %p", entry));
		KASSERT(entry->wiring_thread == curthread,
		    ("vm_map_unwire: alien wire %p", entry));
		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
		entry->wiring_thread = NULL;
		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
			need_wakeup = TRUE;
		}
		vm_map_simplify_entry(map, entry);
	}
	vm_map_unlock(map);
	if (need_wakeup)
		vm_map_wakeup(map);
	return (rv);
}
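/*
 * Illustrative sketch (not part of the original file): user unwiring is
 * normally driven by munlock(2), which reaches this function with
 * VM_MAP_WIRE_USER set:
 *
 *	#include <sys/mman.h>
 *
 *	mlock(p, len);		// wires the pages
 *	munlock(p, len);	// unwires them via vm_map_unwire()
 */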
/*
 *	vm_map_wire_entry_failure:
 *
 *	Handle a wiring failure on the given entry.
 *
 *	The map should be locked.
 */
static void
vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
	    entry->wired_count == 1,
	    ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
	KASSERT(failed_addr < entry->end,
	    ("vm_map_wire_entry_failure: entry %p was fully wired", entry));

	/*
	 * If any pages at the start of this entry were successfully wired,
	 * then unwire them.
	 */
	if (failed_addr > entry->start) {
		pmap_unwire(map->pmap, entry->start, failed_addr);
		vm_object_unwire(entry->object.vm_object, entry->offset,
		    failed_addr - entry->start, PQ_ACTIVE);
	}

	/*
	 * Assign an out-of-range value to represent the failure to wire this
	 * entry.
	 */
	entry->wired_count = -1;
}
/*
 *	vm_map_wire:
 *
 *	Implements both kernel and user wiring.
 */
int
vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    int flags)
{
	vm_map_entry_t entry, first_entry, tmp_entry;
	vm_offset_t faddr, saved_end, saved_start;
	unsigned int last_timestamp;
	int rv;
	boolean_t need_wakeup, result, user_wire;
	vm_prot_t prot;

	if (start == end)
		return (KERN_SUCCESS);
	prot = 0;
	if (flags & VM_MAP_WIRE_WRITE)
		prot |= VM_PROT_WRITE;
	user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &first_entry)) {
		if (flags & VM_MAP_WIRE_HOLESOK)
			first_entry = first_entry->next;
		else {
			vm_map_unlock(map);
			return (KERN_INVALID_ADDRESS);
		}
	}
	last_timestamp = map->timestamp;
	entry = first_entry;
	while (entry != &map->header && entry->start < end) {
		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
			/*
			 * We have not yet clipped the entry.
			 */
			saved_start = (start >= entry->start) ? start :
			    entry->start;
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			if (vm_map_unlock_and_wait(map, 0)) {
				/*
				 * Allow interruption of user wiring?
				 */
			}
			vm_map_lock(map);
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.
				 * Specifically, the entry may have been
				 * clipped, merged, or deleted.
				 */
				if (!vm_map_lookup_entry(map, saved_start,
				    &tmp_entry)) {
					if (flags & VM_MAP_WIRE_HOLESOK)
						tmp_entry = tmp_entry->next;
					else {
						if (saved_start == start) {
							/*
							 * first_entry has been deleted.
							 */
							vm_map_unlock(map);
							return (KERN_INVALID_ADDRESS);
						}
						end = saved_start;
						rv = KERN_INVALID_ADDRESS;
						goto done;
					}
				}
				if (entry == first_entry)
					first_entry = tmp_entry;
				else
					first_entry = NULL;
				entry = tmp_entry;
			}
			last_timestamp = map->timestamp;
			continue;
		}
		vm_map_clip_start(map, entry, start);
		vm_map_clip_end(map, entry, end);
		/*
		 * Mark the entry in case the map lock is released.  (See
		 * above.)
		 */
		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
		    entry->wiring_thread == NULL,
		    ("owned map entry %p", entry));
		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
		entry->wiring_thread = curthread;
		if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
		    || (entry->protection & prot) != prot) {
			entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
			if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
				end = entry->end;
				rv = KERN_INVALID_ADDRESS;
				goto done;
			}
			goto next_entry;
		}
		if (entry->wired_count == 0) {
			entry->wired_count++;
			saved_start = entry->start;
			saved_end = entry->end;

			/*
			 * Release the map lock, relying on the in-transition
			 * mark.  Mark the map busy for fork.
			 */
			vm_map_busy(map);
			vm_map_unlock(map);

			faddr = saved_start;
			do {
				/*
				 * Simulate a fault to get the page and enter
				 * it into the physical map.
				 */
				if ((rv = vm_fault(map, faddr, VM_PROT_NONE,
				    VM_FAULT_WIRE)) != KERN_SUCCESS)
					break;
			} while ((faddr += PAGE_SIZE) < saved_end);
			vm_map_lock(map);
			vm_map_unbusy(map);
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.  The entry
				 * may have been clipped, but NOT merged or
				 * deleted.
				 */
				result = vm_map_lookup_entry(map, saved_start,
				    &tmp_entry);
				KASSERT(result, ("vm_map_wire: lookup failed"));
				if (entry == first_entry)
					first_entry = tmp_entry;
				else
					first_entry = NULL;
				entry = tmp_entry;
				while (entry->end < saved_end) {
					/*
					 * In case of failure, handle entries
					 * that were not fully wired here;
					 * fully wired entries are handled
					 * later.
					 */
					if (rv != KERN_SUCCESS &&
					    faddr < entry->end)
						vm_map_wire_entry_failure(map,
						    entry, faddr);
					entry = entry->next;
				}
			}
			last_timestamp = map->timestamp;
			if (rv != KERN_SUCCESS) {
				vm_map_wire_entry_failure(map, entry, faddr);
				end = entry->end;
				goto done;
			}
		} else if (!user_wire ||
		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
			entry->wired_count++;
		}
		/*
		 * Check the map for holes in the specified region.
		 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
		 */
	next_entry:
		if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
		    (entry->end < end && (entry->next == &map->header ||
		    entry->next->start > entry->end))) {
			end = entry->end;
			rv = KERN_INVALID_ADDRESS;
			goto done;
		}
		entry = entry->next;
	}
	rv = KERN_SUCCESS;
done:
	need_wakeup = FALSE;
	if (first_entry == NULL) {
		result = vm_map_lookup_entry(map, start, &first_entry);
		if (!result && (flags & VM_MAP_WIRE_HOLESOK))
			first_entry = first_entry->next;
		else
			KASSERT(result, ("vm_map_wire: lookup failed"));
	}
	for (entry = first_entry; entry != &map->header && entry->start < end;
	    entry = entry->next) {
		if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
			goto next_entry_done;

		/*
		 * If VM_MAP_WIRE_HOLESOK was specified, an empty
		 * space in the unwired region could have been mapped
		 * while the map lock was dropped for faulting in the
		 * pages or draining MAP_ENTRY_IN_TRANSITION.
		 * Moreover, another thread could be simultaneously
		 * wiring this new mapping entry.  Detect these cases
		 * and skip any entries marked as in transition by us.
		 */
		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
		    entry->wiring_thread != curthread) {
			KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
			    ("vm_map_wire: !HOLESOK and new/changed entry"));
			continue;
		}

		if (rv == KERN_SUCCESS) {
			if (user_wire)
				entry->eflags |= MAP_ENTRY_USER_WIRED;
		} else if (entry->wired_count == -1) {
			/*
			 * Wiring failed on this entry.  Thus, unwiring is
			 * unnecessary.
			 */
			entry->wired_count = 0;
		} else if (!user_wire ||
		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
			/*
			 * Undo the wiring.  Wiring succeeded on this entry
			 * but failed on a later entry.
			 */
			if (entry->wired_count == 1)
				vm_map_entry_unwire(map, entry);
			else
				entry->wired_count--;
		}
	next_entry_done:
		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
		    ("vm_map_wire: in-transition flag missing %p", entry));
		KASSERT(entry->wiring_thread == curthread,
		    ("vm_map_wire: alien wire %p", entry));
		entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
		    MAP_ENTRY_WIRE_SKIPPED);
		entry->wiring_thread = NULL;
		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
			need_wakeup = TRUE;
		}
		vm_map_simplify_entry(map, entry);
	}
	vm_map_unlock(map);
	if (need_wakeup)
		vm_map_wakeup(map);
	return (rv);
}
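/*
 * Illustrative sketch (not part of the original file): mlock(2) is the
 * usual userland entry point for this function; a kernel caller wiring a
 * user range would pass the VM_MAP_WIRE_* flags directly, e.g.:
 *
 *	rv = vm_map_wire(map, start, end,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 */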
/*
 * vm_map_sync
 *
 * Push any dirty cached pages in the address range to their pager.
 * If syncio is TRUE, dirty pages are written synchronously.
 * If invalidate is TRUE, any cached pages are freed as well.
 *
 * If the size of the region from start to end is zero, we are
 * supposed to flush all modified pages within the region containing
 * start.  Unfortunately, a region can be split or coalesced with
 * neighboring regions, making it difficult to determine what the
 * original region was.  Therefore, we approximate this requirement by
 * flushing the current region containing start.
 *
 * Returns an error if any part of the specified range is not mapped.
 */
int
vm_map_sync(
	vm_map_t map,
	vm_offset_t start,
	vm_offset_t end,
	boolean_t syncio,
	boolean_t invalidate)
{
	vm_map_entry_t current;
	vm_map_entry_t entry;
	vm_size_t size;
	vm_object_t object;
	vm_ooffset_t offset;
	unsigned int last_timestamp;
	boolean_t failed;

	vm_map_lock_read(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	if (!vm_map_lookup_entry(map, start, &entry)) {
		vm_map_unlock_read(map);
		return (KERN_INVALID_ADDRESS);
	} else if (start == end) {
		start = entry->start;
		end = entry->end;
	}
	/*
	 * Make a first pass to check for user-wired memory and holes.
	 */
	for (current = entry; current != &map->header && current->start < end;
	    current = current->next) {
		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ARGUMENT);
		}
		if (end > current->end &&
		    (current->next == &map->header ||
		    current->end != current->next->start)) {
			vm_map_unlock_read(map);
			return (KERN_INVALID_ADDRESS);
		}
	}

	if (invalidate)
		pmap_remove(map->pmap, start, end);
	failed = FALSE;

	/*
	 * Make a second pass, cleaning/uncaching pages from the indicated
	 * objects as we go.
	 */
	for (current = entry; current != &map->header && current->start < end;) {
		offset = current->offset + (start - current->start);
		size = (end <= current->end ? end : current->end) - start;
		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
			vm_map_t smap;
			vm_map_entry_t tentry;
			vm_size_t tsize;

			smap = current->object.sub_map;
			vm_map_lock_read(smap);
			(void) vm_map_lookup_entry(smap, offset, &tentry);
			tsize = tentry->end - offset;
			if (tsize < size)
				size = tsize;
			object = tentry->object.vm_object;
			offset = tentry->offset + (offset - tentry->start);
			vm_map_unlock_read(smap);
		} else {
			object = current->object.vm_object;
		}
		vm_object_reference(object);
		last_timestamp = map->timestamp;
		vm_map_unlock_read(map);
		if (!vm_object_sync(object, offset, size, syncio, invalidate))
			failed = TRUE;
		start += size;
		vm_object_deallocate(object);
		vm_map_lock_read(map);
		if (last_timestamp == map->timestamp ||
		    !vm_map_lookup_entry(map, start, &current))
			current = current->next;
	}

	vm_map_unlock_read(map);
	return (failed ? KERN_FAILURE : KERN_SUCCESS);
}
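/*
 * Illustrative sketch (not part of the original file): msync(2) is the
 * usual entry point here; MS_SYNC requests syncio == TRUE and
 * MS_INVALIDATE requests invalidate == TRUE:
 *
 *	#include <sys/mman.h>
 *
 *	msync(p, len, MS_SYNC);				// synchronous writeback
 *	msync(p, len, MS_ASYNC | MS_INVALIDATE);	// free cached pages too
 */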
/*
 *	vm_map_entry_unwire:	[ internal use only ]
 *
 *	Make the region specified by this entry pageable.
 *
 *	The map in question should be locked.
 *	[This is the reason for this routine's existence.]
 */
static void
vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(entry->wired_count > 0,
	    ("vm_map_entry_unwire: entry %p isn't wired", entry));
	pmap_unwire(map->pmap, entry->start, entry->end);
	vm_object_unwire(entry->object.vm_object, entry->offset, entry->end -
	    entry->start, PQ_ACTIVE);
	entry->wired_count = 0;
}
static void
vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
{

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
		vm_object_deallocate(entry->object.vm_object);
	uma_zfree(system_map ? kmapentzone : mapentzone, entry);
}
/*
 *	vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
static void
vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
{
	vm_object_t object;
	vm_pindex_t offidxstart, offidxend, count, size1;
	vm_ooffset_t size;

	vm_map_entry_unlink(map, entry);
	object = entry->object.vm_object;
	size = entry->end - entry->start;
	map->size -= size;

	if (entry->cred != NULL) {
		swap_release_by_cred(size, entry->cred);
		crfree(entry->cred);
	}

	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
	    (object != NULL)) {
		KASSERT(entry->cred == NULL || object->cred == NULL ||
		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
		    ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
		count = OFF_TO_IDX(size);
		offidxstart = OFF_TO_IDX(entry->offset);
		offidxend = offidxstart + count;
		VM_OBJECT_WLOCK(object);
		if (object->ref_count != 1 &&
		    ((object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
		    object == kernel_object || object == kmem_object)) {
			vm_object_collapse(object);

			/*
			 * The option OBJPR_NOTMAPPED can be passed here
			 * because vm_map_delete() already performed
			 * pmap_remove() on the only mapping to this range
			 * of pages.
			 */
			vm_object_page_remove(object, offidxstart, offidxend,
			    OBJPR_NOTMAPPED);
			if (object->type == OBJT_SWAP)
				swap_pager_freespace(object, offidxstart, count);
			if (offidxend >= object->size &&
			    offidxstart < object->size) {
				size1 = object->size;
				object->size = offidxstart;
				if (object->cred != NULL) {
					size1 -= object->size;
					KASSERT(object->charge >= ptoa(size1),
					    ("vm_map_entry_delete: object->charge < 0"));
					swap_release_by_cred(ptoa(size1), object->cred);
					object->charge -= ptoa(size1);
				}
			}
		}
		VM_OBJECT_WUNLOCK(object);
	} else
		entry->object.vm_object = NULL;
	if (map->system_map)
		vm_map_entry_deallocate(entry, TRUE);
	else {
		entry->next = curthread->td_map_def_user;
		curthread->td_map_def_user = entry;
	}
}
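/*
 * A sketch of the deferral pattern used above (illustrative, not part of
 * the original file): on user maps the entry is only queued on
 * curthread->td_map_def_user here; the actual free happens once the map
 * lock is dropped:
 *
 *	vm_map_lock(map);
 *	vm_map_delete(map, start, end);	// entries land on td_map_def_user
 *	vm_map_unlock(map);		// unlock runs vm_map_process_deferred()
 */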
/*
 *	vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target
 *	map.
 */
int
vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	vm_map_entry_t entry;
	vm_map_entry_t first_entry;

	VM_MAP_ASSERT_LOCKED(map);
	if (start == end)
		return (KERN_SUCCESS);

	/*
	 * Find the start of the region, and clip it
	 */
	if (!vm_map_lookup_entry(map, start, &first_entry))
		entry = first_entry->next;
	else {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);
	}

	/*
	 * Step through all entries in this region
	 */
	while ((entry != &map->header) && (entry->start < end)) {
		vm_map_entry_t next;

		/*
		 * Wait for wiring or unwiring of an entry to complete.
		 * Also wait for any system wirings to disappear on
		 * user maps.
		 */
		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
		    (vm_map_pmap(map) != kernel_pmap &&
		    vm_map_entry_system_wired_count(entry) != 0)) {
			unsigned int last_timestamp;
			vm_offset_t saved_start;
			vm_map_entry_t tmp_entry;

			saved_start = entry->start;
			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			last_timestamp = map->timestamp;
			(void) vm_map_unlock_and_wait(map, 0);
			vm_map_lock(map);
			if (last_timestamp + 1 != map->timestamp) {
				/*
				 * Look again for the entry because the map was
				 * modified while it was unlocked.
				 * Specifically, the entry may have been
				 * clipped, merged, or deleted.
				 */
				if (!vm_map_lookup_entry(map, saved_start,
				    &tmp_entry))
					entry = tmp_entry->next;
				else {
					entry = tmp_entry;
					vm_map_clip_start(map, entry,
					    saved_start);
				}
			}
			continue;
		}
		vm_map_clip_end(map, entry, end);

		next = entry->next;

		/*
		 * Unwire before removing addresses from the pmap; otherwise,
		 * unwiring will put the entries back in the pmap.
		 */
		if (entry->wired_count != 0) {
			vm_map_entry_unwire(map, entry);
		}

		pmap_remove(map->pmap, entry->start, entry->end);

		/*
		 * Delete the entry only after removing all pmap
		 * entries pointing to its pages.  (Otherwise, its
		 * page frames may be reallocated, and any modify bits
		 * will be set in the wrong object!)
		 */
		vm_map_entry_delete(map, entry);
		entry = next;
	}
	return (KERN_SUCCESS);
}
/*
 *	vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
int
vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	int result;

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end);
	vm_map_unlock(map);
	return (result);
}
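/*
 * Illustrative sketch (not part of the original file): this wrapper is
 * what the munmap(2) path uses; a kernel caller releasing a range from a
 * map it owns might similarly do:
 *
 *	(void) vm_map_remove(map, addr, addr + size);
 */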
/*
 *	vm_map_check_protection:
 *
 *	Assert that the target map allows the specified privilege on the
 *	entire address region given.  The entire region must be allocated.
 *
 *	WARNING!  This code does not and should not check whether the
 *	contents of the region is accessible.  For example a smaller file
 *	might be mapped into a larger address space.
 *
 *	NOTE!  This code is also called by munmap().
 *
 *	The map must be locked.  A read lock is sufficient.
 */
boolean_t
vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
			vm_prot_t protection)
{
	vm_map_entry_t entry;
	vm_map_entry_t tmp_entry;

	if (!vm_map_lookup_entry(map, start, &tmp_entry))
		return (FALSE);
	entry = tmp_entry;

	while (start < end) {
		if (entry == &map->header)
			return (FALSE);
		/*
		 * No holes allowed!
		 */
		if (start < entry->start)
			return (FALSE);
		/*
		 * Check protection associated with entry.
		 */
		if ((entry->protection & protection) != protection)
			return (FALSE);
		/* go to next entry */
		start = entry->end;
		entry = entry->next;
	}
	return (TRUE);
}
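/*
 * Illustrative sketch (not part of the original file): a caller holding
 * at least a read lock can validate a range before operating on it, in
 * the style of the munmap() path mentioned above:
 *
 *	vm_map_lock_read(map);
 *	ok = vm_map_check_protection(map, start, end, VM_PROT_READ);
 *	vm_map_unlock_read(map);
 */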
/*
 *	vm_map_copy_entry:
 *
 *	Copies the contents of the source entry to the destination
 *	entry.  The entries *must* be aligned properly.
 */
static void
vm_map_copy_entry(
	vm_map_t src_map,
	vm_map_t dst_map,
	vm_map_entry_t src_entry,
	vm_map_entry_t dst_entry,
	vm_ooffset_t *fork_charge)
{
	vm_object_t src_object;
	vm_map_entry_t fake_entry;
	vm_offset_t size;
	struct ucred *cred;
	int charged;

	VM_MAP_ASSERT_LOCKED(dst_map);

	if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
		return;

	if (src_entry->wired_count == 0 ||
	    (src_entry->protection & VM_PROT_WRITE) == 0) {
		/*
		 * If the source entry is marked needs_copy, it is already
		 * write-protected.
		 */
		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
		    (src_entry->protection & VM_PROT_WRITE) != 0) {
			pmap_protect(src_map->pmap,
			    src_entry->start,
			    src_entry->end,
			    src_entry->protection & ~VM_PROT_WRITE);
		}

		/*
		 * Make a copy of the object.
		 */
		size = src_entry->end - src_entry->start;
		if ((src_object = src_entry->object.vm_object) != NULL) {
			VM_OBJECT_WLOCK(src_object);
			charged = ENTRY_CHARGED(src_entry);
			if ((src_object->handle == NULL) &&
			    (src_object->type == OBJT_DEFAULT ||
			    src_object->type == OBJT_SWAP)) {
				vm_object_collapse(src_object);
				if ((src_object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
					vm_object_split(src_entry);
					src_object = src_entry->object.vm_object;
				}
			}
			vm_object_reference_locked(src_object);
			vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
			if (src_entry->cred != NULL &&
			    !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
				KASSERT(src_object->cred == NULL,
				    ("OVERCOMMIT: vm_map_copy_entry: cred %p",
				     src_object));
				src_object->cred = src_entry->cred;
				src_object->charge = size;
			}
			VM_OBJECT_WUNLOCK(src_object);
			dst_entry->object.vm_object = src_object;
			if (charged) {
				cred = curthread->td_ucred;
				crhold(cred);
				dst_entry->cred = cred;
				*fork_charge += size;
				if (!(src_entry->eflags &
				    MAP_ENTRY_NEEDS_COPY)) {
					crhold(cred);
					src_entry->cred = cred;
					*fork_charge += size;
				}
			}
			src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
			dst_entry->offset = src_entry->offset;
			if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
				/*
				 * MAP_ENTRY_VN_WRITECNT cannot
				 * indicate write reference from
				 * src_entry, since the entry is
				 * marked as needs copy.  Allocate a
				 * fake entry that is used to
				 * decrement object->un_pager.vnp.writecount
				 * at the appropriate time.  Attach
				 * fake_entry to the deferred list.
				 */
				fake_entry = vm_map_entry_create(dst_map);
				fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
				src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
				vm_object_reference(src_object);
				fake_entry->object.vm_object = src_object;
				fake_entry->start = src_entry->start;
				fake_entry->end = src_entry->end;
				fake_entry->next = curthread->td_map_def_user;
				curthread->td_map_def_user = fake_entry;
			}
		} else {
			dst_entry->object.vm_object = NULL;
			dst_entry->offset = 0;
			if (src_entry->cred != NULL) {
				dst_entry->cred = curthread->td_ucred;
				crhold(dst_entry->cred);
				*fork_charge += size;
			}
		}

		pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
		    dst_entry->end - dst_entry->start, src_entry->start);
	} else {
		/*
		 * We don't want to make writeable wired pages copy-on-write.
		 * Immediately copy these pages into the new map by simulating
		 * page faults.  The new pages are pageable.
		 */
		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
		    fork_charge);
	}
}
/*
 * vmspace_map_entry_forked:
 * Update the newly-forked vmspace each time a map entry is inherited
 * or copied.  The values for vm_dsize and vm_tsize are approximate
 * (and mostly-obsolete ideas in the face of mmap(2) et al.)
 */
static void
vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
    vm_map_entry_t entry)
{
	vm_size_t entrysize;
	vm_offset_t newend;

	entrysize = entry->end - entry->start;
	vm2->vm_map.size += entrysize;
	if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
		vm2->vm_ssize += btoc(entrysize);
	} else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
	    entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
		newend = MIN(entry->end,
		    (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
		vm2->vm_dsize += btoc(newend - entry->start);
	} else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
	    entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
		newend = MIN(entry->end,
		    (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
		vm2->vm_tsize += btoc(newend - entry->start);
	}
}
/*
 * vmspace_fork:
 * Create a new process vmspace structure and vm_map
 * based on those of an existing process.  The new map
 * is based on the old map, according to the inheritance
 * values on the regions in that map.
 *
 * XXX It might be worth coalescing the entries added to the new vmspace.
 *
 * The source map must not be locked.
 */
struct vmspace *
vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
{
	struct vmspace *vm2;
	vm_map_t new_map, old_map;
	vm_map_entry_t new_entry, old_entry;
	vm_object_t object;
	int locked;

	old_map = &vm1->vm_map;
	/* Copy immutable fields of vm1 to vm2. */
	vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset, NULL);
	if (vm2 == NULL)
		return (NULL);
	vm2->vm_taddr = vm1->vm_taddr;
	vm2->vm_daddr = vm1->vm_daddr;
	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
	vm_map_lock(old_map);
	if (old_map->busy)
		vm_map_wait_busy(old_map);
	new_map = &vm2->vm_map;
	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
	KASSERT(locked, ("vmspace_fork: lock failed"));

	old_entry = old_map->header.next;

	while (old_entry != &old_map->header) {
		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
			panic("vm_map_fork: encountered a submap");

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			break;

		case VM_INHERIT_SHARE:
			/*
			 * Clone the entry, creating the shared object if necessary.
			 */
			object = old_entry->object.vm_object;
			if (object == NULL) {
				object = vm_object_allocate(OBJT_DEFAULT,
					atop(old_entry->end - old_entry->start));
				old_entry->object.vm_object = object;
				old_entry->offset = 0;
				if (old_entry->cred != NULL) {
					object->cred = old_entry->cred;
					object->charge = old_entry->end -
					    old_entry->start;
					old_entry->cred = NULL;
				}
			}

			/*
			 * Add the reference before calling vm_object_shadow
			 * to ensure that a shadow object is created.
			 */
			vm_object_reference(object);
			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
				vm_object_shadow(&old_entry->object.vm_object,
				    &old_entry->offset,
				    old_entry->end - old_entry->start);
				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
				/* Transfer the second reference too. */
				vm_object_reference(
				    old_entry->object.vm_object);

				/*
				 * As in vm_map_simplify_entry(), the
				 * vnode lock will not be acquired in
				 * this call to vm_object_deallocate().
				 */
				vm_object_deallocate(object);
				object = old_entry->object.vm_object;
			}
			VM_OBJECT_WLOCK(object);
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
			if (old_entry->cred != NULL) {
				KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
				object->cred = old_entry->cred;
				object->charge = old_entry->end - old_entry->start;
				old_entry->cred = NULL;
			}

			/*
			 * Assert the correct state of the vnode
			 * v_writecount while the object is locked, to
			 * not relock it later for the assertion
			 * correctness.
			 */
			if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
			    object->type == OBJT_VNODE) {
				KASSERT(((struct vnode *)object->handle)->
				    v_writecount > 0,
				    ("vmspace_fork: v_writecount %p", object));
				KASSERT(object->un_pager.vnp.writemappings > 0,
				    ("vmspace_fork: vnp.writecount %p",
				    object));
			}
			VM_OBJECT_WUNLOCK(object);

			/*
			 * Clone the entry, referencing the shared object.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
			    MAP_ENTRY_IN_TRANSITION);
			new_entry->wiring_thread = NULL;
			new_entry->wired_count = 0;
			if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
				vnode_pager_update_writecount(object,
				    new_entry->start, new_entry->end);
			}

			/*
			 * Insert the entry into the new map -- we know we're
			 * inserting at the end of the new map.
			 */
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);
			vmspace_map_entry_forked(vm1, vm2, new_entry);

			/*
			 * Update the physical map
			 */
			pmap_copy(new_map->pmap, old_map->pmap,
			    new_entry->start,
			    (old_entry->end - old_entry->start),
			    old_entry->start);
			break;

		case VM_INHERIT_COPY:
			/*
			 * Clone the entry and link into the map.
			 */
			new_entry = vm_map_entry_create(new_map);
			*new_entry = *old_entry;
			/*
			 * Copied entry is COW over the old object.
			 */
			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
			    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
			new_entry->wiring_thread = NULL;
			new_entry->wired_count = 0;
			new_entry->object.vm_object = NULL;
			new_entry->cred = NULL;
			vm_map_entry_link(new_map, new_map->header.prev,
			    new_entry);
			vmspace_map_entry_forked(vm1, vm2, new_entry);
			vm_map_copy_entry(old_map, new_map, old_entry,
			    new_entry, fork_charge);
			break;
		}
		old_entry = old_entry->next;
	}
	/*
	 * Use inlined vm_map_unlock() to postpone handling the deferred
	 * map entries, which cannot be done until both old_map and
	 * new_map locks are released.
	 */
	sx_xunlock(&old_map->lock);
	sx_xunlock(&new_map->lock);
	vm_map_process_deferred();

	return (vm2);
}
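/*
 * Illustrative sketch (not part of the original file): the effect of the
 * inheritance values handled above is visible from userland across
 * fork(2).  With the default VM_INHERIT_COPY the child gets a
 * copy-on-write snapshot:
 *
 *	*p = 1;
 *	if (fork() == 0) {
 *		*p = 2;		// child writes its private copy
 *		_exit(0);
 *	}
 *	// the parent still reads 1 unless the region was INHERIT_SHARE
 */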
int
vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
    vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_size_t growsize, init_ssize;
	rlim_t lmemlim, vmemlim;
	int rv;

	growsize = sgrowsiz;
	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
	vm_map_lock(map);
	lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
		if (ptoa(pmap_wired_count(map->pmap)) + init_ssize > lmemlim) {
			rv = KERN_NO_SPACE;
			goto out;
		}
	}
	/* If we would blow our VMEM resource limit, no go */
	if (map->size + init_ssize > vmemlim) {
		rv = KERN_NO_SPACE;
		goto out;
	}
	rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
	    max, cow);
out:
	vm_map_unlock(map);
	return (rv);
}
static int
vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
    vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t new_entry, prev_entry;
	vm_offset_t bot, top;
	vm_size_t init_ssize;
	int orient, rv;

	/*
	 * The stack orientation is piggybacked with the cow argument.
	 * Extract it into orient and mask the cow argument so that we
	 * don't pass it around further.
	 * NOTE: We explicitly allow bi-directional stacks.
	 */
	orient = cow & (MAP_STACK_GROWS_DOWN|MAP_STACK_GROWS_UP);
	KASSERT(orient != 0, ("No stack grow direction"));

	if (addrbos < vm_map_min(map) ||
	    addrbos > vm_map_max(map) ||
	    addrbos + max_ssize < addrbos)
		return (KERN_NO_SPACE);

	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;

	/* If addr is already mapped, no go */
	if (vm_map_lookup_entry(map, addrbos, &prev_entry))
		return (KERN_NO_SPACE);

	/*
	 * If we can't accommodate max_ssize in the current mapping, no go.
	 * However, we need to be aware that subsequent user mappings might
	 * map into the space we have reserved for stack, and currently this
	 * space is not protected.
	 *
	 * Hopefully we will at least detect this condition when we try to
	 * grow the stack.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < addrbos + max_ssize))
		return (KERN_NO_SPACE);

	/*
	 * We initially map a stack of only init_ssize.  We will grow as
	 * needed later.  Depending on the orientation of the stack (i.e.
	 * the grow direction) we either map at the top of the range, the
	 * bottom of the range or in the middle.
	 *
	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
	 * and cow to be 0.  Possibly we should eliminate these as input
	 * parameters, and just pass these values here in the insert call.
	 */
	if (orient == MAP_STACK_GROWS_DOWN)
		bot = addrbos + max_ssize - init_ssize;
	else if (orient == MAP_STACK_GROWS_UP)
		bot = addrbos;
	else
		bot = round_page(addrbos + max_ssize/2 - init_ssize/2);
	top = bot + init_ssize;
	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);

	/* Now set the avail_ssize amount. */
	if (rv == KERN_SUCCESS) {
		new_entry = prev_entry->next;
		if (new_entry->end != top || new_entry->start != bot)
			panic("Bad entry start/end for new stack entry");

		new_entry->avail_ssize = max_ssize - init_ssize;
		KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
		    (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
		    ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
		KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
		    (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
		    ("new entry lacks MAP_ENTRY_GROWS_UP"));
	}

	return (rv);
}
static int stack_guard_page = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
    &stack_guard_page, 0,
    "Insert stack guard page ahead of the growable segments.");
/* Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if the
 * desired address is already mapped, or if we successfully grow
 * the stack.  Also returns KERN_SUCCESS if addr is outside the
 * stack range (this is strange, but preserves compatibility with
 * the grow function in vm_machdep.c).
 */
int
vm_map_growstack(struct proc *p, vm_offset_t addr)
{
	vm_map_entry_t next_entry, prev_entry;
	vm_map_entry_t new_entry, stack_entry;
	struct vmspace *vm = p->p_vmspace;
	vm_map_t map = &vm->vm_map;
	vm_offset_t end;
	vm_size_t growsize;
	size_t grow_amount, max_grow;
	rlim_t lmemlim, stacklim, vmemlim;
	int is_procstack, rv;
	struct ucred *cred;
#ifdef notyet
	uint64_t limit;
#endif
#ifdef RACCT
	int error;
#endif

	lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
	stacklim = lim_cur(curthread, RLIMIT_STACK);
	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
Retry:

	vm_map_lock_read(map);

	/* If addr is already in the entry range, no need to grow.*/
	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
		vm_map_unlock_read(map);
		return (KERN_SUCCESS);
	}

	next_entry = prev_entry->next;
	if (!(prev_entry->eflags & MAP_ENTRY_GROWS_UP)) {
		/*
		 * This entry does not grow upwards. Since the address lies
		 * beyond this entry, the next entry (if one exists) has to
		 * be a downward growable entry. The entry list header is
		 * never a growable entry, so it suffices to check the flags.
		 */
		if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
			vm_map_unlock_read(map);
			return (KERN_SUCCESS);
		}
		stack_entry = next_entry;
	} else {
		/*
		 * This entry grows upward. If the next entry does not at
		 * least grow downwards, this is the entry we need to grow.
		 * Otherwise we have two possible choices and we have to
		 * select one.
		 */
		if (next_entry->eflags & MAP_ENTRY_GROWS_DOWN) {
			/*
			 * We have two choices; grow the entry closest to
			 * the address to minimize the amount of growth.
			 */
			if (addr - prev_entry->end <= next_entry->start - addr)
				stack_entry = prev_entry;
			else
				stack_entry = next_entry;
		} else
			stack_entry = prev_entry;
	}

	if (stack_entry == next_entry) {
		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_DOWN, ("foo"));
		KASSERT(addr < stack_entry->start, ("foo"));
		end = (prev_entry != &map->header) ? prev_entry->end :
		    stack_entry->start - stack_entry->avail_ssize;
		grow_amount = roundup(stack_entry->start - addr, PAGE_SIZE);
		max_grow = stack_entry->start - end;
	} else {
		KASSERT(stack_entry->eflags & MAP_ENTRY_GROWS_UP, ("foo"));
		KASSERT(addr >= stack_entry->end, ("foo"));
		end = (next_entry != &map->header) ? next_entry->start :
		    stack_entry->end + stack_entry->avail_ssize;
		grow_amount = roundup(addr + 1 - stack_entry->end, PAGE_SIZE);
		max_grow = end - stack_entry->end;
	}

	if (grow_amount > stack_entry->avail_ssize) {
		vm_map_unlock_read(map);
		return (KERN_NO_SPACE);
	}

	/*
	 * If there is no longer enough space between the entries, no go;
	 * adjust the available space.  Note: this should only happen if the
	 * user has mapped into the stack area after the stack was created,
	 * and is probably an error.
	 *
	 * This also effectively destroys any guard page the user might have
	 * intended by limiting the stack size.
	 */
	if (grow_amount + (stack_guard_page ? PAGE_SIZE : 0) > max_grow) {
		if (vm_map_lock_upgrade(map))
			goto Retry;

		stack_entry->avail_ssize = max_grow;

		vm_map_unlock(map);
		return (KERN_NO_SPACE);
	}

	is_procstack = (addr >= (vm_offset_t)vm->vm_maxsaddr &&
	    addr < (vm_offset_t)p->p_sysent->sv_usrstack) ? 1 : 0;

	/*
	 * If this is the main process stack, see if we're over the stack
	 * limit.
	 */
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
		vm_map_unlock_read(map);
		return (KERN_NO_SPACE);
	}
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		if (is_procstack && racct_set(p, RACCT_STACK,
		    ctob(vm->vm_ssize) + grow_amount)) {
			PROC_UNLOCK(p);
			vm_map_unlock_read(map);
			return (KERN_NO_SPACE);
		}
		PROC_UNLOCK(p);
	}
#endif

	/* Round up the grow amount modulo sgrowsiz */
	growsize = sgrowsiz;
	grow_amount = roundup(grow_amount, growsize);
	if (grow_amount > stack_entry->avail_ssize)
		grow_amount = stack_entry->avail_ssize;
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
		grow_amount = trunc_page((vm_size_t)stacklim) -
		    ctob(vm->vm_ssize);
	}
#ifdef notyet
	PROC_LOCK(p);
	limit = racct_get_available(p, RACCT_STACK);
	PROC_UNLOCK(p);
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
		grow_amount = limit - ctob(vm->vm_ssize);
#endif
	if (!old_mlock && map->flags & MAP_WIREFUTURE) {
		if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
			vm_map_unlock_read(map);
			rv = KERN_NO_SPACE;
			goto out;
		}
#ifdef RACCT
		if (racct_enable) {
			PROC_LOCK(p);
			if (racct_set(p, RACCT_MEMLOCK,
			    ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
				PROC_UNLOCK(p);
				vm_map_unlock_read(map);
				rv = KERN_NO_SPACE;
				goto out;
			}
			PROC_UNLOCK(p);
		}
#endif
	}
	/* If we would blow our VMEM resource limit, no go */
	if (map->size + grow_amount > vmemlim) {
		vm_map_unlock_read(map);
		rv = KERN_NO_SPACE;
		goto out;
	}
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
			PROC_UNLOCK(p);
			vm_map_unlock_read(map);
			rv = KERN_NO_SPACE;
			goto out;
		}
		PROC_UNLOCK(p);
	}
#endif

	if (vm_map_lock_upgrade(map))
		goto Retry;

	if (stack_entry == next_entry) {
		/*
		 * Growing downward.
		 */
		/* Get the preliminary new entry start value */
		addr = stack_entry->start - grow_amount;

		/*
		 * If this puts us into the previous entry, cut back our
		 * growth to the available space. Also, see the note above.
		 */
		if (addr < end) {
			stack_entry->avail_ssize = max_grow;
			addr = end;
			if (stack_guard_page)
				addr += PAGE_SIZE;
		}

		rv = vm_map_insert(map, NULL, 0, addr, stack_entry->start,
		    next_entry->protection, next_entry->max_protection,
		    MAP_STACK_GROWS_DOWN);

		/* Adjust the available stack space by the amount we grew. */
		if (rv == KERN_SUCCESS) {
			new_entry = prev_entry->next;
			KASSERT(new_entry == stack_entry->prev, ("foo"));
			KASSERT(new_entry->end == stack_entry->start, ("foo"));
			KASSERT(new_entry->start == addr, ("foo"));
			KASSERT((new_entry->eflags & MAP_ENTRY_GROWS_DOWN) !=
			    0, ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
			grow_amount = new_entry->end - new_entry->start;
			new_entry->avail_ssize = stack_entry->avail_ssize -
			    grow_amount;
			stack_entry->eflags &= ~MAP_ENTRY_GROWS_DOWN;
		}
	} else {
		/*
		 * Growing upward.
		 */
		addr = stack_entry->end + grow_amount;

		/*
		 * If this puts us into the next entry, cut back our growth
		 * to the available space. Also, see the note above.
		 */
		if (addr > end) {
			stack_entry->avail_ssize = end - stack_entry->end;
			addr = end;
			if (stack_guard_page)
				addr -= PAGE_SIZE;
		}

		grow_amount = addr - stack_entry->end;
		cred = stack_entry->cred;
		if (cred == NULL && stack_entry->object.vm_object != NULL)
			cred = stack_entry->object.vm_object->cred;
		if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
			rv = KERN_NO_SPACE;
		/* Grow the underlying object if applicable. */
		else if (stack_entry->object.vm_object == NULL ||
		    vm_object_coalesce(stack_entry->object.vm_object,
		    stack_entry->offset,
		    (vm_size_t)(stack_entry->end - stack_entry->start),
		    (vm_size_t)grow_amount, cred != NULL)) {
			map->size += (addr - stack_entry->end);
			/* Update the current entry. */
			stack_entry->end = addr;
			stack_entry->avail_ssize -= grow_amount;
			vm_map_entry_resize_free(map, stack_entry);
			rv = KERN_SUCCESS;
		} else
			rv = KERN_FAILURE;
	}

	if (rv == KERN_SUCCESS && is_procstack)
		vm->vm_ssize += btoc(grow_amount);

	vm_map_unlock(map);

	/*
	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
	 */
	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE)) {
		vm_map_wire(map,
		    (stack_entry == next_entry) ? addr : addr - grow_amount,
		    (stack_entry == next_entry) ? stack_entry->start : addr,
		    (p->p_flag & P_SYSTEM)
		    ? VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES
		    : VM_MAP_WIRE_USER|VM_MAP_WIRE_NOHOLES);
	}

out:
#ifdef RACCT
	if (racct_enable && rv != KERN_SUCCESS) {
		PROC_LOCK(p);
		error = racct_set(p, RACCT_VMEM, map->size);
		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
		if (!old_mlock) {
			error = racct_set(p, RACCT_MEMLOCK,
			    ptoa(pmap_wired_count(map->pmap)));
			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
		}
		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
		PROC_UNLOCK(p);
	}
#endif

	return (rv);
}
/*
 * Unshare the specified VM space for exec.  If other processes are
 * mapped to it, then create a new one.  The new vmspace is null.
 */
int
vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;

	KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
	    ("vmspace_exec recursed"));
	newvmspace = vmspace_alloc(minuser, maxuser, NULL);
	if (newvmspace == NULL)
		return (ENOMEM);
	newvmspace->vm_swrss = oldvmspace->vm_swrss;
	/*
	 * This code is written like this for prototype purposes.  The
	 * goal is to avoid running down the vmspace here, but let the
	 * other processes that are still using the vmspace finally
	 * run it down.  Even though there is little or no chance of blocking
	 * here, it is a good idea to keep this form for future mods.
	 */
	PROC_VMSPACE_LOCK(p);
	p->p_vmspace = newvmspace;
	PROC_VMSPACE_UNLOCK(p);
	if (p == curthread->td_proc)
		pmap_activate(curthread);
	curthread->td_pflags |= TDP_EXECVMSPC;
	return (0);
}
/*
 * Unshare the specified VM space for forcing COW.  This
 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
 */
int
vmspace_unshare(struct proc *p)
{
	struct vmspace *oldvmspace = p->p_vmspace;
	struct vmspace *newvmspace;
	vm_ooffset_t fork_charge;

	if (oldvmspace->vm_refcnt == 1)
		return (0);
	fork_charge = 0;
	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
	if (newvmspace == NULL)
		return (ENOMEM);
	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
		vmspace_free(newvmspace);
		return (ENOMEM);
	}
	PROC_VMSPACE_LOCK(p);
	p->p_vmspace = newvmspace;
	PROC_VMSPACE_UNLOCK(p);
	if (p == curthread->td_proc)
		pmap_activate(curthread);
	vmspace_free(oldvmspace);
	return (0);
}
/*
 *	vm_map_lookup:
 *
 *	Finds the VM object, offset, and
 *	protection for a given virtual address in the
 *	specified map, assuming a page fault of the
 *	type specified.
 *
 *	Leaves the map in question locked for read; return
 *	values are guaranteed until a vm_map_lookup_done
 *	call is performed.  Note that the map argument
 *	is in/out; the returned map must be used in
 *	the call to vm_map_lookup_done.
 *
 *	A handle (out_entry) is returned for use in
 *	vm_map_lookup_done, to make that fast.
 *
 *	If a lookup is requested with "write protection"
 *	specified, the map may be changed to perform virtual
 *	copying operations, although the data referenced will
 *	remain the same.
 */
int
vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
	      vm_offset_t vaddr,
	      vm_prot_t fault_typea,
	      vm_map_entry_t *out_entry,	/* OUT */
	      vm_object_t *object,		/* OUT */
	      vm_pindex_t *pindex,		/* OUT */
	      vm_prot_t *out_prot,		/* OUT */
	      boolean_t *wired)			/* OUT */
{
	vm_map_entry_t entry;
	vm_map_t map = *var_map;
	vm_prot_t prot;
	vm_prot_t fault_type = fault_typea;
	vm_object_t eobject;
	vm_size_t size;
	struct ucred *cred;

RetryLookup:;

	vm_map_lock_read(map);

	/*
	 * Lookup the faulting address.
	 */
	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
		vm_map_unlock_read(map);
		return (KERN_INVALID_ADDRESS);
	}

	entry = *out_entry;

	/*
	 * Handle submaps.
	 */
	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		vm_map_t old_map = map;

		*var_map = map = entry->object.sub_map;
		vm_map_unlock_read(old_map);
		goto RetryLookup;
	}

	/*
	 * Check whether this task is allowed to have this page.
	 */
	prot = entry->protection;
	fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
		vm_map_unlock_read(map);
		return (KERN_PROTECTION_FAILURE);
	}
	KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
	    ("entry %p flags %x", entry, entry->eflags));
	if ((fault_typea & VM_PROT_COPY) != 0 &&
	    (entry->max_protection & VM_PROT_WRITE) == 0 &&
	    (entry->eflags & MAP_ENTRY_COW) == 0) {
		vm_map_unlock_read(map);
		return (KERN_PROTECTION_FAILURE);
	}

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */
	*wired = (entry->wired_count != 0);
	if (*wired)
		fault_type = entry->protection;
	size = entry->end - entry->start;
	/*
	 * If the entry was copy-on-write, we either ...
	 */
	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
		/*
		 * If we want to write the page, we may as well handle that
		 * now since we've got the map locked.
		 *
		 * If we don't need to write the page, we just demote the
		 * permissions allowed.
		 */
		if ((fault_type & VM_PROT_WRITE) != 0 ||
		    (fault_typea & VM_PROT_COPY) != 0) {
			/*
			 * Make a new object, and place it in the object
			 * chain.  Note that no new references have appeared
			 * -- one just moved from the map to the new
			 * object.
			 */
			if (vm_map_lock_upgrade(map))
				goto RetryLookup;

			if (entry->cred == NULL) {
				/*
				 * The debugger owner is charged for
				 * the memory.
				 */
				cred = curthread->td_ucred;
				crhold(cred);
				if (!swap_reserve_by_cred(size, cred)) {
					crfree(cred);
					vm_map_unlock(map);
					return (KERN_RESOURCE_SHORTAGE);
				}
				entry->cred = cred;
			}
			vm_object_shadow(&entry->object.vm_object,
			    &entry->offset, size);
			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
			eobject = entry->object.vm_object;
			if (eobject->cred != NULL) {
				/*
				 * The object was not shadowed.
				 */
				swap_release_by_cred(size, entry->cred);
				crfree(entry->cred);
				entry->cred = NULL;
			} else if (entry->cred != NULL) {
				VM_OBJECT_WLOCK(eobject);
				eobject->cred = entry->cred;
				eobject->charge = size;
				VM_OBJECT_WUNLOCK(eobject);
				entry->cred = NULL;
			}

			vm_map_lock_downgrade(map);
		} else {
			/*
			 * We're attempting to read a copy-on-write page --
			 * don't allow writes.
			 */
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * Create an object if necessary.
	 */
	if (entry->object.vm_object == NULL &&
	    !map->system_map) {
		if (vm_map_lock_upgrade(map))
			goto RetryLookup;
		entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
		    atop(size));
		entry->offset = 0;
		if (entry->cred != NULL) {
			VM_OBJECT_WLOCK(entry->object.vm_object);
			entry->object.vm_object->cred = entry->cred;
			entry->object.vm_object->charge = size;
			VM_OBJECT_WUNLOCK(entry->object.vm_object);
			entry->cred = NULL;
		}
		vm_map_lock_downgrade(map);
	}

	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */
	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
	*object = entry->object.vm_object;

	*out_prot = prot;
	return (KERN_SUCCESS);
}
/*
 *	vm_map_lookup_locked:
 *
 *	Lookup the faulting address.  A version of vm_map_lookup that returns
 *	KERN_FAILURE instead of blocking on map lock or memory allocation.
 */
int
vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
		     vm_offset_t vaddr,
		     vm_prot_t fault_typea,
		     vm_map_entry_t *out_entry,	/* OUT */
		     vm_object_t *object,	/* OUT */
		     vm_pindex_t *pindex,	/* OUT */
		     vm_prot_t *out_prot,	/* OUT */
		     boolean_t *wired)		/* OUT */
{
	vm_map_entry_t entry;
	vm_map_t map = *var_map;
	vm_prot_t prot;
	vm_prot_t fault_type = fault_typea;

	/*
	 * Lookup the faulting address.
	 */
	if (!vm_map_lookup_entry(map, vaddr, out_entry))
		return (KERN_INVALID_ADDRESS);

	entry = *out_entry;

	/*
	 * Fail if the entry refers to a submap.
	 */
	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
		return (KERN_FAILURE);

	/*
	 * Check whether this task is allowed to have this page.
	 */
	prot = entry->protection;
	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
	if ((fault_type & prot) != fault_type)
		return (KERN_PROTECTION_FAILURE);

	/*
	 * If this page is not pageable, we have to get it for all possible
	 * accesses.
	 */
	*wired = (entry->wired_count != 0);
	if (*wired)
		fault_type = entry->protection;

	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
		/*
		 * Fail if the entry was copy-on-write for a write fault.
		 */
		if (fault_type & VM_PROT_WRITE)
			return (KERN_FAILURE);
		/*
		 * We're attempting to read a copy-on-write page --
		 * don't allow writes.
		 */
		prot &= ~VM_PROT_WRITE;
	}

	/*
	 * Fail if an object should be created.
	 */
	if (entry->object.vm_object == NULL && !map->system_map)
		return (KERN_FAILURE);

	/*
	 * Return the object/offset from this entry.  If the entry was
	 * copy-on-write or empty, it has been fixed up.
	 */
	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
	*object = entry->object.vm_object;

	*out_prot = prot;
	return (KERN_SUCCESS);
}
/*
 *	vm_map_lookup_done:
 *
 *	Releases locks acquired by a vm_map_lookup
 *	(according to the handle returned by that lookup).
 */
void
vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
{

	/*
	 * Unlock the main-level map
	 */
	vm_map_unlock_read(map);
}
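/*
 * Illustrative sketch (not part of the original file): the lookup/done
 * pair brackets a page-fault style access, in the manner of vm_fault():
 *
 *	rv = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry,
 *	    &object, &pindex, &prot, &wired);
 *	if (rv == KERN_SUCCESS) {
 *		// ... use object/pindex while the map stays read-locked
 *		vm_map_lookup_done(map, entry);
 *	}
 */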
4228 #include "opt_ddb.h"
4230 #include <sys/kernel.h>
4232 #include <ddb/ddb.h>
4235 vm_map_print(vm_map_t map
)
4237 vm_map_entry_t entry
;
4239 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4241 (void *)map
->pmap
, map
->nentries
, map
->timestamp
);
4244 for (entry
= map
->header
.next
; entry
!= &map
->header
;
4245 entry
= entry
->next
) {
4246 db_iprintf("map entry %p: start=%p, end=%p\n",
4247 (void *)entry
, (void *)entry
->start
, (void *)entry
->end
);
4249 static char *inheritance_name
[4] =
4250 {"share", "copy", "none", "donate_copy"};
4252 db_iprintf(" prot=%x/%x/%s",
4254 entry
->max_protection
,
4255 inheritance_name
[(int)(unsigned char)entry
->inheritance
]);
4256 if (entry
->wired_count
!= 0)
4257 db_printf(", wired");
4259 if (entry
->eflags
& MAP_ENTRY_IS_SUB_MAP
) {
4260 db_printf(", share=%p, offset=0x%jx\n",
4261 (void *)entry
->object
.sub_map
,
4262 (uintmax_t)entry
->offset
);
4263 if ((entry
->prev
== &map
->header
) ||
4264 (entry
->prev
->object
.sub_map
!=
4265 entry
->object
.sub_map
)) {
4267 vm_map_print((vm_map_t
)entry
->object
.sub_map
);
4271 if (entry
->cred
!= NULL
)
4272 db_printf(", ruid %d", entry
->cred
->cr_ruid
);
4273 db_printf(", object=%p, offset=0x%jx",
4274 (void *)entry
->object
.vm_object
,
4275 (uintmax_t)entry
->offset
);
4276 if (entry
->object
.vm_object
&& entry
->object
.vm_object
->cred
)
4277 db_printf(", obj ruid %d charge %jx",
4278 entry
->object
.vm_object
->cred
->cr_ruid
,
4279 (uintmax_t)entry
->object
.vm_object
->charge
);
4280 if (entry
->eflags
& MAP_ENTRY_COW
)
4281 db_printf(", copy (%s)",
4282 (entry
->eflags
& MAP_ENTRY_NEEDS_COPY
) ? "needed" : "done");
4285 if ((entry
->prev
== &map
->header
) ||
4286 (entry
->prev
->object
.vm_object
!=
4287 entry
->object
.vm_object
)) {
4289 vm_object_print((db_expr_t
)(intptr_t)
4290 entry
->object
.vm_object
,
4299 DB_SHOW_COMMAND(map
, map
)
4303 db_printf("usage: show map <addr>\n");
4306 vm_map_print((vm_map_t
)addr
);
4309 DB_SHOW_COMMAND(procvm
, procvm
)
4314 p
= (struct proc
*) addr
;
4319 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4320 (void *)p
, (void *)p
->p_vmspace
, (void *)&p
->p_vmspace
->vm_map
,
4321 (void *)vmspace_pmap(p
->p_vmspace
));
4323 vm_map_print((vm_map_t
)&p
->p_vmspace
->vm_map
);