/* sys/vm/vm_map.c */
/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $
 */

/*
 * Virtual memory mapping module.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/serialize.h>
#include <sys/lock.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/shm.h>
#include <sys/tree.h>
#include <sys/malloc.h>
#include <sys/objcache.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>
#include <vm/vm_zone.h>

#include <sys/random.h>
#include <sys/sysctl.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
/*
 * Virtual memory maps provide for the mapping, protection, and sharing
 * of virtual memory objects.  In addition, this module provides for an
 * efficient virtual copy of memory from one map to another.
 *
 * Synchronization is required prior to most operations.
 *
 * Maps consist of an ordered doubly-linked list of simple entries.
 * A hint and an RB tree are used to speed up lookups.
 *
 * Callers looking to modify maps specify start/end addresses which cause
 * the related map entry to be clipped if necessary, and then later
 * recombined if the pieces remain compatible.
 *
 * Virtual copy operations are performed by copying VM object references
 * from one map to another, and then marking both regions as copy-on-write.
 */
static boolean_t vmspace_ctor(void *obj, void *privdata, int ocflags);
static void vmspace_dtor(void *obj, void *privdata);
static void vmspace_terminate(struct vmspace *vm, int final);

MALLOC_DEFINE(M_VMSPACE, "vmspace", "vmspace objcache backingstore");
static struct objcache *vmspace_cache;
/*
 * per-cpu page table cross mappings are initialized in early boot
 * and might require a considerable number of vm_map_entry structures.
 */
#define MAPENTRYBSP_CACHE	(MAXCPU+1)
#define MAPENTRYAP_CACHE	8

static struct vm_zone mapentzone_store;
static vm_zone_t mapentzone;

static struct vm_map_entry map_entry_init[MAX_MAPENT];
static struct vm_map_entry cpu_map_entry_init_bsp[MAPENTRYBSP_CACHE];
static struct vm_map_entry cpu_map_entry_init_ap[MAXCPU][MAPENTRYAP_CACHE];
static int randomize_mmap;
SYSCTL_INT(_vm, OID_AUTO, randomize_mmap, CTLFLAG_RW, &randomize_mmap, 0,
    "Randomize mmap offsets");
static int vm_map_relock_enable = 1;
SYSCTL_INT(_vm, OID_AUTO, map_relock_enable, CTLFLAG_RW,
    &vm_map_relock_enable, 0, "Enable vm_map relocking");
static void vmspace_drop_notoken(struct vmspace *vm);
static void vm_map_entry_shadow(vm_map_entry_t entry, int addref);
static vm_map_entry_t vm_map_entry_create(vm_map_t map, int *);
static void vm_map_entry_dispose (vm_map_t map, vm_map_entry_t entry, int *);
static void _vm_map_clip_end (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void _vm_map_clip_start (vm_map_t, vm_map_entry_t, vm_offset_t, int *);
static void vm_map_entry_delete (vm_map_t, vm_map_entry_t, int *);
static void vm_map_entry_unwire (vm_map_t, vm_map_entry_t);
static void vm_map_copy_entry (vm_map_t, vm_map_t, vm_map_entry_t,
		vm_map_entry_t);
static void vm_map_unclip_range (vm_map_t map, vm_map_entry_t start_entry,
		vm_offset_t start, vm_offset_t end, int *count, int flags);
/*
 * Initialize the vm_map module.  Must be called before any other vm_map
 * routines.
 *
 * Map and entry structures are allocated from the general purpose
 * memory pool with some exceptions:
 *
 *	- The kernel map is allocated statically.
 *	- Initial kernel map entries are allocated out of a static pool.
 *	- We must set ZONE_SPECIAL here or the early boot code can get
 *	  stuck if there are >63 cores.
 *
 *	These restrictions are necessary since malloc() uses the
 *	maps and requires map entries.
 *
 * Called from the low level boot code only.
 */
void
vm_map_startup(void)
{
	mapentzone = &mapentzone_store;
	zbootinit(mapentzone, "MAP ENTRY", sizeof (struct vm_map_entry),
		  map_entry_init, MAX_MAPENT);
	mapentzone_store.zflags |= ZONE_SPECIAL;
}
/*
 * Called prior to any vmspace allocations.
 *
 * Called from the low level boot code only.
 */
void
vm_init2(void)
{
	vmspace_cache = objcache_create_mbacked(M_VMSPACE,
						sizeof(struct vmspace),
						0, ncpus * 4,
						vmspace_ctor, vmspace_dtor,
						NULL);
	zinitna(mapentzone, NULL, 0, 0, ZONE_USE_RESERVE | ZONE_SPECIAL);
	pmap_init2();
	vm_object_init2();
}
/*
 * objcache support.  We leave the pmap root cached as long as possible
 * for performance reasons.
 */
static
boolean_t
vmspace_ctor(void *obj, void *privdata, int ocflags)
{
	struct vmspace *vm = obj;

	bzero(vm, sizeof(*vm));
	vm->vm_refcnt = VM_REF_DELETED;

	return 1;
}

static
void
vmspace_dtor(void *obj, void *privdata)
{
	struct vmspace *vm = obj;

	KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
	pmap_puninit(vmspace_pmap(vm));
}
/*
 * Red black tree functions
 *
 * The caller must hold the related map lock.
 */
static int rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b);
RB_GENERATE(vm_map_rb_tree, vm_map_entry, rb_entry, rb_vm_map_compare);

/* a->start is the address; it is the only field that has to be initialized */
static int
rb_vm_map_compare(vm_map_entry_t a, vm_map_entry_t b)
{
	if (a->start < b->start)
		return(-1);
	else if (a->start > b->start)
		return(1);
	return(0);
}
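
/*
 * Editor's note (illustrative, not part of the original source):
 * RB_GENERATE() above emits the tree operations used elsewhere in this
 * file, e.g. vm_map_rb_tree_RB_INSERT() and vm_map_rb_tree_RB_REMOVE()
 * in vm_map_entry_link()/_unlink() below.  Because only 'start' is
 * compared, a lookup key can be a dummy entry with nothing but 'start'
 * initialized.
 */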
/*
 * Initialize vmspace ref/hold counts (vmspace0 included).  There is a
 * holdcnt for every refcnt.
 */
void
vmspace_initrefs(struct vmspace *vm)
{
	vm->vm_refcnt = 1;
	vm->vm_holdcnt = 1;
}
/*
 * Allocate a vmspace structure, including a vm_map and pmap.
 * Initialize numerous fields.  While the initial allocation is zeroed,
 * subsequent reuse from the objcache leaves elements of the structure
 * intact (particularly the pmap), so portions must be zeroed.
 *
 * Returns a referenced vmspace.
 *
 * No requirements.
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max)
{
	struct vmspace *vm;

	vm = objcache_get(vmspace_cache, M_WAITOK);

	bzero(&vm->vm_startcopy,
	      (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy);
	vm_map_init(&vm->vm_map, min, max, NULL);	/* initializes token */

	/*
	 * NOTE: The hold acquires the token for safety.
	 *
	 * On return vmspace is referenced (refs=1, hold=1).  That is,
	 * each refcnt also has a holdcnt.  There can be additional holds
	 * (holdcnt) above and beyond the refcnt.  Finalization is handled in
	 * two stages, one on refs 1->0, and the second on hold 1->0.
	 */
	KKASSERT(vm->vm_holdcnt == 0);
	KKASSERT(vm->vm_refcnt == VM_REF_DELETED);
	vmspace_initrefs(vm);
	vmspace_hold(vm);
	pmap_pinit(vmspace_pmap(vm));		/* (some fields reused) */
	vm->vm_map.pmap = vmspace_pmap(vm);	/* XXX */
	vm->vm_shm = NULL;
	vm->vm_flags = 0;
	cpu_vmspace_alloc(vm);
	vmspace_drop(vm);

	return (vm);
}
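
/*
 * Usage sketch (editor's example, not part of the original file): the
 * intended ref/hold discipline.  A foreign accessor that only needs
 * temporary, stable access uses hold/drop; an entity that actually
 * shares the address space uses ref/rel.  'minaddr'/'maxaddr' are
 * placeholders for the caller's address range.
 *
 *	struct vmspace *vm = vmspace_alloc(minaddr, maxaddr);
 *
 *	vmspace_hold(vm);	// temporary access, also grabs map token
 *	// ... inspect vm->vm_map safely ...
 *	vmspace_drop(vm);	// releases token, may run stage-2 terminate
 *
 *	vmspace_ref(vm);	// official sharing reference
 *	// ... refcnt is what vmspace-sharing checks count ...
 *	vmspace_rel(vm);	// 1->0 transition runs stage-1 termination
 */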
/*
 * NOTE: Can return 0 if the vmspace is exiting.
 */
int
vmspace_getrefs(struct vmspace *vm)
{
	int32_t n;

	n = vm->vm_refcnt;
	cpu_ccfence();
	if (n & VM_REF_DELETED)
		n = -1;
	return n;
}
void
vmspace_hold(struct vmspace *vm)
{
	atomic_add_int(&vm->vm_holdcnt, 1);
	lwkt_gettoken(&vm->vm_map.token);
}

/*
 * Drop with final termination interlock.
 */
void
vmspace_drop(struct vmspace *vm)
{
	lwkt_reltoken(&vm->vm_map.token);
	vmspace_drop_notoken(vm);
}

static void
vmspace_drop_notoken(struct vmspace *vm)
{
	if (atomic_fetchadd_int(&vm->vm_holdcnt, -1) == 1) {
		if (vm->vm_refcnt & VM_REF_DELETED)
			vmspace_terminate(vm, 1);
	}
}
/*
 * A vmspace object must not be in a terminated state to be able to obtain
 * additional refs on it.
 *
 * These are official references to the vmspace, the count is used to check
 * for vmspace sharing.  Foreign accessors should use 'hold' and not 'ref'.
 *
 * XXX we need to combine hold & ref together into one 64-bit field to allow
 * holds to prevent stage-1 termination.
 */
void
vmspace_ref(struct vmspace *vm)
{
	uint32_t n;

	atomic_add_int(&vm->vm_holdcnt, 1);
	n = atomic_fetchadd_int(&vm->vm_refcnt, 1);
	KKASSERT((n & VM_REF_DELETED) == 0);
}
/*
 * Release a ref on the vmspace.  On the 1->0 transition we do stage-1
 * termination of the vmspace.  Then, on the final drop of the hold we
 * will do stage-2 final termination.
 */
void
vmspace_rel(struct vmspace *vm)
{
	uint32_t n;

	/*
	 * Drop refs.  Each ref also has a hold which is also dropped.
	 *
	 * When refs hits 0 compete to get the VM_REF_DELETED flag (holds
	 * prevent finalization) to start termination processing.
	 * Finalization occurs when the last hold count drops to 0.
	 */
	n = atomic_fetchadd_int(&vm->vm_refcnt, -1) - 1;
	while (n == 0) {
		if (atomic_cmpset_int(&vm->vm_refcnt, 0, VM_REF_DELETED)) {
			vmspace_terminate(vm, 0);
			break;
		}
		n = vm->vm_refcnt;
		cpu_ccfence();
	}
	vmspace_drop_notoken(vm);
}
/*
 * This is called during exit indicating that the vmspace is no
 * longer in use by an exiting process, but the process has not yet
 * been reaped.
 *
 * We drop refs, allowing for stage-1 termination, but maintain a holdcnt
 * to prevent stage-2 until the process is reaped.  Note the order of
 * operations: we must hold first.
 *
 * No requirements.
 */
void
vmspace_relexit(struct vmspace *vm)
{
	atomic_add_int(&vm->vm_holdcnt, 1);
	vmspace_rel(vm);
}
/*
 * Called during reap to disconnect the remainder of the vmspace from
 * the process.  On the hold drop the vmspace termination is finalized.
 *
 * No requirements.
 */
void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	vmspace_drop_notoken(vm);
}
/*
 * Called in two cases:
 *
 * (1) When the last refcnt is dropped and the vmspace becomes inactive,
 *     called with final == 0.  refcnt will be (u_int)-1 at this point,
 *     and holdcnt will still be non-zero.
 *
 * (2) When holdcnt becomes 0, called with final == 1.  There should no
 *     longer be anyone with access to the vmspace.
 *
 * VMSPACE_EXIT1 flags the primary deactivation
 * VMSPACE_EXIT2 flags the last reap
 */
static void
vmspace_terminate(struct vmspace *vm, int final)
{
	int count;

	lwkt_gettoken(&vm->vm_map.token);
	if (final == 0) {
		KKASSERT((vm->vm_flags & VMSPACE_EXIT1) == 0);
		vm->vm_flags |= VMSPACE_EXIT1;

		/*
		 * Get rid of most of the resources.  Leave the kernel pmap
		 * intact.
		 *
		 * If the pmap does not contain wired pages we can bulk-delete
		 * the pmap as a performance optimization before removing the
		 * related mappings.
		 *
		 * If the pmap contains wired pages we cannot do this
		 * pre-optimization because currently vm_fault_unwire()
		 * expects the pmap pages to exist and will not decrement
		 * p->wire_count if they do not.
		 */
		shmexit(vm);
		if (vmspace_pmap(vm)->pm_stats.wired_count) {
			vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
				      VM_MAX_USER_ADDRESS);
			pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
					  VM_MAX_USER_ADDRESS);
		} else {
			pmap_remove_pages(vmspace_pmap(vm), VM_MIN_USER_ADDRESS,
					  VM_MAX_USER_ADDRESS);
			vm_map_remove(&vm->vm_map, VM_MIN_USER_ADDRESS,
				      VM_MAX_USER_ADDRESS);
		}
		lwkt_reltoken(&vm->vm_map.token);
	} else {
		KKASSERT((vm->vm_flags & VMSPACE_EXIT1) != 0);
		KKASSERT((vm->vm_flags & VMSPACE_EXIT2) == 0);

		/*
		 * Get rid of remaining basic resources.
		 */
		vm->vm_flags |= VMSPACE_EXIT2;
		shmexit(vm);

		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
		vm_map_lock(&vm->vm_map);
		cpu_vmspace_free(vm);

		/*
		 * Lock the map, to wait out all other references to it.
		 * Delete all of the mappings and pages they hold, then call
		 * the pmap module to reclaim anything left.
		 */
		vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
			      vm->vm_map.max_offset, &count);
		vm_map_unlock(&vm->vm_map);
		vm_map_entry_release(count);

		pmap_release(vmspace_pmap(vm));
		lwkt_reltoken(&vm->vm_map.token);
		objcache_put(vmspace_cache, vm);
	}
}
/*
 * Swap usage is determined by taking the proportional swap used by
 * VM objects backing the VM map.  To make up for fractional losses,
 * if the VM object has any swap use at all the associated map entries
 * count for at least 1 swap page.
 *
 * No requirements.
 */
vm_offset_t
vmspace_swap_count(struct vmspace *vm)
{
	vm_map_t map = &vm->vm_map;
	vm_map_entry_t cur;
	vm_object_t object;
	vm_offset_t count = 0;
	vm_offset_t n;

	vmspace_hold(vm);
	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		switch(cur->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			if ((object = cur->object.vm_object) == NULL)
				break;
			if (object->swblock_count) {
				n = (cur->end - cur->start) / PAGE_SIZE;
				count += object->swblock_count *
				    SWAP_META_PAGES * n / object->size + 1;
			}
			break;
		default:
			break;
		}
	}
	vmspace_drop(vm);

	return(count);
}
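
/*
 * Worked example (editor's sketch) of the proportional formula above:
 * if an object of size 1024 pages has swblock_count == 2, with
 * SWAP_META_PAGES pages per swblock (e.g. 8), and the entry maps
 * n == 256 pages of it, the entry is charged
 * 2 * 8 * 256 / 1024 + 1 = 5 swap pages.  The trailing +1 is what
 * guarantees at least one page is charged whenever any swap is in use.
 */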
/*
 * Calculate the approximate number of anonymous pages in use by
 * this vmspace.  To make up for fractional losses, we count each
 * VM object as having at least 1 anonymous page.
 *
 * No requirements.
 */
vm_offset_t
vmspace_anonymous_count(struct vmspace *vm)
{
	vm_map_t map = &vm->vm_map;
	vm_map_entry_t cur;
	vm_object_t object;
	vm_offset_t count = 0;

	vmspace_hold(vm);
	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
		switch(cur->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			if ((object = cur->object.vm_object) == NULL)
				break;
			if (object->type != OBJT_DEFAULT &&
			    object->type != OBJT_SWAP) {
				break;
			}
			count += object->resident_page_count;
			break;
		default:
			break;
		}
	}
	vmspace_drop(vm);

	return(count);
}
/*
 * Initialize an existing vm_map structure such as that in the vmspace
 * structure.  The pmap is initialized elsewhere.
 *
 * No requirements.
 */
void
vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max, pmap_t pmap)
{
	map->header.next = map->header.prev = &map->header;
	RB_INIT(&map->rb_root);
	spin_init(&map->ilock_spin, "ilock");
	map->ilock_base = NULL;
	map->nentries = 0;
	map->size = 0;
	map->system_map = 0;
	map->min_offset = min;
	map->max_offset = max;
	map->pmap = pmap;
	map->timestamp = 0;
	map->flags = 0;
	bzero(&map->freehint, sizeof(map->freehint));
	lwkt_token_init(&map->token, "vm_map");
	lockinit(&map->lock, "vm_maplk", (hz + 9) / 10, 0);
}
/*
 * Find the first possible free address for the specified request length.
 * Returns 0 if we don't have one cached.
 */
static
vm_offset_t
vm_map_freehint_find(vm_map_t map, vm_size_t length, vm_size_t align)
{
	vm_map_freehint_t *scan;

	scan = &map->freehint[0];
	while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
		if (scan->length == length && scan->align == align)
			return(scan->start);
		++scan;
	}
	return 0;
}
/*
 * Unconditionally set the freehint.  Called by vm_map_findspace() after
 * it finds an address.  This will help us iterate optimally on the next
 * similar findspace.
 */
static
void
vm_map_freehint_update(vm_map_t map, vm_offset_t start,
		       vm_size_t length, vm_size_t align)
{
	vm_map_freehint_t *scan;

	scan = &map->freehint[0];
	while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
		if (scan->length == length && scan->align == align) {
			scan->start = start;
			return;
		}
		++scan;
	}
	scan = &map->freehint[map->freehint_newindex & VM_MAP_FFMASK];
	scan->start = start;
	scan->align = align;
	scan->length = length;
	++map->freehint_newindex;
}
/*
 * Update any existing freehints (for any alignment), for the hole we just
 * added.
 */
static
void
vm_map_freehint_hole(vm_map_t map, vm_offset_t start, vm_size_t length)
{
	vm_map_freehint_t *scan;

	scan = &map->freehint[0];
	while (scan < &map->freehint[VM_MAP_FFCOUNT]) {
		if (scan->length <= length && scan->start > start)
			scan->start = start;
		++scan;
	}
}
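
/*
 * Editor's note (illustrative): the three freehint routines cooperate
 * as a tiny (length, align) -> start cache.  A typical findspace cycle
 * looks like:
 *
 *	start = vm_map_freehint_find(map, length, align);   // may be 0
 *	// ... scan from max(start, requested start) for a hole ...
 *	vm_map_freehint_update(map, found, length, align);  // remember it
 *
 * vm_map_freehint_hole() pulls matching hints back down when an unmap
 * opens a hole at a lower address, so a stale hint costs at most a
 * longer scan rather than a permanently skipped hole.
 */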
/*
 * Shadow the vm_map_entry's object.  This typically needs to be done when
 * a write fault is taken on an entry which had previously been cloned by
 * fork().  The shared object (which might be NULL) must become private so
 * we add a shadow layer above it.
 *
 * Object allocation for anonymous mappings is deferred as long as possible.
 * When creating a shadow, however, the underlying object must be instantiated
 * so it can be shared.
 *
 * If the map segment is governed by a virtual page table then it is
 * possible to address offsets beyond the mapped area.  Just allocate
 * a maximally sized object for this case.
 *
 * If addref is non-zero an additional reference is added to the returned
 * entry.  This mechanic exists because the additional reference might have
 * to be added atomically and not after return to prevent a premature
 * collapse.
 *
 * The vm_map must be exclusively locked.
 * No other requirements.
 */
static
void
vm_map_entry_shadow(vm_map_entry_t entry, int addref)
{
	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		vm_object_shadow(&entry->object.vm_object, &entry->offset,
				 0x7FFFFFFF, addref);	/* XXX */
	} else {
		vm_object_shadow(&entry->object.vm_object, &entry->offset,
				 atop(entry->end - entry->start), addref);
	}
	entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
}
/*
 * Allocate an object for a vm_map_entry.
 *
 * Object allocation for anonymous mappings is deferred as long as possible.
 * This function is called when we can defer no longer, generally when a map
 * entry might be split or forked or takes a page fault.
 *
 * If the map segment is governed by a virtual page table then it is
 * possible to address offsets beyond the mapped area.  Just allocate
 * a maximally sized object for this case.
 *
 * The vm_map must be exclusively locked.
 * No other requirements.
 */
void
vm_map_entry_allocate_object(vm_map_entry_t entry)
{
	vm_object_t obj;

	if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
		obj = vm_object_allocate(OBJT_DEFAULT, 0x7FFFFFFF);	/* XXX */
	} else {
		obj = vm_object_allocate(OBJT_DEFAULT,
					 atop(entry->end - entry->start));
	}
	entry->object.vm_object = obj;
	entry->offset = 0;
}
/*
 * Set an initial negative count so the first attempt to reserve
 * space preloads a bunch of vm_map_entry's for this cpu.  Also
 * pre-allocate 2 vm_map_entries which will be needed by zalloc() to
 * map a new page for vm_map_entry structures.  SMP systems are
 * particularly sensitive.
 *
 * This routine is called in early boot so we cannot just call
 * vm_map_entry_reserve().
 *
 * Called from the low level boot code only (for each cpu)
 *
 * WARNING! Take care not to have too-big a static/BSS structure here
 *	    as MAXCPU can be 256+, otherwise the loader's 64MB heap
 *	    can get blown out by the kernel plus the initrd image.
 */
void
vm_map_entry_reserve_cpu_init(globaldata_t gd)
{
	vm_map_entry_t entry;
	int count;
	int i;

	gd->gd_vme_avail -= MAP_RESERVE_COUNT * 2;
	if (gd->gd_cpuid == 0) {
		entry = &cpu_map_entry_init_bsp[0];
		count = MAPENTRYBSP_CACHE;
	} else {
		entry = &cpu_map_entry_init_ap[gd->gd_cpuid][0];
		count = MAPENTRYAP_CACHE;
	}
	for (i = 0; i < count; ++i, ++entry) {
		entry->next = gd->gd_vme_base;
		gd->gd_vme_base = entry;
	}
}
/*
 * Reserves vm_map_entry structures so code later on can manipulate
 * map_entry structures within a locked map without blocking trying
 * to allocate a new vm_map_entry.
 *
 * No requirements.
 */
int
vm_map_entry_reserve(int count)
{
	struct globaldata *gd = mycpu;
	vm_map_entry_t entry;

	/*
	 * Make sure we have enough structures in gd_vme_base to handle
	 * the reservation request.
	 *
	 * The critical section protects access to the per-cpu gd.
	 */
	crit_enter();
	while (gd->gd_vme_avail < count) {
		entry = zalloc(mapentzone);
		entry->next = gd->gd_vme_base;
		gd->gd_vme_base = entry;
		++gd->gd_vme_avail;
	}
	gd->gd_vme_avail -= count;
	crit_exit();

	return(count);
}
/*
 * Releases previously reserved vm_map_entry structures that were not
 * used.  If we have too much junk in our per-cpu cache, clean some of
 * it out.
 *
 * No requirements.
 */
void
vm_map_entry_release(int count)
{
	struct globaldata *gd = mycpu;
	vm_map_entry_t entry;

	crit_enter();
	gd->gd_vme_avail += count;
	while (gd->gd_vme_avail > MAP_RESERVE_SLOP) {
		entry = gd->gd_vme_base;
		KKASSERT(entry != NULL);
		gd->gd_vme_base = entry->next;
		--gd->gd_vme_avail;
		crit_exit();
		zfree(mapentzone, entry);
		crit_enter();
	}
	crit_exit();
}
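
/*
 * Usage sketch (editor's example): callers bracket locked-map entry
 * manipulation with a reservation so no allocation can block while the
 * map is locked:
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	// ... clip / link / unlink entries, consuming from the reserve ...
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 *
 * This is exactly the pattern vm_map_submap() and vm_map_protect()
 * below follow.
 */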
/*
 * Reserve map entry structures for use in kernel_map itself.  These
 * entries have *ALREADY* been reserved on a per-cpu basis when the map
 * was inited.  This function is used by zalloc() to avoid a recursion
 * when zalloc() itself needs to allocate additional kernel memory.
 *
 * This function works like the normal reserve but does not load the
 * vm_map_entry cache (because that would result in an infinite
 * recursion).  Note that gd_vme_avail may go negative.  This is expected.
 *
 * Any caller of this function must be sure to renormalize after
 * potentially eating entries to ensure that the reserve supply
 * remains intact.
 *
 * No requirements.
 */
int
vm_map_entry_kreserve(int count)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	gd->gd_vme_avail -= count;
	crit_exit();
	KASSERT(gd->gd_vme_base != NULL,
		("no reserved entries left, gd_vme_avail = %d",
		 gd->gd_vme_avail));
	return(count);
}
/*
 * Release previously reserved map entries for kernel_map.  We do not
 * attempt to clean up like the normal release function as this would
 * cause an unnecessary (but probably not fatal) deep procedure call.
 *
 * No requirements.
 */
void
vm_map_entry_krelease(int count)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	gd->gd_vme_avail += count;
	crit_exit();
}
/*
 * Allocates a VM map entry for insertion.  No entry fields are filled in.
 *
 * The entries should have previously been reserved.  The reservation count
 * is tracked in (*countp).
 *
 * No requirements.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map, int *countp)
{
	struct globaldata *gd = mycpu;
	vm_map_entry_t entry;

	KKASSERT(*countp > 0);
	--*countp;
	crit_enter();
	entry = gd->gd_vme_base;
	KASSERT(entry != NULL, ("gd_vme_base NULL! count %d", *countp));
	gd->gd_vme_base = entry->next;
	crit_exit();

	return(entry);
}
/*
 * Dispose of a vm_map_entry that is no longer being referenced.
 *
 * No requirements.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry, int *countp)
{
	struct globaldata *gd = mycpu;

	++*countp;
	crit_enter();
	entry->next = gd->gd_vme_base;
	gd->gd_vme_base = entry;
	crit_exit();
}
/*
 * Insert/remove entries from maps.
 *
 * The related map must be exclusively locked.
 * The caller must hold map->token
 * No other requirements.
 */
static __inline void
vm_map_entry_link(vm_map_t map,
		  vm_map_entry_t after_where,
		  vm_map_entry_t entry)
{
	ASSERT_VM_MAP_LOCKED(map);

	map->nentries++;
	entry->prev = after_where;
	entry->next = after_where->next;
	entry->next->prev = entry;
	after_where->next = entry;
	if (vm_map_rb_tree_RB_INSERT(&map->rb_root, entry))
		panic("vm_map_entry_link: dup addr map %p ent %p", map, entry);
}
static __inline void
vm_map_entry_unlink(vm_map_t map,
		    vm_map_entry_t entry)
{
	vm_map_entry_t prev;
	vm_map_entry_t next;

	ASSERT_VM_MAP_LOCKED(map);

	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
		panic("vm_map_entry_unlink: attempt to mess with "
		      "locked entry! %p", entry);
	}
	prev = entry->prev;
	next = entry->next;
	next->prev = prev;
	prev->next = next;
	vm_map_rb_tree_RB_REMOVE(&map->rb_root, entry);
	map->nentries--;
}
/*
 * Finds the map entry containing (or immediately preceding) the specified
 * address in the given map.  The entry is returned in (*entry).
 *
 * The boolean result indicates whether the address is actually contained
 * in the map.
 *
 * The related map must be locked.
 * No other requirements.
 */
boolean_t
vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry)
{
	vm_map_entry_t tmp;
	vm_map_entry_t last;

	ASSERT_VM_MAP_LOCKED(map);

	/*
	 * Locate the record from the top of the tree.  'last' tracks the
	 * closest prior record and is returned if no match is found, which
	 * in binary tree terms means tracking the most recent right-branch
	 * taken.  If there is no prior record, &map->header is returned.
	 */
	last = &map->header;
	tmp = RB_ROOT(&map->rb_root);

	while (tmp) {
		if (address >= tmp->start) {
			if (address < tmp->end) {
				*entry = tmp;
				return(TRUE);
			}
			last = tmp;
			tmp = RB_RIGHT(tmp, rb_entry);
		} else {
			tmp = RB_LEFT(tmp, rb_entry);
		}
	}
	*entry = last;
	return (FALSE);
}
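
/*
 * Usage sketch (editor's example): the boolean result distinguishes a
 * containing entry from a predecessor:
 *
 *	vm_map_entry_t entry;
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		// addr lies inside [entry->start, entry->end)
 *	} else {
 *		// entry is the closest prior entry (or &map->header);
 *		// a new entry covering addr would be linked after it
 *	}
 */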
/*
 * Inserts the given whole VM object into the target map at the specified
 * address range.  The object's size should match that of the address range.
 *
 * The map must be exclusively locked.
 * The object must be held.
 * The caller must have reserved sufficient vm_map_entry structures.
 *
 * If object is non-NULL, ref count must be bumped by caller prior to
 * making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, int *countp, void *map_object, void *map_aux,
	      vm_ooffset_t offset, vm_offset_t start, vm_offset_t end,
	      vm_maptype_t maptype, vm_subsys_t id,
	      vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t new_entry;
	vm_map_entry_t prev_entry;
	vm_map_entry_t temp_entry;
	vm_eflags_t protoeflags;
	int must_drop = 0;
	vm_object_t object;

	if (maptype == VM_MAPTYPE_UKSMAP)
		object = NULL;
	else
		object = map_object;

	ASSERT_VM_MAP_LOCKED(map);
	if (object)
		ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Check that the start and end points are not bogus.
	 */
	if ((start < map->min_offset) || (end > map->max_offset) ||
	    (start >= end))
		return (KERN_INVALID_ADDRESS);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &temp_entry))
		return (KERN_NO_SPACE);
	prev_entry = temp_entry;

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	if ((prev_entry->next != &map->header) &&
	    (prev_entry->next->start < end))
		return (KERN_NO_SPACE);

	protoeflags = 0;

	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;

	if (cow & MAP_NOFAULT) {
		protoeflags |= MAP_ENTRY_NOFAULT;

		KASSERT(object == NULL,
			("vm_map_insert: paradoxical MAP_NOFAULT request"));
	}
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;
	if (cow & MAP_IS_STACK)
		protoeflags |= MAP_ENTRY_STACK;
	if (cow & MAP_IS_KSTACK)
		protoeflags |= MAP_ENTRY_KSTACK;

	lwkt_gettoken(&map->token);

	if (object) {
		/*
		 * When object is non-NULL, it could be shared with another
		 * process.  We have to set or clear OBJ_ONEMAPPING
		 * appropriately.
		 *
		 * NOTE: This flag is only applicable to DEFAULT and SWAP
		 *	 objects and will already be clear in other types
		 *	 of objects, so a shared object lock is ok for
		 *	 VNODE objects.
		 */
		if ((object->ref_count > 1) || (object->shadow_count != 0)) {
			vm_object_clear_flag(object, OBJ_ONEMAPPING);
		}
	}
	else if ((prev_entry != &map->header) &&
		 (prev_entry->eflags == protoeflags) &&
		 (prev_entry->end == start) &&
		 (prev_entry->wired_count == 0) &&
		 (prev_entry->id == id) &&
		 prev_entry->maptype == maptype &&
		 maptype == VM_MAPTYPE_NORMAL &&
		 ((prev_entry->object.vm_object == NULL) ||
		  vm_object_coalesce(prev_entry->object.vm_object,
				     OFF_TO_IDX(prev_entry->offset),
				     (vm_size_t)(prev_entry->end - prev_entry->start),
				     (vm_size_t)(end - prev_entry->end)))) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if ((prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
		    (prev_entry->protection == prot) &&
		    (prev_entry->max_protection == max)) {
			map->size += (end - prev_entry->end);
			prev_entry->end = end;
			vm_map_simplify_entry(map, prev_entry, countp);
			lwkt_reltoken(&map->token);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 *
		 * XXX if object is NULL should we set offset to 0 here ?
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
			 (prev_entry->end - prev_entry->start);
		if (object) {
			vm_object_hold(object);
			vm_object_chain_wait(object, 0);
			vm_object_reference_locked(object);
			must_drop = 1;
			map_object = object;
		}
	}

	/*
	 * NOTE: if conditionals fail, object can be NULL here.  This occurs
	 * in things like the buffer map where we manage kva but do not manage
	 * backing objects.
	 */

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map, countp);
	new_entry->start = start;
	new_entry->end = end;
	new_entry->id = id;

	new_entry->maptype = maptype;
	new_entry->eflags = protoeflags;
	new_entry->object.map_object = map_object;
	new_entry->aux.master_pde = 0;		/* in case size is different */
	new_entry->aux.map_aux = map_aux;
	new_entry->offset = offset;

	new_entry->inheritance = VM_INHERIT_DEFAULT;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, prev_entry, new_entry);
	map->size += new_entry->end - new_entry->start;

	/*
	 * Don't worry about updating freehint[] when inserting, allow
	 * addresses to be lower than the actual first free spot.
	 */
#if 0
	/*
	 * Temporarily removed to avoid MAP_STACK panic, due to
	 * MAP_STACK being a huge hack.  Will be added back in
	 * when MAP_STACK (and the user stack mapping) is fixed.
	 */
	/*
	 * It may be possible to simplify the entry
	 */
	vm_map_simplify_entry(map, new_entry, countp);
#endif

	/*
	 * Try to pre-populate the page table.  Mappings governed by virtual
	 * page tables cannot be prepopulated without a lot of work, so
	 * don't try.
	 */
	if ((cow & (MAP_PREFAULT|MAP_PREFAULT_PARTIAL)) &&
	    maptype != VM_MAPTYPE_VPAGETABLE &&
	    maptype != VM_MAPTYPE_UKSMAP) {
		int dorelock = 0;
		if (vm_map_relock_enable && (cow & MAP_PREFAULT_RELOCK)) {
			dorelock = 1;
			vm_object_lock_swap();
			vm_object_drop(object);
		}
		pmap_object_init_pt(map->pmap, start, prot,
				    object, OFF_TO_IDX(offset), end - start,
				    cow & MAP_PREFAULT_PARTIAL);
		if (dorelock) {
			vm_object_hold(object);
			vm_object_lock_swap();
		}
	}
	if (must_drop)
		vm_object_drop(object);

	lwkt_reltoken(&map->token);
	return (KERN_SUCCESS);
}
/*
 * Find sufficient space for `length' bytes in the given map, starting at
 * `start'.  Returns 0 on success, 1 on no space.
 *
 * This function will return an arbitrarily aligned pointer.  If no
 * particular alignment is required you should pass align as 1.  Note that
 * the map may return PAGE_SIZE aligned pointers if all the lengths used in
 * the map are a multiple of PAGE_SIZE, even if you pass a smaller align
 * argument.
 *
 * 'align' should be a power of 2 but is not required to be.
 *
 * The map must be exclusively locked.
 * No other requirements.
 */
int
vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
		 vm_size_t align, int flags, vm_offset_t *addr)
{
	vm_map_entry_t entry, next;
	vm_map_entry_t tmp;
	vm_offset_t hole_start;
	vm_offset_t end;
	vm_offset_t align_mask;

	if (start < map->min_offset)
		start = map->min_offset;
	if (start > map->max_offset)
		return (1);

	/*
	 * If the alignment is not a power of 2 we will have to use
	 * a mod/division, set align_mask to a special value.
	 */
	if ((align | (align - 1)) + 1 != (align << 1))
		align_mask = (vm_offset_t)-1;
	else
		align_mask = align - 1;

	/*
	 * Use freehint to adjust the start point, hopefully reducing
	 * the iteration to O(1).
	 */
	hole_start = vm_map_freehint_find(map, length, align);
	if (start < hole_start)
		start = hole_start;
	if (vm_map_lookup_entry(map, start, &tmp))
		start = tmp->end;
	entry = tmp;

	/*
	 * Look through the rest of the map, trying to fit a new region in the
	 * gap between existing regions, or after the very last region.
	 */
	for (;; start = (entry = next)->end) {
		/*
		 * Adjust the proposed start by the requested alignment,
		 * be sure that we didn't wrap the address.
		 */
		if (align_mask == (vm_offset_t)-1)
			end = roundup(start, align);
		else
			end = (start + align_mask) & ~align_mask;
		if (end < start)
			return (1);
		start = end;

		/*
		 * Find the end of the proposed new region.  Be sure we didn't
		 * go beyond the end of the map, or wrap around the address.
		 * Then check to see if this is the last entry or if the
		 * proposed end fits in the gap between this and the next
		 * entry.
		 */
		end = start + length;
		if (end > map->max_offset || end < start)
			return (1);
		next = entry->next;

		/*
		 * If the next entry's start address is beyond the desired
		 * end address we may have found a good entry.
		 *
		 * If the next entry is a stack mapping we do not map into
		 * the stack's reserved space.
		 *
		 * XXX continue to allow mapping into the stack's reserved
		 * space if doing a MAP_STACK mapping inside a MAP_STACK
		 * mapping, for backwards compatibility.  But the caller
		 * really should use MAP_STACK | MAP_TRYFIXED if they
		 * want to do that.
		 */
		if (next == &map->header)
			break;
		if (next->start >= end) {
			if ((next->eflags & MAP_ENTRY_STACK) == 0)
				break;
			if (flags & MAP_STACK)
				break;
			if (next->start - next->aux.avail_ssize >= end)
				break;
		}
	}

	/*
	 * Update the freehint
	 */
	vm_map_freehint_update(map, start, length, align);

	/*
	 * Grow the kernel_map if necessary.  pmap_growkernel() will panic
	 * if it fails.  The kernel_map is locked and nothing can steal
	 * our address space if pmap_growkernel() blocks.
	 *
	 * NOTE: This may be unconditionally called for kldload areas on
	 *	 x86_64 because these do not bump kernel_vm_end (which would
	 *	 fill 128G worth of page tables!).  Therefore we must not
	 *	 retry.
	 */
	if (map == &kernel_map) {
		vm_offset_t kstop;

		kstop = round_page(start + length);
		if (kstop > kernel_vm_end)
			pmap_growkernel(start, kstop);
	}
	*addr = start;
	return (0);
}
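
/*
 * Editor's note on the power-of-2 test above: for a power of 2 such as
 * align == 16, (align | (align - 1)) + 1 == (16 | 15) + 1 == 32, which
 * equals align << 1, so align_mask becomes 15 and alignment is a cheap
 * AND.  For a non-power-of-2 such as align == 24, (24 | 23) + 1 == 32
 * while 24 << 1 == 48, so the special align_mask of -1 selects the
 * roundup() (division) path instead.
 */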
/*
 * vm_map_find finds an unallocated region in the target address map with
 * the given length and allocates it.  The search is defined to be first-fit
 * from the specified address; the region found is returned in the same
 * parameter.
 *
 * If object is non-NULL, ref count must be bumped by caller
 * prior to making call to account for the new entry.
 *
 * No requirements.  This function will lock the map temporarily.
 */
int
vm_map_find(vm_map_t map, void *map_object, void *map_aux,
	    vm_ooffset_t offset, vm_offset_t *addr,
	    vm_size_t length, vm_size_t align, boolean_t fitit,
	    vm_maptype_t maptype, vm_subsys_t id,
	    vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_offset_t start;
	vm_object_t object;
	int result;
	int count;

	if (maptype == VM_MAPTYPE_UKSMAP)
		object = NULL;
	else
		object = map_object;

	start = *addr;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	if (object)
		vm_object_hold_shared(object);
	if (fitit) {
		if (vm_map_findspace(map, start, length, align, 0, addr)) {
			if (object)
				vm_object_drop(object);
			vm_map_unlock(map);
			vm_map_entry_release(count);
			return (KERN_NO_SPACE);
		}
		start = *addr;
	}
	result = vm_map_insert(map, &count, map_object, map_aux,
			       offset, start, start + length,
			       maptype, id, prot, max, cow);
	if (object)
		vm_object_drop(object);
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (result);
}
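
/*
 * Usage sketch (editor's example with hypothetical values; the
 * VM_SUBSYS_UNKNOWN id is an assumption): mapping an anonymous region
 * with a first-fit search.  Per the comment above, 'obj' must already
 * have had its ref count bumped by the caller.
 *
 *	vm_offset_t addr = vm_map_min(map);
 *	int rv;
 *
 *	rv = vm_map_find(map, obj, NULL, 0, &addr, size, PAGE_SIZE,
 *			 TRUE, VM_MAPTYPE_NORMAL, VM_SUBSYS_UNKNOWN,
 *			 VM_PROT_ALL, VM_PROT_ALL, 0);
 *	if (rv != KERN_SUCCESS)
 *		;	// no space or bad range; drop the object ref
 */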
/*
 * Simplify the given map entry by merging with either neighbor.  This
 * routine also has the ability to merge with both neighbors.
 *
 * This routine guarantees that the passed entry remains valid (though
 * possibly extended).  When merging, this routine may delete one or
 * both neighbors.  No action is taken on entries which have their
 * in-transition flag set.
 *
 * The map must be exclusively locked.
 */
void
vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
{
	vm_map_entry_t next, prev;
	vm_size_t prevsize, esize;

	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
		++mycpu->gd_cnt.v_intrans_coll;
		return;
	}

	if (entry->maptype == VM_MAPTYPE_SUBMAP)
		return;
	if (entry->maptype == VM_MAPTYPE_UKSMAP)
		return;

	prev = entry->prev;
	if (prev != &map->header) {
		prevsize = prev->end - prev->start;
		if ( (prev->end == entry->start) &&
		     (prev->maptype == entry->maptype) &&
		     (prev->object.vm_object == entry->object.vm_object) &&
		     (!prev->object.vm_object ||
			(prev->offset + prevsize == entry->offset)) &&
		     (prev->eflags == entry->eflags) &&
		     (prev->protection == entry->protection) &&
		     (prev->max_protection == entry->max_protection) &&
		     (prev->inheritance == entry->inheritance) &&
		     (prev->id == entry->id) &&
		     (prev->wired_count == entry->wired_count)) {
			vm_map_entry_unlink(map, prev);
			entry->start = prev->start;
			entry->offset = prev->offset;
			if (prev->object.vm_object)
				vm_object_deallocate(prev->object.vm_object);
			vm_map_entry_dispose(map, prev, countp);
		}
	}

	next = entry->next;
	if (next != &map->header) {
		esize = entry->end - entry->start;
		if ((entry->end == next->start) &&
		    (next->maptype == entry->maptype) &&
		    (next->object.vm_object == entry->object.vm_object) &&
		     (!entry->object.vm_object ||
			(entry->offset + esize == next->offset)) &&
		    (next->eflags == entry->eflags) &&
		    (next->protection == entry->protection) &&
		    (next->max_protection == entry->max_protection) &&
		    (next->inheritance == entry->inheritance) &&
		    (next->id == entry->id) &&
		    (next->wired_count == entry->wired_count)) {
			vm_map_entry_unlink(map, next);
			entry->end = next->end;
			if (next->object.vm_object)
				vm_object_deallocate(next->object.vm_object);
			vm_map_entry_dispose(map, next, countp);
		}
	}
}
/*
 * Asserts that the given entry begins at or after the specified address.
 * If necessary, it splits the entry into two.
 */
#define vm_map_clip_start(map, entry, startaddr, countp)		\
do {									\
	if (startaddr > entry->start)					\
		_vm_map_clip_start(map, entry, startaddr, countp);	\
} while(0)

/*
 * This routine is called only when it is known that the entry must be split.
 *
 * The map must be exclusively locked.
 */
static void
_vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start,
		   int *countp)
{
	vm_map_entry_t new_entry;

	/*
	 * Split off the front portion -- note that we must insert the new
	 * entry BEFORE this one, so that this entry has the specified
	 * starting address.
	 */

	vm_map_simplify_entry(map, entry, countp);

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_map_entry_allocate_object(entry);
	}

	new_entry = vm_map_entry_create(map, countp);
	*new_entry = *entry;

	new_entry->end = start;
	entry->offset += (start - entry->start);
	entry->start = start;

	vm_map_entry_link(map, entry->prev, new_entry);

	switch(entry->maptype) {
	case VM_MAPTYPE_NORMAL:
	case VM_MAPTYPE_VPAGETABLE:
		if (new_entry->object.vm_object) {
			vm_object_hold(new_entry->object.vm_object);
			vm_object_chain_wait(new_entry->object.vm_object, 0);
			vm_object_reference_locked(new_entry->object.vm_object);
			vm_object_drop(new_entry->object.vm_object);
		}
		break;
	default:
		break;
	}
}
/*
 * Asserts that the given entry ends at or before the specified address.
 * If necessary, it splits the entry into two.
 *
 * The map must be exclusively locked.
 */
#define vm_map_clip_end(map, entry, endaddr, countp)		\
do {								\
	if (endaddr < entry->end)				\
		_vm_map_clip_end(map, entry, endaddr, countp);	\
} while(0)

/*
 * This routine is called only when it is known that the entry must be split.
 *
 * The map must be exclusively locked.
 */
static void
_vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end,
		 int *countp)
{
	vm_map_entry_t new_entry;

	/*
	 * If there is no object backing this entry, we might as well create
	 * one now.  If we defer it, an object can get created after the map
	 * is clipped, and individual objects will be created for the split-up
	 * map.  This is a bit of a hack, but is also about the best place to
	 * put this improvement.
	 */
	if (entry->object.vm_object == NULL && !map->system_map) {
		vm_map_entry_allocate_object(entry);
	}

	/*
	 * Create a new entry and insert it AFTER the specified entry
	 */
	new_entry = vm_map_entry_create(map, countp);
	*new_entry = *entry;

	new_entry->start = entry->end = end;
	new_entry->offset += (end - entry->start);

	vm_map_entry_link(map, entry, new_entry);

	switch(entry->maptype) {
	case VM_MAPTYPE_NORMAL:
	case VM_MAPTYPE_VPAGETABLE:
		if (new_entry->object.vm_object) {
			vm_object_hold(new_entry->object.vm_object);
			vm_object_chain_wait(new_entry->object.vm_object, 0);
			vm_object_reference_locked(new_entry->object.vm_object);
			vm_object_drop(new_entry->object.vm_object);
		}
		break;
	default:
		break;
	}
}
/*
 * Asserts that the starting and ending region addresses fall within the
 * valid range for the map.
 */
#define VM_MAP_RANGE_CHECK(map, start, end)	\
{						\
	if (start < vm_map_min(map))		\
		start = vm_map_min(map);	\
	if (end > vm_map_max(map))		\
		end = vm_map_max(map);		\
	if (start > end)			\
		start = end;			\
}
/*
 * Used to block when an in-transition collision occurs.  The map
 * is unlocked for the sleep and relocked before the return.
 */
void
vm_map_transition_wait(vm_map_t map)
{
	tsleep_interlock(map, 0);
	vm_map_unlock(map);
	tsleep(map, PINTERLOCKED, "vment", 0);
	vm_map_lock(map);
}
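
/*
 * Usage sketch (editor's example): the in-transition collision pattern,
 * as used by vm_map_clip_range() below.  The map is unlocked while we
 * sleep, so the lookup must be restarted afterwards:
 *
 *	again:
 *	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 *		entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
 *		vm_map_transition_wait(map);
 *		goto again;	// entry may have been clipped or freed
 *	}
 */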
/*
 * When we do blocking operations with the map lock held it is
 * possible that a clip might have occurred on our in-transit entry,
 * requiring an adjustment to the entry in our loop.  These macros
 * help the pageable and clip_range code deal with the case.  The
 * conditional costs virtually nothing if no clipping has occurred.
 */

#define CLIP_CHECK_BACK(entry, save_start)			\
do {								\
	while (entry->start != save_start) {			\
		entry = entry->prev;				\
		KASSERT(entry != &map->header, ("bad entry clip")); \
	}							\
} while(0)

#define CLIP_CHECK_FWD(entry, save_end)				\
do {								\
	while (entry->end != save_end) {			\
		entry = entry->next;				\
		KASSERT(entry != &map->header, ("bad entry clip")); \
	}							\
} while(0)
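
/*
 * Usage sketch (editor's example): after a blocking operation, re-sync
 * the traversal pointer with whatever clips occurred while asleep:
 *
 *	vm_offset_t save_start = entry->start;
 *	vm_offset_t save_end = entry->end;
 *
 *	// ... vm_map_transition_wait(map) or other blocking call ...
 *
 *	CLIP_CHECK_BACK(entry, save_start);	// walk back to our start
 *	CLIP_CHECK_FWD(entry, save_end);	// or forward to our end
 */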
/*
 * Clip the specified range and return the base entry.  The
 * range may cover several entries starting at the returned base
 * and the first and last entry in the covering sequence will be
 * properly clipped to the requested start and end address.
 *
 * If no holes are allowed you should pass the MAP_CLIP_NO_HOLES
 * flag.
 *
 * The MAP_ENTRY_IN_TRANSITION flag will be set for the entries
 * covered by the requested range.
 *
 * The map must be exclusively locked on entry and will remain locked
 * on return.  If no range exists or the range contains holes and you
 * specified that no holes were allowed, NULL will be returned.  This
 * routine may temporarily unlock the map in order to avoid a deadlock
 * when sleeping.
 */
static
vm_map_entry_t
vm_map_clip_range(vm_map_t map, vm_offset_t start, vm_offset_t end,
		  int *countp, int flags)
{
	vm_map_entry_t start_entry;
	vm_map_entry_t entry;

	/*
	 * Locate the entry and effect initial clipping.  The in-transition
	 * case does not occur very often so do not try to optimize it.
	 */
again:
	if (vm_map_lookup_entry(map, start, &start_entry) == FALSE)
		return (NULL);
	entry = start_entry;
	if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
		entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
		++mycpu->gd_cnt.v_intrans_coll;
		++mycpu->gd_cnt.v_intrans_wait;
		vm_map_transition_wait(map);

		/*
		 * entry and/or start_entry may have been clipped while
		 * we slept, or may have gone away entirely.  We have
		 * to restart from the lookup.
		 */
		goto again;
	}

	/*
	 * Since we hold an exclusive map lock we do not have to restart
	 * after clipping, even though clipping may block in zalloc.
	 */
	vm_map_clip_start(map, entry, start, countp);
	vm_map_clip_end(map, entry, end, countp);
	entry->eflags |= MAP_ENTRY_IN_TRANSITION;

	/*
	 * Scan entries covered by the range.  When working on the next
	 * entry a restart need only re-loop on the current entry which
	 * we have already locked, since 'next' may have changed.  Also,
	 * even though entry is safe, it may have been clipped so we
	 * have to iterate forwards through the clip after sleeping.
	 */
	while (entry->next != &map->header && entry->next->start < end) {
		vm_map_entry_t next = entry->next;

		if (flags & MAP_CLIP_NO_HOLES) {
			if (next->start > entry->end) {
				vm_map_unclip_range(map, start_entry,
						    start, entry->end,
						    countp, flags);
				return(NULL);
			}
		}

		if (next->eflags & MAP_ENTRY_IN_TRANSITION) {
			vm_offset_t save_end = entry->end;
			next->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			++mycpu->gd_cnt.v_intrans_coll;
			++mycpu->gd_cnt.v_intrans_wait;
			vm_map_transition_wait(map);

			/*
			 * clips might have occurred while we blocked.
			 */
			CLIP_CHECK_FWD(entry, save_end);
			CLIP_CHECK_BACK(start_entry, start);
			continue;
		}

		/*
		 * No restart necessary even though clip_end may block, we
		 * are holding the map lock.
		 */
		vm_map_clip_end(map, next, end, countp);
		next->eflags |= MAP_ENTRY_IN_TRANSITION;
		entry = next;
	}
	if (flags & MAP_CLIP_NO_HOLES) {
		if (entry->end != end) {
			vm_map_unclip_range(map, start_entry,
					    start, entry->end, countp, flags);
			return(NULL);
		}
	}
	return(start_entry);
}
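
/*
 * Pairing sketch (editor's example): vm_map_clip_range() and
 * vm_map_unclip_range() bracket an operation that needs a stable,
 * hole-checked run of entries.  Per the comment below, pass the same
 * flags and range to both halves:
 *
 *	entry = vm_map_clip_range(map, start, end, &count,
 *				  MAP_CLIP_NO_HOLES);
 *	if (entry) {
 *		// ... operate on [start, end); entries are flagged
 *		// MAP_ENTRY_IN_TRANSITION ...
 *		vm_map_unclip_range(map, entry, start, end, &count,
 *				    MAP_CLIP_NO_HOLES);
 *	}
 */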
/*
 * Undo the effect of vm_map_clip_range().  You should pass the same
 * flags and the same range that you passed to vm_map_clip_range().
 * This code will clear the in-transition flag on the entries and
 * wake up anyone waiting.  This code will also simplify the sequence
 * and attempt to merge it with entries before and after the sequence.
 *
 * The map must be locked on entry and will remain locked on return.
 *
 * Note that you should also pass the start_entry returned by
 * vm_map_clip_range().  However, if you block between the two calls
 * with the map unlocked please be aware that the start_entry may
 * have been clipped and you may need to scan it backwards to find
 * the entry corresponding with the original start address.  You are
 * responsible for this, vm_map_unclip_range() expects the correct
 * start_entry to be passed to it and will KASSERT otherwise.
 */
static
void
vm_map_unclip_range(vm_map_t map, vm_map_entry_t start_entry,
		    vm_offset_t start, vm_offset_t end,
		    int *countp, int flags)
{
	vm_map_entry_t entry;

	entry = start_entry;

	KASSERT(entry->start == start, ("unclip_range: illegal base entry"));
	while (entry != &map->header && entry->start < end) {
		KASSERT(entry->eflags & MAP_ENTRY_IN_TRANSITION,
			("in-transition flag not set during unclip on: %p",
			 entry));
		KASSERT(entry->end <= end,
			("unclip_range: tail wasn't clipped"));
		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
			wakeup(map);
		}
		entry = entry->next;
	}

	/*
	 * Simplification does not block so there is no restart case.
	 */
	entry = start_entry;
	while (entry != &map->header && entry->start < end) {
		vm_map_simplify_entry(map, entry, countp);
		entry = entry->next;
	}
}
/*
 * Mark the given range as handled by a subordinate map.
 *
 * This range must have been created with vm_map_find(), and no other
 * operations may have been performed on this range prior to calling
 * vm_map_submap().
 *
 * Submappings cannot be removed.
 *
 * No requirements.
 */
int
vm_map_submap(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_t submap)
{
	vm_map_entry_t entry;
	int result = KERN_INVALID_ARGUMENT;
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start, &count);
	} else {
		entry = entry->next;
	}

	vm_map_clip_end(map, entry, end, &count);

	if ((entry->start == start) && (entry->end == end) &&
	    ((entry->eflags & MAP_ENTRY_COW) == 0) &&
	    (entry->object.vm_object == NULL)) {
		entry->object.sub_map = submap;
		entry->maptype = VM_MAPTYPE_SUBMAP;
		result = KERN_SUCCESS;
	}
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (result);
}
/*
 * Sets the protection of the specified address region in the target map.
 * If "set_max" is specified, the maximum protection is to be set;
 * otherwise, only the current protection is affected.
 *
 * The protection is not applicable to submaps, but is applicable to normal
 * maps and maps governed by virtual page tables.  For example, when operating
 * on a virtual page table our protection basically controls how COW occurs
 * on the backing object, whereas the virtual page table abstraction itself
 * is an abstraction for userland.
 *
 * No requirements.
 */
int
vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       vm_prot_t new_prot, boolean_t set_max)
{
	vm_map_entry_t current;
	vm_map_entry_t entry;
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);

	VM_MAP_RANGE_CHECK(map, start, end);

	if (vm_map_lookup_entry(map, start, &entry)) {
		vm_map_clip_start(map, entry, start, &count);
	} else {
		entry = entry->next;
	}

	/*
	 * Make a first pass to check for protection violations.
	 */
	current = entry;
	while ((current != &map->header) && (current->start < end)) {
		if (current->maptype == VM_MAPTYPE_SUBMAP) {
			vm_map_unlock(map);
			vm_map_entry_release(count);
			return (KERN_INVALID_ARGUMENT);
		}
		if ((new_prot & current->max_protection) != new_prot) {
			vm_map_unlock(map);
			vm_map_entry_release(count);
			return (KERN_PROTECTION_FAILURE);
		}
		current = current->next;
	}

	/*
	 * Go back and fix up protections.  [Note that clipping is not
	 * necessary the second time.]
	 */
	current = entry;

	while ((current != &map->header) && (current->start < end)) {
		vm_prot_t old_prot;

		vm_map_clip_end(map, current, end, &count);

		old_prot = current->protection;
		if (set_max) {
			current->max_protection = new_prot;
			current->protection = new_prot & old_prot;
		} else {
			current->protection = new_prot;
		}

		/*
		 * Update physical map if necessary.  Worry about copy-on-write
		 * here -- CHECK THIS XXX
		 */
		if (current->protection != old_prot) {
#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
			 VM_PROT_ALL)

			pmap_protect(map->pmap, current->start,
				     current->end,
				     current->protection & MASK(current));
#undef	MASK
		}

		vm_map_simplify_entry(map, current, &count);

		current = current->next;
	}
	vm_map_unlock(map);
	vm_map_entry_release(count);
	return (KERN_SUCCESS);
}
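
/*
 * Usage sketch (editor's example): revoking write access on a range
 * while leaving each entry's maximum protection unchanged:
 *
 *	rv = vm_map_protect(map, start, end,
 *			    VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
 *	if (rv == KERN_PROTECTION_FAILURE)
 *		;	// new_prot exceeded some entry's max_protection
 */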
/*
 * This routine traverses a process's map handling the madvise
 * system call.  Advisories are classified as either those affecting
 * the vm_map_entry structure, or those affecting the underlying
 * objects.
 *
 * The <value> argument is used for extended madvise calls.
 *
 * No requirements.
 */
int
vm_map_madvise(vm_map_t map, vm_offset_t start, vm_offset_t end,
	       int behav, off_t value)
{
	vm_map_entry_t current, entry;
	int modify_map = 0;
	int error = 0;
	int count;

	/*
	 * Some madvise calls directly modify the vm_map_entry, in which case
	 * we need to use an exclusive lock on the map and we need to perform
	 * various clipping operations.  Otherwise we only need a read-lock
	 * on the map.
	 */
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	switch(behav) {
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_NOSYNC:
	case MADV_AUTOSYNC:
	case MADV_NOCORE:
	case MADV_CORE:
	case MADV_SETMAP:
		modify_map = 1;
		vm_map_lock(map);
		break;
	case MADV_INVAL:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		vm_map_lock_read(map);
		break;
	default:
		vm_map_entry_release(count);
		return (EINVAL);
	}
2012 * Locate starting entry and clip if necessary.
2015 VM_MAP_RANGE_CHECK(map, start, end);
2017 if (vm_map_lookup_entry(map, start, &entry)) {
2018 if (modify_map)
2019 vm_map_clip_start(map, entry, start, &count);
2020 } else {
2021 entry = entry->next;
2024 if (modify_map) {
2026 * madvise behaviors that are implemented in the vm_map_entry.
2028 * We clip the vm_map_entry so that behavioral changes are
2029 * limited to the specified address range.
2031 for (current = entry;
2032 (current != &map->header) && (current->start < end);
2033 current = current->next
2035 if (current->maptype == VM_MAPTYPE_SUBMAP)
2036 continue;
2038 vm_map_clip_end(map, current, end, &count);
2040 switch (behav) {
2041 case MADV_NORMAL:
2042 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2043 break;
2044 case MADV_SEQUENTIAL:
2045 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2046 break;
2047 case MADV_RANDOM:
2048 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2049 break;
2050 case MADV_NOSYNC:
2051 current->eflags |= MAP_ENTRY_NOSYNC;
2052 break;
2053 case MADV_AUTOSYNC:
2054 current->eflags &= ~MAP_ENTRY_NOSYNC;
2055 break;
2056 case MADV_NOCORE:
2057 current->eflags |= MAP_ENTRY_NOCOREDUMP;
2058 break;
2059 case MADV_CORE:
2060 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2061 break;
2062 case MADV_SETMAP:
2064 * Set the page directory page for a map
2065 * governed by a virtual page table. Mark
2066 * the entry as being governed by a virtual
2067 * page table if it is not.
2069 * XXX the page directory page is stored
2070 * in the avail_ssize field of the map_entry.
2072 * XXX the map simplification code does not
2073 * compare this field so weird things may
2074 * happen if you do not apply this function
2075 * to the entire mapping governed by the
2076 * virtual page table.
2078 if (current->maptype != VM_MAPTYPE_VPAGETABLE) {
2079 error = EINVAL;
2080 break;
2082 current->aux.master_pde = value;
2083 pmap_remove(map->pmap,
2084 current->start, current->end);
2085 break;
2086 case MADV_INVAL:
2088 * Invalidate the related pmap entries, used
2089 * to flush portions of the real kernel's
2090 * pmap when the caller has removed or
2091 * modified existing mappings in a virtual
2092 * page table.
2094 * (exclusive locked map version does not
2095 * need the range interlock).
2097 pmap_remove(map->pmap,
2098 current->start, current->end);
2099 break;
2100 default:
2101 error = EINVAL;
2102 break;
2104 vm_map_simplify_entry(map, current, &count);
2106 vm_map_unlock(map);
2107 } else {
2108 vm_pindex_t pindex;
2109 vm_pindex_t delta;
2112 * madvise behaviors that are implemented in the underlying
2113 * vm_object.
2115 * Since we don't clip the vm_map_entry, we have to clip
2116 * the vm_object pindex and count.
2118 * NOTE! These functions are only supported on normal maps,
2119 * except MADV_INVAL which is also supported on
2120 * virtual page tables.
2122 for (current = entry;
2123 (current != &map->header) && (current->start < end);
2124 current = current->next
2126 vm_offset_t useStart;
2128 if (current->maptype != VM_MAPTYPE_NORMAL &&
2129 (current->maptype != VM_MAPTYPE_VPAGETABLE ||
2130 behav != MADV_INVAL)) {
2131 continue;
2134 pindex = OFF_TO_IDX(current->offset);
2135 delta = atop(current->end - current->start);
2136 useStart = current->start;
2138 if (current->start < start) {
2139 pindex += atop(start - current->start);
2140 delta -= atop(start - current->start);
2141 useStart = start;
2143 if (current->end > end)
2144 delta -= atop(current->end - end);
2146 if ((vm_spindex_t)delta <= 0)
2147 continue;
2149 if (behav == MADV_INVAL) {
2151 * Invalidate the related pmap entries, used
2152 * to flush portions of the real kernel's
2153 * pmap when the caller has removed or
2154 * modified existing mappings in a virtual
2155 * page table.
2157 * (shared locked map version needs the
2158 * interlock, see vm_fault()).
2160 struct vm_map_ilock ilock;
2162 KASSERT(useStart >= VM_MIN_USER_ADDRESS &&
2163 useStart + ptoa(delta) <=
2164 VM_MAX_USER_ADDRESS,
2165 ("Bad range %016jx-%016jx (%016jx)",
2166 useStart, useStart + ptoa(delta),
2167 delta));
2168 vm_map_interlock(map, &ilock,
2169 useStart,
2170 useStart + ptoa(delta));
2171 pmap_remove(map->pmap,
2172 useStart,
2173 useStart + ptoa(delta));
2174 vm_map_deinterlock(map, &ilock);
2175 } else {
2176 vm_object_madvise(current->object.vm_object,
2177 pindex, delta, behav);
2181 * Try to populate the page table. Mappings governed
2182 * by virtual page tables cannot be pre-populated
2183 * without a lot of work so don't try.
2185 if (behav == MADV_WILLNEED &&
2186 current->maptype != VM_MAPTYPE_VPAGETABLE) {
2187 pmap_object_init_pt(
2188 map->pmap,
2189 useStart,
2190 current->protection,
2191 current->object.vm_object,
2192 pindex,
2193 (delta << PAGE_SHIFT),
2194 MAP_PREFAULT_MADVISE
2198 vm_map_unlock_read(map);
2200 vm_map_entry_release(count);
2201 return(error);
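/*
 * Illustrative sketch (editorial): the "behav" classification above means
 * the locking cost differs per advisory. Entry-modifying advisories take
 * the exclusive map lock, object-level advisories only the shared lock:
 *
 *	vm_map_madvise(map, start, end, MADV_NOSYNC, 0);    exclusive lock
 *	vm_map_madvise(map, start, end, MADV_WILLNEED, 0);  shared lock
 *
 * The "value" argument matters only to extended advisories such as
 * MADV_SETMAP, where it supplies the page directory page for a mapping
 * governed by a virtual page table.
 */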
2206 * Sets the inheritance of the specified address range in the target map.
2207 * Inheritance affects how the map will be shared with child maps at the
2208 * time of vm_map_fork.
2211 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2212 vm_inherit_t new_inheritance)
2214 vm_map_entry_t entry;
2215 vm_map_entry_t temp_entry;
2216 int count;
2218 switch (new_inheritance) {
2219 case VM_INHERIT_NONE:
2220 case VM_INHERIT_COPY:
2221 case VM_INHERIT_SHARE:
2222 break;
2223 default:
2224 return (KERN_INVALID_ARGUMENT);
2227 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2228 vm_map_lock(map);
2230 VM_MAP_RANGE_CHECK(map, start, end);
2232 if (vm_map_lookup_entry(map, start, &temp_entry)) {
2233 entry = temp_entry;
2234 vm_map_clip_start(map, entry, start, &count);
2235 } else
2236 entry = temp_entry->next;
2238 while ((entry != &map->header) && (entry->start < end)) {
2239 vm_map_clip_end(map, entry, end, &count);
2241 entry->inheritance = new_inheritance;
2243 vm_map_simplify_entry(map, entry, &count);
2245 entry = entry->next;
2247 vm_map_unlock(map);
2248 vm_map_entry_release(count);
2249 return (KERN_SUCCESS);
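/*
 * Illustrative sketch (editorial): marking a region so a child created by
 * vmspace_fork() shares the pages instead of receiving a COW copy. "p",
 * "shm_base" and "shm_size" are hypothetical:
 *
 *	vm_map_inherit(&p->p_vmspace->vm_map, shm_base,
 *		       shm_base + shm_size, VM_INHERIT_SHARE);
 */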
2253 * Implement the semantics of mlock
2256 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t real_end,
2257 boolean_t new_pageable)
2259 vm_map_entry_t entry;
2260 vm_map_entry_t start_entry;
2261 vm_offset_t end;
2262 int rv = KERN_SUCCESS;
2263 int count;
2265 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2266 vm_map_lock(map);
2267 VM_MAP_RANGE_CHECK(map, start, real_end);
2268 end = real_end;
2270 start_entry = vm_map_clip_range(map, start, end, &count,
2271 MAP_CLIP_NO_HOLES);
2272 if (start_entry == NULL) {
2273 vm_map_unlock(map);
2274 vm_map_entry_release(count);
2275 return (KERN_INVALID_ADDRESS);
2278 if (new_pageable == 0) {
2279 entry = start_entry;
2280 while ((entry != &map->header) && (entry->start < end)) {
2281 vm_offset_t save_start;
2282 vm_offset_t save_end;
2285 * Already user wired or hard wired (trivial cases)
2287 if (entry->eflags & MAP_ENTRY_USER_WIRED) {
2288 entry = entry->next;
2289 continue;
2291 if (entry->wired_count != 0) {
2292 entry->wired_count++;
2293 entry->eflags |= MAP_ENTRY_USER_WIRED;
2294 entry = entry->next;
2295 continue;
2299 * A new wiring requires instantiation of appropriate
2300 * management structures and the faulting in of the
2301 * page.
2303 if (entry->maptype == VM_MAPTYPE_NORMAL ||
2304 entry->maptype == VM_MAPTYPE_VPAGETABLE) {
2305 int copyflag = entry->eflags &
2306 MAP_ENTRY_NEEDS_COPY;
2307 if (copyflag && ((entry->protection &
2308 VM_PROT_WRITE) != 0)) {
2309 vm_map_entry_shadow(entry, 0);
2310 } else if (entry->object.vm_object == NULL &&
2311 !map->system_map) {
2312 vm_map_entry_allocate_object(entry);
2315 entry->wired_count++;
2316 entry->eflags |= MAP_ENTRY_USER_WIRED;
2319 * Now fault in the area. Note that vm_fault_wire()
2320 * may release the map lock temporarily, it will be
2321 * relocked on return. The in-transition
2322 * flag protects the entries.
2324 save_start = entry->start;
2325 save_end = entry->end;
2326 rv = vm_fault_wire(map, entry, TRUE, 0);
2327 if (rv) {
2328 CLIP_CHECK_BACK(entry, save_start);
2329 for (;;) {
2330 KASSERT(entry->wired_count == 1, ("bad wired_count on entry"));
2331 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2332 entry->wired_count = 0;
2333 if (entry->end == save_end)
2334 break;
2335 entry = entry->next;
2336 KASSERT(entry != &map->header, ("bad entry clip during backout"));
2338 end = save_start; /* unwire the rest */
2339 break;
2342 * note that even though the entry might have been
2343 * clipped, the USER_WIRED flag we set prevents
2344 * duplication so we do not have to do a
2345 * clip check.
2347 entry = entry->next;
2351 * If we failed fall through to the unwiring section to
2352 * unwire what we had wired so far. 'end' has already
2353 * been adjusted.
2355 if (rv)
2356 new_pageable = 1;
2359 * start_entry might have been clipped if we unlocked the
2360 * map and blocked. No matter how clipped it has gotten
2361 * there should be a fragment that is on our start boundary.
2363 CLIP_CHECK_BACK(start_entry, start);
2367 * Deal with the unwiring case.
2369 if (new_pageable) {
2371 * This is the unwiring case. We must first ensure that the
2372 * range to be unwired is really wired down. We know there
2373 * are no holes.
2375 entry = start_entry;
2376 while ((entry != &map->header) && (entry->start < end)) {
2377 if ((entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
2378 rv = KERN_INVALID_ARGUMENT;
2379 goto done;
2381 KASSERT(entry->wired_count != 0, ("wired count was 0 with USER_WIRED set! %p", entry));
2382 entry = entry->next;
2386 * Now decrement the wiring count for each region. If a region
2387 * becomes completely unwired, unwire its physical pages and
2388 * mappings.
2391 * The entries are processed in a loop, asserting that each one
2392 * is still user-wired with a non-zero wired count. Note that the
2393 * loop variable must be reset to start_entry here; reusing the
2394 * "entry" value left over from the validation pass above would
2395 * skip the unwiring and leak wired pages.
2400 entry = start_entry;
2401 while ((entry != &map->header) && (entry->start < end)) {
2402 KASSERT(entry->eflags & MAP_ENTRY_USER_WIRED,
2403 ("expected USER_WIRED on entry %p", entry));
2404 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2405 entry->wired_count--;
2406 if (entry->wired_count == 0)
2407 vm_fault_unwire(map, entry);
2408 entry = entry->next;
2411 done:
2412 vm_map_unclip_range(map, start_entry, start, real_end, &count,
2413 MAP_CLIP_NO_HOLES);
2414 map->timestamp++;
2415 vm_map_unlock(map);
2416 vm_map_entry_release(count);
2417 return (rv);
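/*
 * Illustrative sketch (editorial): despite its name this function
 * implements both directions of mlock(2). A sys_mlock()-style caller
 * would wire with new_pageable == FALSE and unwire with TRUE, with
 * "addr" and "size" page-aligned and rv mapped back to an errno:
 *
 *	rv = vm_map_unwire(map, addr, addr + size, FALSE);	wire
 *	rv = vm_map_unwire(map, addr, addr + size, TRUE);	unwire
 */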
2421 * Sets the pageability of the specified address range in the target map.
2422 * Regions specified as not pageable require locked-down physical
2423 * memory and physical page maps.
2425 * The map must not be locked, but a reference must remain to the map
2426 * throughout the call.
2428 * This function may be called via the zalloc path and must properly
2429 * reserve map entries for kernel_map.
2431 * No requirements.
2434 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t real_end, int kmflags)
2436 vm_map_entry_t entry;
2437 vm_map_entry_t start_entry;
2438 vm_offset_t end;
2439 int rv = KERN_SUCCESS;
2440 int count;
2442 if (kmflags & KM_KRESERVE)
2443 count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
2444 else
2445 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
2446 vm_map_lock(map);
2447 VM_MAP_RANGE_CHECK(map, start, real_end);
2448 end = real_end;
2450 start_entry = vm_map_clip_range(map, start, end, &count,
2451 MAP_CLIP_NO_HOLES);
2452 if (start_entry == NULL) {
2453 vm_map_unlock(map);
2454 rv = KERN_INVALID_ADDRESS;
2455 goto failure;
2457 if ((kmflags & KM_PAGEABLE) == 0) {
2459 * Wiring.
2461 * 1. Holding the write lock, we create any shadow or zero-fill
2462 * objects that need to be created. Then we clip each map
2463 * entry to the region to be wired and increment its wiring
2464 * count. We create objects before clipping the map entries
2465 * to avoid object proliferation.
2467 * 2. We downgrade to a read lock, and call vm_fault_wire to
2468 * fault in the pages for any newly wired area (wired_count is
2469 * 1).
2471 * Downgrading to a read lock for vm_fault_wire avoids a
2472 * possible deadlock with another process that may have faulted
2473 * on one of the pages to be wired (it would mark the page busy,
2474 * blocking us, then in turn block on the map lock that we
2475 * hold). Because of problems in the recursive lock package,
2476 * we cannot upgrade to a write lock in vm_map_lookup. Thus,
2477 * any actions that require the write lock must be done
2478 * beforehand. Because we keep the read lock on the map, the
2479 * copy-on-write status of the entries we modify here cannot
2480 * change.
2482 entry = start_entry;
2483 while ((entry != &map->header) && (entry->start < end)) {
2485 * Trivial case if the entry is already wired
2487 if (entry->wired_count) {
2488 entry->wired_count++;
2489 entry = entry->next;
2490 continue;
2494 * The entry is being newly wired; we have to set up
2495 * appropriate management structures. A shadow
2496 * object is required for a copy-on-write region,
2497 * or a normal object for a zero-fill region. We
2498 * do not have to do this for entries that point to sub
2499 * maps because we won't hold the lock on the sub map.
2501 if (entry->maptype == VM_MAPTYPE_NORMAL ||
2502 entry->maptype == VM_MAPTYPE_VPAGETABLE) {
2503 int copyflag = entry->eflags &
2504 MAP_ENTRY_NEEDS_COPY;
2505 if (copyflag && ((entry->protection &
2506 VM_PROT_WRITE) != 0)) {
2507 vm_map_entry_shadow(entry, 0);
2508 } else if (entry->object.vm_object == NULL &&
2509 !map->system_map) {
2510 vm_map_entry_allocate_object(entry);
2514 entry->wired_count++;
2515 entry = entry->next;
2519 * Pass 2.
2523 * HACK HACK HACK HACK
2525 * vm_fault_wire() temporarily unlocks the map to avoid
2526 * deadlocks. The in-transition flag from vm_map_clip_range
2527 * call should protect us from changes while the map is
2528 * unlocked.
2530 * NOTE: Previously this comment stated that clipping might
2531 * still occur while the map is unlocked, but from
2532 * what I can tell it actually cannot.
2534 * It is unclear whether the CLIP_CHECK_*() calls
2535 * are still needed but we keep them in anyway.
2537 * HACK HACK HACK HACK
2540 entry = start_entry;
2541 while (entry != &map->header && entry->start < end) {
2543 * If vm_fault_wire fails for any page we need to undo
2544 * what has been done. We decrement the wiring count
2545 * for those pages which have not yet been wired (now)
2546 * and unwire those that have (later).
2548 vm_offset_t save_start = entry->start;
2549 vm_offset_t save_end = entry->end;
2551 if (entry->wired_count == 1)
2552 rv = vm_fault_wire(map, entry, FALSE, kmflags);
2553 if (rv) {
2554 CLIP_CHECK_BACK(entry, save_start);
2555 for (;;) {
2556 KASSERT(entry->wired_count == 1, ("wired_count changed unexpectedly"));
2557 entry->wired_count = 0;
2558 if (entry->end == save_end)
2559 break;
2560 entry = entry->next;
2561 KASSERT(entry != &map->header, ("bad entry clip during backout"));
2563 end = save_start;
2564 break;
2566 CLIP_CHECK_FWD(entry, save_end);
2567 entry = entry->next;
2571 * If a failure occurred, undo everything by falling through
2572 * to the unwiring code. 'end' has already been adjusted
2573 * appropriately.
2575 if (rv)
2576 kmflags |= KM_PAGEABLE;
2579 * start_entry is still IN_TRANSITION but may have been
2580 * clipped since vm_fault_wire() unlocks and relocks the
2581 * map. No matter how clipped it has gotten there should
2582 * be a fragment that is on our start boundary.
2584 CLIP_CHECK_BACK(start_entry, start);
2587 if (kmflags & KM_PAGEABLE) {
2589 * This is the unwiring case. We must first ensure that the
2590 * range to be unwired is really wired down. We know there
2591 * are no holes.
2593 entry = start_entry;
2594 while ((entry != &map->header) && (entry->start < end)) {
2595 if (entry->wired_count == 0) {
2596 rv = KERN_INVALID_ARGUMENT;
2597 goto done;
2599 entry = entry->next;
2603 * Now decrement the wiring count for each region. If a region
2604 * becomes completely unwired, unwire its physical pages and
2605 * mappings.
2607 entry = start_entry;
2608 while ((entry != &map->header) && (entry->start < end)) {
2609 entry->wired_count--;
2610 if (entry->wired_count == 0)
2611 vm_fault_unwire(map, entry);
2612 entry = entry->next;
2615 done:
2616 vm_map_unclip_range(map, start_entry, start, real_end,
2617 &count, MAP_CLIP_NO_HOLES);
2618 map->timestamp++;
2619 vm_map_unlock(map);
2620 failure:
2621 if (kmflags & KM_KRESERVE)
2622 vm_map_entry_krelease(count);
2623 else
2624 vm_map_entry_release(count);
2625 return (rv);
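/*
 * Illustrative sketch (editorial): a kernel allocation path that must not
 * dip into the normal map-entry reserve can wire with KM_KRESERVE and
 * back the operation out by adding KM_PAGEABLE. "kva" and "size" are
 * hypothetical, and this assumes kernel_map is the kernel's struct vm_map:
 *
 *	rv = vm_map_wire(&kernel_map, kva, kva + size, KM_KRESERVE);
 *	...
 *	vm_map_wire(&kernel_map, kva, kva + size,
 *		    KM_PAGEABLE | KM_KRESERVE);
 */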
2629 * Mark a newly allocated address range as wired but do not fault in
2630 * the pages. The caller is expected to load the pages into the object.
2632 * The map must be locked on entry and will remain locked on return.
2633 * No other requirements.
2635 void
2636 vm_map_set_wired_quick(vm_map_t map, vm_offset_t addr, vm_size_t size,
2637 int *countp)
2639 vm_map_entry_t scan;
2640 vm_map_entry_t entry;
2642 entry = vm_map_clip_range(map, addr, addr + size,
2643 countp, MAP_CLIP_NO_HOLES);
2644 for (scan = entry;
2645 scan != &map->header && scan->start < addr + size;
2646 scan = scan->next) {
2647 KKASSERT(scan->wired_count == 0);
2648 scan->wired_count = 1;
2650 vm_map_unclip_range(map, entry, addr, addr + size,
2651 countp, MAP_CLIP_NO_HOLES);
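/*
 * Illustrative sketch (editorial): the expected calling pattern, given
 * the locking requirement above. The caller reserves entries, locks the
 * map, marks the range wired, and later loads the pages itself:
 *
 *	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 *	vm_map_lock(map);
 *	vm_map_set_wired_quick(map, addr, size, &count);
 *	vm_map_unlock(map);
 *	vm_map_entry_release(count);
 */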
2655 * Push any dirty cached pages in the address range to their pager.
2656 * If syncio is TRUE, dirty pages are written synchronously.
2657 * If invalidate is TRUE, any cached pages are freed as well.
2659 * This routine is called by sys_msync()
2661 * Returns an error if any part of the specified range is not mapped.
2663 * No requirements.
2666 vm_map_clean(vm_map_t map, vm_offset_t start, vm_offset_t end,
2667 boolean_t syncio, boolean_t invalidate)
2669 vm_map_entry_t current;
2670 vm_map_entry_t entry;
2671 vm_size_t size;
2672 vm_object_t object;
2673 vm_object_t tobj;
2674 vm_ooffset_t offset;
2676 vm_map_lock_read(map);
2677 VM_MAP_RANGE_CHECK(map, start, end);
2678 if (!vm_map_lookup_entry(map, start, &entry)) {
2679 vm_map_unlock_read(map);
2680 return (KERN_INVALID_ADDRESS);
2682 lwkt_gettoken(&map->token);
2685 * Make a first pass to check for holes.
2687 for (current = entry; current->start < end; current = current->next) {
2688 if (current->maptype == VM_MAPTYPE_SUBMAP) {
2689 lwkt_reltoken(&map->token);
2690 vm_map_unlock_read(map);
2691 return (KERN_INVALID_ARGUMENT);
2693 if (end > current->end &&
2694 (current->next == &map->header ||
2695 current->end != current->next->start)) {
2696 lwkt_reltoken(&map->token);
2697 vm_map_unlock_read(map);
2698 return (KERN_INVALID_ADDRESS);
2702 if (invalidate)
2703 pmap_remove(vm_map_pmap(map), start, end);
2706 * Make a second pass, cleaning/uncaching pages from the indicated
2707 * objects as we go.
2709 for (current = entry; current->start < end; current = current->next) {
2710 offset = current->offset + (start - current->start);
2711 size = (end <= current->end ? end : current->end) - start;
2713 switch(current->maptype) {
2714 case VM_MAPTYPE_SUBMAP:
2716 vm_map_t smap;
2717 vm_map_entry_t tentry;
2718 vm_size_t tsize;
2720 smap = current->object.sub_map;
2721 vm_map_lock_read(smap);
2722 vm_map_lookup_entry(smap, offset, &tentry);
2723 tsize = tentry->end - offset;
2724 if (tsize < size)
2725 size = tsize;
2726 object = tentry->object.vm_object;
2727 offset = tentry->offset + (offset - tentry->start);
2728 vm_map_unlock_read(smap);
2729 break;
2731 case VM_MAPTYPE_NORMAL:
2732 case VM_MAPTYPE_VPAGETABLE:
2733 object = current->object.vm_object;
2734 break;
2735 default:
2736 object = NULL;
2737 break;
2740 if (object)
2741 vm_object_hold(object);
2744 * Note that there is absolutely no sense in writing out
2745 * anonymous objects, so we track down the vnode object
2746 * to write out.
2747 * We invalidate (remove) all pages from the address space
2748 * anyway, for semantic correctness.
2750 * note: certain anonymous maps, such as MAP_NOSYNC maps,
2751 * may start out with a NULL object.
2753 while (object && (tobj = object->backing_object) != NULL) {
2754 vm_object_hold(tobj);
2755 if (tobj == object->backing_object) {
2756 vm_object_lock_swap();
2757 offset += object->backing_object_offset;
2758 vm_object_drop(object);
2759 object = tobj;
2760 if (object->size < OFF_TO_IDX(offset + size))
2761 size = IDX_TO_OFF(object->size) -
2762 offset;
2763 break;
2765 vm_object_drop(tobj);
2767 if (object && (object->type == OBJT_VNODE) &&
2768 (current->protection & VM_PROT_WRITE) &&
2769 (object->flags & OBJ_NOMSYNC) == 0) {
2771 * Flush pages if writing is allowed, invalidate them
2772 * if invalidation requested. Pages undergoing I/O
2773 * will be ignored by vm_object_page_remove().
2775 * We cannot lock the vnode and then wait for paging
2776 * to complete without deadlocking against vm_fault.
2777 * Instead we simply call vm_object_page_remove() and
2778 * allow it to block internally on a page-by-page
2779 * basis when it encounters pages undergoing async
2780 * I/O.
2782 int flags;
2784 /* no chain wait needed for vnode objects */
2785 vm_object_reference_locked(object);
2786 vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY);
2787 flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
2788 flags |= invalidate ? OBJPC_INVAL : 0;
2791 * When operating on a virtual page table just
2792 * flush the whole object. XXX we probably ought
2793 * to
2795 switch(current->maptype) {
2796 case VM_MAPTYPE_NORMAL:
2797 vm_object_page_clean(object,
2798 OFF_TO_IDX(offset),
2799 OFF_TO_IDX(offset + size + PAGE_MASK),
2800 flags);
2801 break;
2802 case VM_MAPTYPE_VPAGETABLE:
2803 vm_object_page_clean(object, 0, 0, flags);
2804 break;
2806 vn_unlock(((struct vnode *)object->handle));
2807 vm_object_deallocate_locked(object);
2809 if (object && invalidate &&
2810 ((object->type == OBJT_VNODE) ||
2811 (object->type == OBJT_DEVICE) ||
2812 (object->type == OBJT_MGTDEVICE))) {
2813 int clean_only =
2814 ((object->type == OBJT_DEVICE) ||
2815 (object->type == OBJT_MGTDEVICE)) ? FALSE : TRUE;
2816 /* no chain wait needed for vnode/device objects */
2817 vm_object_reference_locked(object);
2818 switch(current->maptype) {
2819 case VM_MAPTYPE_NORMAL:
2820 vm_object_page_remove(object,
2821 OFF_TO_IDX(offset),
2822 OFF_TO_IDX(offset + size + PAGE_MASK),
2823 clean_only);
2824 break;
2825 case VM_MAPTYPE_VPAGETABLE:
2826 vm_object_page_remove(object, 0, 0, clean_only);
2827 break;
2829 vm_object_deallocate_locked(object);
2831 start += size;
2832 if (object)
2833 vm_object_drop(object);
2836 lwkt_reltoken(&map->token);
2837 vm_map_unlock_read(map);
2839 return (KERN_SUCCESS);
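/*
 * Illustrative sketch (editorial): one plausible mapping a sys_msync()
 * implementation might use from msync(2) flags onto the syncio and
 * invalidate arguments (syncio set whenever MS_ASYNC is absent):
 *
 *	MS_SYNC			syncio = TRUE,  invalidate = FALSE
 *	MS_ASYNC		syncio = FALSE, invalidate = FALSE
 *	MS_ASYNC|MS_INVALIDATE	syncio = FALSE, invalidate = TRUE
 */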
2843 * Make the region specified by this entry pageable.
2845 * The vm_map must be exclusively locked.
2847 static void
2848 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
2850 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2851 entry->wired_count = 0;
2852 vm_fault_unwire(map, entry);
2856 * Deallocate the given entry from the target map.
2858 * The vm_map must be exclusively locked.
2860 static void
2861 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry, int *countp)
2863 vm_map_entry_unlink(map, entry);
2864 map->size -= entry->end - entry->start;
2866 switch(entry->maptype) {
2867 case VM_MAPTYPE_NORMAL:
2868 case VM_MAPTYPE_VPAGETABLE:
2869 case VM_MAPTYPE_SUBMAP:
2870 vm_object_deallocate(entry->object.vm_object);
2871 break;
2872 case VM_MAPTYPE_UKSMAP:
2873 /* XXX TODO */
2874 break;
2875 default:
2876 break;
2879 vm_map_entry_dispose(map, entry, countp);
2883 * Deallocates the given address range from the target map.
2885 * The vm_map must be exclusively locked.
2888 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end, int *countp)
2890 vm_object_t object;
2891 vm_map_entry_t entry;
2892 vm_map_entry_t first_entry;
2893 vm_offset_t hole_start;
2895 ASSERT_VM_MAP_LOCKED(map);
2896 lwkt_gettoken(&map->token);
2897 again:
2899 * Find the start of the region, and clip it. Set entry to point
2900 * at the first record containing the requested address or, if no
2901 * such record exists, the next record with a greater address. The
2902 * loop will run from this point until a record beyond the termination
2903 * address is encountered.
2905 * Adjust freehint[] for either the clip case or the extension case.
2907 * GGG see other GGG comment.
2909 if (vm_map_lookup_entry(map, start, &first_entry)) {
2910 entry = first_entry;
2911 vm_map_clip_start(map, entry, start, countp);
2912 hole_start = start;
2913 } else {
2914 entry = first_entry->next;
2915 if (entry == &map->header)
2916 hole_start = first_entry->start;
2917 else
2918 hole_start = first_entry->end;
2922 * Step through all entries in this region
2924 while ((entry != &map->header) && (entry->start < end)) {
2925 vm_map_entry_t next;
2926 vm_offset_t s, e;
2927 vm_pindex_t offidxstart, offidxend, count;
2930 * If we hit an in-transition entry we have to sleep and
2931 * retry. It's easier (and not really slower) to just retry
2932 * since this case occurs so rarely and the hint is already
2933 * pointing at the right place. We have to reset the
2934 * start offset so as not to accidentally delete an entry
2935 * another process just created in vacated space.
2937 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2938 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2939 start = entry->start;
2940 ++mycpu->gd_cnt.v_intrans_coll;
2941 ++mycpu->gd_cnt.v_intrans_wait;
2942 vm_map_transition_wait(map);
2943 goto again;
2945 vm_map_clip_end(map, entry, end, countp);
2947 s = entry->start;
2948 e = entry->end;
2949 next = entry->next;
2951 offidxstart = OFF_TO_IDX(entry->offset);
2952 count = OFF_TO_IDX(e - s);
2954 switch(entry->maptype) {
2955 case VM_MAPTYPE_NORMAL:
2956 case VM_MAPTYPE_VPAGETABLE:
2957 case VM_MAPTYPE_SUBMAP:
2958 object = entry->object.vm_object;
2959 break;
2960 default:
2961 object = NULL;
2962 break;
2966 * Unwire before removing addresses from the pmap; otherwise,
2967 * unwiring will put the entries back in the pmap.
2969 * Generally speaking, doing a bulk pmap_remove() before
2970 * removing the pages from the VM object is better at
2971 * reducing unnecessary IPIs. The pmap code is now optimized
2972 * to not blindly iterate the range when pt and pd pages
2973 * are missing.
2975 if (entry->wired_count != 0)
2976 vm_map_entry_unwire(map, entry);
2978 offidxend = offidxstart + count;
2980 if (object == &kernel_object) {
2981 pmap_remove(map->pmap, s, e);
2982 vm_object_hold(object);
2983 vm_object_page_remove(object, offidxstart,
2984 offidxend, FALSE);
2985 vm_object_drop(object);
2986 } else if (object && object->type != OBJT_DEFAULT &&
2987 object->type != OBJT_SWAP) {
2989 * vnode object routines cannot be chain-locked,
2990 * but since we aren't removing pages from the
2991 * object here we can use a shared hold.
2993 vm_object_hold_shared(object);
2994 pmap_remove(map->pmap, s, e);
2995 vm_object_drop(object);
2996 } else if (object) {
2997 vm_object_hold(object);
2998 vm_object_chain_acquire(object, 0);
2999 pmap_remove(map->pmap, s, e);
3001 if (object != NULL &&
3002 object->ref_count != 1 &&
3003 (object->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) ==
3004 OBJ_ONEMAPPING &&
3005 (object->type == OBJT_DEFAULT ||
3006 object->type == OBJT_SWAP)) {
3007 vm_object_collapse(object, NULL);
3008 vm_object_page_remove(object, offidxstart,
3009 offidxend, FALSE);
3010 if (object->type == OBJT_SWAP) {
3011 swap_pager_freespace(object,
3012 offidxstart,
3013 count);
3015 if (offidxend >= object->size &&
3016 offidxstart < object->size) {
3017 object->size = offidxstart;
3020 vm_object_chain_release(object);
3021 vm_object_drop(object);
3022 } else if (entry->maptype == VM_MAPTYPE_UKSMAP) {
3023 pmap_remove(map->pmap, s, e);
3027 * Delete the entry (which may delete the object) only after
3028 * removing all pmap entries pointing to its pages.
3029 * (Otherwise, its page frames may be reallocated, and any
3030 * modify bits will be set in the wrong object!)
3032 vm_map_entry_delete(map, entry, countp);
3033 entry = next;
3035 if (entry == &map->header)
3036 vm_map_freehint_hole(map, hole_start, entry->end - hole_start);
3037 else
3038 vm_map_freehint_hole(map, hole_start,
3039 entry->start - hole_start);
3041 lwkt_reltoken(&map->token);
3043 return (KERN_SUCCESS);
3047 * Remove the given address range from the target map.
3048 * This is the exported form of vm_map_delete.
3050 * No requirements.
3053 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3055 int result;
3056 int count;
3058 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3059 vm_map_lock(map);
3060 VM_MAP_RANGE_CHECK(map, start, end);
3061 result = vm_map_delete(map, start, end, &count);
3062 vm_map_unlock(map);
3063 vm_map_entry_release(count);
3065 return (result);
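/*
 * Illustrative sketch (editorial): a munmap(2)-style caller only needs
 * this exported wrapper; entry reservation and map locking are handled
 * internally. "p", "addr" and "size" are hypothetical:
 *
 *	vm_map_remove(&p->p_vmspace->vm_map, addr, addr + size);
 */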
3069 * Assert that the target map allows the specified privilege on the
3070 * entire address region given. The entire region must be allocated.
3072 * The caller must specify whether the vm_map is already locked or not.
3074 boolean_t
3075 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3076 vm_prot_t protection, boolean_t have_lock)
3078 vm_map_entry_t entry;
3079 vm_map_entry_t tmp_entry;
3080 boolean_t result;
3082 if (have_lock == FALSE)
3083 vm_map_lock_read(map);
3085 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
3086 if (have_lock == FALSE)
3087 vm_map_unlock_read(map);
3088 return (FALSE);
3090 entry = tmp_entry;
3092 result = TRUE;
3093 while (start < end) {
3094 if (entry == &map->header) {
3095 result = FALSE;
3096 break;
3099 * No holes allowed!
3102 if (start < entry->start) {
3103 result = FALSE;
3104 break;
3107 * Check protection associated with entry.
3110 if ((entry->protection & protection) != protection) {
3111 result = FALSE;
3112 break;
3114 /* go to next entry */
3116 start = entry->end;
3117 entry = entry->next;
3119 if (have_lock == FALSE)
3120 vm_map_unlock_read(map);
3121 return (result);
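/*
 * Illustrative sketch (editorial): validating a user buffer before a
 * copyout-style operation, with the map not yet locked:
 *
 *	if (!vm_map_check_protection(map, uaddr, uaddr + len,
 *				     VM_PROT_WRITE, FALSE))
 *		return (EFAULT);
 *
 * Note this is only a point-in-time check; the map can change again the
 * moment the read lock is dropped.
 */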
3125 * If appropriate this function shadows the original object with a new object
3126 * and moves the VM pages from the original object to the new object.
3127 * The original object will also be collapsed, if possible.
3129 * We can only do this for normal memory objects with a single mapping, and
3130 * it only makes sense to do it if there are 2 or more refs on the original
3131 * object. i.e. typically a memory object that has been extended into
3132 * multiple vm_map_entry's with non-overlapping ranges.
3134 * This makes it easier to remove unused pages and keeps object inheritance
3135 * from being a negative impact on memory usage.
3137 * On return the (possibly new) entry->object.vm_object will have an
3138 * additional ref on it for the caller to dispose of (usually by cloning
3139 * the vm_map_entry). The additional ref had to be done in this routine
3140 * to avoid racing a collapse. The object's ONEMAPPING flag will also be
3141 * cleared.
3143 * The vm_map must be locked and its token held.
3145 static void
3146 vm_map_split(vm_map_entry_t entry)
3148 /* OPTIMIZED */
3149 vm_object_t oobject, nobject, bobject;
3150 vm_offset_t s, e;
3151 vm_page_t m;
3152 vm_pindex_t offidxstart, offidxend, idx;
3153 vm_size_t size;
3154 vm_ooffset_t offset;
3155 int useshadowlist;
3158 * Optimize away object locks for vnode objects. Important exit/exec
3159 * critical path.
3161 * OBJ_ONEMAPPING doesn't apply to vnode objects but clear the flag
3162 * anyway.
3164 oobject = entry->object.vm_object;
3165 if (oobject->type != OBJT_DEFAULT && oobject->type != OBJT_SWAP) {
3166 vm_object_reference_quick(oobject);
3167 vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3168 return;
3172 * Setup. Chain lock the original object throughout the entire
3173 * routine to prevent new page faults from occurring.
3175 * XXX can madvise WILLNEED interfere with us too?
3177 vm_object_hold(oobject);
3178 vm_object_chain_acquire(oobject, 0);
3181 * Original object cannot be split? Might have also changed state.
3183 if (oobject->handle == NULL || (oobject->type != OBJT_DEFAULT &&
3184 oobject->type != OBJT_SWAP)) {
3185 vm_object_chain_release(oobject);
3186 vm_object_reference_locked(oobject);
3187 vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3188 vm_object_drop(oobject);
3189 return;
3193 * Collapse original object with its backing store as an
3194 * optimization to reduce chain lengths when possible.
3196 * If ref_count <= 1 there aren't other non-overlapping vm_map_entry's
3197 * for oobject, so there's no point collapsing it.
3199 * Then re-check whether the object can be split.
3201 vm_object_collapse(oobject, NULL);
3203 if (oobject->ref_count <= 1 ||
3204 (oobject->type != OBJT_DEFAULT && oobject->type != OBJT_SWAP) ||
3205 (oobject->flags & (OBJ_NOSPLIT|OBJ_ONEMAPPING)) != OBJ_ONEMAPPING) {
3206 vm_object_chain_release(oobject);
3207 vm_object_reference_locked(oobject);
3208 vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3209 vm_object_drop(oobject);
3210 return;
3214 * Acquire the chain lock on the backing object.
3216 * Give bobject an additional ref count for when it will be shadowed
3217 * by nobject.
3219 useshadowlist = 0;
3220 if ((bobject = oobject->backing_object) != NULL) {
3221 if (bobject->type != OBJT_VNODE) {
3222 useshadowlist = 1;
3223 vm_object_hold(bobject);
3224 vm_object_chain_wait(bobject, 0);
3225 /* ref for shadowing below */
3226 vm_object_reference_locked(bobject);
3227 vm_object_chain_acquire(bobject, 0);
3228 KKASSERT(oobject->backing_object == bobject);
3229 KKASSERT((bobject->flags & OBJ_DEAD) == 0);
3230 } else {
3232 * vnodes are not placed on the shadow list but
3233 * they still get another ref for the backing_object
3234 * reference.
3236 vm_object_reference_quick(bobject);
3241 * Calculate the object page range and allocate the new object.
3243 offset = entry->offset;
3244 s = entry->start;
3245 e = entry->end;
3247 offidxstart = OFF_TO_IDX(offset);
3248 offidxend = offidxstart + OFF_TO_IDX(e - s);
3249 size = offidxend - offidxstart;
3251 switch(oobject->type) {
3252 case OBJT_DEFAULT:
3253 nobject = default_pager_alloc(NULL, IDX_TO_OFF(size),
3254 VM_PROT_ALL, 0);
3255 break;
3256 case OBJT_SWAP:
3257 nobject = swap_pager_alloc(NULL, IDX_TO_OFF(size),
3258 VM_PROT_ALL, 0);
3259 break;
3260 default:
3261 /* not reached */
3262 nobject = NULL;
3263 KKASSERT(0);
3266 if (nobject == NULL) {
3267 if (bobject) {
3268 if (useshadowlist) {
3269 vm_object_chain_release(bobject);
3270 vm_object_deallocate(bobject);
3271 vm_object_drop(bobject);
3272 } else {
3273 vm_object_deallocate(bobject);
3276 vm_object_chain_release(oobject);
3277 vm_object_reference_locked(oobject);
3278 vm_object_clear_flag(oobject, OBJ_ONEMAPPING);
3279 vm_object_drop(oobject);
3280 return;
3284 * The new object will replace entry->object.vm_object so it needs
3285 * a second reference (the caller expects an additional ref).
3287 vm_object_hold(nobject);
3288 vm_object_reference_locked(nobject);
3289 vm_object_chain_acquire(nobject, 0);
3292 * nobject shadows bobject (oobject already shadows bobject).
3294 * Adding an object to bobject's shadow list requires refing bobject
3295 * which we did above in the useshadowlist case.
3297 if (bobject) {
3298 nobject->backing_object_offset =
3299 oobject->backing_object_offset + IDX_TO_OFF(offidxstart);
3300 nobject->backing_object = bobject;
3301 if (useshadowlist) {
3302 bobject->shadow_count++;
3303 atomic_add_int(&bobject->generation, 1);
3304 LIST_INSERT_HEAD(&bobject->shadow_head,
3305 nobject, shadow_list);
3306 vm_object_clear_flag(bobject, OBJ_ONEMAPPING); /*XXX*/
3307 vm_object_chain_release(bobject);
3308 vm_object_drop(bobject);
3309 vm_object_set_flag(nobject, OBJ_ONSHADOW);
3314 * Move the VM pages from oobject to nobject
3316 for (idx = 0; idx < size; idx++) {
3317 vm_page_t m;
3319 m = vm_page_lookup_busy_wait(oobject, offidxstart + idx,
3320 TRUE, "vmpg");
3321 if (m == NULL)
3322 continue;
3325 * We must wait for pending I/O to complete before we can
3326 * rename the page.
3328 * We do not have to VM_PROT_NONE the page as mappings should
3329 * not be changed by this operation.
3331 * NOTE: The act of renaming a page updates chaingen for both
3332 * objects.
3334 vm_page_rename(m, nobject, idx);
3335 /* page automatically made dirty by rename and cache handled */
3336 /* page remains busy */
3339 if (oobject->type == OBJT_SWAP) {
3340 vm_object_pip_add(oobject, 1);
3342 * copy oobject pages into nobject and destroy unneeded
3343 * pages in shadow object.
3345 swap_pager_copy(oobject, nobject, offidxstart, 0);
3346 vm_object_pip_wakeup(oobject);
3350 * Wakeup the pages we played with. No spl protection is needed
3351 * for a simple wakeup.
3353 for (idx = 0; idx < size; idx++) {
3354 m = vm_page_lookup(nobject, idx);
3355 if (m) {
3356 KKASSERT(m->flags & PG_BUSY);
3357 vm_page_wakeup(m);
3360 entry->object.vm_object = nobject;
3361 entry->offset = 0LL;
3364 * Cleanup
3366 * NOTE: There is no need to remove OBJ_ONEMAPPING from oobject, the
3367 * related pages were moved and are no longer applicable to the
3368 * original object.
3370 * NOTE: Deallocate oobject (due to its entry->object.vm_object being
3371 * replaced by nobject).
3373 vm_object_chain_release(nobject);
3374 vm_object_drop(nobject);
3375 if (bobject && useshadowlist) {
3376 vm_object_chain_release(bobject);
3377 vm_object_drop(bobject);
3379 vm_object_chain_release(oobject);
3380 /*vm_object_clear_flag(oobject, OBJ_ONEMAPPING);*/
3381 vm_object_deallocate_locked(oobject);
3382 vm_object_drop(oobject);
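/*
 * Illustrative sketch (editorial): the intended shape of a successful
 * split for an entry covering object pages [offidxstart, offidxend):
 *
 *	before:	entry -> oobject -> bobject
 *	after:	entry -> nobject -> bobject
 *
 * The pages in the entry's range are renamed from oobject into nobject
 * (swap backing is copied for OBJT_SWAP) and entry->offset is reset to 0
 * since nobject begins exactly at the entry's first page.
 */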
3386 * Copies the contents of the source entry to the destination
3387 * entry. The entries *must* be aligned properly.
3389 * The vm_maps must be exclusively locked.
3390 * The vm_map's token must be held.
3392 * Because the maps are locked no faults can be in progress during the
3393 * operation.
3395 static void
3396 vm_map_copy_entry(vm_map_t src_map, vm_map_t dst_map,
3397 vm_map_entry_t src_entry, vm_map_entry_t dst_entry)
3399 vm_object_t src_object;
3401 if (dst_entry->maptype == VM_MAPTYPE_SUBMAP ||
3402 dst_entry->maptype == VM_MAPTYPE_UKSMAP)
3403 return;
3404 if (src_entry->maptype == VM_MAPTYPE_SUBMAP ||
3405 src_entry->maptype == VM_MAPTYPE_UKSMAP)
3406 return;
3408 if (src_entry->wired_count == 0) {
3410 * If the source entry is marked needs_copy, it is already
3411 * write-protected.
3413 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
3414 pmap_protect(src_map->pmap,
3415 src_entry->start,
3416 src_entry->end,
3417 src_entry->protection & ~VM_PROT_WRITE);
3421 * Make a copy of the object.
3423 * The object must be locked prior to checking the object type
3424 * and for the call to vm_object_collapse() and vm_map_split().
3425 * We cannot use *_hold() here because the split code will
3426 * probably try to destroy the object. The lock is a pool
3427 * token and doesn't care.
3429 * We must bump src_map->timestamp when setting
3430 * MAP_ENTRY_NEEDS_COPY to force any concurrent fault
3431 * to retry, otherwise the concurrent fault might improperly
3432 * install a RW pte when it's supposed to be a RO(COW) pte.
3433 * This race can occur because a vnode-backed fault may have
3434 * to temporarily release the map lock.
3436 if (src_entry->object.vm_object != NULL) {
3437 vm_map_split(src_entry);
3438 src_object = src_entry->object.vm_object;
3439 dst_entry->object.vm_object = src_object;
3440 src_entry->eflags |= (MAP_ENTRY_COW |
3441 MAP_ENTRY_NEEDS_COPY);
3442 dst_entry->eflags |= (MAP_ENTRY_COW |
3443 MAP_ENTRY_NEEDS_COPY);
3444 dst_entry->offset = src_entry->offset;
3445 ++src_map->timestamp;
3446 } else {
3447 dst_entry->object.vm_object = NULL;
3448 dst_entry->offset = 0;
3451 pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
3452 dst_entry->end - dst_entry->start, src_entry->start);
3453 } else {
3455 * Of course, wired down pages can't be set copy-on-write.
3456 * Cause wired pages to be copied into the new map by
3457 * simulating faults (the new pages are pageable).
3459 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry);
3464 * vmspace_fork:
3465 * Create a new process vmspace structure and vm_map
3466 * based on those of an existing process. The new map
3467 * is based on the old map, according to the inheritance
3468 * values on the regions in that map.
3470 * The source map must not be locked.
3471 * No requirements.
3473 static void vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map,
3474 vm_map_entry_t old_entry, int *countp);
3475 static void vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map,
3476 vm_map_entry_t old_entry, int *countp);
3478 struct vmspace *
3479 vmspace_fork(struct vmspace *vm1)
3481 struct vmspace *vm2;
3482 vm_map_t old_map = &vm1->vm_map;
3483 vm_map_t new_map;
3484 vm_map_entry_t old_entry;
3485 int count;
3487 lwkt_gettoken(&vm1->vm_map.token);
3488 vm_map_lock(old_map);
3490 vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
3491 lwkt_gettoken(&vm2->vm_map.token);
3492 bcopy(&vm1->vm_startcopy, &vm2->vm_startcopy,
3493 (caddr_t)&vm1->vm_endcopy - (caddr_t)&vm1->vm_startcopy);
3494 new_map = &vm2->vm_map; /* XXX */
3495 new_map->timestamp = 1;
3497 vm_map_lock(new_map);
3499 count = 0;
3500 old_entry = old_map->header.next;
3501 while (old_entry != &old_map->header) {
3502 ++count;
3503 old_entry = old_entry->next;
3506 count = vm_map_entry_reserve(count + MAP_RESERVE_COUNT);
3508 old_entry = old_map->header.next;
3509 while (old_entry != &old_map->header) {
3510 switch(old_entry->maptype) {
3511 case VM_MAPTYPE_SUBMAP:
3512 panic("vm_map_fork: encountered a submap");
3513 break;
3514 case VM_MAPTYPE_UKSMAP:
3515 vmspace_fork_uksmap_entry(old_map, new_map,
3516 old_entry, &count);
3517 break;
3518 case VM_MAPTYPE_NORMAL:
3519 case VM_MAPTYPE_VPAGETABLE:
3520 vmspace_fork_normal_entry(old_map, new_map,
3521 old_entry, &count);
3522 break;
3524 old_entry = old_entry->next;
3527 new_map->size = old_map->size;
3528 vm_map_unlock(old_map);
3529 vm_map_unlock(new_map);
3530 vm_map_entry_release(count);
3532 lwkt_reltoken(&vm2->vm_map.token);
3533 lwkt_reltoken(&vm1->vm_map.token);
3535 return (vm2);
3538 static
3539 void
3540 vmspace_fork_normal_entry(vm_map_t old_map, vm_map_t new_map,
3541 vm_map_entry_t old_entry, int *countp)
3543 vm_map_entry_t new_entry;
3544 vm_object_t object;
3546 switch (old_entry->inheritance) {
3547 case VM_INHERIT_NONE:
3548 break;
3549 case VM_INHERIT_SHARE:
3551 * Clone the entry, creating the shared object if
3552 * necessary.
3554 if (old_entry->object.vm_object == NULL)
3555 vm_map_entry_allocate_object(old_entry);
3557 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3559 * Shadow a map_entry which needs a copy,
3560 * replacing its object with a new object
3561 * that points to the old one. Ask the
3562 * shadow code to automatically add an
3563 * additional ref. We can't do it afterwards
3564 * because we might race a collapse. The call
3565 * to vm_map_entry_shadow() will also clear
3566 * OBJ_ONEMAPPING.
3568 vm_map_entry_shadow(old_entry, 1);
3569 } else if (old_entry->object.vm_object) {
3571 * We will make a shared copy of the object,
3572 * and must clear OBJ_ONEMAPPING.
3574 * Optimize vnode objects. OBJ_ONEMAPPING
3575 * is non-applicable but clear it anyway,
3576 * and it's terminal so we don't have to deal
3577 * with chains. Reduces SMP conflicts.
3579 * XXX assert that object.vm_object != NULL
3580 * since we allocate it above.
3582 object = old_entry->object.vm_object;
3583 if (object->type == OBJT_VNODE) {
3584 vm_object_reference_quick(object);
3585 vm_object_clear_flag(object,
3586 OBJ_ONEMAPPING);
3587 } else {
3588 vm_object_hold(object);
3589 vm_object_chain_wait(object, 0);
3590 vm_object_reference_locked(object);
3591 vm_object_clear_flag(object,
3592 OBJ_ONEMAPPING);
3593 vm_object_drop(object);
3598 * Clone the entry. We've already bumped the ref on
3599 * any vm_object.
3601 new_entry = vm_map_entry_create(new_map, countp);
3602 *new_entry = *old_entry;
3603 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3604 new_entry->wired_count = 0;
3607 * Insert the entry into the new map -- we know we're
3608 * inserting at the end of the new map.
3611 vm_map_entry_link(new_map, new_map->header.prev,
3612 new_entry);
3615 * Update the physical map
3617 pmap_copy(new_map->pmap, old_map->pmap,
3618 new_entry->start,
3619 (old_entry->end - old_entry->start),
3620 old_entry->start);
3621 break;
3622 case VM_INHERIT_COPY:
3624 * Clone the entry and link into the map.
3626 new_entry = vm_map_entry_create(new_map, countp);
3627 *new_entry = *old_entry;
3628 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3629 new_entry->wired_count = 0;
3630 new_entry->object.vm_object = NULL;
3631 vm_map_entry_link(new_map, new_map->header.prev,
3632 new_entry);
3633 vm_map_copy_entry(old_map, new_map, old_entry,
3634 new_entry);
3635 break;
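/*
 * Illustrative summary (editorial) of the per-entry dispatch above:
 *
 *	VM_INHERIT_NONE		entry is not copied into the child
 *	VM_INHERIT_SHARE	child maps the same object (shadowed
 *				first if MAP_ENTRY_NEEDS_COPY is set)
 *	VM_INHERIT_COPY		child receives a COW copy via
 *				vm_map_copy_entry()
 */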
3640 * When forking user-kernel shared maps, the map might change in the
3641 * child so do not try to copy the underlying pmap entries.
3643 static
3644 void
3645 vmspace_fork_uksmap_entry(vm_map_t old_map, vm_map_t new_map,
3646 vm_map_entry_t old_entry, int *countp)
3648 vm_map_entry_t new_entry;
3650 new_entry = vm_map_entry_create(new_map, countp);
3651 *new_entry = *old_entry;
3652 new_entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3653 new_entry->wired_count = 0;
3654 vm_map_entry_link(new_map, new_map->header.prev,
3655 new_entry);
3659 * Create an auto-grow stack entry
3661 * No requirements.
3664 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
3665 int flags, vm_prot_t prot, vm_prot_t max, int cow)
3667 vm_map_entry_t prev_entry;
3668 vm_map_entry_t new_stack_entry;
3669 vm_size_t init_ssize;
3670 int rv;
3671 int count;
3672 vm_offset_t tmpaddr;
3674 cow |= MAP_IS_STACK;
3676 if (max_ssize < sgrowsiz)
3677 init_ssize = max_ssize;
3678 else
3679 init_ssize = sgrowsiz;
3681 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3682 vm_map_lock(map);
3685 * Find space for the mapping
3687 if ((flags & (MAP_FIXED | MAP_TRYFIXED)) == 0) {
3688 if (vm_map_findspace(map, addrbos, max_ssize, 1,
3689 flags, &tmpaddr)) {
3690 vm_map_unlock(map);
3691 vm_map_entry_release(count);
3692 return (KERN_NO_SPACE);
3694 addrbos = tmpaddr;
3697 /* If addr is already mapped, no go */
3698 if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
3699 vm_map_unlock(map);
3700 vm_map_entry_release(count);
3701 return (KERN_NO_SPACE);
3704 #if 0
3705 /* XXX already handled by kern_mmap() */
3706 /* If we would blow our VMEM resource limit, no go */
3707 if (map->size + init_ssize >
3708 curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3709 vm_map_unlock(map);
3710 vm_map_entry_release(count);
3711 return (KERN_NO_SPACE);
3713 #endif
3716 * If we can't accommodate max_ssize in the current mapping,
3717 * no go. However, we need to be aware that subsequent user
3718 * mappings might map into the space we have reserved for
3719 * stack, and currently this space is not protected.
3721 * Hopefully we will at least detect this condition
3722 * when we try to grow the stack.
3724 if ((prev_entry->next != &map->header) &&
3725 (prev_entry->next->start < addrbos + max_ssize)) {
3726 vm_map_unlock(map);
3727 vm_map_entry_release(count);
3728 return (KERN_NO_SPACE);
3732 * We initially map a stack of only init_ssize. We will
3733 * grow as needed later. Since this is to be a grow
3734 * down stack, we map at the top of the range.
3736 * Note: we would normally expect prot and max to be
3737 * VM_PROT_ALL, and cow to be 0. Possibly we should
3738 * eliminate these as input parameters, and just
3739 * pass these values here in the insert call.
3741 rv = vm_map_insert(map, &count, NULL, NULL,
3742 0, addrbos + max_ssize - init_ssize,
3743 addrbos + max_ssize,
3744 VM_MAPTYPE_NORMAL,
3745 VM_SUBSYS_STACK, prot, max, cow);
3747 /* Now set the avail_ssize amount */
3748 if (rv == KERN_SUCCESS) {
3749 if (prev_entry != &map->header)
3750 vm_map_clip_end(map, prev_entry, addrbos + max_ssize - init_ssize, &count);
3751 new_stack_entry = prev_entry->next;
3752 if (new_stack_entry->end != addrbos + max_ssize ||
3753 new_stack_entry->start != addrbos + max_ssize - init_ssize)
3754 panic ("Bad entry start/end for new stack entry");
3755 else
3756 new_stack_entry->aux.avail_ssize = max_ssize - init_ssize;
3759 vm_map_unlock(map);
3760 vm_map_entry_release(count);
3761 return (rv);
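/*
 * Illustrative sketch (editorial): an execve-style path creating a
 * default grow-down user stack. "vms" is a hypothetical struct vmspace
 * pointer, and this assumes vm_maxsaddr holds the stack bottom and
 * maxssiz is the usual stack-size tunable:
 *
 *	rv = vm_map_stack(&vms->vm_map,
 *			  (vm_offset_t)vms->vm_maxsaddr, maxssiz,
 *			  0, VM_PROT_ALL, VM_PROT_ALL, 0);
 */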
3765 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if the
3766 * desired address is already mapped, or if we successfully grow
3767 * the stack. Also returns KERN_SUCCESS if addr is outside the
3768 * stack range (this is strange, but preserves compatibility with
3769 * the grow function in vm_machdep.c).
3771 * No requirements.
3774 vm_map_growstack (vm_map_t map, vm_offset_t addr)
3776 vm_map_entry_t prev_entry;
3777 vm_map_entry_t stack_entry;
3778 vm_map_entry_t new_stack_entry;
3779 struct vmspace *vm;
3780 struct lwp *lp;
3781 struct proc *p;
3782 vm_offset_t end;
3783 int grow_amount;
3784 int rv = KERN_SUCCESS;
3785 int is_procstack;
3786 int use_read_lock = 1;
3787 int count;
3790 * Find the vm
3792 lp = curthread->td_lwp;
3793 p = curthread->td_proc;
3794 KKASSERT(lp != NULL);
3795 vm = lp->lwp_vmspace;
3798 * Growstack is only allowed on the current process. We disallow
3799 * other use cases, e.g. trying to access memory via procfs that
3800 * the stack hasn't grown into.
3802 if (map != &vm->vm_map) {
3803 return KERN_FAILURE;
3806 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
3807 Retry:
3808 if (use_read_lock)
3809 vm_map_lock_read(map);
3810 else
3811 vm_map_lock(map);
3813 /* If addr is already in the entry range, no need to grow. */
3814 if (vm_map_lookup_entry(map, addr, &prev_entry))
3815 goto done;
3817 if ((stack_entry = prev_entry->next) == &map->header)
3818 goto done;
3819 if (prev_entry == &map->header)
3820 end = stack_entry->start - stack_entry->aux.avail_ssize;
3821 else
3822 end = prev_entry->end;
3825 * This next test mimics the old grow function in vm_machdep.c.
3826 * It really doesn't quite make sense, but we do it anyway
3827 * for compatibility.
3829 * If not growable stack, return success. This signals the
3830 * caller to proceed as he would normally with normal vm.
3832 if (stack_entry->aux.avail_ssize < 1 ||
3833 addr >= stack_entry->start ||
3834 addr < stack_entry->start - stack_entry->aux.avail_ssize) {
3835 goto done;
3838 /* Find the minimum grow amount */
3839 grow_amount = roundup (stack_entry->start - addr, PAGE_SIZE);
3840 if (grow_amount > stack_entry->aux.avail_ssize) {
3841 rv = KERN_NO_SPACE;
3842 goto done;
3846 * If there is no longer enough space between the entries,
3847 * fail and adjust the recorded available space. Note: this
3848 * should only happen if the user has mapped into the
3849 * stack area after the stack was created, and is
3850 * probably an error.
3852 * This also effectively destroys any guard page the user
3853 * might have intended by limiting the stack size.
3855 if (grow_amount > stack_entry->start - end) {
3856 if (use_read_lock && vm_map_lock_upgrade(map)) {
3857 /* lost lock */
3858 use_read_lock = 0;
3859 goto Retry;
3861 use_read_lock = 0;
3862 stack_entry->aux.avail_ssize = stack_entry->start - end;
3863 rv = KERN_NO_SPACE;
3864 goto done;
3867 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr;
3869 /* If this is the main process stack, see if we're over the
3870 * stack limit.
3872 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3873 p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3874 rv = KERN_NO_SPACE;
3875 goto done;
3878 /* Round up the grow amount modulo SGROWSIZ */
3879 grow_amount = roundup (grow_amount, sgrowsiz);
3880 if (grow_amount > stack_entry->aux.avail_ssize) {
3881 grow_amount = stack_entry->aux.avail_ssize;
3883 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount >
3884 p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
3885 grow_amount = p->p_rlimit[RLIMIT_STACK].rlim_cur -
3886 ctob(vm->vm_ssize);
3889 /* If we would blow our VMEM resource limit, no go */
3890 if (map->size + grow_amount > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
3891 rv = KERN_NO_SPACE;
3892 goto done;
3895 if (use_read_lock && vm_map_lock_upgrade(map)) {
3896 /* lost lock */
3897 use_read_lock = 0;
3898 goto Retry;
3900 use_read_lock = 0;
3902 /* Get the preliminary new entry start value */
3903 addr = stack_entry->start - grow_amount;
3905 /* If this puts us into the previous entry, cut back our growth
3906 * to the available space. Also, see the note above.
3908 if (addr < end) {
3909 stack_entry->aux.avail_ssize = stack_entry->start - end;
3910 addr = end;
3913 rv = vm_map_insert(map, &count, NULL, NULL,
3914 0, addr, stack_entry->start,
3915 VM_MAPTYPE_NORMAL,
3916 VM_SUBSYS_STACK, VM_PROT_ALL, VM_PROT_ALL, 0);
3918 /* Adjust the available stack space by the amount we grew. */
3919 if (rv == KERN_SUCCESS) {
3920 if (prev_entry != &map->header)
3921 vm_map_clip_end(map, prev_entry, addr, &count);
3922 new_stack_entry = prev_entry->next;
3923 if (new_stack_entry->end != stack_entry->start ||
3924 new_stack_entry->start != addr)
3925 panic ("Bad stack grow start/end in new stack entry");
3926 else {
3927 new_stack_entry->aux.avail_ssize =
3928 stack_entry->aux.avail_ssize -
3929 (new_stack_entry->end - new_stack_entry->start);
3930 if (is_procstack)
3931 vm->vm_ssize += btoc(new_stack_entry->end -
3932 new_stack_entry->start);
3935 if (map->flags & MAP_WIREFUTURE)
3936 vm_map_unwire(map, new_stack_entry->start,
3937 new_stack_entry->end, FALSE);
3940 done:
3941 if (use_read_lock)
3942 vm_map_unlock_read(map);
3943 else
3944 vm_map_unlock(map);
3945 vm_map_entry_release(count);
3946 return (rv);
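/*
 * Illustrative sketch (editorial): the page-fault path is expected to
 * offer the stack a chance to grow before declaring a hard fault.
 * "vaddr" is the hypothetical faulting address:
 *
 *	if (vm_map_growstack(map, vaddr) != KERN_SUCCESS)
 *		return (KERN_FAILURE);
 *
 * Per the header comment above, KERN_SUCCESS only means the fault may be
 * retried; it does not guarantee the address is now mapped.
 */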
3950 * Unshare the specified VM space for exec. If other processes are
3951 * mapped to it, then create a new one. The new vmspace is null.
3953 * No requirements.
3955 void
3956 vmspace_exec(struct proc *p, struct vmspace *vmcopy)
3958 struct vmspace *oldvmspace = p->p_vmspace;
3959 struct vmspace *newvmspace;
3960 vm_map_t map = &p->p_vmspace->vm_map;
3963 * If we are execing a resident vmspace we fork it, otherwise
3964 * we create a new vmspace. Note that exitingcnt is not
3965 * copied to the new vmspace.
3967 lwkt_gettoken(&oldvmspace->vm_map.token);
3968 if (vmcopy) {
3969 newvmspace = vmspace_fork(vmcopy);
3970 lwkt_gettoken(&newvmspace->vm_map.token);
3971 } else {
3972 newvmspace = vmspace_alloc(map->min_offset, map->max_offset);
3973 lwkt_gettoken(&newvmspace->vm_map.token);
3974 bcopy(&oldvmspace->vm_startcopy, &newvmspace->vm_startcopy,
3975 (caddr_t)&oldvmspace->vm_endcopy -
3976 (caddr_t)&oldvmspace->vm_startcopy);
3980 * Finish initializing the vmspace before assigning it
3981 * to the process. The vmspace will become the current vmspace
3982 * if p == curproc.
3984 pmap_pinit2(vmspace_pmap(newvmspace));
3985 pmap_replacevm(p, newvmspace, 0);
3986 lwkt_reltoken(&newvmspace->vm_map.token);
3987 lwkt_reltoken(&oldvmspace->vm_map.token);
3988 vmspace_rel(oldvmspace);
3992 * Unshare the specified VM space for forcing COW. This
3993 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
3995 void
3996 vmspace_unshare(struct proc *p)
3998 struct vmspace *oldvmspace = p->p_vmspace;
3999 struct vmspace *newvmspace;
4001 lwkt_gettoken(&oldvmspace->vm_map.token);
4002 if (vmspace_getrefs(oldvmspace) == 1) {
4003 lwkt_reltoken(&oldvmspace->vm_map.token);
4004 return;
4006 newvmspace = vmspace_fork(oldvmspace);
4007 lwkt_gettoken(&newvmspace->vm_map.token);
4008 pmap_pinit2(vmspace_pmap(newvmspace));
4009 pmap_replacevm(p, newvmspace, 0);
4010 lwkt_reltoken(&newvmspace->vm_map.token);
4011 lwkt_reltoken(&oldvmspace->vm_map.token);
4012 vmspace_rel(oldvmspace);
/*
 * vm_map_hint: return the beginning of the best area suitable for
 * creating a new mapping with "prot" protection.
 *
 * No requirements.
 */
vm_offset_t
vm_map_hint(struct proc *p, vm_offset_t addr, vm_prot_t prot)
{
        struct vmspace *vms = p->p_vmspace;

        if (!randomize_mmap || addr != 0) {
                /*
                 * Set a reasonable start point for the hint if it was
                 * not specified or if it falls within the heap space.
                 * Hinted mmap()s do not allocate out of the heap space.
                 */
                if (addr == 0 ||
                    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
                     addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz))) {
                        addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
                }

                return addr;
        }
        addr = (vm_offset_t)vms->vm_daddr + MAXDSIZ;
        addr += karc4random() & (MIN((256 * 1024 * 1024), MAXDSIZ) - 1);

        return (round_page(addr));
}

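/*
 * Illustrative sketch (not compiled): the randomized branch above in
 * isolation, assuming MAXDSIZ >= 256MB so that the MIN() selects the
 * 256MB constant.  Because 256MB is a power of two, masking
 * karc4random() with (size - 1) yields a uniform offset in [0, 256MB),
 * which round_page() then aligns to a page boundary.
 */
#if 0
static vm_offset_t
hint_random_example(vm_offset_t daddr)
{
        vm_offset_t addr;

        addr = daddr + MAXDSIZ;
        addr += karc4random() & ((256 * 1024 * 1024) - 1);
        return (round_page(addr));
}
#endif
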
/*
 * Finds the VM object, offset, and protection for a given virtual address
 * in the specified map, assuming a page fault of the type specified.
 *
 * Leaves the map in question locked for read; return values are guaranteed
 * until a vm_map_lookup_done call is performed.  Note that the map argument
 * is in/out; the returned map must be used in the call to vm_map_lookup_done.
 *
 * A handle (out_entry) is returned for use in vm_map_lookup_done, to make
 * that fast.
 *
 * If a lookup is requested with "write protection" specified, the map may
 * be changed to perform virtual copying operations, although the data
 * referenced will remain the same.
 *
 * No requirements.
 */
int
vm_map_lookup(vm_map_t *var_map,                /* IN/OUT */
              vm_offset_t vaddr,
              vm_prot_t fault_typea,
              vm_map_entry_t *out_entry,        /* OUT */
              vm_object_t *object,              /* OUT */
              vm_pindex_t *pindex,              /* OUT */
              vm_prot_t *out_prot,              /* OUT */
              boolean_t *wired)                 /* OUT */
{
        vm_map_entry_t entry;
        vm_map_t map = *var_map;
        vm_prot_t prot;
        vm_prot_t fault_type = fault_typea;
        int use_read_lock = 1;
        int rv = KERN_SUCCESS;
RetryLookup:
        if (use_read_lock)
                vm_map_lock_read(map);
        else
                vm_map_lock(map);

        /*
         * Always do a full lookup.  The hint doesn't get us much anymore
         * now that the map is RB'd.
         */
        cpu_ccfence();
        *out_entry = &map->header;
        *object = NULL;

        {
                vm_map_entry_t tmp_entry;

                if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
                        rv = KERN_INVALID_ADDRESS;
                        goto done;
                }
                entry = tmp_entry;
                *out_entry = entry;
        }

        /*
         * Handle submaps.
         */
        if (entry->maptype == VM_MAPTYPE_SUBMAP) {
                vm_map_t old_map = map;

                *var_map = map = entry->object.sub_map;
                if (use_read_lock)
                        vm_map_unlock_read(old_map);
                else
                        vm_map_unlock(old_map);
                use_read_lock = 1;
                goto RetryLookup;
        }
4122 * Note the special case for MAP_ENTRY_COW pages with an override.
4123 * This is to implement a forced COW for debuggers.
4125 if (fault_type & VM_PROT_OVERRIDE_WRITE)
4126 prot = entry->max_protection;
4127 else
4128 prot = entry->protection;
4130 fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
4131 if ((fault_type & prot) != fault_type) {
4132 rv = KERN_PROTECTION_FAILURE;
4133 goto done;
4136 if ((entry->eflags & MAP_ENTRY_USER_WIRED) &&
4137 (entry->eflags & MAP_ENTRY_COW) &&
4138 (fault_type & VM_PROT_WRITE) &&
4139 (fault_typea & VM_PROT_OVERRIDE_WRITE) == 0) {
4140 rv = KERN_PROTECTION_FAILURE;
4141 goto done;
4145 * If this page is not pageable, we have to get it for all possible
4146 * accesses.
4148 *wired = (entry->wired_count != 0);
4149 if (*wired)
4150 prot = fault_type = entry->protection;
        /*
         * Virtual page tables may need to update the accessed (A) bit
         * in a page table entry.  Upgrade the fault to a write fault for
         * that case if the map will support it.  If the map does not support
         * it the page table entry simply will not be updated.
         */
        if (entry->maptype == VM_MAPTYPE_VPAGETABLE) {
                if (prot & VM_PROT_WRITE)
                        fault_type |= VM_PROT_WRITE;
        }

        if (curthread->td_lwp && curthread->td_lwp->lwp_vmspace &&
            pmap_emulate_ad_bits(&curthread->td_lwp->lwp_vmspace->vm_pmap)) {
                if ((prot & VM_PROT_WRITE) == 0)
                        fault_type |= VM_PROT_WRITE;
        }

        /*
         * Only NORMAL and VPAGETABLE maps are object-based.  UKSMAPs are not.
         */
        if (entry->maptype != VM_MAPTYPE_NORMAL &&
            entry->maptype != VM_MAPTYPE_VPAGETABLE) {
                *object = NULL;
                goto skip;
        }
        /*
         * If the entry was copy-on-write, we either resolve the copy now
         * (for a write fault) or demote the permitted access (for a read).
         */
        if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
                /*
                 * If we want to write the page, we may as well handle that
                 * now since we've got the map locked.
                 *
                 * If we don't need to write the page, we just demote the
                 * permissions allowed.
                 */
                if (fault_type & VM_PROT_WRITE) {
                        /*
                         * Not allowed if TDF_NOFAULT is set as the shadowing
                         * operation can deadlock against the faulting
                         * function due to the copy-on-write.
                         */
                        if (curthread->td_flags & TDF_NOFAULT) {
                                rv = KERN_FAILURE_NOFAULT;
                                goto done;
                        }

                        /*
                         * Make a new object, and place it in the object
                         * chain.  Note that no new references have appeared
                         * -- one just moved from the map to the new
                         * object.
                         */
                        if (use_read_lock && vm_map_lock_upgrade(map)) {
                                /* lost lock */
                                use_read_lock = 0;
                                goto RetryLookup;
                        }
                        use_read_lock = 0;
                        vm_map_entry_shadow(entry, 0);
                } else {
                        /*
                         * We're attempting to read a copy-on-write page --
                         * don't allow writes.
                         */
                        prot &= ~VM_PROT_WRITE;
                }
        }
        /*
         * Create an object if necessary.
         */
        if (entry->object.vm_object == NULL && !map->system_map) {
                if (use_read_lock && vm_map_lock_upgrade(map)) {
                        /* lost lock */
                        use_read_lock = 0;
                        goto RetryLookup;
                }
                use_read_lock = 0;
                vm_map_entry_allocate_object(entry);
        }

        /*
         * Return the object/offset from this entry.  If the entry was
         * copy-on-write or empty, it has been fixed up.
         */
        *object = entry->object.vm_object;

skip:
        *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);

        /*
         * Return whether this is the only map sharing this data.  On
         * success we return with a read lock held on the map.  On failure
         * we return with the map unlocked.
         */
        *out_prot = prot;
done:
        if (rv == KERN_SUCCESS) {
                if (use_read_lock == 0)
                        vm_map_lock_downgrade(map);
        } else if (use_read_lock) {
                vm_map_unlock_read(map);
        } else {
                vm_map_unlock(map);
        }
        return (rv);
}

/*
 * Releases locks acquired by a vm_map_lookup()
 * (according to the handle returned by that lookup).
 *
 * No other requirements.
 */
void
vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry, int count)
{
        /*
         * Unlock the main-level map
         */
        vm_map_unlock_read(map);
        if (count)
                vm_map_entry_release(count);
}

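/*
 * Illustrative sketch (not compiled): the expected pairing of the two
 * functions above, loosely modeled on what a fault handler does.  The
 * map pointer is passed by reference because the lookup may descend
 * into a submap; the same (possibly updated) map must be handed back
 * to vm_map_lookup_done().  The function name is hypothetical.
 */
#if 0
static int
lookup_pairing_example(vm_map_t map, vm_offset_t va)
{
        vm_map_entry_t entry;
        vm_object_t object;
        vm_pindex_t pindex;
        vm_prot_t prot;
        boolean_t wired;
        int rv;

        rv = vm_map_lookup(&map, va, VM_PROT_READ,
                           &entry, &object, &pindex, &prot, &wired);
        if (rv != KERN_SUCCESS)
                return (rv);
        /* ... use object/pindex while the read lock is held ... */
        vm_map_lookup_done(map, entry, 0);
        return (KERN_SUCCESS);
}
#endif
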
/*
 * Quick hack, needs some help to make it more SMP friendly.
 */
void
vm_map_interlock(vm_map_t map, struct vm_map_ilock *ilock,
                 vm_offset_t ran_beg, vm_offset_t ran_end)
{
        struct vm_map_ilock *scan;

        ilock->ran_beg = ran_beg;
        ilock->ran_end = ran_end;
        ilock->flags = 0;

        spin_lock(&map->ilock_spin);
restart:
        for (scan = map->ilock_base; scan; scan = scan->next) {
                if (ran_end > scan->ran_beg && ran_beg < scan->ran_end) {
                        scan->flags |= ILOCK_WAITING;
                        ssleep(scan, &map->ilock_spin, 0, "ilock", 0);
                        goto restart;
                }
        }
        ilock->next = map->ilock_base;
        map->ilock_base = ilock;
        spin_unlock(&map->ilock_spin);
}

void
vm_map_deinterlock(vm_map_t map, struct vm_map_ilock *ilock)
{
        struct vm_map_ilock *scan;
        struct vm_map_ilock **scanp;

        spin_lock(&map->ilock_spin);
        scanp = &map->ilock_base;
        while ((scan = *scanp) != NULL) {
                if (scan == ilock) {
                        *scanp = ilock->next;
                        spin_unlock(&map->ilock_spin);
                        if (ilock->flags & ILOCK_WAITING)
                                wakeup(ilock);
                        return;
                }
                scanp = &scan->next;
        }
        spin_unlock(&map->ilock_spin);
        panic("vm_map_deinterlock: missing ilock!");
}

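/*
 * Illustrative sketch (not compiled): an interlock brackets an address
 * range so that overlapping users serialize -- any second caller whose
 * range overlaps sleeps in vm_map_interlock() until the first calls
 * vm_map_deinterlock() on the same ilock.  The function name is
 * hypothetical.
 */
#if 0
static void
ilock_bracket_example(vm_map_t map, vm_offset_t beg, vm_offset_t end)
{
        struct vm_map_ilock ilock;

        vm_map_interlock(map, &ilock, beg, end);
        /* ... operate on [beg, end) with overlapping callers excluded ... */
        vm_map_deinterlock(map, &ilock);
}
#endif
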
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

/*
 * Debugging only
 */
DB_SHOW_COMMAND(map, vm_map_print)
{
        static int nlines;
        /* XXX convert args. */
        vm_map_t map = (vm_map_t)addr;
        boolean_t full = have_addr;

        vm_map_entry_t entry;

        db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
            (void *)map,
            (void *)map->pmap, map->nentries, map->timestamp);
        nlines++;

        if (!full && db_indent)
                return;

        db_indent += 2;
        for (entry = map->header.next; entry != &map->header;
            entry = entry->next) {
                db_iprintf("map entry %p: start=%p, end=%p\n",
                    (void *)entry, (void *)entry->start, (void *)entry->end);
                nlines++;
                {
                        static char *inheritance_name[4] =
                            {"share", "copy", "none", "donate_copy"};

                        db_iprintf(" prot=%x/%x/%s",
                            entry->protection,
                            entry->max_protection,
                            inheritance_name[(int)(unsigned char)
                                entry->inheritance]);
                        if (entry->wired_count != 0)
                                db_printf(", wired");
                }
                switch(entry->maptype) {
                case VM_MAPTYPE_SUBMAP:
                        /* XXX no %qd in kernel.  Truncate entry->offset. */
                        db_printf(", share=%p, offset=0x%lx\n",
                            (void *)entry->object.sub_map,
                            (long)entry->offset);
                        nlines++;
                        if ((entry->prev == &map->header) ||
                            (entry->prev->object.sub_map !=
                                entry->object.sub_map)) {
                                db_indent += 2;
                                vm_map_print((db_expr_t)(intptr_t)
                                             entry->object.sub_map,
                                             full, 0, NULL);
                                db_indent -= 2;
                        }
                        break;
                case VM_MAPTYPE_NORMAL:
                case VM_MAPTYPE_VPAGETABLE:
                        /* XXX no %qd in kernel.  Truncate entry->offset. */
                        db_printf(", object=%p, offset=0x%lx",
                            (void *)entry->object.vm_object,
                            (long)entry->offset);
                        if (entry->eflags & MAP_ENTRY_COW)
                                db_printf(", copy (%s)",
                                    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ?
                                    "needed" : "done");
                        db_printf("\n");
                        nlines++;

                        if ((entry->prev == &map->header) ||
                            (entry->prev->object.vm_object !=
                                entry->object.vm_object)) {
                                db_indent += 2;
                                vm_object_print((db_expr_t)(intptr_t)
                                                entry->object.vm_object,
                                                full, 0, NULL);
                                nlines += 4;
                                db_indent -= 2;
                        }
                        break;
                case VM_MAPTYPE_UKSMAP:
                        db_printf(", uksmap=%p, offset=0x%lx",
                            (void *)entry->object.uksmap,
                            (long)entry->offset);
                        if (entry->eflags & MAP_ENTRY_COW)
                                db_printf(", copy (%s)",
                                    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ?
                                    "needed" : "done");
                        db_printf("\n");
                        nlines++;
                        break;
                default:
                        break;
                }
        }
        db_indent -= 2;
        if (db_indent == 0)
                nlines = 0;
}

/*
 * Debugging only
 */
DB_SHOW_COMMAND(procvm, procvm)
{
        struct proc *p;

        if (have_addr) {
                p = (struct proc *) addr;
        } else {
                p = curproc;
        }

        db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
            (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
            (void *)vmspace_pmap(p->p_vmspace));

        vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
}

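/*
 * Example usage from the ddb prompt (the command names follow the
 * DB_SHOW_COMMAND definitions above):
 *
 *      db> show map <vm_map-address>
 *      db> show procvm <proc-address>
 *
 * "show procvm" with no address defaults to curproc.
 */
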
#endif /* DDB */