kernel - Tag vm_map_entry structure, slight optimization to zalloc, misc.
dragonfly.git: sys/vm/vm_kern.c

/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 */

/*
 *	Kernel memory management.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
struct vm_map kernel_map;
struct vm_map clean_map;
struct vm_map buffer_map;
/*
 * Allocate pageable swap-backed anonymous memory
 */
void *
kmem_alloc_swapbacked(kmem_anon_desc_t *kp, vm_size_t size, vm_subsys_t id)
{
        int error;
        vm_pindex_t npages;

        size = round_page(size);
        npages = size / PAGE_SIZE;

        if (kp->map == NULL)
                kp->map = &kernel_map;
        kp->data = vm_map_min(&kernel_map);
        kp->size = size;
        kp->object = vm_object_allocate(OBJT_DEFAULT, npages);

        error = vm_map_find(kp->map, kp->object, NULL, 0,
                            &kp->data, size,
                            PAGE_SIZE, TRUE,
                            VM_MAPTYPE_NORMAL, id,
                            VM_PROT_ALL, VM_PROT_ALL, 0);
        if (error) {
                kprintf("kmem_alloc_swapbacked: %zd bytes failed %d\n",
                        size, error);
                kp->data = (vm_offset_t)0;
                kmem_free_swapbacked(kp);
                return NULL;
        }
        return ((void *)(intptr_t)kp->data);
}
void
kmem_free_swapbacked(kmem_anon_desc_t *kp)
{
        if (kp->data) {
                /*
                 * The object will be deallocated by kmem_free().
                 */
                kmem_free(kp->map, kp->data, kp->size);
                kp->data = (vm_offset_t)0;
        } else {
                /*
                 * Failure during allocation, object must be deallocated
                 * manually.
                 */
                vm_object_deallocate(kp->object);
        }
        kp->object = NULL;
}
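
#if 0
/*
 * Illustrative sketch (not compiled): a typical alloc/free pairing for the
 * swap-backed allocator above.  The descriptor is zeroed so kp->map defaults
 * to &kernel_map; the size and the VM_SUBSYS_RESERVED id are used purely for
 * illustration, a real caller would pass its own vm_subsys_t.
 */
static void
example_swapbacked(void)
{
        kmem_anon_desc_t kp;
        void *ptr;

        bzero(&kp, sizeof(kp));
        ptr = kmem_alloc_swapbacked(&kp, 128 * 1024, VM_SUBSYS_RESERVED);
        if (ptr == NULL)
                return;                 /* allocation failed, kp cleaned up */
        /* ... use the pageable, swap-backed buffer ... */
        kmem_free_swapbacked(&kp);
}
#endif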
/*
 * Allocate pageable memory to the kernel's address map.  "map" must
 * be kernel_map or a submap of kernel_map.  The caller must adjust the
 * map or enter VM pages itself.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_pageable(vm_map_t map, vm_size_t size, vm_subsys_t id)
{
        vm_offset_t addr;
        int result;

        size = round_page(size);
        addr = vm_map_min(map);
        result = vm_map_find(map, NULL, NULL,
                             (vm_offset_t) 0, &addr, size,
                             PAGE_SIZE, TRUE,
                             VM_MAPTYPE_NORMAL, id,
                             VM_PROT_ALL, VM_PROT_ALL, 0);
        if (result != KERN_SUCCESS)
                return (0);
        return (addr);
}
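
#if 0
/*
 * Illustrative sketch (not compiled): obtaining and releasing pageable KVA
 * from kernel_map.  The size and the VM_SUBSYS_RESERVED id are illustrative
 * only; the release via kmem_free() assumes no caller-private pmap entries
 * remain in the range.
 */
static void
example_pageable(void)
{
        vm_offset_t va;
        vm_size_t size = 4 * PAGE_SIZE;

        va = kmem_alloc_pageable(&kernel_map, size, VM_SUBSYS_RESERVED);
        if (va == 0)
                return;                 /* no KVA available */
        /* ... enter pages or let vm_fault populate the range ... */
        kmem_free(&kernel_map, va, size);
}
#endif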
/*
 * Same as kmem_alloc_pageable, except that it creates a nofault entry.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_subsys_t id,
                   vm_size_t align)
{
        vm_offset_t addr;
        int result;

        size = round_page(size);
        addr = vm_map_min(map);
        result = vm_map_find(map, NULL, NULL,
                             (vm_offset_t) 0, &addr, size,
                             align, TRUE,
                             VM_MAPTYPE_NORMAL, id,
                             VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
        if (result != KERN_SUCCESS)
                return (0);
        return (addr);
}
/*
 * Allocate wired-down memory in the kernel's address map or a submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc3(vm_map_t map, vm_size_t size, vm_subsys_t id, int kmflags)
{
        vm_offset_t addr;
        vm_offset_t gstart;
        vm_offset_t i;
        int count;
        int cow;

        size = round_page(size);

        if (kmflags & KM_KRESERVE)
                count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
        else
                count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

        if (kmflags & KM_STACK) {
                cow = MAP_IS_KSTACK;
                gstart = PAGE_SIZE;
        } else {
                cow = 0;
                gstart = 0;
        }

        /*
         * Use the kernel object for wired-down kernel pages.  Assume that no
         * region of the kernel object is referenced more than once.
         *
         * Locate sufficient space in the map.  This will give us the final
         * virtual address for the new memory, and thus will tell us the
         * offset within the kernel map.
         */
        vm_map_lock(map);
        if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE,
                             0, &addr)) {
                vm_map_unlock(map);
                if (kmflags & KM_KRESERVE)
                        vm_map_entry_krelease(count);
                else
                        vm_map_entry_release(count);
                return (0);
        }
        vm_object_hold(&kernel_object);
        vm_object_reference_locked(&kernel_object);
        vm_map_insert(map, &count,
                      &kernel_object, NULL,
                      addr, addr, addr + size,
                      VM_MAPTYPE_NORMAL, id,
                      VM_PROT_ALL, VM_PROT_ALL, cow);
        vm_object_drop(&kernel_object);

        vm_map_unlock(map);
        if (kmflags & KM_KRESERVE)
                vm_map_entry_krelease(count);
        else
                vm_map_entry_release(count);

        /*
         * Guarantee that there are pages already in this object before
         * calling vm_map_wire.  This is to prevent the following
         * scenario:
         *
         * 1) Threads have swapped out, so that there is a pager for the
         *    kernel_object.
         * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
         *    for it.
         * 3) vm_map_wire calls vm_fault; there is no page, but there is a
         *    pager, so we call pager_data_request.  But the kmsg zone is
         *    empty, so we must kmem_alloc.
         * 4) goto 1
         * 5) Even if the kmsg zone is not empty: when we get the data back
         *    from the pager, it will be (very stale) non-zero data.
         *    kmem_alloc is defined to return zero-filled memory.
         *
         * We're intentionally not activating the pages we allocate to prevent
         * a race with page-out.  vm_map_wire will wire the pages.
         */
        vm_object_hold(&kernel_object);
        for (i = gstart; i < size; i += PAGE_SIZE) {
                vm_page_t mem;

                mem = vm_page_grab(&kernel_object, OFF_TO_IDX(addr + i),
                                   VM_ALLOC_FORCE_ZERO | VM_ALLOC_NORMAL |
                                   VM_ALLOC_RETRY);
                vm_page_unqueue_nowakeup(mem);
                vm_page_wakeup(mem);
        }
        vm_object_drop(&kernel_object);

        /*
         * And finally, mark the data as non-pageable.
         *
         * NOTE: vm_map_wire() handles any kstack guard.
         */
        vm_map_wire(map, addr, addr + size, kmflags);

        return (addr);
}
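
#if 0
/*
 * Illustrative sketch (not compiled): a plain wired allocation with no
 * special kmflags, released with kmem_free().  Passing KM_STACK instead of 0
 * would leave the first page as a guard (gstart = PAGE_SIZE above), and
 * KM_KRESERVE would draw map entries from the kernel reserve.  The size and
 * the VM_SUBSYS_RESERVED id are illustrative only.
 */
static void
example_wired_alloc(void)
{
        vm_offset_t va;
        vm_size_t size = 64 * 1024;

        va = kmem_alloc3(&kernel_map, size, VM_SUBSYS_RESERVED, 0);
        if (va == 0)
                return;                 /* no KVA available */
        /* ... use the wired, zero-filled region ... */
        kmem_free(&kernel_map, va, size);
}
#endif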
/*
 * Release a region of kernel virtual memory allocated with kmem_alloc,
 * and return the physical pages associated with that region.
 *
 * WARNING!  If the caller entered pages into the region using pmap_kenter()
 *	     it must remove the pages using pmap_kremove[_quick]() before
 *	     freeing the underlying kmem, otherwise resident_count will be
 *	     mistabulated.
 *
 * No requirements.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
        vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
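
#if 0
/*
 * Illustrative sketch (not compiled) of the WARNING above: a caller that
 * entered its own page into the range with pmap_kenter() must remove that
 * mapping with pmap_kremove() before handing the KVA back, or the pmap's
 * resident_count is mistabulated.  The single-page size is illustrative.
 */
static void
example_kenter_teardown(vm_offset_t va, vm_page_t m)
{
        pmap_kenter(va, VM_PAGE_TO_PHYS(m));    /* caller-entered mapping */
        /* ... use the mapping ... */
        pmap_kremove(va);                       /* must precede kmem_free() */
        kmem_free(&kernel_map, va, PAGE_SIZE);
}
#endif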
/*
 * Used to break a system map into smaller maps, usually to reduce
 * contention and to provide large KVA spaces for subsystems like the
 * buffer cache.
 *
 *	parent		Map to take range from
 *	result
 *	size		Size of range to find
 *	min, max	Returned endpoints of map
 *	pageable	Can the region be paged
 *
 * No requirements.
 */
void
kmem_suballoc(vm_map_t parent, vm_map_t result,
              vm_offset_t *min, vm_offset_t *max, vm_size_t size)
{
        int ret;

        size = round_page(size);

        *min = (vm_offset_t) vm_map_min(parent);
        ret = vm_map_find(parent, NULL, NULL,
                          (vm_offset_t) 0, min, size,
                          PAGE_SIZE, TRUE,
                          VM_MAPTYPE_UNSPECIFIED, VM_SUBSYS_SYSMAP,
                          VM_PROT_ALL, VM_PROT_ALL, 0);
        if (ret != KERN_SUCCESS) {
                kprintf("kmem_suballoc: bad status return of %d.\n", ret);
                panic("kmem_suballoc");
        }
        *max = *min + size;
        pmap_reference(vm_map_pmap(parent));
        vm_map_init(result, *min, *max, vm_map_pmap(parent));
        if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
                panic("kmem_suballoc: unable to change range to submap");
}
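
#if 0
/*
 * Illustrative sketch (not compiled): carving a submap out of kernel_map,
 * in the style used for buffer_map/clean_map at startup.  "examplemap" and
 * the 32MB size are hypothetical; a real caller keeps the submap in a
 * long-lived structure.
 */
static struct vm_map examplemap;        /* hypothetical submap */

static void
example_suballoc(void)
{
        vm_offset_t minaddr, maxaddr;

        kmem_suballoc(&kernel_map, &examplemap, &minaddr, &maxaddr,
                      32 * 1024 * 1024);
        /* allocations may now be made from &examplemap */
}
#endif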
/*
 * Allocates pageable memory from a sub-map of the kernel.  If the submap
 * has no room, the caller sleeps waiting for more memory in the submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size, vm_subsys_t id)
{
        vm_offset_t addr;
        int count;

        size = round_page(size);

        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

        for (;;) {
                /*
                 * To make this work for more than one map, use the map's lock
                 * to lock out sleepers/wakers.
                 */
                vm_map_lock(map);
                if (vm_map_findspace(map, vm_map_min(map),
                                     size, PAGE_SIZE, 0, &addr) == 0) {
                        break;
                }
                /* no space now; see if we can ever get space */
                if (vm_map_max(map) - vm_map_min(map) < size) {
                        vm_map_entry_release(count);
                        vm_map_unlock(map);
                        return (0);
                }
                vm_map_unlock(map);
                tsleep(map, 0, "kmaw", 0);
        }
        vm_map_insert(map, &count,
                      NULL, NULL,
                      (vm_offset_t) 0, addr, addr + size,
                      VM_MAPTYPE_NORMAL, id,
                      VM_PROT_ALL, VM_PROT_ALL, 0);
        vm_map_unlock(map);
        vm_map_entry_release(count);

        return (addr);
}
/*
 * Allocates a region from the kernel address map and physical pages
 * within the specified address range to the kernel object.  Creates a
 * wired mapping from this region to these pages, and returns the
 * region's starting virtual address.  The allocated pages are not
 * necessarily physically contiguous.  If M_ZERO is specified through the
 * given flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vm_map_t map, vm_size_t size, vm_subsys_t id,
                int flags, vm_paddr_t low,
                vm_paddr_t high, vm_memattr_t memattr)
{
        vm_offset_t addr, i, offset;
        vm_page_t m;
        int count;

        size = round_page(size);
        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        vm_map_lock(map);
        if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE,
                             flags, &addr)) {
                vm_map_unlock(map);
                vm_map_entry_release(count);
                return (0);
        }
        offset = addr - vm_map_min(&kernel_map);
        vm_object_hold(&kernel_object);
        vm_object_reference_locked(&kernel_object);
        vm_map_insert(map, &count,
                      &kernel_object, NULL,
                      offset, addr, addr + size,
                      VM_MAPTYPE_NORMAL, id,
                      VM_PROT_ALL, VM_PROT_ALL, 0);
        vm_map_unlock(map);
        vm_map_entry_release(count);
        vm_object_drop(&kernel_object);
        for (i = 0; i < size; i += PAGE_SIZE) {
                m = vm_page_alloc_contig(low, high, PAGE_SIZE, 0,
                                         PAGE_SIZE, memattr);
                if (!m) {
                        return (0);
                }
                vm_object_hold(&kernel_object);
                vm_page_insert(m, &kernel_object, OFF_TO_IDX(offset + i));
                vm_object_drop(&kernel_object);
                if (flags & M_ZERO)
                        pmap_zero_page(VM_PAGE_TO_PHYS(m));
                m->valid = VM_PAGE_BITS_ALL;
        }
        vm_map_wire(map, addr, addr + size, 0);
        return (addr);
}
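
#if 0
/*
 * Illustrative sketch (not compiled): wired, zeroed pages constrained to
 * physical addresses below 4GB, e.g. for a DMA-limited device.  The size,
 * the VM_SUBSYS_RESERVED id and the assumption that VM_MEMATTR_DEFAULT is
 * the desired memory attribute are all illustrative.
 */
static void
example_alloc_attr(void)
{
        vm_offset_t va;
        vm_size_t size = 16 * PAGE_SIZE;

        va = kmem_alloc_attr(&kernel_map, size, VM_SUBSYS_RESERVED,
                             M_ZERO, 0, 0xFFFFFFFFUL, VM_MEMATTR_DEFAULT);
        if (va == 0)
                return;                 /* allocation failed */
        /* ... use the low-physical, wired buffer ... */
        kmem_free(&kernel_map, va, size);
}
#endif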
/*
 * Returns memory to a submap of the kernel, and wakes up any processes
 * waiting for memory in that map.
 *
 * No requirements.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
        int count;

        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        vm_map_lock(map);
        vm_map_delete(map, trunc_page(addr), round_page(addr + size), &count);
        wakeup(map);
        vm_map_unlock(map);
        vm_map_entry_release(count);
}
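
#if 0
/*
 * Illustrative sketch (not compiled): pairing kmem_alloc_wait() with
 * kmem_free_wakeup() on a submap.  "submap" would be a map previously set
 * up with kmem_suballoc(); the size and VM_SUBSYS_RESERVED id are
 * illustrative only.
 */
static void
example_submap_cycle(vm_map_t submap)
{
        vm_offset_t va;
        vm_size_t size = 64 * 1024;

        /* Sleeps until space becomes available in the submap. */
        va = kmem_alloc_wait(submap, size, VM_SUBSYS_RESERVED);
        if (va == 0)
                return;                 /* submap smaller than the request */
        /* ... use the pageable region ... */

        /* Releases the range and wakes sleepers in kmem_alloc_wait(). */
        kmem_free_wakeup(submap, va, size);
}
#endif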
/*
 * Create the kernel_map for (KvaStart,KvaEnd) and insert mappings to
 * cover areas already allocated or reserved thus far.
 *
 * The areas (virtual_start, virtual_end) and (virtual2_start, virtual2_end)
 * are available so the cutouts are the areas around these ranges between
 * KvaStart and KvaEnd.
 *
 * Depend on the zalloc bootstrap cache to get our vm_map_entry_t.
 * Called from the low level boot code only.
 */
void
kmem_init(void)
{
        vm_offset_t addr;
        vm_map_t m;
        int count;

        m = vm_map_create(&kernel_map, &kernel_pmap, KvaStart, KvaEnd);
        vm_map_lock(m);
        /* N.B.: cannot use kgdb to debug, starting with this assignment ... */
        m->system_map = 1;
        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        addr = KvaStart;
        if (virtual2_start) {
                if (addr < virtual2_start) {
                        vm_map_insert(m, &count,
                                      NULL, NULL,
                                      (vm_offset_t) 0, addr, virtual2_start,
                                      VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
                                      VM_PROT_ALL, VM_PROT_ALL, 0);
                }
                addr = virtual2_end;
        }
        if (addr < virtual_start) {
                vm_map_insert(m, &count,
                              NULL, NULL,
                              (vm_offset_t) 0, addr, virtual_start,
                              VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
                              VM_PROT_ALL, VM_PROT_ALL, 0);
        }
        addr = virtual_end;
        if (addr < KvaEnd) {
                vm_map_insert(m, &count,
                              NULL, NULL,
                              (vm_offset_t) 0, addr, KvaEnd,
                              VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
                              VM_PROT_ALL, VM_PROT_ALL, 0);
        }
        /* ... and ending with the completion of the above `insert' */
        vm_map_unlock(m);
        vm_map_entry_release(count);
}
/*
 * No requirements.
 */
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
        unsigned long ksize = KvaSize;

        return sysctl_handle_long(oidp, &ksize, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_ULONG|CTLFLAG_RD,
            0, 0, kvm_size, "LU", "Size of KVM");

/*
 * No requirements.
 */
static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
        unsigned long kfree = virtual_end - kernel_vm_end;

        return sysctl_handle_long(oidp, &kfree, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_ULONG|CTLFLAG_RD,
            0, 0, kvm_free, "LU", "Amount of KVM free");
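
#if 0
/*
 * Illustrative sketch (not compiled with the kernel): the two sysctls
 * defined above, vm.kvm_size and vm.kvm_free, can be read from userland
 * with sysctlbyname(3), as in this small standalone program.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        unsigned long ksize = 0, kfree = 0;
        size_t len;

        len = sizeof(ksize);
        sysctlbyname("vm.kvm_size", &ksize, &len, NULL, 0);
        len = sizeof(kfree);
        sysctlbyname("vm.kvm_free", &kfree, &len, NULL, 0);
        printf("KVM size %lu, KVM free %lu\n", ksize, kfree);
        return 0;
}
#endif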