/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 */

/*
 *	Kernel memory management.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
struct vm_map kernel_map;
struct vm_map clean_map;
struct vm_map buffer_map;
static __inline
int
KMVMCPU(int kmflags)
{
	if ((kmflags & KM_CPU_SPEC) == 0)
		return 0;
	return VM_ALLOC_CPU(KM_GETCPU(kmflags));
}
/*
 * Allocate pageable swap-backed anonymous memory
 */
void *
kmem_alloc_swapbacked(kmem_anon_desc_t *kp, vm_size_t size, vm_subsys_t id)
{
	int error;
	vm_pindex_t npages;

	size = round_page(size);
	npages = size / PAGE_SIZE;

	if (kp->map == NULL)
		kp->map = &kernel_map;
	kp->data = vm_map_min(&kernel_map);
	kp->size = size;
	kp->object = vm_object_allocate(OBJT_DEFAULT, npages);

	error = vm_map_find(kp->map, kp->object, NULL, 0,
			    &kp->data, size,
			    PAGE_SIZE, TRUE,
			    VM_MAPTYPE_NORMAL, id,
			    VM_PROT_ALL, VM_PROT_ALL, 0);
	if (error) {
		kprintf("kmem_alloc_swapbacked: %zd bytes failed %d\n",
			size, error);
		kp->data = (vm_offset_t)0;
		kmem_free_swapbacked(kp);
		return NULL;
	}
	return ((void *)(intptr_t)kp->data);
}
void
kmem_free_swapbacked(kmem_anon_desc_t *kp)
{
	if (kp->data) {
		/*
		 * The object will be deallocated by kmem_free().
		 */
		kmem_free(kp->map, kp->data, kp->size);
		kp->data = (vm_offset_t)0;
	} else {
		/*
		 * Failure during allocation, object must be deallocated
		 * manually.
		 */
		vm_object_deallocate(kp->object);
	}
	kp->object = NULL;
}
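
/*
 * Illustrative sketch, not part of the original file: a minimal caller
 * pairing kmem_alloc_swapbacked() with kmem_free_swapbacked().  The
 * example_* identifiers are invented for the sketch and the vm_subsys_t
 * tag is left to the caller; the block is compiled out.
 */
#if 0
static kmem_anon_desc_t example_desc;

static void *
example_scratch_alloc(vm_size_t bytes, vm_subsys_t id)
{
	/*
	 * A zeroed descriptor leaves kp->map NULL, so the allocation
	 * defaults to &kernel_map.  The descriptor must stay valid for
	 * the allocation's lifetime; kmem_free_swapbacked() consults it.
	 */
	bzero(&example_desc, sizeof(example_desc));
	return (kmem_alloc_swapbacked(&example_desc, bytes, id));
}

static void
example_scratch_free(void)
{
	kmem_free_swapbacked(&example_desc);
}
#endif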
/*
 * Allocate pageable memory in the kernel's address map.  "map" must
 * be kernel_map or a submap of kernel_map.  The caller must adjust
 * the map or enter VM pages itself.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_pageable(vm_map_t map, vm_size_t size, vm_subsys_t id)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, NULL,
			     (vm_offset_t) 0, &addr, size,
			     PAGE_SIZE, TRUE,
			     VM_MAPTYPE_NORMAL, id,
			     VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}
/*
 * Same as kmem_alloc_pageable(), except that it creates a nofault entry.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_subsys_t id,
		   vm_size_t align)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, NULL,
			     (vm_offset_t) 0, &addr, size,
			     align, TRUE,
			     VM_MAPTYPE_NORMAL, id,
			     VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}
/*
 * Allocate wired-down memory in the kernel's address map or a submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc3(vm_map_t map, vm_size_t size, vm_subsys_t id, int kmflags)
{
	vm_offset_t addr;
	vm_offset_t gstart;
	vm_offset_t i;
	int count;
	int cow;

	size = round_page(size);

	if (kmflags & KM_KRESERVE)
		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
	else
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	if (kmflags & KM_STACK) {
		cow = MAP_IS_KSTACK;
		gstart = PAGE_SIZE;
	} else {
		cow = 0;
		gstart = 0;
	}

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 *
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE,
			     0, &addr)) {
		vm_map_unlock(map);
		if (kmflags & KM_KRESERVE)
			vm_map_entry_krelease(count);
		else
			vm_map_entry_release(count);
		return (0);
	}
	vm_object_hold(&kernel_object);
	vm_object_reference_locked(&kernel_object);
	vm_map_insert(map, &count,
		      &kernel_object, NULL,
		      addr, addr, addr + size,
		      VM_MAPTYPE_NORMAL, id,
		      VM_PROT_ALL, VM_PROT_ALL, cow);
	vm_object_drop(&kernel_object);

	vm_map_unlock(map);
	if (kmflags & KM_KRESERVE)
		vm_map_entry_krelease(count);
	else
		vm_map_entry_release(count);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire().  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_alloc()ing a
	 *    new page for it.
	 * 3) vm_map_wire() calls vm_fault(); there is no page, but there
	 *    is a pager, so we call pager_data_request().  But the kmsg
	 *    zone is empty, so we must kmem_alloc().
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data
	 *    back from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc() is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to
	 * prevent a race with page-out.  vm_map_wire() will wire the pages.
	 */
	vm_object_hold(&kernel_object);
	for (i = gstart; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(&kernel_object, OFF_TO_IDX(addr + i),
				   VM_ALLOC_FORCE_ZERO | VM_ALLOC_NORMAL |
				   VM_ALLOC_RETRY | KMVMCPU(kmflags));
		vm_page_unqueue_nowakeup(mem);
		vm_page_wakeup(mem);
	}
	vm_object_drop(&kernel_object);

	/*
	 * And finally, mark the data as non-pageable.
	 *
	 * NOTE: vm_map_wire() handles any kstack guard.
	 */
	vm_map_wire(map, addr, addr + size, kmflags);

	return (addr);
}
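
/*
 * Illustrative sketch, not part of the original file: typical flag
 * selection for kmem_alloc3().  KM_STACK requests a kstack guard page
 * (gstart above skips the first page), KM_KRESERVE draws on the kernel's
 * reserved map entries, and KM_CPU_SPEC plus an encoded cpu (decoded by
 * KMVMCPU() above) localizes the backing pages.  The wrapper name is
 * invented; the block is compiled out.
 */
#if 0
static vm_offset_t
example_alloc_wired(vm_map_t map, vm_size_t bytes, vm_subsys_t id,
		    boolean_t is_kstack)
{
	/* Kernel stacks want the guard-page treatment; everything
	 * else takes a plain wired allocation. */
	int kmflags = is_kstack ? KM_STACK : 0;

	return (kmem_alloc3(map, bytes, id, kmflags));
}
#endif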
/*
 * Release a region of kernel virtual memory allocated with kmem_alloc,
 * and return the physical pages associated with that region.
 *
 * WARNING!  If the caller entered pages into the region using
 * pmap_kenter() it must remove the pages using pmap_kremove[_quick]()
 * before freeing the underlying kmem, otherwise resident_count will be
 * mistabulated.
 *
 * No requirements.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
/*
 * Used to break a system map into smaller maps, usually to reduce
 * contention and to provide large KVA spaces for subsystems like the
 * buffer cache.
 *
 *	parent		Map to take range from
 *	result		Map to initialize as the new submap
 *	size		Size of range to find
 *	min, max	Returned endpoints of map
 *
 * No requirements.
 */
void
kmem_suballoc(vm_map_t parent, vm_map_t result,
	      vm_offset_t *min, vm_offset_t *max, vm_size_t size)
{
	int ret;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, NULL,
			  (vm_offset_t) 0, min, size,
			  PAGE_SIZE, TRUE,
			  VM_MAPTYPE_UNSPECIFIED, VM_SUBSYS_SYSMAP,
			  VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		kprintf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	vm_map_init(result, *min, *max, vm_map_pmap(parent));
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
}
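
/*
 * Illustrative sketch, not part of the original file: carving a submap
 * out of kernel_map the way clean_map and buffer_map are set up at
 * boot.  The example_* names are invented; the block is compiled out.
 */
#if 0
static struct vm_map example_map;
static vm_offset_t example_min, example_max;

static void
example_make_submap(vm_size_t bytes)
{
	/* On return, [example_min, example_max) is managed by example_map. */
	kmem_suballoc(&kernel_map, &example_map,
		      &example_min, &example_max, bytes);
}
#endif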
/*
 * Allocates pageable memory from a sub-map of the kernel.  If the submap
 * has no room, the caller sleeps waiting for more memory in the submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size, vm_subsys_t id)
{
	vm_offset_t addr;
	int count;

	size = round_page(size);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's
		 * lock to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map),
				     size, PAGE_SIZE, 0, &addr) == 0) {
			break;
		}
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_entry_release(count);
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, 0, "kmaw", 0);
	}
	vm_map_insert(map, &count,
		      NULL, NULL,
		      (vm_offset_t) 0, addr, addr + size,
		      VM_MAPTYPE_NORMAL, id,
		      VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (addr);
}
/*
 * Allocates a region from the kernel address map and physical pages
 * within the specified address range to the kernel object.  Creates a
 * wired mapping from this region to these pages, and returns the
 * region's starting virtual address.  The allocated pages are not
 * necessarily physically contiguous.  If M_ZERO is specified through the
 * given flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vm_map_t map, vm_size_t size, vm_subsys_t id,
		int flags, vm_paddr_t low,
		vm_paddr_t high, vm_memattr_t memattr)
{
	vm_offset_t addr, i, offset;
	vm_page_t m;
	int count;

	size = round_page(size);
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE,
			     flags, &addr)) {
		vm_map_unlock(map);
		vm_map_entry_release(count);
		return (0);
	}
	offset = addr - vm_map_min(&kernel_map);
	vm_object_hold(&kernel_object);
	vm_object_reference_locked(&kernel_object);
	vm_map_insert(map, &count,
		      &kernel_object, NULL,
		      offset, addr, addr + size,
		      VM_MAPTYPE_NORMAL, id,
		      VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	vm_map_entry_release(count);
	vm_object_drop(&kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_alloc_contig(low, high, PAGE_SIZE, 0,
					 PAGE_SIZE, memattr);
		if (!m) {
			return (0);
		}
		vm_object_hold(&kernel_object);
		vm_page_insert(m, &kernel_object, OFF_TO_IDX(offset + i));
		vm_object_drop(&kernel_object);
		if (flags & M_ZERO)
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
		m->valid = VM_PAGE_BITS_ALL;
	}
	vm_map_wire(map, addr, addr + size, 0);
	return (addr);
}
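
/*
 * Illustrative sketch, not part of the original file: using
 * kmem_alloc_attr() to obtain zeroed, wired pages constrained below
 * 4GB, e.g. for a device limited to 32-bit DMA addresses.  The wrapper
 * name is invented and VM_MEMATTR_DEFAULT is an assumed placeholder
 * memory attribute; the block is compiled out.
 */
#if 0
static vm_offset_t
example_alloc_dma32(vm_size_t bytes, vm_subsys_t id)
{
	/* low = 0, high = 4GB-1 bounds the physical pages selected. */
	return (kmem_alloc_attr(&kernel_map, bytes, id, M_ZERO,
				0, 0xffffffffUL, VM_MEMATTR_DEFAULT));
}
#endif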
/*
 * Returns memory to a submap of the kernel, and wakes up any processes
 * waiting for memory in that map.
 *
 * No requirements.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	vm_map_delete(map, trunc_page(addr), round_page(addr + size), &count);
	wakeup(map);
	vm_map_unlock(map);
	vm_map_entry_release(count);
}
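
/*
 * Illustrative sketch, not part of the original file: the intended
 * pairing of kmem_alloc_wait() and kmem_free_wakeup() on a submap.
 * The allocator tsleep()s on the map until kmem_free_wakeup() calls
 * wakeup() on the same map address.  The example_* names are invented;
 * the block is compiled out.
 */
#if 0
static vm_offset_t
example_submap_alloc(vm_map_t example_map, vm_size_t bytes, vm_subsys_t id)
{
	/* Returns 0 only if the submap can never satisfy the request. */
	return (kmem_alloc_wait(example_map, bytes, id));
}

static void
example_submap_free(vm_map_t example_map, vm_offset_t addr, vm_size_t bytes)
{
	/* Frees the range and wakes any thread sleeping in the allocator. */
	kmem_free_wakeup(example_map, addr, bytes);
}
#endif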
/*
 * Create the kernel_map for (KvaStart,KvaEnd) and insert mappings to
 * cover areas already allocated or reserved thus far.
 *
 * The areas (virtual_start, virtual_end) and (virtual2_start, virtual2_end)
 * are available so the cutouts are the areas around these ranges between
 * KvaStart and KvaEnd.
 *
 * Depend on the zalloc bootstrap cache to get our vm_map_entry_t.
 * Called from the low level boot code only.
 */
void
kmem_init(void)
{
	vm_offset_t addr;
	vm_map_t m;
	int count;

	m = &kernel_map;
	vm_map_init(m, KvaStart, KvaEnd, &kernel_pmap);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	m->system_map = 1;
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	addr = KvaStart;
	if (virtual2_start) {
		if (addr < virtual2_start) {
			vm_map_insert(m, &count,
				      NULL, NULL,
				      (vm_offset_t) 0, addr, virtual2_start,
				      VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
				      VM_PROT_ALL, VM_PROT_ALL, 0);
		}
		addr = virtual2_end;
	}
	if (addr < virtual_start) {
		vm_map_insert(m, &count,
			      NULL, NULL,
			      (vm_offset_t) 0, addr, virtual_start,
			      VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
			      VM_PROT_ALL, VM_PROT_ALL, 0);
	}
	addr = virtual_end;
	if (addr < KvaEnd) {
		vm_map_insert(m, &count,
			      NULL, NULL,
			      (vm_offset_t) 0, addr, KvaEnd,
			      VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
			      VM_PROT_ALL, VM_PROT_ALL, 0);
	}
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
	vm_map_entry_release(count);
}
/*
 * No requirements.
 */
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize = KvaSize;

	return sysctl_handle_long(oidp, &ksize, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, kvm_size, "LU", "Size of KVM");

/*
 * No requirements.
 */
static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree = virtual_end - kernel_vm_end;

	return sysctl_handle_long(oidp, &kfree, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, kvm_free, "LU", "Amount of KVM free");
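
/*
 * Illustrative sketch, not part of the original file: a userland
 * program reading the two sysctls exported above via sysctlbyname(3).
 * Compiled out here since it does not belong in the kernel.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	unsigned long val;
	size_t len;

	len = sizeof(val);
	if (sysctlbyname("vm.kvm_size", &val, &len, NULL, 0) == 0)
		printf("KVM size: %lu bytes\n", val);
	len = sizeof(val);
	if (sysctlbyname("vm.kvm_free", &val, &len, NULL, 0) == 0)
		printf("KVM free: %lu bytes\n", val);
	return (0);
}
#endif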