/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 * $DragonFly: src/sys/vm/vm_kern.c,v 1.29 2007/06/07 23:14:29 dillon Exp $
 */
/*
 * Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

struct vm_map kernel_map;
struct vm_map clean_map;
struct vm_map buffer_map;
/*
 * Allocate pageable memory to the kernel's address map.  "map" must
 * be kernel_map or a submap of kernel_map.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_pageable(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, PAGE_SIZE,
			     TRUE, VM_MAPTYPE_NORMAL,
			     VM_PROT_ALL, VM_PROT_ALL,
			     0);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}
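
/*
 * Usage sketch (hypothetical caller, not part of this file): pageable KVA
 * is normally taken from kernel_map and later released with kmem_free():
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_pageable(&kernel_map, 4 * PAGE_SIZE);
 *	if (va == 0)
 *		return (ENOMEM);
 *	...
 *	kmem_free(&kernel_map, va, 4 * PAGE_SIZE);
 */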
/*
 * Same as kmem_alloc_pageable, except that it creates a nofault entry.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_size_t align)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, align,
			     TRUE, VM_MAPTYPE_NORMAL,
			     VM_PROT_ALL, VM_PROT_ALL,
			     MAP_NOFAULT);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}
/*
 * Allocate wired-down memory in the kernel's address map or a submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc3(vm_map_t map, vm_size_t size, int kmflags)
{
	vm_offset_t addr;
	vm_offset_t i;
	int count;

	size = round_page(size);

	if (kmflags & KM_KRESERVE)
		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
	else
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 *
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE, 0, &addr)) {
		vm_map_unlock(map);
		if (kmflags & KM_KRESERVE)
			vm_map_entry_krelease(count);
		else
			vm_map_entry_release(count);
		return (0);
	}
	vm_object_reference(&kernel_object);
	vm_map_insert(map, &count,
		      &kernel_object, addr, addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);
	vm_map_unlock(map);
	if (kmflags & KM_KRESERVE)
		vm_map_entry_krelease(count);
	else
		vm_map_entry_release(count);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
	 *    for it.
	 * 3) vm_map_wire calls vm_fault; there is no page, but there is a
	 *    pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_wire will wire the pages.
	 */
	lwkt_gettoken(&vm_token);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(&kernel_object, OFF_TO_IDX(addr + i),
				   VM_ALLOC_ZERO | VM_ALLOC_NORMAL |
				   VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}
	lwkt_reltoken(&vm_token);

	/*
	 * And finally, mark the data as non-pageable.
	 */
	vm_map_wire(map, (vm_offset_t)addr, addr + size, kmflags);

	return (addr);
}
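
/*
 * Usage sketch (hypothetical caller; in this tree kmem_alloc() is normally
 * a thin wrapper that invokes kmem_alloc3() with kmflags of 0, see
 * vm_kern.h).  The returned memory is wired and zero-filled:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc3(&kernel_map, 2 * PAGE_SIZE, 0);
 *	if (va == 0)
 *		return (ENOMEM);
 *	...
 *	kmem_free(&kernel_map, va, 2 * PAGE_SIZE);
 */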
/*
 * Release a region of kernel virtual memory allocated with kmem_alloc,
 * and return the physical pages associated with that region.
 *
 * WARNING!  If the caller entered pages into the region using pmap_kenter()
 * it must remove the pages using pmap_kremove[_quick]() before freeing the
 * underlying kmem, otherwise resident_count will be mistabulated.
 *
 * No requirements.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
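
/*
 * Sketch of the pmap_kenter() caveat above (hypothetical caller): pages
 * entered by hand must also be removed by hand before the KVA is freed,
 * or resident_count is thrown off:
 *
 *	pmap_kenter(va, pa);		enter physical page pa at va
 *	...
 *	pmap_kremove(va);		undo the manual entry first
 *	kmem_free(&kernel_map, va, PAGE_SIZE);
 */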
/*
 * Used to break a system map into smaller maps, usually to reduce
 * contention and to provide large KVA spaces for subsystems like the
 * buffer cache.
 *
 *	parent		Map to take range from
 *	result		Map to populate with the new submap
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *
 * No requirements.
 */
void
kmem_suballoc(vm_map_t parent, vm_map_t result,
	      vm_offset_t *min, vm_offset_t *max, vm_size_t size)
{
	int ret;

	size = round_page(size);

	lwkt_gettoken(&vm_token);
	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
			  min, size, PAGE_SIZE,
			  TRUE, VM_MAPTYPE_UNSPECIFIED,
			  VM_PROT_ALL, VM_PROT_ALL,
			  0);
	if (ret != KERN_SUCCESS) {
		kprintf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	vm_map_init(result, *min, *max, vm_map_pmap(parent));
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	lwkt_reltoken(&vm_token);
}
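
/*
 * For example, the clean_map declared at the top of this file is carved
 * out of kernel_map during startup roughly like this (a sketch; the real
 * call site and sizing live in the platform startup code, and
 * clean_map_size is a placeholder name):
 *
 *	vm_offset_t clean_sva, clean_eva;
 *
 *	kmem_suballoc(&kernel_map, &clean_map,
 *		      &clean_sva, &clean_eva, clean_map_size);
 */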
/*
 * Allocates pageable memory from a sub-map of the kernel.  If the submap
 * has no room, the caller sleeps waiting for more memory in the submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int count;

	size = round_page(size);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map),
				     size, PAGE_SIZE, 0, &addr) == 0) {
			break;
		}
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_entry_release(count);
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, 0, "kmaw", 0);
	}
	vm_map_insert(map, &count,
		      NULL, (vm_offset_t) 0,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (addr);
}
/*
 * Returns memory to a submap of the kernel, and wakes up any processes
 * waiting for memory in that map.
 *
 * No requirements.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	vm_map_delete(map, trunc_page(addr), round_page(addr + size), &count);
	wakeup(map);
	vm_map_unlock(map);
	vm_map_entry_release(count);
}
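
/*
 * kmem_alloc_wait() and kmem_free_wakeup() pair up on a submap: the
 * wakeup(map) above releases allocators sleeping in tsleep(map, ...) in
 * kmem_alloc_wait().  Hypothetical caller, with some_submap standing in
 * for a real submap created by kmem_suballoc():
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_wait(&some_submap, size);	may sleep on "kmaw"
 *	...
 *	kmem_free_wakeup(&some_submap, va, size);	wakes other waiters
 */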
/*
 * Create the kernel_map and insert mappings to cover areas already
 * allocated or reserved thus far.  That is, the area (KvaStart,start)
 * and (end,KvaEnd) must be marked as allocated.
 *
 * virtual2_start/end is a cutout between KvaStart and start,
 * for x86_64 due to the location of KERNBASE (at -2G).
 *
 * We could use a min_offset of 0 instead of KvaStart, but since the
 * min_offset is not used for any calculations other than a bounds check
 * it does not affect readability.  KvaStart is more appropriate.
 *
 * Depend on the zalloc bootstrap cache to get our vm_map_entry_t.
 * Called from the low level boot code only.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_offset_t addr;
	vm_map_t m;
	int count;

	m = vm_map_create(&kernel_map, &kernel_pmap, KvaStart, KvaEnd);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	m->system_map = 1;
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	addr = KvaStart;
	if (virtual2_start) {
		if (addr < virtual2_start) {
			vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
				      addr, virtual2_start,
				      VM_MAPTYPE_NORMAL,
				      VM_PROT_ALL, VM_PROT_ALL,
				      0);
		}
		addr = virtual2_end;
	}
	if (addr < start) {
		vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
			      addr, start,
			      VM_MAPTYPE_NORMAL,
			      VM_PROT_ALL, VM_PROT_ALL,
			      0);
	}
	addr = end;
	if (addr < KvaEnd) {
		vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
			      addr, KvaEnd,
			      VM_MAPTYPE_NORMAL,
			      VM_PROT_ALL, VM_PROT_ALL,
			      0);
	}
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
	vm_map_entry_release(count);
}
/*
 * No requirements.
 */
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize = KvaSize;

	return sysctl_handle_long(oidp, &ksize, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
	    0, 0, kvm_size, "IU", "Size of KVM");

/*
 * No requirements.
 */
static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree = virtual_end - kernel_vm_end;

	return sysctl_handle_long(oidp, &kfree, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
	    0, 0, kvm_free, "IU", "Amount of KVM free");