[dragonfly.git] / sys / vm / vm_vmspace.c

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vm/vm_vmspace.c,v 1.14 2007/08/15 03:15:07 dillon Exp $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kern_syscall.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vkernel.h>
#include <sys/vmspace.h>

#include <vm/vm_extern.h>
#include <vm/pmap.h>

#include <machine/vmparam.h>

#include <sys/spinlock2.h>
#include <sys/sysref2.h>

static struct vmspace_entry *vkernel_find_vmspace(struct vkernel_proc *vkp,
                                                  void *id);
static void vmspace_entry_delete(struct vmspace_entry *ve,
                                 struct vkernel_proc *vkp);

static MALLOC_DEFINE(M_VKERNEL, "vkernel", "VKernel structures");

/*
 * vmspace_create (void *id, int type, void *data)
 *
 * Create a VMSPACE under the control of the caller with the specified id.
 * An id of NULL cannot be used.  The type and data fields must currently
 * be 0.
 *
 * The vmspace starts out completely empty.  Memory may be mapped into the
 * VMSPACE with vmspace_mmap() and MAP_VPAGETABLE section(s) controlled
 * with vmspace_mcontrol().
 */
int
sys_vmspace_create(struct vmspace_create_args *uap)
{
        struct vmspace_entry *ve;
        struct vkernel_proc *vkp;

        if (vkernel_enable == 0)
                return (EOPNOTSUPP);

        /*
         * Create a virtual kernel side-structure for the process if one
         * does not exist.
         */
        if ((vkp = curproc->p_vkernel) == NULL) {
                vkp = kmalloc(sizeof(*vkp), M_VKERNEL, M_WAITOK|M_ZERO);
                vkp->refs = 1;
                spin_init(&vkp->spin);
                RB_INIT(&vkp->root);
                curproc->p_vkernel = vkp;
        }

        /*
         * Create a new VMSPACE
         */
        if (vkernel_find_vmspace(vkp, uap->id))
                return (EEXIST);
        ve = kmalloc(sizeof(struct vmspace_entry), M_VKERNEL, M_WAITOK|M_ZERO);
        ve->vmspace = vmspace_alloc(VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
        ve->id = uap->id;
        pmap_pinit2(vmspace_pmap(ve->vmspace));
        RB_INSERT(vmspace_rb_tree, &vkp->root, ve);
        return (0);
}

/*
 * vmspace_destroy (void *id)
 *
 * Destroy a VMSPACE.
 */
int
sys_vmspace_destroy(struct vmspace_destroy_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;

        if ((vkp = curproc->p_vkernel) == NULL)
                return (EINVAL);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
                return (ENOENT);
        if (ve->refs)
                return (EBUSY);
        vmspace_entry_delete(ve, vkp);
        return (0);
}

/*
 * vmspace_ctl (void *id, int cmd, struct trapframe *tframe,
 *              struct vextframe *vframe);
 *
 * Transfer control to a VMSPACE.  Control is returned after the specified
 * number of microseconds or if a page fault, signal, trap, or system call
 * occurs.  The context is updated as appropriate.
 */
int
sys_vmspace_ctl(struct vmspace_ctl_args *uap)
{
        struct vkernel_proc *vkp;
        struct vkernel_lwp *vklp;
        struct vmspace_entry *ve;
        struct lwp *lp;
        struct proc *p;
        int framesz;
        int error;

        lp = curthread->td_lwp;
        p = lp->lwp_proc;

        if ((vkp = p->p_vkernel) == NULL)
                return (EINVAL);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
                return (ENOENT);

        /*
         * Signal mailbox interlock
         */
        if (p->p_flag & P_MAILBOX) {
                p->p_flag &= ~P_MAILBOX;
                return (EINTR);
        }

        switch(uap->cmd) {
        case VMSPACE_CTL_RUN:
                /*
                 * Save the caller's register context, swap VM spaces, and
                 * install the passed register context.  Return with
                 * EJUSTRETURN so the syscall code doesn't adjust the context.
                 */
                atomic_add_int(&ve->refs, 1);
                framesz = sizeof(struct trapframe);
                if ((vklp = lp->lwp_vkernel) == NULL) {
                        vklp = kmalloc(sizeof(*vklp), M_VKERNEL,
                                       M_WAITOK|M_ZERO);
                        lp->lwp_vkernel = vklp;
                }
                vklp->user_trapframe = uap->tframe;
                vklp->user_vextframe = uap->vframe;
                bcopy(uap->sysmsg_frame, &vklp->save_trapframe, framesz);
                bcopy(&curthread->td_tls, &vklp->save_vextframe.vx_tls,
                      sizeof(vklp->save_vextframe.vx_tls));
                error = copyin(uap->tframe, uap->sysmsg_frame, framesz);
                if (error == 0)
                        error = copyin(&uap->vframe->vx_tls, &curthread->td_tls, sizeof(struct savetls));
                if (error == 0)
                        error = cpu_sanitize_frame(uap->sysmsg_frame);
                if (error == 0)
                        error = cpu_sanitize_tls(&curthread->td_tls);
                if (error) {
                        bcopy(&vklp->save_trapframe, uap->sysmsg_frame, framesz);
                        bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
                              sizeof(vklp->save_vextframe.vx_tls));
                        set_user_TLS();
                        atomic_subtract_int(&ve->refs, 1);
                } else {
                        vklp->ve = ve;
                        pmap_setlwpvm(lp, ve->vmspace);
                        set_user_TLS();
                        set_vkernel_fp(uap->sysmsg_frame);
                        error = EJUSTRETURN;
                }
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }
        return (error);
}

/*
 * vmspace_mmap(id, addr, len, prot, flags, fd, offset)
 *
 * Map memory within a VMSPACE.  This function is just like a normal mmap()
 * but operates on the vmspace's memory map.  Most callers use this to create
 * a MAP_VPAGETABLE mapping.
 */
int
sys_vmspace_mmap(struct vmspace_mmap_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;
        int error;

        if ((vkp = curproc->p_vkernel) == NULL)
                return (EINVAL);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
                return (ENOENT);
        error = kern_mmap(ve->vmspace, uap->addr, uap->len,
                          uap->prot, uap->flags,
                          uap->fd, uap->offset, &uap->sysmsg_resultp);
        return (error);
}

/*
 * vmspace_munmap(id, addr, len)
 *
 * Unmap memory within a VMSPACE.
 */
int
sys_vmspace_munmap(struct vmspace_munmap_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;
        vm_offset_t addr;
        vm_size_t size, pageoff;
        vm_map_t map;

        if ((vkp = curproc->p_vkernel) == NULL)
                return (EINVAL);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
                return (ENOENT);

        /*
         * Copied from sys_munmap()
         */
        addr = (vm_offset_t)uap->addr;
        size = uap->len;

        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vm_size_t)round_page(size);
        if (addr + size < addr)
                return (EINVAL);
        if (size == 0)
                return (0);

        if (VM_MAX_USER_ADDRESS > 0 && addr + size > VM_MAX_USER_ADDRESS)
                return (EINVAL);
        if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
                return (EINVAL);
        map = &ve->vmspace->vm_map;
        if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
                return (EINVAL);
        vm_map_remove(map, addr, addr + size);
        return (0);
}

/*
 * vmspace_pread(id, buf, nbyte, flags, offset)
 *
 * Read data from a vmspace.  The number of bytes read is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes read is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 */
int
sys_vmspace_pread(struct vmspace_pread_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;

        if ((vkp = curproc->p_vkernel) == NULL)
                return (EINVAL);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
                return (ENOENT);
        return (EINVAL);
}

/*
 * vmspace_pwrite(id, buf, nbyte, flags, offset)
 *
 * Write data to a vmspace.  The number of bytes written is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes written is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 */
int
sys_vmspace_pwrite(struct vmspace_pwrite_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;

        if ((vkp = curproc->p_vkernel) == NULL)
                return (EINVAL);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
                return (ENOENT);
        return (EINVAL);
}

/*
 * vmspace_mcontrol(id, addr, len, behav, value)
 *
 * madvise/mcontrol support for a vmspace.
 */
int
sys_vmspace_mcontrol(struct vmspace_mcontrol_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;
        vm_offset_t start, end;

        if ((vkp = curproc->p_vkernel) == NULL)
                return (EINVAL);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
                return (ENOENT);

        /*
         * This code is basically copied from sys_mcontrol()
         */
        if (uap->behav < 0 || uap->behav > MADV_CONTROL_END)
                return (EINVAL);

        if (VM_MAX_USER_ADDRESS > 0 &&
            ((vm_offset_t)uap->addr + uap->len) > VM_MAX_USER_ADDRESS)
                return (EINVAL);
        if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS)
                return (EINVAL);
        if (((vm_offset_t)uap->addr + uap->len) < (vm_offset_t)uap->addr)
                return (EINVAL);

        start = trunc_page((vm_offset_t)uap->addr);
        end = round_page((vm_offset_t)uap->addr + uap->len);

        return (vm_map_madvise(&ve->vmspace->vm_map, start, end,
                               uap->behav, uap->value));
}

/*
 * Red black tree functions
 */
static int rb_vmspace_compare(struct vmspace_entry *, struct vmspace_entry *);
RB_GENERATE(vmspace_rb_tree, vmspace_entry, rb_entry, rb_vmspace_compare);

/* a->id is the address and the only field that has to be initialized */
static int
rb_vmspace_compare(struct vmspace_entry *a, struct vmspace_entry *b)
{
        if ((char *)a->id < (char *)b->id)
                return (-1);
        else if ((char *)a->id > (char *)b->id)
                return (1);
        return (0);
}

static
int
rb_vmspace_delete(struct vmspace_entry *ve, void *data)
{
        struct vkernel_proc *vkp = data;

        KKASSERT(ve->refs == 0);
        vmspace_entry_delete(ve, vkp);
        return (0);
}

/*
 * Remove a vmspace_entry from the RB tree and destroy it.  We have to clean
 * up the pmap, the vm_map, then destroy the vmspace.
 */
static
void
vmspace_entry_delete(struct vmspace_entry *ve, struct vkernel_proc *vkp)
{
        RB_REMOVE(vmspace_rb_tree, &vkp->root, ve);

        pmap_remove_pages(vmspace_pmap(ve->vmspace),
                          VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
        vm_map_remove(&ve->vmspace->vm_map,
                      VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
        sysref_put(&ve->vmspace->vm_sysref);
        kfree(ve, M_VKERNEL);
}

static
struct vmspace_entry *
vkernel_find_vmspace(struct vkernel_proc *vkp, void *id)
{
        struct vmspace_entry *ve;
        struct vmspace_entry key;

        key.id = id;
        ve = RB_FIND(vmspace_rb_tree, &vkp->root, &key);
        return (ve);
}

/*
 * Manage vkernel refs, used by the kernel when fork()ing or exit()ing
 * a vkernel process.
 */
void
vkernel_inherit(struct proc *p1, struct proc *p2)
{
        struct vkernel_proc *vkp;

        vkp = p1->p_vkernel;
        KKASSERT(vkp->refs > 0);
        atomic_add_int(&vkp->refs, 1);
        p2->p_vkernel = vkp;
}

void
vkernel_exit(struct proc *p)
{
        struct vkernel_proc *vkp;
        struct lwp *lp;
        int freeme = 0;

        vkp = p->p_vkernel;

        /*
         * Restore the original VM context if we are killed while running
         * a different one.
         *
         * This isn't supposed to happen.  What is supposed to happen is
         * that the process should enter vkernel_trap() before handling
         * the signal.
         */
        RB_FOREACH(lp, lwp_rb_tree, &p->p_lwp_tree) {
                vkernel_lwp_exit(lp);
        }

        /*
         * Dereference the common area
         */
        p->p_vkernel = NULL;
        KKASSERT(vkp->refs > 0);
        spin_lock_wr(&vkp->spin);
        if (--vkp->refs == 0)
                freeme = 1;
        spin_unlock_wr(&vkp->spin);

        if (freeme) {
                RB_SCAN(vmspace_rb_tree, &vkp->root, NULL,
                        rb_vmspace_delete, vkp);
                kfree(vkp, M_VKERNEL);
        }
}

void
vkernel_lwp_exit(struct lwp *lp)
{
        struct vkernel_lwp *vklp;
        struct vmspace_entry *ve;

        if ((vklp = lp->lwp_vkernel) != NULL) {
                if ((ve = vklp->ve) != NULL) {
                        kprintf("Warning, pid %d killed with "
                                "active VC!\n", lp->lwp_proc->p_pid);
                        print_backtrace();
                        pmap_setlwpvm(lp, lp->lwp_proc->p_vmspace);
                        vklp->ve = NULL;
                        KKASSERT(ve->refs > 0);
                        atomic_subtract_int(&ve->refs, 1);
                }
                lp->lwp_vkernel = NULL;
                kfree(vklp, M_VKERNEL);
        }
}

/*
 * A VM space under virtual kernel control trapped out or made a system call
 * or otherwise needs to return control to the virtual kernel context.
 */
int
vkernel_trap(struct lwp *lp, struct trapframe *frame)
{
        struct proc *p = lp->lwp_proc;
        struct vmspace_entry *ve;
        struct vkernel_lwp *vklp;
        int error;

        /*
         * Which vmspace entry was running?
         */
        vklp = lp->lwp_vkernel;
        KKASSERT(vklp);
        ve = vklp->ve;
        KKASSERT(ve != NULL);

        /*
         * Switch the LWP vmspace back to the virtual kernel's VM space.
         */
        vklp->ve = NULL;
        pmap_setlwpvm(lp, p->p_vmspace);
        KKASSERT(ve->refs > 0);
        atomic_subtract_int(&ve->refs, 1);

        /*
         * Copy the emulated process frame to the virtual kernel process.
         * The emulated process cannot change TLS descriptors so don't
         * bother saving them, we already have a copy.
         *
         * Restore the virtual kernel's saved context so the virtual kernel
         * process can resume.
         */
        error = copyout(frame, vklp->user_trapframe, sizeof(*frame));
        bcopy(&vklp->save_trapframe, frame, sizeof(*frame));
        bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
              sizeof(vklp->save_vextframe.vx_tls));
        set_user_TLS();
        return (error);
}
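
/*
 * Usage sketch (illustration only, not part of the committed file): a rough
 * outline of how a userland virtual kernel might drive the syscalls above,
 * following the argument lists documented in the comments in this file.
 * The prototypes are assumed to come from <sys/vmspace.h>; the trapframe and
 * vextframe setup is machine dependent and elided; and run_guest_sketch,
 * guest_base and guest_size are hypothetical names.  Treat this as an
 * assumption-laden sketch rather than working vkernel code.
 */
#if 0
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/vmspace.h>
#include <err.h>

static void
run_guest_sketch(void)
{
        void *id = (void *)0x1;                 /* any non-NULL identifier */
        void *guest_base = (void *)0x100000;    /* hypothetical guest layout */
        size_t guest_size = 1024 * 1024;
        struct trapframe tf;                    /* guest register context */
        struct vextframe vf;                    /* guest TLS/extended context */

        /* Create an empty VM space keyed by 'id'; type and data must be 0. */
        if (vmspace_create(id, 0, NULL) < 0)
                err(1, "vmspace_create");

        /* Map guest memory into the new space, typically MAP_VPAGETABLE. */
        vmspace_mmap(id, guest_base, guest_size, PROT_READ | PROT_WRITE,
                     MAP_ANON | MAP_VPAGETABLE, -1, 0);

        /* ... fill in tf/vf with the guest entry context here ... */

        /*
         * Transfer control to the guest.  The call returns when the guest
         * page faults, traps, or makes a system call, with tf/vf updated
         * to reflect the guest's context at that point.
         */
        vmspace_ctl(id, VMSPACE_CTL_RUN, &tf, &vf);

        vmspace_destroy(id);
}
#endif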