kernel: Add a few forgotten crit_exit()s and fix a wrong crit_enter().
[dragonfly.git] / sys / vm / vm_vmspace.c
blob 9f8d7bba7bcfcab1601e109f1f9e93533085bf50
/*
 * (MPSAFE)
 *
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kern_syscall.h>
#include <sys/mman.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vkernel.h>
#include <sys/vmspace.h>

#include <vm/vm_extern.h>
#include <vm/pmap.h>

#include <machine/vmparam.h>

#include <sys/sysref2.h>
#include <sys/mplock2.h>
static struct vmspace_entry *vkernel_find_vmspace(struct vkernel_proc *vkp,
                                                  void *id);
static void vmspace_entry_delete(struct vmspace_entry *ve,
                                 struct vkernel_proc *vkp);

static MALLOC_DEFINE(M_VKERNEL, "vkernel", "VKernel structures");
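/*
 * Illustrative call sequence only (not part of the original source): a
 * virtual kernel process would typically drive this interface roughly as
 * follows, assuming userland wrappers that match the syscall signatures
 * documented below:
 *
 *      vmspace_create(id, 0, NULL);                            create space
 *      vmspace_mmap(id, addr, len, prot, flags, fd, off);      populate it
 *      vmspace_ctl(id, VMSPACE_CTL_RUN, tframe, vframe);       run context
 *      vmspace_munmap(id, addr, len);                          unmap
 *      vmspace_destroy(id);                                    tear down
 */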
/*
 * vmspace_create (void *id, int type, void *data)
 *
 * Create a VMSPACE under the control of the caller with the specified id.
 * An id of NULL cannot be used.  The type and data fields must currently
 * be 0.
 *
 * The vmspace starts out completely empty.  Memory may be mapped into the
 * VMSPACE with vmspace_mmap() and MAP_VPAGETABLE section(s) controlled
 * with vmspace_mcontrol().
 *
 * No requirements.
 */
int
sys_vmspace_create(struct vmspace_create_args *uap)
{
        struct vmspace_entry *ve;
        struct vkernel_proc *vkp;
        struct proc *p = curproc;
        int error;

        if (vkernel_enable == 0)
                return (EOPNOTSUPP);

        /*
         * Create a virtual kernel side-structure for the process if one
         * does not exist.
         *
         * Implement a simple resolution for SMP races.
         */
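        /*
         * The side-structure is allocated before proc_token is taken so the
         * potentially blocking kmalloc() happens outside the token; if some
         * other thread installed p_vkernel in the meantime the fresh
         * allocation is simply freed and the existing structure is shared.
         */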
        if ((vkp = p->p_vkernel) == NULL) {
                vkp = kmalloc(sizeof(*vkp), M_VKERNEL, M_WAITOK|M_ZERO);
                lwkt_gettoken(&proc_token);
                if (p->p_vkernel == NULL) {
                        vkp->refs = 1;
                        lwkt_token_init(&vkp->token, "vkernel");
                        RB_INIT(&vkp->root);
                        p->p_vkernel = vkp;
                } else {
                        kfree(vkp, M_VKERNEL);
                        vkp = p->p_vkernel;
                }
                lwkt_reltoken(&proc_token);
        }

        get_mplock();

        /*
         * Create a new VMSPACE, disallow conflicting ids
         */
        ve = kmalloc(sizeof(struct vmspace_entry), M_VKERNEL, M_WAITOK|M_ZERO);
        ve->vmspace = vmspace_alloc(VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
        ve->id = uap->id;
        pmap_pinit2(vmspace_pmap(ve->vmspace));
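        /*
         * RB_INSERT() returns the colliding node (non-NULL) when an entry
         * with the same id is already present, in which case the new entry
         * is discarded and EEXIST is returned to the caller.
         */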
        lwkt_gettoken(&vkp->token);
        if (RB_INSERT(vmspace_rb_tree, &vkp->root, ve)) {
                vmspace_free(ve->vmspace);
                ve->vmspace = NULL; /* safety */
                kfree(ve, M_VKERNEL);
                error = EEXIST;
        } else {
                error = 0;
        }
        lwkt_reltoken(&vkp->token);
        rel_mplock();
        return (error);
}
/*
 * Destroy a VMSPACE given its identifier.
 *
 * No requirements.
 */
int
sys_vmspace_destroy(struct vmspace_destroy_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;
        int error;

        get_mplock();
        if ((vkp = curproc->p_vkernel) == NULL) {
                error = EINVAL;
                goto done3;
        }
        lwkt_gettoken(&vkp->token);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
                error = ENOENT;
                goto done2;
        }
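        /*
         * A non-zero ref count means the vmspace is still in active use,
         * e.g. an lwp is currently running in it via vmspace_ctl() or a
         * blocking operation holds a temporary reference, so refuse to
         * destroy it.
         */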
        if (ve->refs) {
                error = EBUSY;
                goto done2;
        }
        vmspace_entry_delete(ve, vkp);
        error = 0;
done2:
        lwkt_reltoken(&vkp->token);
done3:
        rel_mplock();
        return(error);
}
/*
 * vmspace_ctl (void *id, int cmd, struct trapframe *tframe,
 *              struct vextframe *vframe);
 *
 * Transfer control to a VMSPACE.  Control is returned after the specified
 * number of microseconds or if a page fault, signal, trap, or system call
 * occurs.  The context is updated as appropriate.
 *
 * No requirements.
 */
int
sys_vmspace_ctl(struct vmspace_ctl_args *uap)
{
        struct vkernel_proc *vkp;
        struct vkernel_lwp *vklp;
        struct vmspace_entry *ve;
        struct lwp *lp;
        struct proc *p;
        int framesz;
        int error;

        lp = curthread->td_lwp;
        p = lp->lwp_proc;

        if ((vkp = p->p_vkernel) == NULL)
                return (EINVAL);

        get_mplock();
        lwkt_gettoken(&vkp->token);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
                error = ENOENT;
                goto done;
        }
        switch(uap->cmd) {
        case VMSPACE_CTL_RUN:
                /*
                 * Save the caller's register context, swap VM spaces, and
                 * install the passed register context.  Return with
                 * EJUSTRETURN so the syscall code doesn't adjust the context.
                 */
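                /*
                 * The reference taken on ve here is dropped below if the
                 * frame or TLS copyin/sanitizing fails, otherwise it is
                 * held until vkernel_trap() or vkernel_lwp_exit() returns
                 * the lwp to the virtual kernel's own vmspace.
                 */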
                atomic_add_int(&ve->refs, 1);
                framesz = sizeof(struct trapframe);
                if ((vklp = lp->lwp_vkernel) == NULL) {
                        vklp = kmalloc(sizeof(*vklp), M_VKERNEL,
                                       M_WAITOK|M_ZERO);
                        lp->lwp_vkernel = vklp;
                }
                vklp->user_trapframe = uap->tframe;
                vklp->user_vextframe = uap->vframe;
                bcopy(uap->sysmsg_frame, &vklp->save_trapframe, framesz);
                bcopy(&curthread->td_tls, &vklp->save_vextframe.vx_tls,
                      sizeof(vklp->save_vextframe.vx_tls));
                error = copyin(uap->tframe, uap->sysmsg_frame, framesz);
                if (error == 0) {
                        error = copyin(&uap->vframe->vx_tls,
                                       &curthread->td_tls,
                                       sizeof(struct savetls));
                }
                if (error == 0)
                        error = cpu_sanitize_frame(uap->sysmsg_frame);
                if (error == 0)
                        error = cpu_sanitize_tls(&curthread->td_tls);
                if (error) {
                        bcopy(&vklp->save_trapframe, uap->sysmsg_frame,
                              framesz);
                        bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
                              sizeof(vklp->save_vextframe.vx_tls));
                        set_user_TLS();
                        atomic_subtract_int(&ve->refs, 1);
                } else {
                        vklp->ve = ve;
                        pmap_setlwpvm(lp, ve->vmspace);
                        set_user_TLS();
                        set_vkernel_fp(uap->sysmsg_frame);
                        error = EJUSTRETURN;
                }
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }
done:
        lwkt_reltoken(&vkp->token);
        rel_mplock();
        return(error);
}
/*
 * vmspace_mmap(id, addr, len, prot, flags, fd, offset)
 *
 * map memory within a VMSPACE.  This function is just like a normal mmap()
 * but operates on the vmspace's memory map.  Most callers use this to create
 * a MAP_VPAGETABLE mapping.
 *
 * No requirements.
 */
int
sys_vmspace_mmap(struct vmspace_mmap_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;
        int error;

        /*
         * We hold the vmspace token to serialize calls to
         * vkernel_find_vmspace.
         */
        lwkt_gettoken(&vmspace_token);
        if ((vkp = curproc->p_vkernel) == NULL) {
                error = EINVAL;
                goto done3;
        }

        /*
         * NOTE: kern_mmap() can block so we need to temporarily
         *       ref ve->refs.
         */
        lwkt_gettoken(&vkp->token);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) != NULL) {
                atomic_add_int(&ve->refs, 1);
                error = kern_mmap(ve->vmspace, uap->addr, uap->len,
                                  uap->prot, uap->flags,
                                  uap->fd, uap->offset, &uap->sysmsg_resultp);
                atomic_subtract_int(&ve->refs, 1);
        } else {
                error = ENOENT;
        }
        lwkt_reltoken(&vkp->token);
done3:
        lwkt_reltoken(&vmspace_token);
        return (error);
}
/*
 * vmspace_munmap(id, addr, len)
 *
 * unmap memory within a VMSPACE.
 *
 * No requirements.
 */
int
sys_vmspace_munmap(struct vmspace_munmap_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;
        vm_offset_t addr;
        vm_offset_t tmpaddr;
        vm_size_t size, pageoff;
        vm_map_t map;
        int error;

        get_mplock();
        if ((vkp = curproc->p_vkernel) == NULL) {
                error = EINVAL;
                goto done3;
        }
        lwkt_gettoken(&vkp->token);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
                error = ENOENT;
                goto done2;
        }

        /*
         * NOTE: kern_munmap() can block so we need to temporarily
         *       ref ve->refs.
         */
        atomic_add_int(&ve->refs, 1);

        /*
         * Copied from sys_munmap()
         */
        addr = (vm_offset_t)uap->addr;
        size = uap->len;
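        /*
         * Round the request out to whole pages: the start address is
         * truncated to a page boundary and the length is extended by the
         * dropped offset and rounded up, so the unmap always covers
         * complete pages.  The checks below reject length wrap-around and
         * ranges outside the user portion of the vmspace.
         */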
        pageoff = (addr & PAGE_MASK);
        addr -= pageoff;
        size += pageoff;
        size = (vm_size_t)round_page(size);
        if (size < uap->len) {          /* wrap */
                error = EINVAL;
                goto done1;
        }
        tmpaddr = addr + size;          /* workaround gcc4 opt */
        if (tmpaddr < addr) {           /* wrap */
                error = EINVAL;
                goto done1;
        }
        if (size == 0) {
                error = 0;
                goto done1;
        }

        if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS) {
                error = EINVAL;
                goto done1;
        }
        if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS) {
                error = EINVAL;
                goto done1;
        }
        map = &ve->vmspace->vm_map;
        if (!vm_map_check_protection(map, addr, tmpaddr, VM_PROT_NONE, FALSE)) {
                error = EINVAL;
                goto done1;
        }
        vm_map_remove(map, addr, addr + size);
        error = 0;
done1:
        atomic_subtract_int(&ve->refs, 1);
done2:
        lwkt_reltoken(&vkp->token);
done3:
        rel_mplock();
        return (error);
}
/*
 * vmspace_pread(id, buf, nbyte, flags, offset)
 *
 * Read data from a vmspace.  The number of bytes read is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes read is
 * less than the requested size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 *
 * (not implemented yet)
 * No requirements.
 */
int
sys_vmspace_pread(struct vmspace_pread_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;
        int error;

        get_mplock();
        if ((vkp = curproc->p_vkernel) == NULL) {
                error = EINVAL;
                goto done3;
        }
        lwkt_gettoken(&vkp->token);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
                error = ENOENT;
                goto done2;
        }
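        /*
         * The actual read is not implemented yet; once the vmspace id has
         * been validated the call simply fails with EINVAL.
         */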
        error = EINVAL;
done2:
        lwkt_reltoken(&vkp->token);
done3:
        rel_mplock();
        return (error);
}
/*
 * vmspace_pwrite(id, buf, nbyte, flags, offset)
 *
 * Write data to a vmspace.  The number of bytes written is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes written is
 * less than the requested size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 *
 * (not implemented yet)
 * No requirements.
 */
int
sys_vmspace_pwrite(struct vmspace_pwrite_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;
        int error;

        get_mplock();
        if ((vkp = curproc->p_vkernel) == NULL) {
                error = EINVAL;
                goto done3;
        }
        lwkt_gettoken(&vkp->token);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
                error = ENOENT;
                goto done2;
        }
        error = EINVAL;
done2:
        lwkt_reltoken(&vkp->token);
done3:
        rel_mplock();
        return (error);
}
/*
 * vmspace_mcontrol(id, addr, len, behav, value)
 *
 * madvise/mcontrol support for a vmspace.
 *
 * No requirements.
 */
int
sys_vmspace_mcontrol(struct vmspace_mcontrol_args *uap)
{
        struct vkernel_proc *vkp;
        struct vmspace_entry *ve;
        vm_offset_t start, end;
        vm_offset_t tmpaddr = (vm_offset_t)uap->addr + uap->len;
        int error;

        get_mplock();
        if ((vkp = curproc->p_vkernel) == NULL) {
                error = EINVAL;
                goto done3;
        }
        lwkt_gettoken(&vkp->token);
        if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
                error = ENOENT;
                goto done2;
        }

        /*
         * NOTE: kern_madvise() can block so we need to temporarily
         *       ref ve->refs.
         */
        atomic_add_int(&ve->refs, 1);
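        /*
         * behav covers both the classic madvise() values and the mcontrol()
         * control extensions (such as MADV_INVAL and MADV_SETMAP), all of
         * which fall at or below MADV_CONTROL_END; anything else is
         * rejected below.
         */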
        /*
         * This code is basically copied from sys_mcontrol()
         */
        if (uap->behav < 0 || uap->behav > MADV_CONTROL_END) {
                error = EINVAL;
                goto done1;
        }

        if (tmpaddr < (vm_offset_t)uap->addr) {
                error = EINVAL;
                goto done1;
        }
        if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS) {
                error = EINVAL;
                goto done1;
        }
        if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS) {
                error = EINVAL;
                goto done1;
        }

        start = trunc_page((vm_offset_t) uap->addr);
        end = round_page(tmpaddr);

        error = vm_map_madvise(&ve->vmspace->vm_map, start, end,
                               uap->behav, uap->value);
done1:
        atomic_subtract_int(&ve->refs, 1);
done2:
        lwkt_reltoken(&vkp->token);
done3:
        rel_mplock();
        return (error);
}
/*
 * Red black tree functions
 */
static int rb_vmspace_compare(struct vmspace_entry *, struct vmspace_entry *);
RB_GENERATE(vmspace_rb_tree, vmspace_entry, rb_entry, rb_vmspace_compare);
/*
 * a->id is the address-sized key and is the only field that has to be
 * initialized when searching.
 *
 * The caller must hold vkp->token.
 */
static int
rb_vmspace_compare(struct vmspace_entry *a, struct vmspace_entry *b)
{
        if ((char *)a->id < (char *)b->id)
                return(-1);
        else if ((char *)a->id > (char *)b->id)
                return(1);
        return(0);
}
/*
 * The caller must hold vkp->token.
 */
static
int
rb_vmspace_delete(struct vmspace_entry *ve, void *data)
{
        struct vkernel_proc *vkp = data;

        KKASSERT(ve->refs == 0);
        vmspace_entry_delete(ve, vkp);
        return(0);
}
/*
 * Remove a vmspace_entry from the RB tree and destroy it.  We have to clean
 * up the pmap, the vm_map, then destroy the vmspace.
 *
 * This function must remove the ve immediately before it might potentially
 * block.
 *
 * The caller must hold vkp->token.
 */
static
void
vmspace_entry_delete(struct vmspace_entry *ve, struct vkernel_proc *vkp)
{
        RB_REMOVE(vmspace_rb_tree, &vkp->root, ve);

        pmap_remove_pages(vmspace_pmap(ve->vmspace),
                          VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
        vm_map_remove(&ve->vmspace->vm_map,
                      VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
        vmspace_free(ve->vmspace);
        ve->vmspace = NULL; /* safety */
        kfree(ve, M_VKERNEL);
}
/*
 * Locate the ve for (id), returning the ve or NULL on failure.  This
 * function does not bump ve->refs itself; a caller that may block while
 * using the ve must bump ve->refs to prevent it from being destroyed
 * (though it can still be removed from the tree).
 *
 * The caller must hold vkp->token.
 */
static
struct vmspace_entry *
vkernel_find_vmspace(struct vkernel_proc *vkp, void *id)
{
        struct vmspace_entry *ve;
        struct vmspace_entry key;

        key.id = id;
        ve = RB_FIND(vmspace_rb_tree, &vkp->root, &key);
        return (ve);
}
/*
 * Manage vkernel refs, used by the kernel when fork()ing or exit()ing
 * a vkernel process.
 *
 * No requirements.
 */
void
vkernel_inherit(struct proc *p1, struct proc *p2)
{
        struct vkernel_proc *vkp;

        vkp = p1->p_vkernel;
        KKASSERT(vkp->refs > 0);
        atomic_add_int(&vkp->refs, 1);
        p2->p_vkernel = vkp;
}
/*
 * No requirements.
 */
void
vkernel_exit(struct proc *p)
{
        struct vkernel_proc *vkp;
        struct lwp *lp;

        vkp = p->p_vkernel;

        /*
         * Restore the original VM context if we are killed while running
         * a different one.
         *
         * This isn't supposed to happen.  What is supposed to happen is
         * that the process should enter vkernel_trap() before handling
         * the signal.
         */
        RB_FOREACH(lp, lwp_rb_tree, &p->p_lwp_tree) {
                vkernel_lwp_exit(lp);
        }

        /*
         * Dereference the common area
         */
        p->p_vkernel = NULL;
        KKASSERT(vkp->refs > 0);
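        /*
         * Drop our reference on the shared vkernel_proc.  The last process
         * to exit tears down any vmspace entries that are still registered
         * and frees the structure itself.
         */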
        if (atomic_fetchadd_int(&vkp->refs, -1) == 1) {
                lwkt_gettoken(&vkp->token);
                RB_SCAN(vmspace_rb_tree, &vkp->root, NULL,
                        rb_vmspace_delete, vkp);
                lwkt_reltoken(&vkp->token);
                kfree(vkp, M_VKERNEL);
        }
}
/*
 * No requirements.
 */
void
vkernel_lwp_exit(struct lwp *lp)
{
        struct vkernel_lwp *vklp;
        struct vmspace_entry *ve;

        if ((vklp = lp->lwp_vkernel) != NULL) {
                if ((ve = vklp->ve) != NULL) {
                        kprintf("Warning, pid %d killed with "
                                "active VC!\n", lp->lwp_proc->p_pid);
                        pmap_setlwpvm(lp, lp->lwp_proc->p_vmspace);
                        vklp->ve = NULL;
                        KKASSERT(ve->refs > 0);
                        atomic_subtract_int(&ve->refs, 1);
                }
                lp->lwp_vkernel = NULL;
                kfree(vklp, M_VKERNEL);
        }
}
/*
 * A VM space under virtual kernel control trapped out or made a system call
 * or otherwise needs to return control to the virtual kernel context.
 *
 * No requirements.
 */
void
vkernel_trap(struct lwp *lp, struct trapframe *frame)
{
        struct proc *p = lp->lwp_proc;
        struct vmspace_entry *ve;
        struct vkernel_lwp *vklp;
        int error;

        /*
         * Which vmspace entry was running?
         */
        vklp = lp->lwp_vkernel;
        KKASSERT(vklp);
        ve = vklp->ve;
        KKASSERT(ve != NULL);

        /*
         * Switch the LWP vmspace back to the virtual kernel's VM space.
         */
        vklp->ve = NULL;
        pmap_setlwpvm(lp, p->p_vmspace);
        KKASSERT(ve->refs > 0);
        atomic_subtract_int(&ve->refs, 1);
        /* ve is invalid once we kill our ref */

        /*
         * Copy the emulated process frame to the virtual kernel process.
         * The emulated process cannot change TLS descriptors so don't
         * bother saving them, we already have a copy.
         *
         * Restore the virtual kernel's saved context so the virtual kernel
         * process can resume.
         */
        error = copyout(frame, vklp->user_trapframe, sizeof(*frame));
        bcopy(&vklp->save_trapframe, frame, sizeof(*frame));
        bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
              sizeof(vklp->save_vextframe.vx_tls));
        set_user_TLS();
        cpu_vkernel_trap(frame, error);
}