/* sys/vm/vm_vmspace.c */
/*
 * (MPSAFE)
 *
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kern_syscall.h>
#include <sys/mman.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vkernel.h>
#include <sys/vmspace.h>

#include <vm/vm_extern.h>
#include <vm/pmap.h>

#include <machine/vmparam.h>
#include <machine/vmm.h>

#include <sys/sysref2.h>
static struct vmspace_entry *vkernel_find_vmspace(struct vkernel_proc *vkp,
						  void *id);
static void vmspace_entry_delete(struct vmspace_entry *ve,
				 struct vkernel_proc *vkp);

static MALLOC_DEFINE(M_VKERNEL, "vkernel", "VKernel structures");
/*
 * vmspace_create (void *id, int type, void *data)
 *
 * Create a VMSPACE under the control of the caller with the specified id.
 * An id of NULL cannot be used.  The type and data fields must currently
 * be 0.
 *
 * The vmspace starts out completely empty.  Memory may be mapped into the
 * VMSPACE with vmspace_mmap() and MAP_VPAGETABLE section(s) controlled
 * with vmspace_mcontrol().
 *
 * No requirements.
 */
int
sys_vmspace_create(struct vmspace_create_args *uap)
{
	struct vmspace_entry *ve;
	struct vkernel_proc *vkp;
	struct proc *p = curproc;
	int error;

	if (vkernel_enable == 0)
		return (EOPNOTSUPP);

	/*
	 * Create a virtual kernel side-structure for the process if one
	 * does not exist.
	 *
	 * Implement a simple resolution for SMP races.
	 */
	if ((vkp = p->p_vkernel) == NULL) {
		vkp = kmalloc(sizeof(*vkp), M_VKERNEL, M_WAITOK|M_ZERO);
		lwkt_gettoken(&p->p_token);
		if (p->p_vkernel == NULL) {
			vkp->refs = 1;
			lwkt_token_init(&vkp->token, "vkernel");
			RB_INIT(&vkp->root);
			p->p_vkernel = vkp;
		} else {
			kfree(vkp, M_VKERNEL);
			vkp = p->p_vkernel;
		}
		lwkt_reltoken(&p->p_token);
	}

	if (curthread->td_vmm)
		return 0;

	/*
	 * Create a new VMSPACE, disallow conflicting ids
	 */
	ve = kmalloc(sizeof(struct vmspace_entry), M_VKERNEL, M_WAITOK|M_ZERO);
	ve->vmspace = vmspace_alloc(VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	ve->id = uap->id;
	pmap_pinit2(vmspace_pmap(ve->vmspace));

	lwkt_gettoken(&vkp->token);
	if (RB_INSERT(vmspace_rb_tree, &vkp->root, ve)) {
		vmspace_rel(ve->vmspace);
		ve->vmspace = NULL; /* safety */
		kfree(ve, M_VKERNEL);
		error = EEXIST;
	} else {
		error = 0;
	}
	lwkt_reltoken(&vkp->token);

	return (error);
}
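
/*
 * Userland usage sketch (illustrative only, not compiled here): a vkernel
 * typically creates one VMSPACE per emulated process, keyed by any unique
 * non-NULL pointer.  The prototype location (<sys/vkernel.h>) and the
 * choice of key are assumptions of this sketch, not requirements:
 *
 *	#include <sys/vkernel.h>
 *	#include <err.h>
 *
 *	void *id = &guest_proc;			// hypothetical unique key
 *
 *	if (vmspace_create(id, 0, NULL) < 0)	// type and data must be 0
 *		err(1, "vmspace_create");
 *	...
 *	if (vmspace_destroy(id) < 0)		// EBUSY while still in use
 *		err(1, "vmspace_destroy");
 */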
/*
 * Destroy a VMSPACE given its identifier.
 *
 * No requirements.
 */
int
sys_vmspace_destroy(struct vmspace_destroy_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	int error;

	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done2;
	}
	if (ve->refs) {
		error = EBUSY;
		goto done2;
	}
	vmspace_entry_delete(ve, vkp);
	error = 0;
done2:
	lwkt_reltoken(&vkp->token);
done3:
	return(error);
}
/*
 * vmspace_ctl (void *id, int cmd, struct trapframe *tframe,
 *		struct vextframe *vframe);
 *
 * Transfer control to a VMSPACE.  Control is returned after the specified
 * number of microseconds or if a page fault, signal, trap, or system call
 * occurs.  The context is updated as appropriate.
 *
 * No requirements.
 */
int
sys_vmspace_ctl(struct vmspace_ctl_args *uap)
{
	struct vkernel_proc *vkp;
	struct vkernel_lwp *vklp;
	struct vmspace_entry *ve = NULL;
	struct lwp *lp;
	struct proc *p;
	int framesz;
	int error;

	lp = curthread->td_lwp;
	p = lp->lwp_proc;

	if ((vkp = p->p_vkernel) == NULL)
		return (EINVAL);

	/*
	 * ve only matters when VMM is not used.
	 */
	if (curthread->td_vmm == NULL) {
		lwkt_gettoken(&vkp->token);
		if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
			error = ENOENT;
			goto done;
		}
	}

	switch(uap->cmd) {
	case VMSPACE_CTL_RUN:
		/*
		 * Save the caller's register context, swap VM spaces, and
		 * install the passed register context.  Return with
		 * EJUSTRETURN so the syscall code doesn't adjust the context.
		 */
		if (curthread->td_vmm == NULL)
			atomic_add_int(&ve->refs, 1);

		framesz = sizeof(struct trapframe);
		if ((vklp = lp->lwp_vkernel) == NULL) {
			vklp = kmalloc(sizeof(*vklp), M_VKERNEL,
				       M_WAITOK|M_ZERO);
			lp->lwp_vkernel = vklp;
		}
		vklp->user_trapframe = uap->tframe;
		vklp->user_vextframe = uap->vframe;
		bcopy(uap->sysmsg_frame, &vklp->save_trapframe, framesz);
		bcopy(&curthread->td_tls, &vklp->save_vextframe.vx_tls,
		      sizeof(vklp->save_vextframe.vx_tls));
		error = copyin(uap->tframe, uap->sysmsg_frame, framesz);
		if (error == 0) {
			error = copyin(&uap->vframe->vx_tls,
				       &curthread->td_tls,
				       sizeof(struct savetls));
		}
		if (error == 0)
			error = cpu_sanitize_frame(uap->sysmsg_frame);
		if (error == 0)
			error = cpu_sanitize_tls(&curthread->td_tls);
		if (error) {
			bcopy(&vklp->save_trapframe, uap->sysmsg_frame,
			      framesz);
			bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
			      sizeof(vklp->save_vextframe.vx_tls));
			set_user_TLS();
			if (curthread->td_vmm == NULL)
				atomic_subtract_int(&ve->refs, 1);
		} else {
			/*
			 * If it's a VMM thread we just set the CR3.  Either
			 * way we set vklp->ve to a non-NULL key so we can
			 * distinguish whether a vkernel user process is
			 * currently running (vklp->ve is NULL when it is
			 * not).
			 */
			if (curthread->td_vmm == NULL) {
				vklp->ve = ve;
				pmap_setlwpvm(lp, ve->vmspace);
			} else {
				vklp->ve = uap->id;
				vmm_vm_set_guest_cr3((register_t)uap->id);
			}
			set_user_TLS();
			set_vkernel_fp(uap->sysmsg_frame);
			error = EJUSTRETURN;
		}
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
done:
	if (curthread->td_vmm == NULL)
		lwkt_reltoken(&vkp->token);
	return(error);
}
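
/*
 * Usage sketch (illustrative only): the vkernel runs each emulated LWP
 * through a dispatch loop built on VMSPACE_CTL_RUN.  Control comes back
 * here, via vkernel_trap() below, whenever the guest faults, traps, or
 * makes a system call, with tf updated to the guest's context.  The
 * setup_guest_context() and handle_guest_exit() helpers are hypothetical:
 *
 *	struct trapframe tf;	// guest register context
 *	struct vextframe vf;	// guest extended context (TLS)
 *
 *	setup_guest_context(&tf, &vf);		// hypothetical
 *	for (;;) {
 *		vmspace_ctl(id, VMSPACE_CTL_RUN, &tf, &vf);
 *		// tf now holds the guest state at the fault/trap/syscall
 *		handle_guest_exit(id, &tf);	// hypothetical dispatcher
 *	}
 */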
/*
 * vmspace_mmap(id, addr, len, prot, flags, fd, offset)
 *
 * map memory within a VMSPACE.  This function is just like a normal mmap()
 * but operates on the vmspace's memory map.  Most callers use this to create
 * a MAP_VPAGETABLE mapping.
 *
 * No requirements.
 */
int
sys_vmspace_mmap(struct vmspace_mmap_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	int error;

	/*
	 * We hold the vmspace_token to serialize calls to
	 * vkernel_find_vmspace.
	 */
	lwkt_gettoken(&vmspace_token);
	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}

	/*
	 * NOTE: kern_mmap() can block so we need to temporarily ref ve->refs.
	 */
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) != NULL) {
		atomic_add_int(&ve->refs, 1);
		error = kern_mmap(ve->vmspace, uap->addr, uap->len,
				  uap->prot, uap->flags,
				  uap->fd, uap->offset, &uap->sysmsg_resultp);
		atomic_subtract_int(&ve->refs, 1);
	} else {
		error = ENOENT;
	}
	lwkt_reltoken(&vkp->token);
done3:
	lwkt_reltoken(&vmspace_token);
	return (error);
}
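
/*
 * Usage sketch (illustrative only): guest RAM is commonly provided by
 * mapping the vkernel's memory image into the VMSPACE with MAP_VPAGETABLE,
 * so that a guest-managed software page table controls translations.  The
 * mmap()-style return convention and the names GUEST_BASE, GUEST_RAM_SIZE,
 * and ram_fd are assumptions of this sketch:
 *
 *	void *addr;
 *
 *	addr = vmspace_mmap(id, (void *)GUEST_BASE, GUEST_RAM_SIZE,
 *			    PROT_READ | PROT_WRITE,
 *			    MAP_VPAGETABLE | MAP_SHARED,
 *			    ram_fd, 0);
 *	if (addr == MAP_FAILED)
 *		err(1, "vmspace_mmap");
 */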
/*
 * vmspace_munmap(id, addr, len)
 *
 * unmap memory within a VMSPACE.
 *
 * No requirements.
 */
int
sys_vmspace_munmap(struct vmspace_munmap_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	vm_offset_t addr;
	vm_offset_t tmpaddr;
	vm_size_t size, pageoff;
	vm_map_t map;
	int error;

	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done2;
	}

	/*
	 * NOTE: kern_munmap() can block so we need to temporarily
	 *	 ref ve->refs.
	 */
	atomic_add_int(&ve->refs, 1);

	/*
	 * Copied from sys_munmap()
	 */
	addr = (vm_offset_t)uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t)round_page(size);
	if (size < uap->len) {			/* wrap */
		error = EINVAL;
		goto done1;
	}
	tmpaddr = addr + size;			/* workaround gcc4 opt */
	if (tmpaddr < addr) {			/* wrap */
		error = EINVAL;
		goto done1;
	}
	if (size == 0) {
		error = 0;
		goto done1;
	}

	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS) {
		error = EINVAL;
		goto done1;
	}
	if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS) {
		error = EINVAL;
		goto done1;
	}
	map = &ve->vmspace->vm_map;
	if (!vm_map_check_protection(map, addr, tmpaddr, VM_PROT_NONE, FALSE)) {
		error = EINVAL;
		goto done1;
	}
	vm_map_remove(map, addr, addr + size);
	error = 0;
done1:
	atomic_subtract_int(&ve->refs, 1);
done2:
	lwkt_reltoken(&vkp->token);
done3:
	return (error);
}
/*
 * vmspace_pread(id, buf, nbyte, flags, offset)
 *
 * Read data from a vmspace.  The number of bytes read is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes read is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 *
 * (not implemented yet)
 * No requirements.
 */
int
sys_vmspace_pread(struct vmspace_pread_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	int error;

	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done2;
	}
	error = EINVAL;
done2:
	lwkt_reltoken(&vkp->token);
done3:
	return (error);
}
/*
 * vmspace_pwrite(id, buf, nbyte, flags, offset)
 *
 * Write data to a vmspace.  The number of bytes written is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes written is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 *
 * (not implemented yet)
 * No requirements.
 */
int
sys_vmspace_pwrite(struct vmspace_pwrite_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	int error;

	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done2;
	}
	error = EINVAL;
done2:
	lwkt_reltoken(&vkp->token);
done3:
	return (error);
}
/*
 * vmspace_mcontrol(id, addr, len, behav, value)
 *
 * madvise/mcontrol support for a vmspace.
 *
 * No requirements.
 */
int
sys_vmspace_mcontrol(struct vmspace_mcontrol_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	vm_offset_t start, end;
	vm_offset_t tmpaddr = (vm_offset_t)uap->addr + uap->len;
	int error;

	if ((vkp = curproc->p_vkernel) == NULL) {
		error = EINVAL;
		goto done3;
	}
	lwkt_gettoken(&vkp->token);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL) {
		error = ENOENT;
		goto done2;
	}

	/*
	 * NOTE: kern_madvise() can block so we need to temporarily
	 *	 ref ve->refs.
	 */
	atomic_add_int(&ve->refs, 1);

	/*
	 * This code is basically copied from sys_mcontrol()
	 */
	if (uap->behav < 0 || uap->behav > MADV_CONTROL_END) {
		error = EINVAL;
		goto done1;
	}

	if (tmpaddr < (vm_offset_t)uap->addr) {
		error = EINVAL;
		goto done1;
	}
	if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS) {
		error = EINVAL;
		goto done1;
	}
	if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS) {
		error = EINVAL;
		goto done1;
	}

	start = trunc_page((vm_offset_t)uap->addr);
	end = round_page(tmpaddr);

	error = vm_map_madvise(&ve->vmspace->vm_map, start, end,
			       uap->behav, uap->value);
done1:
	atomic_subtract_int(&ve->refs, 1);
done2:
	lwkt_reltoken(&vkp->token);
done3:
	return (error);
}
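
/*
 * Usage sketch (illustrative only): after creating a MAP_VPAGETABLE
 * mapping, the vkernel points it at the guest's page directory.  Using
 * the MADV_SETMAP behavior from mcontrol(2) for this, and the names
 * GUEST_BASE, GUEST_RAM_SIZE, and guest_pdir, are assumptions of this
 * sketch:
 *
 *	if (vmspace_mcontrol(id, (void *)GUEST_BASE, GUEST_RAM_SIZE,
 *			     MADV_SETMAP, guest_pdir) < 0)
 *		err(1, "vmspace_mcontrol");
 */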
/*
 * Red black tree functions
 */
static int rb_vmspace_compare(struct vmspace_entry *, struct vmspace_entry *);
RB_GENERATE(vmspace_rb_tree, vmspace_entry, rb_entry, rb_vmspace_compare);

/*
 * a->id is the lookup key and is the only field that has to be
 * initialized for a comparison.
 *
 * The caller must hold vkp->token.
 */
static int
rb_vmspace_compare(struct vmspace_entry *a, struct vmspace_entry *b)
{
	if ((char *)a->id < (char *)b->id)
		return(-1);
	else if ((char *)a->id > (char *)b->id)
		return(1);
	return(0);
}
/*
 * The caller must hold vkp->token.
 */
static
int
rb_vmspace_delete(struct vmspace_entry *ve, void *data)
{
	struct vkernel_proc *vkp = data;

	KKASSERT(ve->refs == 0);
	vmspace_entry_delete(ve, vkp);
	return(0);
}
/*
 * Remove a vmspace_entry from the RB tree and destroy it.  We have to clean
 * up the pmap, the vm_map, then destroy the vmspace.
 *
 * This function must remove the ve immediately before it might potentially
 * block.
 *
 * The caller must hold vkp->token.
 */
static
void
vmspace_entry_delete(struct vmspace_entry *ve, struct vkernel_proc *vkp)
{
	RB_REMOVE(vmspace_rb_tree, &vkp->root, ve);

	pmap_remove_pages(vmspace_pmap(ve->vmspace),
			  VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	vm_map_remove(&ve->vmspace->vm_map,
		      VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	vmspace_rel(ve->vmspace);
	ve->vmspace = NULL; /* safety */
	kfree(ve, M_VKERNEL);
}
/*
 * Locate the ve for (id), return the ve or NULL.  The returned ve is not
 * referenced; a caller that may block while using it must temporarily bump
 * ve->refs itself to keep the ve from being destroyed out from under it
 * (the ve can still be removed from the tree).
 *
 * The caller must hold vkp->token.
 */
static
struct vmspace_entry *
vkernel_find_vmspace(struct vkernel_proc *vkp, void *id)
{
	struct vmspace_entry *ve;
	struct vmspace_entry key;

	key.id = id;
	ve = RB_FIND(vmspace_rb_tree, &vkp->root, &key);
	return (ve);
}
/*
 * Manage vkernel refs, used by the kernel when fork()ing or exit()ing
 * a vkernel process.
 *
 * No requirements.
 */
void
vkernel_inherit(struct proc *p1, struct proc *p2)
{
	struct vkernel_proc *vkp;

	vkp = p1->p_vkernel;
	KKASSERT(vkp->refs > 0);
	atomic_add_int(&vkp->refs, 1);
	p2->p_vkernel = vkp;
}
/*
 * No requirements.
 */
void
vkernel_exit(struct proc *p)
{
	struct vkernel_proc *vkp;
	struct lwp *lp;

	vkp = p->p_vkernel;

	/*
	 * Restore the original VM context if we are killed while running
	 * a different one.
	 *
	 * This isn't supposed to happen.  What is supposed to happen is
	 * that the process should enter vkernel_trap() before handling
	 * the signal.
	 */
	RB_FOREACH(lp, lwp_rb_tree, &p->p_lwp_tree) {
		vkernel_lwp_exit(lp);
	}

	/*
	 * Dereference the common area
	 */
	p->p_vkernel = NULL;
	KKASSERT(vkp->refs > 0);

	if (atomic_fetchadd_int(&vkp->refs, -1) == 1) {
		lwkt_gettoken(&vkp->token);
		RB_SCAN(vmspace_rb_tree, &vkp->root, NULL,
			rb_vmspace_delete, vkp);
		lwkt_reltoken(&vkp->token);
		kfree(vkp, M_VKERNEL);
	}
}
/*
 * No requirements.
 */
void
vkernel_lwp_exit(struct lwp *lp)
{
	struct vkernel_lwp *vklp;
	struct vmspace_entry *ve;

	if ((vklp = lp->lwp_vkernel) != NULL) {
		if (lp->lwp_thread->td_vmm == NULL) {
			/*
			 * vkernel thread
			 */
			if ((ve = vklp->ve) != NULL) {
				kprintf("Warning, pid %d killed with "
					"active VC!\n", lp->lwp_proc->p_pid);
				pmap_setlwpvm(lp, lp->lwp_proc->p_vmspace);
				vklp->ve = NULL;
				KKASSERT(ve->refs > 0);
				atomic_subtract_int(&ve->refs, 1);
			}
		} else {
			/*
			 * guest thread
			 */
			vklp->ve = NULL;
		}
		lp->lwp_vkernel = NULL;
		kfree(vklp, M_VKERNEL);
	}
}
/*
 * A VM space under virtual kernel control trapped out or made a system call
 * or otherwise needs to return control to the virtual kernel context.
 *
 * No requirements.
 */
void
vkernel_trap(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;
	struct vmspace_entry *ve;
	struct vkernel_lwp *vklp;
	int error;

	/*
	 * Which vmspace entry was running?
	 */
	vklp = lp->lwp_vkernel;
	KKASSERT(vklp);

	if (curthread->td_vmm == NULL) {
		ve = vklp->ve;
		KKASSERT(ve != NULL);

		/*
		 * Switch the LWP vmspace back to the virtual kernel's VM
		 * space.
		 */
		vklp->ve = NULL;
		pmap_setlwpvm(lp, p->p_vmspace);
		KKASSERT(ve->refs > 0);
		atomic_subtract_int(&ve->refs, 1);
		/* ve is invalid once we kill our ref */
	} else {
		/* If it's a VMM thread just set the vkernel CR3 back */
		vklp->ve = NULL;
		vmm_vm_set_guest_cr3(p->p_vkernel->vkernel_cr3);
	}

	/*
	 * Copy the emulated process frame to the virtual kernel process.
	 * The emulated process cannot change TLS descriptors so don't
	 * bother saving them, we already have a copy.
	 *
	 * Restore the virtual kernel's saved context so the virtual kernel
	 * process can resume.
	 */
	error = copyout(frame, vklp->user_trapframe, sizeof(*frame));
	bcopy(&vklp->save_trapframe, frame, sizeof(*frame));
	bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
	      sizeof(vklp->save_vextframe.vx_tls));
	set_user_TLS();
	cpu_vkernel_trap(frame, error);
}