/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vm/vm_vmspace.c,v 1.14 2007/08/15 03:15:07 dillon Exp $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kern_syscall.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vkernel.h>
#include <sys/vmspace.h>

#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <machine/vmparam.h>

#include <sys/spinlock2.h>
#include <sys/sysref2.h>

static struct vmspace_entry *vkernel_find_vmspace(struct vkernel_proc *vkp,
						  void *id);
static void vmspace_entry_delete(struct vmspace_entry *ve,
				 struct vkernel_proc *vkp);

static MALLOC_DEFINE(M_VKERNEL, "vkernel", "VKernel structures");

/*
 * vmspace_create (void *id, int type, void *data)
 *
 * Create a VMSPACE under the control of the caller with the specified id.
 * An id of NULL cannot be used.  The type and data fields must currently
 * be 0.
 *
 * The vmspace starts out completely empty.  Memory may be mapped into the
 * VMSPACE with vmspace_mmap() and MAP_VPAGETABLE section(s) controlled
 * with vmspace_mcontrol().
 */
int
sys_vmspace_create(struct vmspace_create_args *uap)
{
	struct vmspace_entry *ve;
	struct vkernel_proc *vkp;

	if (vkernel_enable == 0)
		return (EOPNOTSUPP);

	/*
	 * Create a virtual kernel side-structure for the process if one
	 * does not exist.
	 */
	if ((vkp = curproc->p_vkernel) == NULL) {
		vkp = kmalloc(sizeof(*vkp), M_VKERNEL, M_WAITOK|M_ZERO);
		vkp->refs = 1;
		spin_init(&vkp->spin);
		RB_INIT(&vkp->root);
		curproc->p_vkernel = vkp;
	}

	/*
	 * Create a new VMSPACE
	 */
	if (vkernel_find_vmspace(vkp, uap->id))
		return (EEXIST);
	ve = kmalloc(sizeof(struct vmspace_entry), M_VKERNEL,
		     M_WAITOK|M_ZERO);
	ve->vmspace = vmspace_alloc(VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	ve->id = uap->id;
	pmap_pinit2(vmspace_pmap(ve->vmspace));
	RB_INSERT(vmspace_rb_tree, &vkp->root, ve);
	return (0);
}

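/*
 * Example: a hedged sketch of userland usage, as a virtual kernel process
 * might call it.  The id is any non-NULL token of the caller's choosing
 * (the address of a per-guest structure is typical); type and data must
 * currently be 0/NULL.  Error handling is abbreviated and the guest
 * structure name is illustrative, not part of this file.
 *
 *	struct guest_info guest;	// hypothetical per-guest state
 *
 *	if (vmspace_create(&guest, 0, NULL) < 0)
 *		err(1, "vmspace_create");
 *	...
 *	vmspace_destroy(&guest);	// EBUSY while the vmspace is in use
 */
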
/*
 * vmspace_destroy (void *id)
 *
 * Destroy a VMSPACE.
 */
int
sys_vmspace_destroy(struct vmspace_destroy_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);
	if (ve->refs)
		return (EBUSY);
	vmspace_entry_delete(ve, vkp);
	return (0);
}

/*
 * vmspace_ctl (void *id, int cmd, struct trapframe *tframe,
 *		struct vextframe *vframe);
 *
 * Transfer control to a VMSPACE.  Control is returned after the specified
 * number of microseconds or if a page fault, signal, trap, or system call
 * occurs.  The context is updated as appropriate.
 */
int
sys_vmspace_ctl(struct vmspace_ctl_args *uap)
{
	struct vkernel_proc *vkp;
	struct vkernel_lwp *vklp;
	struct vmspace_entry *ve;
	struct lwp *lp;
	struct proc *p;
	int framesz;
	int error;

	lp = curthread->td_lwp;
	p = lp->lwp_proc;

	if ((vkp = p->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);

	/*
	 * Signal mailbox interlock
	 */
	if (p->p_flag & P_MAILBOX) {
		p->p_flag &= ~P_MAILBOX;
		return (EINTR);
	}

	switch(uap->cmd) {
	case VMSPACE_CTL_RUN:
		/*
		 * Save the caller's register context, swap VM spaces, and
		 * install the passed register context.  Return with
		 * EJUSTRETURN so the syscall code doesn't adjust the context.
		 */
		atomic_add_int(&ve->refs, 1);
		framesz = sizeof(struct trapframe);
		if ((vklp = lp->lwp_vkernel) == NULL) {
			vklp = kmalloc(sizeof(*vklp), M_VKERNEL,
				       M_WAITOK|M_ZERO);
			lp->lwp_vkernel = vklp;
		}
		vklp->user_trapframe = uap->tframe;
		vklp->user_vextframe = uap->vframe;
		bcopy(uap->sysmsg_frame, &vklp->save_trapframe, framesz);
		bcopy(&curthread->td_tls, &vklp->save_vextframe.vx_tls,
		      sizeof(vklp->save_vextframe.vx_tls));
		error = copyin(uap->tframe, uap->sysmsg_frame, framesz);
		if (error == 0)
			error = copyin(&uap->vframe->vx_tls,
				       &curthread->td_tls,
				       sizeof(struct savetls));
		if (error == 0)
			error = cpu_sanitize_frame(uap->sysmsg_frame);
		if (error == 0)
			error = cpu_sanitize_tls(&curthread->td_tls);
		if (error) {
			bcopy(&vklp->save_trapframe, uap->sysmsg_frame,
			      framesz);
			bcopy(&vklp->save_vextframe.vx_tls,
			      &curthread->td_tls,
			      sizeof(vklp->save_vextframe.vx_tls));
			set_user_TLS();
			atomic_subtract_int(&ve->refs, 1);
		} else {
			vklp->ve = ve;
			pmap_setlwpvm(lp, ve->vmspace);
			set_user_TLS();
			set_vkernel_fp(uap->sysmsg_frame);
			error = EJUSTRETURN;
		}
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}

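/*
 * Example: a hedged sketch of the dispatch loop a virtual kernel might
 * build on VMSPACE_CTL_RUN.  On success the syscall returns through
 * vkernel_trap() with the guest's registers copied back out into tf, so
 * the caller inspects tf to see why the guest stopped.  Variable names
 * and the decode step are illustrative, not part of this file.
 *
 *	struct trapframe tf;	// guest register context to install
 *	struct vextframe vf;	// guest TLS context to install
 *
 *	for (;;) {
 *		if (vmspace_ctl(id, VMSPACE_CTL_RUN, &tf, &vf) < 0)
 *			break;	// e.g. EINTR from the signal mailbox
 *		// tf now holds the guest context at the page fault,
 *		// signal, trap, or system call that returned control.
 *	}
 */
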
/*
 * vmspace_mmap(id, addr, len, prot, flags, fd, offset)
 *
 * map memory within a VMSPACE.  This function is just like a normal mmap()
 * but operates on the vmspace's memory map.  Most callers use this to create
 * a MAP_VPAGETABLE mapping.
 */
int
sys_vmspace_mmap(struct vmspace_mmap_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	int error;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);
	error = kern_mmap(ve->vmspace, uap->addr, uap->len,
			  uap->prot, uap->flags,
			  uap->fd, uap->offset, &uap->sysmsg_resultp);
	return (error);
}

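/*
 * Example: a hedged sketch of populating a new VMSPACE.  A virtual kernel
 * typically maps anonymous memory with MAP_VPAGETABLE so the mapping can
 * later be pointed at a guest page table via vmspace_mcontrol().  The
 * size constant is illustrative.
 *
 *	void *base;
 *
 *	base = vmspace_mmap(&guest, NULL, GUEST_RAM_SIZE,
 *			    PROT_READ|PROT_WRITE,
 *			    MAP_ANON|MAP_VPAGETABLE, -1, 0);
 */
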
/*
 * vmspace_munmap(id, addr, len)
 *
 * unmap memory within a VMSPACE.
 */
int
sys_vmspace_munmap(struct vmspace_munmap_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);

	/*
	 * Copied from sys_munmap()
	 */
	addr = (vm_offset_t)uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t)round_page(size);
	if (addr + size < addr)
		return (EINVAL);
	if (size == 0)
		return (0);

	if (VM_MAX_USER_ADDRESS > 0 && addr + size > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);
	map = &ve->vmspace->vm_map;
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
		return (EINVAL);
	vm_map_remove(map, addr, addr + size);
	return (0);
}

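/*
 * Worked example of the rounding above: a request to unmap 100 bytes at
 * address 0x1234 has pageoff 0x234, so the range is widened to start at
 * 0x1000 with size 0x234 + 100 = 0x298, which round_page() brings up to
 * a full 0x1000-byte page.  Unmapping always operates on whole pages.
 */
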
/*
 * vmspace_pread(id, buf, nbyte, flags, offset)
 *
 * Read data from a vmspace.  The number of bytes read is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes read is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 */
int
sys_vmspace_pread(struct vmspace_pread_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);
	return (EINVAL);
}

/*
 * vmspace_pwrite(id, buf, nbyte, flags, offset)
 *
 * Write data to a vmspace.  The number of bytes written is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes written is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 */
int
sys_vmspace_pwrite(struct vmspace_pwrite_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);
	return (EINVAL);
}

/*
 * vmspace_mcontrol(id, addr, len, behav, value)
 *
 * madvise/mcontrol support for a vmspace.
 */
int
sys_vmspace_mcontrol(struct vmspace_mcontrol_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	vm_offset_t start, end;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);

	/*
	 * This code is basically copied from sys_mcontrol()
	 */
	if (uap->behav < 0 || uap->behav > MADV_CONTROL_END)
		return (EINVAL);

	if (VM_MAX_USER_ADDRESS > 0 &&
	    ((vm_offset_t)uap->addr + uap->len) > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);
	if (((vm_offset_t)uap->addr + uap->len) < (vm_offset_t)uap->addr)
		return (EINVAL);

	start = trunc_page((vm_offset_t)uap->addr);
	end = round_page((vm_offset_t)uap->addr + uap->len);

	return (vm_map_madvise(&ve->vmspace->vm_map, start, end,
			       uap->behav, uap->value));
}

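/*
 * Example: a hedged sketch of attaching a guest page table to the
 * MAP_VPAGETABLE mapping created earlier.  MADV_SETMAP supplies the root
 * of the virtual page table through the value argument; the variable
 * names here are illustrative, not part of this file.
 *
 *	vmspace_mcontrol(&guest, base, GUEST_RAM_SIZE, MADV_SETMAP,
 *			 guest_pagetable_root);
 */
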
/*
 * Red black tree functions
 */
static int rb_vmspace_compare(struct vmspace_entry *, struct vmspace_entry *);
RB_GENERATE(vmspace_rb_tree, vmspace_entry, rb_entry, rb_vmspace_compare);

/* a->id is the address, and the only field that has to be initialized */
static
int
rb_vmspace_compare(struct vmspace_entry *a, struct vmspace_entry *b)
{
	if ((char *)a->id < (char *)b->id)
		return(-1);
	else if ((char *)a->id > (char *)b->id)
		return(1);
	return(0);
}

static
int
rb_vmspace_delete(struct vmspace_entry *ve, void *data)
{
	struct vkernel_proc *vkp = data;

	KKASSERT(ve->refs == 0);
	vmspace_entry_delete(ve, vkp);
	return(0);
}

/*
 * Remove a vmspace_entry from the RB tree and destroy it.  We have to clean
 * up the pmap, the vm_map, then destroy the vmspace.
 */
static
void
vmspace_entry_delete(struct vmspace_entry *ve, struct vkernel_proc *vkp)
{
	RB_REMOVE(vmspace_rb_tree, &vkp->root, ve);

	pmap_remove_pages(vmspace_pmap(ve->vmspace),
			  VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	vm_map_remove(&ve->vmspace->vm_map,
		      VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	sysref_put(&ve->vmspace->vm_sysref);
	kfree(ve, M_VKERNEL);
}

static
struct vmspace_entry *
vkernel_find_vmspace(struct vkernel_proc *vkp, void *id)
{
	struct vmspace_entry *ve;
	struct vmspace_entry key;

	key.id = id;
	ve = RB_FIND(vmspace_rb_tree, &vkp->root, &key);
	return (ve);
}

/*
 * Manage vkernel refs, used by the kernel when fork()ing or exit()ing
 * a vkernel process.
 */
void
vkernel_inherit(struct proc *p1, struct proc *p2)
{
	struct vkernel_proc *vkp;

	vkp = p1->p_vkernel;
	KKASSERT(vkp->refs > 0);
	atomic_add_int(&vkp->refs, 1);
	p2->p_vkernel = vkp;
}

void
vkernel_exit(struct proc *p)
{
	struct vkernel_proc *vkp;
	struct lwp *lp;
	int freeme = 0;

	vkp = p->p_vkernel;

	/*
	 * Restore the original VM context if we are killed while running
	 * a different one.
	 *
	 * This isn't supposed to happen.  What is supposed to happen is
	 * that the process should enter vkernel_trap() before the handling
	 * of the signal.
	 */
	RB_FOREACH(lp, lwp_rb_tree, &p->p_lwp_tree) {
		vkernel_lwp_exit(lp);
	}

	/*
	 * Dereference the common area
	 */
	p->p_vkernel = NULL;
	KKASSERT(vkp->refs > 0);
	spin_lock_wr(&vkp->spin);
	if (--vkp->refs == 0)
		freeme = 1;
	spin_unlock_wr(&vkp->spin);

	if (freeme) {
		RB_SCAN(vmspace_rb_tree, &vkp->root, NULL,
			rb_vmspace_delete, vkp);
		kfree(vkp, M_VKERNEL);
	}
}

void
vkernel_lwp_exit(struct lwp *lp)
{
	struct vkernel_lwp *vklp;
	struct vmspace_entry *ve;

	if ((vklp = lp->lwp_vkernel) != NULL) {
		if ((ve = vklp->ve) != NULL) {
			kprintf("Warning, pid %d killed with "
				"active VC!\n", lp->lwp_proc->p_pid);
#ifdef DDB
			db_print_backtrace();
#endif
			pmap_setlwpvm(lp, lp->lwp_proc->p_vmspace);
			vklp->ve = NULL;
			KKASSERT(ve->refs > 0);
			atomic_subtract_int(&ve->refs, 1);
		}
		lp->lwp_vkernel = NULL;
		kfree(vklp, M_VKERNEL);
	}
}

/*
 * A VM space under virtual kernel control trapped out or made a system call
 * or otherwise needs to return control to the virtual kernel context.
 */
int
vkernel_trap(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;
	struct vmspace_entry *ve;
	struct vkernel_lwp *vklp;
	int error;

	/*
	 * Which vmspace entry was running?
	 */
	vklp = lp->lwp_vkernel;
	KKASSERT(vklp);
	ve = vklp->ve;
	KKASSERT(ve != NULL);

	/*
	 * Switch the LWP vmspace back to the virtual kernel's VM space.
	 */
	vklp->ve = NULL;
	pmap_setlwpvm(lp, p->p_vmspace);
	KKASSERT(ve->refs > 0);
	atomic_subtract_int(&ve->refs, 1);

	/*
	 * Copy the emulated process frame to the virtual kernel process.
	 * The emulated process cannot change TLS descriptors so don't
	 * bother saving them, we already have a copy.
	 *
	 * Restore the virtual kernel's saved context so the virtual kernel
	 * process can resume.
	 */
	error = copyout(frame, vklp->user_trapframe, sizeof(*frame));
	bcopy(&vklp->save_trapframe, frame, sizeof(*frame));
	bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
	      sizeof(vklp->save_vextframe.vx_tls));
	set_user_TLS();
	return (error);
}