/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vm/vm_vmspace.c,v 1.14 2007/08/15 03:15:07 dillon Exp $
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kern_syscall.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vkernel.h>
#include <sys/vmspace.h>

#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <machine/vmparam.h>

#include <sys/spinlock2.h>
#include <sys/sysref2.h>
static struct vmspace_entry *vkernel_find_vmspace(struct vkernel_proc *vkp,
						  void *id);
static void vmspace_entry_delete(struct vmspace_entry *ve,
				 struct vkernel_proc *vkp);

static MALLOC_DEFINE(M_VKERNEL, "vkernel", "VKernel structures");
/*
 * vmspace_create (void *id, int type, void *data)
 *
 * Create a VMSPACE under the control of the caller with the specified id.
 * An id of NULL cannot be used.  The type and data fields must currently
 * be 0.
 *
 * The vmspace starts out completely empty.  Memory may be mapped into the
 * VMSPACE with vmspace_mmap() and MAP_VPAGETABLE section(s) controlled
 * with vmspace_mcontrol().
 */
int
sys_vmspace_create(struct vmspace_create_args *uap)
{
	struct vmspace_entry *ve;
	struct vkernel_proc *vkp;

	if (vkernel_enable == 0)
		return (EOPNOTSUPP);

	/*
	 * Create a virtual kernel side-structure for the process if one
	 * doesn't exist already.
	 */
	if ((vkp = curproc->p_vkernel) == NULL) {
		vkp = kmalloc(sizeof(*vkp), M_VKERNEL, M_WAITOK|M_ZERO);
		vkp->refs = 1;
		spin_init(&vkp->spin);
		RB_INIT(&vkp->root);
		curproc->p_vkernel = vkp;
	}

	/*
	 * Create a new VMSPACE, disallow conflicting ids
	 */
	if (vkernel_find_vmspace(vkp, uap->id))
		return (EEXIST);
	ve = kmalloc(sizeof(struct vmspace_entry), M_VKERNEL, M_WAITOK|M_ZERO);
	ve->vmspace = vmspace_alloc(VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	ve->id = uap->id;
	pmap_pinit2(vmspace_pmap(ve->vmspace));
	RB_INSERT(vmspace_rb_tree, &vkp->root, ve);
	return (0);
}
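
/*
 * Illustrative userland sketch (an assumption for this comment, not part
 * of the original source): a virtual kernel creates a guest VM space
 * keyed by an arbitrary non-NULL id, typically the address of its own
 * tracking structure.  "guest" is a hypothetical name.
 *
 *	static struct guest guest;
 *
 *	if (vmspace_create(&guest, 0, NULL) < 0)
 *		err(1, "vmspace_create");
 */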
/*
 * vmspace_destroy (void *id)
 *
 * Destroy a VMSPACE.
 */
int
sys_vmspace_destroy(struct vmspace_destroy_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);
	if (ve->refs)
		return (EBUSY);
	vmspace_entry_delete(ve, vkp);
	return (0);
}
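
/*
 * Illustrative teardown sketch (hypothetical, pairs with the creation
 * example above).  The entry must be idle; a vmspace still being run
 * via vmspace_ctl() fails with EBUSY:
 *
 *	if (vmspace_destroy(&guest) < 0)
 *		err(1, "vmspace_destroy");
 */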
/*
 * vmspace_ctl (void *id, int cmd, struct trapframe *tframe,
 *		struct vextframe *vframe);
 *
 * Transfer control to a VMSPACE.  Control is returned after the specified
 * number of microseconds or if a page fault, signal, trap, or system call
 * occurs.  The context is updated as appropriate.
 */
int
sys_vmspace_ctl(struct vmspace_ctl_args *uap)
{
	struct vkernel_proc *vkp;
	struct vkernel_lwp *vklp;
	struct vmspace_entry *ve;
	struct lwp *lp;
	struct proc *p;
	int framesz;
	int error;

	lp = curthread->td_lwp;
	p = lp->lwp_proc;

	if ((vkp = p->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);

	/*
	 * Signal mailbox interlock
	 */
	if (p->p_flag & P_MAILBOX) {
		p->p_flag &= ~P_MAILBOX;
		return (EINTR);
	}

	switch(uap->cmd) {
	case VMSPACE_CTL_RUN:
		/*
		 * Save the caller's register context, swap VM spaces, and
		 * install the passed register context.  Return with
		 * EJUSTRETURN so the syscall code doesn't adjust the context.
		 */
		atomic_add_int(&ve->refs, 1);
		framesz = sizeof(struct trapframe);
		if ((vklp = lp->lwp_vkernel) == NULL) {
			vklp = kmalloc(sizeof(*vklp), M_VKERNEL,
				       M_WAITOK|M_ZERO);
			lp->lwp_vkernel = vklp;
		}
		vklp->user_trapframe = uap->tframe;
		vklp->user_vextframe = uap->vframe;
		bcopy(uap->sysmsg_frame, &vklp->save_trapframe, framesz);
		bcopy(&curthread->td_tls, &vklp->save_vextframe.vx_tls,
		      sizeof(vklp->save_vextframe.vx_tls));
		error = copyin(uap->tframe, uap->sysmsg_frame, framesz);
		if (error == 0)
			error = copyin(&uap->vframe->vx_tls,
				       &curthread->td_tls,
				       sizeof(struct savetls));
		if (error == 0)
			error = cpu_sanitize_frame(uap->sysmsg_frame);
		if (error == 0)
			error = cpu_sanitize_tls(&curthread->td_tls);
		if (error) {
			bcopy(&vklp->save_trapframe, uap->sysmsg_frame,
			      framesz);
			bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
			      sizeof(vklp->save_vextframe.vx_tls));
			set_user_TLS();
			atomic_subtract_int(&ve->refs, 1);
		} else {
			vklp->ve = ve;
			pmap_setlwpvm(lp, ve->vmspace);
			set_user_TLS();
			set_vkernel_fp(uap->sysmsg_frame);
			error = EJUSTRETURN;
		}
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
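
/*
 * Illustrative sketch of a virtual kernel's dispatch loop (hypothetical
 * helper names; syscall arguments follow the prototype documented
 * above).  vmspace_ctl() "returns" into the saved caller context when
 * the guest faults, traps, or makes a system call, with the guest's
 * register state written back through tframe/vframe:
 *
 *	struct trapframe tf;
 *	struct vextframe vf;
 *
 *	guest_init_frames(&tf, &vf);		// hypothetical setup
 *	for (;;) {
 *		vmspace_ctl(&guest, VMSPACE_CTL_RUN, &tf, &vf);
 *		guest_handle_exit(&tf);		// fault, signal, or syscall
 *	}
 */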
/*
 * vmspace_mmap(id, addr, len, prot, flags, fd, offset)
 *
 * map memory within a VMSPACE.  This function is just like a normal mmap()
 * but operates on the vmspace's memory map.  Most callers use this to create
 * a MAP_VPAGETABLE mapping.
 */
int
sys_vmspace_mmap(struct vmspace_mmap_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	int error;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);
	error = kern_mmap(ve->vmspace, uap->addr, uap->len,
			  uap->prot, uap->flags,
			  uap->fd, uap->offset, &uap->sysmsg_resultp);
	return (error);
}
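
/*
 * Illustrative sketch (memfd and ram_size are hypothetical): guest RAM
 * is typically a file-backed MAP_VPAGETABLE mapping, so the virtual
 * kernel can manage the guest's page tables in software:
 *
 *	void *base;
 *
 *	base = vmspace_mmap(&guest, NULL, ram_size,
 *			    PROT_READ|PROT_WRITE|PROT_EXEC,
 *			    MAP_SHARED|MAP_VPAGETABLE, memfd, 0);
 */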
/*
 * vmspace_munmap(id, addr, len)
 *
 * unmap memory within a VMSPACE.
 */
int
sys_vmspace_munmap(struct vmspace_munmap_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	vm_map_t map;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);

	/*
	 * Copied from sys_munmap()
	 */
	addr = (vm_offset_t)uap->addr;
	size = uap->len;

	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t)round_page(size);
	if (addr + size < addr)
		return (EINVAL);
	if (size == 0)
		return (0);

	if (VM_MAX_USER_ADDRESS > 0 && addr + size > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);
	map = &ve->vmspace->vm_map;
	if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE))
		return (EINVAL);
	vm_map_remove(map, addr, addr + size);
	return (0);
}
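
/*
 * Illustrative sketch (pairs with the mapping example above; addr and
 * len are rounded to page boundaries exactly as in munmap(2)):
 *
 *	if (vmspace_munmap(&guest, base, ram_size) < 0)
 *		err(1, "vmspace_munmap");
 */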
/*
 * vmspace_pread(id, buf, nbyte, flags, offset)
 *
 * Read data from a vmspace.  The number of bytes read is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes read is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 */
int
sys_vmspace_pread(struct vmspace_pread_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);
	return (EINVAL);		/* not yet implemented */
}
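
/*
 * Illustrative sketch of the documented contract (hypothetical names;
 * the implementation above is still a stub).  A short read means the
 * guest address range faulted part-way through:
 *
 *	ssize_t n;
 *
 *	n = vmspace_pread(&guest, buf, len, 0, gpa);
 *	if (n >= 0 && (size_t)n < len)
 *		guest_resolve_fault(&guest, gpa + n);	// hypothetical
 */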
/*
 * vmspace_pwrite(id, buf, nbyte, flags, offset)
 *
 * Write data to a vmspace.  The number of bytes written is returned or
 * -1 if an unrecoverable error occurred.  If the number of bytes written is
 * less than the request size, a page fault occurred in the VMSPACE which
 * the caller must resolve in order to proceed.
 */
int
sys_vmspace_pwrite(struct vmspace_pwrite_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);
	return (EINVAL);		/* not yet implemented */
}
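
/*
 * Illustrative sketch (complement of the pread example; the same
 * short-transfer convention applies, and this too is currently a stub):
 *
 *	ssize_t n;
 *
 *	n = vmspace_pwrite(&guest, buf, len, 0, gpa);
 */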
/*
 * vmspace_mcontrol(id, addr, len, behav, value)
 *
 * madvise/mcontrol support for a vmspace.
 */
int
sys_vmspace_mcontrol(struct vmspace_mcontrol_args *uap)
{
	struct vkernel_proc *vkp;
	struct vmspace_entry *ve;
	vm_offset_t start, end;

	if ((vkp = curproc->p_vkernel) == NULL)
		return (EINVAL);
	if ((ve = vkernel_find_vmspace(vkp, uap->id)) == NULL)
		return (ENOENT);

	/*
	 * This code is basically copied from sys_mcontrol()
	 */
	if (uap->behav < 0 || uap->behav > MADV_CONTROL_END)
		return (EINVAL);

	if (VM_MAX_USER_ADDRESS > 0 &&
	    ((vm_offset_t)uap->addr + uap->len) > VM_MAX_USER_ADDRESS)
		return (EINVAL);
	if (VM_MIN_USER_ADDRESS > 0 && uap->addr < VM_MIN_USER_ADDRESS)
		return (EINVAL);
	if (((vm_offset_t)uap->addr + uap->len) < (vm_offset_t)uap->addr)
		return (EINVAL);

	start = trunc_page((vm_offset_t)uap->addr);
	end = round_page((vm_offset_t)uap->addr + uap->len);

	return (vm_map_madvise(&ve->vmspace->vm_map, start, end,
			       uap->behav, uap->value));
}
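
/*
 * Illustrative sketch (base, ram_size, and pte0 are hypothetical):
 * after editing a software page table under a MAP_VPAGETABLE mapping,
 * a virtual kernel typically installs the page table root with
 * MADV_SETMAP and flushes stale translations with MADV_INVAL:
 *
 *	vmspace_mcontrol(&guest, base, ram_size, MADV_SETMAP, pte0);
 *	vmspace_mcontrol(&guest, base, ram_size, MADV_INVAL, 0);
 */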
/*
 * Red black tree functions
 */
static int rb_vmspace_compare(struct vmspace_entry *, struct vmspace_entry *);
RB_GENERATE(vmspace_rb_tree, vmspace_entry, rb_entry, rb_vmspace_compare);

/*
 * Entries are ordered by id; id is the only field of a lookup key that
 * has to be initialized.
 */
static int
rb_vmspace_compare(struct vmspace_entry *a, struct vmspace_entry *b)
{
	if ((char *)a->id < (char *)b->id)
		return (-1);
	else if ((char *)a->id > (char *)b->id)
		return (1);
	return (0);
}
static int
rb_vmspace_delete(struct vmspace_entry *ve, void *data)
{
	struct vkernel_proc *vkp = data;

	KKASSERT(ve->refs == 0);
	vmspace_entry_delete(ve, vkp);
	return (0);
}
/*
 * Remove a vmspace_entry from the RB tree and destroy it.  We have to clean
 * up the pmap, the vm_map, then destroy the vmspace.
 */
static void
vmspace_entry_delete(struct vmspace_entry *ve, struct vkernel_proc *vkp)
{
	RB_REMOVE(vmspace_rb_tree, &vkp->root, ve);

	pmap_remove_pages(vmspace_pmap(ve->vmspace),
			  VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	vm_map_remove(&ve->vmspace->vm_map,
		      VM_MIN_USER_ADDRESS, VM_MAX_USER_ADDRESS);
	sysref_put(&ve->vmspace->vm_sysref);
	kfree(ve, M_VKERNEL);
}
static struct vmspace_entry *
vkernel_find_vmspace(struct vkernel_proc *vkp, void *id)
{
	struct vmspace_entry *ve;
	struct vmspace_entry key;

	key.id = id;
	ve = RB_FIND(vmspace_rb_tree, &vkp->root, &key);
	return (ve);
}
/*
 * Manage vkernel refs, used by the kernel when fork()ing or exit()ing
 * a vkernel process.
 */
void
vkernel_inherit(struct proc *p1, struct proc *p2)
{
	struct vkernel_proc *vkp;

	vkp = p1->p_vkernel;
	KKASSERT(vkp->refs > 0);
	atomic_add_int(&vkp->refs, 1);
	p2->p_vkernel = vkp;
}
void
vkernel_exit(struct proc *p)
{
	struct vkernel_proc *vkp;
	struct lwp *lp;
	int freeme = 0;

	vkp = p->p_vkernel;

	/*
	 * Restore the original VM context if we are killed while running
	 * a different one.
	 *
	 * This isn't supposed to happen.  What is supposed to happen is
	 * that the process should enter vkernel_trap() before the handling
	 * of the signal.
	 */
	RB_FOREACH(lp, lwp_rb_tree, &p->p_lwp_tree) {
		vkernel_lwp_exit(lp);
	}

	/*
	 * Dereference the common area
	 */
	p->p_vkernel = NULL;
	KKASSERT(vkp->refs > 0);
	spin_lock_wr(&vkp->spin);
	if (--vkp->refs == 0)
		freeme = 1;
	spin_unlock_wr(&vkp->spin);

	if (freeme) {
		RB_SCAN(vmspace_rb_tree, &vkp->root, NULL,
			rb_vmspace_delete, vkp);
		kfree(vkp, M_VKERNEL);
	}
}
void
vkernel_lwp_exit(struct lwp *lp)
{
	struct vkernel_lwp *vklp;
	struct vmspace_entry *ve;

	if ((vklp = lp->lwp_vkernel) != NULL) {
		if ((ve = vklp->ve) != NULL) {
			kprintf("Warning, pid %d killed with "
				"active VC!\n", lp->lwp_proc->p_pid);
			pmap_setlwpvm(lp, lp->lwp_proc->p_vmspace);
			vklp->ve = NULL;
			KKASSERT(ve->refs > 0);
			atomic_subtract_int(&ve->refs, 1);
		}
		lp->lwp_vkernel = NULL;
		kfree(vklp, M_VKERNEL);
	}
}
/*
 * A VM space under virtual kernel control trapped out or made a system call
 * or otherwise needs to return control to the virtual kernel context.
 */
int
vkernel_trap(struct lwp *lp, struct trapframe *frame)
{
	struct proc *p = lp->lwp_proc;
	struct vmspace_entry *ve;
	struct vkernel_lwp *vklp;
	int error;

	/*
	 * Which vmspace entry was running?
	 */
	vklp = lp->lwp_vkernel;
	KKASSERT(vklp);
	ve = vklp->ve;
	KKASSERT(ve != NULL);

	/*
	 * Switch the LWP vmspace back to the virtual kernel's VM space.
	 */
	vklp->ve = NULL;
	pmap_setlwpvm(lp, p->p_vmspace);
	KKASSERT(ve->refs > 0);
	atomic_subtract_int(&ve->refs, 1);

	/*
	 * Copy the emulated process frame to the virtual kernel process.
	 * The emulated process cannot change TLS descriptors so don't
	 * bother saving them, we already have a copy.
	 *
	 * Restore the virtual kernel's saved context so the virtual kernel
	 * process can resume.
	 */
	error = copyout(frame, vklp->user_trapframe, sizeof(*frame));
	bcopy(&vklp->save_trapframe, frame, sizeof(*frame));
	bcopy(&vklp->save_vextframe.vx_tls, &curthread->td_tls,
	      sizeof(vklp->save_vextframe.vx_tls));
	set_user_TLS();
	return (error);
}