sys/kern/sys_vmm.c
/*
 * Copyright (c) 2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Mihai Carabas <mihai.carabas@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/wait.h>
#include <sys/vmm.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/vmm.h>
#include <machine/vmparam.h>

#include <vm/vm_map.h>
/*
 * vmm guest system call:
 * - init the calling thread structure
 * - prepare for running in non-root mode
 */
int
sys_vmm_guest_ctl(struct vmm_guest_ctl_args *uap)
{
	int error = 0;
	struct vmm_guest_options options;
	struct trapframe *tf = uap->sysmsg_frame;
	unsigned long stack_limit = USRSTACK;
	unsigned char stack_page[PAGE_SIZE];

	clear_quickret();

	switch (uap->op) {
	case VMM_GUEST_RUN:
		error = copyin(uap->options, &options,
			       sizeof(struct vmm_guest_options));
		if (error) {
			kprintf("%s: error copyin vmm_guest_options\n",
				__func__);
			goto out;
		}

		/*
		 * Copy the caller's user stack, page by page, down to the
		 * new stack location so the guest continues on its own
		 * copy of it.
		 */
		while (stack_limit > tf->tf_sp) {
			stack_limit -= PAGE_SIZE;
			options.new_stack -= PAGE_SIZE;

			error = copyin((const void *)stack_limit,
				       (void *)stack_page, PAGE_SIZE);
			if (error) {
				kprintf("%s: error copyin stack\n",
					__func__);
				goto out;
			}

			error = copyout((const void *)stack_page,
					(void *)options.new_stack, PAGE_SIZE);
			if (error) {
				kprintf("%s: error copyout stack\n",
					__func__);
				goto out;
			}
		}

		bcopy(tf, &options.tf, sizeof(struct trapframe));

		error = vmm_vminit(&options);
		if (error) {
			if (error == ENODEV) {
				kprintf("%s: vmm_vminit failed - "
					"no VMM available\n", __func__);
				goto out;
			}
			kprintf("%s: vmm_vminit failed\n", __func__);
			goto out_exit;
		}

		generic_lwp_return(curthread->td_lwp, tf);

		error = vmm_vmrun();

		break;
	default:
		kprintf("%s: INVALID op\n", __func__);
		error = EINVAL;
		goto out;
	}
out_exit:
	exit1(W_EXITCODE(error, 0));
out:
	return (error);
}
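/*
 * Illustrative usage sketch (not part of this file): roughly how a vkernel
 * might enter guest mode through the syscall above.  The userland wrapper
 * call and field assignments below are assumptions for illustration; only
 * the VMM_GUEST_RUN op and the new_stack/tf fields are taken from this file.
 *
 *	struct vmm_guest_options opts;
 *
 *	bzero(&opts, sizeof(opts));
 *	opts.new_stack = (unsigned long)guest_stack_top; // hypothetical value
 *	if (vmm_guest_ctl(VMM_GUEST_RUN, &opts) < 0)	 // assumed wrapper
 *		err(1, "vmm_guest_ctl");
 *	// On success the calling thread resumes in non-root (guest) mode
 *	// using the trapframe captured at syscall time.
 */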
/*
 * The remote IPI will force the cpu out of any VMM mode it is
 * in.  When combined with bumping pm_invgen we can ensure that
 * INVEPT will be called when it returns.
 */
static void
vmm_exit_vmm(void *dummy __unused)
{
	/*
	 * Nothing to do here; receiving the IPI is itself what forces the
	 * target cpu out of VMM mode (see the comment above).
	 */
}
/*
 * Swap the 64 bit value between *dstaddr and *srcaddr in a pmap-safe manner
 * and invalidate the tlb on all cpus the vkernel is running on.
 *
 * If dstaddr is NULL, just invalidate the tlb on the current cpu.
 *
 * v = *srcaddr
 * v = swap(dstaddr, v)
 * *srcaddr = v
 */
int
sys_vmm_guest_sync_addr(struct vmm_guest_sync_addr_args *uap)
{
	int error = 0;
	cpulock_t olock;
	cpulock_t nlock;
	cpumask_t mask;
	struct proc *p = curproc;
	long v;

	if (p->p_vmm == NULL)
		return ENOSYS;
	if (uap->dstaddr == NULL)
		return 0;

	crit_enter_id("vmm_inval");

	/*
	 * Acquire CPULOCK_EXCL, spin while we wait.  This will cause any
	 * other cpu trying to use related VMMs to wait for us.
	 */
	KKASSERT(CPUMASK_TESTMASK(p->p_vmm_cpumask, mycpu->gd_cpumask) == 0);
	for (;;) {
		olock = p->p_vmm_cpulock & ~CPULOCK_EXCL;
		cpu_ccfence();
		nlock = olock | CPULOCK_EXCL;
		if (atomic_cmpset_int(&p->p_vmm_cpulock, olock, nlock))
			break;
		lwkt_process_ipiq();
		cpu_pause();
	}
	/*
	 * Wait for other cpus to exit VMM mode (for this vkernel).  No
	 * new cpus will enter VMM mode while we hold the lock.  New waiters
	 * may turn up though, so the wakeup() later on has to be
	 * unconditional.
	 *
	 * We must test on p_vmm_cpulock's counter, not the mask, because
	 * VMM entries will set the mask bit unconditionally first
	 * (interlocking our IPI below) and then conditionally bump the
	 * counter.
	 */
	if (olock & CPULOCK_CNTMASK) {
		mask = p->p_vmm_cpumask;
		CPUMASK_ANDMASK(mask, mycpu->gd_other_cpus);
		lwkt_send_ipiq_mask(mask, vmm_exit_vmm, NULL);
		while (p->p_vmm_cpulock & CPULOCK_CNTMASK) {
			lwkt_process_ipiq();
			cpu_pause();
		}
	}
#ifndef _KERNEL_VIRTUAL
	/*
	 * Ensure that any new entries into VMM mode using VMMs managed
	 * under this process will issue an INVEPT before resuming.
	 */
	atomic_add_acq_long(&p->p_vmspace->vm_pmap.pm_invgen, 1);
#endif
	/*
	 * Make the requested modification, wakeup any waiters.
	 */
	v = fuword64(uap->srcaddr);
	v = swapu64(uap->dstaddr, v);
	suword64(uap->srcaddr, v);

	/*
	 * VMMs on remote cpus will not be re-entered until we
	 * clear the lock.
	 */
	atomic_clear_int(&p->p_vmm_cpulock, CPULOCK_EXCL);
#if 0
	wakeup(&p->p_vmm_cpulock);
#endif

	crit_exit_id("vmm_inval");

	return error;
}
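/*
 * Illustrative usage sketch (not part of this file): the caller-side pattern
 * the syscall above supports.  A vkernel replacing a guest page table entry
 * can swap in the new value and at the same time force every cpu running the
 * vkernel out of VMM mode, so stale EPT/TLB state is flushed on re-entry.
 * The wrapper call and variable names below are assumptions; only the
 * (dstaddr, srcaddr) swap semantics come from this file.
 *
 *	long val = new_pte;			// value to install
 *
 *	vmm_guest_sync_addr(&guest_pte, &val);	// assumed wrapper
 *	// val now holds the previous contents of guest_pte; all cpus in this
 *	// vkernel have exited VMM mode and will INVEPT before resuming.
 */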