/*
 * Copyright (c) 2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Mihai Carabas <mihai.carabas@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/wait.h>
#include <sys/vmm.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

#include <machine/cpu.h>
#include <machine/vmm.h>
/*
 * vmm guest system call:
 * - init the calling thread structure
 * - prepare for running in non-root mode
 */
int
sys_vmm_guest_ctl(struct vmm_guest_ctl_args *uap)
{
	int error = 0;
	struct vmm_guest_options options;
	struct trapframe *tf = uap->sysmsg_frame;
	unsigned long stack_limit = USRSTACK;
	unsigned char stack_page[PAGE_SIZE];

	switch (uap->op) {
	case VMM_GUEST_RUN:
		error = copyin(uap->options, &options,
			       sizeof(struct vmm_guest_options));
		if (error) {
			kprintf("%s: error copyin vmm_guest_options\n",
				__func__);
			goto out;
		}

		/*
		 * Copy the caller's current stack, one page at a time,
		 * to the new guest stack before switching to it.
		 */
		while (stack_limit > tf->tf_sp) {
			stack_limit -= PAGE_SIZE;
			options.new_stack -= PAGE_SIZE;

			error = copyin((const void *)stack_limit,
				       (void *)stack_page, PAGE_SIZE);
			if (error) {
				kprintf("%s: error copyin stack\n",
					__func__);
				goto out;
			}

			error = copyout((const void *)stack_page,
					(void *)options.new_stack, PAGE_SIZE);
			if (error) {
				kprintf("%s: error copyout stack\n",
					__func__);
				goto out;
			}
		}

		/*
		 * Save the caller's trapframe so the guest resumes with
		 * the caller's register state.
		 */
		bcopy(tf, &options.tf, sizeof(struct trapframe));

		error = vmm_vminit(&options);
		if (error) {
			if (error == ENODEV) {
				kprintf("%s: vmm_vminit failed - "
					"no VMM available \n", __func__);
				goto out;
			}
			kprintf("%s: vmm_vminit failed\n", __func__);
			goto out_exit;
		}

		generic_lwp_return(curthread->td_lwp, tf);

		error = vmm_vmrun();
		break;
	default:
		kprintf("%s: INVALID op\n", __func__);
		error = EINVAL;
		goto out;
	}
out_exit:
	exit1(W_EXITCODE(error, 0));
out:
	return error;
}
/*
 * The remote IPI will force the cpu out of any VMM mode it is
 * in.  When combined with bumping pm_invgen we can ensure that
 * INVEPT will be called when it returns.
 */
static void
vmm_exit_vmm(void *dummy __unused)
{
}
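/*
 * Illustrative sketch only (kept under #if 0, not compiled): the intended
 * interplay between the IPI above and pm_invgen.  A cpu re-entering VMM
 * mode compares its last-seen generation against pm_invgen and issues an
 * INVEPT when it has changed.  The per-cpu field gd_vmm_invgen and the
 * vmm_invept() helper are placeholders, not the real entry-path names.
 */
#if 0
static void
example_vmm_entry_invept_check(struct proc *p)
{
	long gen = p->p_vmspace->vm_pmap.pm_invgen;

	if (mycpu->gd_vmm_invgen != gen) {	/* hypothetical per-cpu copy */
		vmm_invept();			/* hypothetical INVEPT wrapper */
		mycpu->gd_vmm_invgen = gen;
	}
}
#endif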
/*
 * Swap the 64 bit value between *dstaddr and *srcaddr in a pmap-safe manner
 * and invalidate the tlb on all cpus the vkernel is running on.
 *
 * If dstaddr is NULL, just invalidate the tlb on the current cpu.
 *
 * v = *srcaddr
 * v = swap(dstaddr, v)
 * *srcaddr = v
 */
int
sys_vmm_guest_sync_addr(struct vmm_guest_sync_addr_args *uap)
{
	int error = 0;
	cpulock_t olock;
	cpulock_t nlock;
	cpumask_t mask;
	long v;
	struct proc *p = curproc;

	if (p->p_vmm == NULL)
		return ENOSYS;
	if (uap->dstaddr == NULL)
		return 0;

	crit_enter_id("vmm_inval");

	/*
	 * Acquire CPULOCK_EXCL, spin while we wait.  This will prevent
	 * any other cpu trying to use related VMMs to wait for us.
	 */
	KKASSERT(CPUMASK_TESTMASK(p->p_vmm_cpumask, mycpu->gd_cpumask) == 0);
	for (;;) {
		olock = p->p_vmm_cpulock & ~CPULOCK_EXCL;
		nlock = olock | CPULOCK_EXCL;
		if (atomic_cmpset_int(&p->p_vmm_cpulock, olock, nlock))
			break;
		cpu_pause();
	}

	/*
	 * Wait for other cpu's to exit VMM mode (for this vkernel).  No
	 * new cpus will enter VMM mode while we hold the lock.  New waiters
	 * may turn-up though so the wakeup() later on has to be
	 * unconditional.
	 *
	 * We must test on p_vmm_cpulock's counter, not the mask, because
	 * VMM entries will set the mask bit unconditionally first
	 * (interlocking our IPI below) and then conditionally bump the
	 * counter.
	 */
	if (olock & CPULOCK_CNTMASK) {
		mask = p->p_vmm_cpumask;
		CPUMASK_ANDMASK(mask, mycpu->gd_other_cpus);
		lwkt_send_ipiq_mask(mask, vmm_exit_vmm, NULL);
		while (p->p_vmm_cpulock & CPULOCK_CNTMASK) {
			lwkt_process_ipiq();
			cpu_pause();
		}
	}

#ifndef _KERNEL_VIRTUAL
	/*
	 * Ensure that any new entries into VMM mode using
	 * vmm's managed under this process will issue a
	 * INVEPT before resuming.
	 */
	atomic_add_acq_long(&p->p_vmspace->vm_pmap.pm_invgen, 1);
#endif

	/*
	 * Make the requested modification, wakeup any waiters.
	 */
	v = fuword64(uap->srcaddr);
	v = swapu64(uap->dstaddr, v);
	suword64(uap->srcaddr, v);

	/*
	 * VMMs on remote cpus will not be re-entered until we
	 * clear the lock.
	 */
	atomic_clear_int(&p->p_vmm_cpulock, CPULOCK_EXCL);
	wakeup(&p->p_vmm_cpulock);

	crit_exit_id("vmm_inval");

	return error;
}
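/*
 * Illustrative sketch only (kept under #if 0, not compiled): how a
 * vkernel's pmap code might use this system call to swap a guest pte and
 * have the host invalidate the EPT/TLB on every cpu the vkernel runs on.
 * The userland stub vmm_guest_sync_addr(dstaddr, srcaddr) and the pte
 * handling are assumptions for the example only.
 */
#if 0
static long
example_swap_pte(long *ptep, long newpte)
{
	long v = newpte;

	/*
	 * Per the semantics above: v = *srcaddr, v = swap(dstaddr, v),
	 * *srcaddr = v.  On return v holds the previous contents of *ptep.
	 */
	vmm_guest_sync_addr(ptep, &v);
	return v;
}
#endif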