kernel - Simplify umtx_sleep and umtx_wakeup support
[dragonfly.git] / sys / kern / kern_umtx.c
/*
 * (MPSAFE)
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and David Xu <davidxu@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements userland mutex helper functions.  umtx_sleep()
 * handles blocking and umtx_wakeup() handles wakeups.  The sleep/wakeup
 * functions operate on user addresses.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>
#include <sys/sysent.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/module.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <vm/vm_page2.h>

#include <machine/vmm.h>

/*
 * Improve umtx performance by polling for 4uS before going to sleep.
 * This can avoid many IPIs in typical pthreads mutex situations.
 */
#ifdef _RDTSC_SUPPORTED_
static int umtx_delay = 4000;
SYSCTL_INT(_kern, OID_AUTO, umtx_delay, CTLFLAG_RW, &umtx_delay, 0, "");
#endif
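
/*
 * Usage note: the poll window is runtime-tunable through the
 * kern.umtx_delay sysctl, e.g. "sysctl kern.umtx_delay=0" disables
 * the pre-sleep polling entirely (see the umtx_delay test below).
 */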

/*
 * If the contents of the userland-supplied pointer matches the specified
 * value, enter an interruptible sleep for up to <timeout> microseconds.
 * If the contents do not match then return immediately.
 *
 * Returns 0 if we slept and were woken up, -1 and EWOULDBLOCK if we slept
 * and timed out, and EBUSY if the contents of the pointer already do not
 * match the specified value.  A timeout of 0 indicates an unlimited sleep.
 * EINTR is returned if the call was interrupted by a signal (even if
 * the signal specifies that the system call should restart).
 *
 * This function interlocks against calls to umtx_wakeup().  It does NOT
 * interlock against changes in *ptr.  However, it does not have to.  The
 * standard use of *ptr is to differentiate between an uncontested and a
 * contested mutex and to call umtx_wakeup() when releasing a contested
 * mutex.  Therefore we can safely race against changes in *ptr as long as
 * we are properly interlocked against the umtx_wakeup() call.
 *
 * The VM page associated with the mutex is held in an attempt to keep
 * the mutex's physical address consistent, allowing umtx_sleep() and
 * umtx_wakeup() to use the physical address as their rendezvous.  BUT
 * situations can arise where the physical address may change, particularly
 * if a threaded program fork()'s and the mutex's memory becomes
 * copy-on-write.  We register an event on the VM page to catch COWs.
 *
 * umtx_sleep { const int *ptr, int value, int timeout }
 */
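
/*
 * Illustrative sketch (not from this file): the standard userland
 * pattern described above, using a three-state mutex word where
 * 0 = free, 1 = locked, 2 = contested.  The atomic_* names are
 * assumptions standing in for whatever atomics the caller uses.
 *
 *	acquire:
 *		while (atomic_cmpset_int(&m, 0, 1) == 0) {
 *			if (atomic_swap_int(&m, 2) == 0)
 *				break;			.. got it after all
 *			umtx_sleep(&m, 2, 0);		.. sleep while m == 2
 *		}
 *	release:
 *		if (atomic_swap_int(&m, 0) == 2)
 *			umtx_wakeup(&m, 1);		.. wake one waiter
 */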
int
sys_umtx_sleep(struct umtx_sleep_args *uap)
{
        struct lwbuf lwb_cache;
        struct lwbuf *lwb;
        vm_page_t m;
        void *waddr;
        int offset;
        int timeout;
        int error;

        if (uap->timeout < 0)
                return (EINVAL);
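
        /*
         * Under a VMM the user pointer is a guest virtual address;
         * translate it to the guest physical address used for the
         * page lookup below.
         */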
        if (curthread->td_vmm) {
                register_t gpa;
                vmm_vm_get_gpa(curproc, &gpa, (register_t)uap->ptr);
                uap->ptr = (const int *)gpa;
        }

        if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
                return (EFAULT);

        /*
         * When faulting in the page, force any COW pages to be resolved.
         * Otherwise the physical page we sleep on may not match the page
         * being woken up.
         *
         * The returned page is held, and this hold count prevents it from
         * being paged out.  This is important since we are sleeping on what
         * is essentially a physical address (which avoids all sorts of
         * collision-space issues).
         *
         * WARNING! We can only use vm_fault_page*() for reading data.  We
         *          cannot use it for writing data because there is no pmap
         *          interlock to protect against flushes/pageouts.
         *
         *          (XXX with recent code work, this may no longer be an issue)
         *
         * WARNING! If the user program replaces the mapping of the underlying
         *          uap->ptr, the physical address may change and the umtx code
         *          will not be able to match wakeups with tsleeps.
         */
        m = vm_fault_page_quick((vm_offset_t)uap->ptr,
                                VM_PROT_READ | VM_PROT_WRITE, &error, NULL);
        if (m == NULL) {
                error = EFAULT;
                goto done;
        }
        lwb = lwbuf_alloc(m, &lwb_cache);
        offset = (vm_offset_t)uap->ptr & PAGE_MASK;

        error = EBUSY;
        if (*(int *)(lwbuf_kva(lwb) + offset) == uap->value) {
#ifdef _RDTSC_SUPPORTED_
                /*
                 * Poll a little while before sleeping, most mutexes are
                 * short-lived.
                 */
                if (umtx_delay) {
                        int64_t tsc_target;
                        int good = 0;

                        tsc_target = tsc_get_target(umtx_delay);
                        while (tsc_test_target(tsc_target) == 0) {
                                cpu_lfence();
                                if (*(int *)(lwbuf_kva(lwb) + offset) !=
                                    uap->value) {
                                        good = 1;
                                        break;
                                }
                                cpu_pause();
                        }
                        if (good) {
                                error = EBUSY;
                                goto skip;
                        }
                }
#endif

                /*
                 * Calculate the timeout.  This will be accurate to within
                 * ~2 ticks.
                 */
                if ((timeout = uap->timeout) != 0) {
                        timeout = (timeout / 1000000) * hz +
                                  ((timeout % 1000000) * hz + 999999) /
                                  1000000;
                }
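
                /*
                 * E.g. with hz=100, a timeout of 1500000uS becomes
                 * 1*100 + (500000*100 + 999999)/1000000 = 150 ticks;
                 * the +999999 rounds partial ticks up so the sleep is
                 * never shorter than requested.
                 */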

                /*
                 * Calculate the physical address of the mutex.  This gives
                 * us good distribution between unrelated processes using
                 * the feature.
                 */
                waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);

                /*
                 * Wake us up if the memory location COWs while we are
                 * sleeping.  Use a critical section to tighten up the
                 * interlock.  Also, tsleep_remove() requires the caller
                 * be in a critical section.
                 */
                crit_enter();

                /*
                 * We must interlock just before sleeping.  If we interlock
                 * before registration the lock operations done by the
                 * registration can interfere with it.
                 *
                 * We cannot leave our interlock hanging on return because
                 * this will interfere with umtx_wakeup() calls with limited
                 * wakeup counts.
                 */
                tsleep_interlock(waddr, PCATCH | PDOMAIN_UMTX);
                cpu_lfence();
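
                /*
                 * Re-test the user value now that the interlock is in
                 * place; a umtx_wakeup() arriving between this test and
                 * the tsleep() below is caught by PINTERLOCKED rather
                 * than lost.
                 */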
                if (*(int *)(lwbuf_kva(lwb) + offset) == uap->value) {
                        error = tsleep(waddr,
                                       PCATCH | PINTERLOCKED | PDOMAIN_UMTX,
                                       "umtxsl", timeout);
                } else {
                        tsleep_remove(curthread);
                        error = EBUSY;
                }
                crit_exit();
                /* Always break out in case of signal, even if restartable */
                if (error == ERESTART)
                        error = EINTR;
        } else {
                error = EBUSY;
        }
skip:
        lwbuf_free(lwb);
        vm_page_unhold(m);
done:
        return(error);
}

/*
 * umtx_wakeup { const int *ptr, int count }
 *
 * Wakeup the specified number of processes held in umtx_sleep() on the
 * specified user address.  A count of 0 wakes up all waiting processes.
 *
 * XXX assumes that the physical address space does not exceed the virtual
 * address space.
 */
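
/*
 * Illustrative usage: releasing a contested mutex typically wakes a
 * single waiter, umtx_wakeup(&m, 1); a broadcast (e.g. a condition
 * variable implementation tearing down) passes a count of 0 to wake
 * everyone.
 */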
int
sys_umtx_wakeup(struct umtx_wakeup_args *uap)
{
        vm_page_t m;
        int offset;
        int error;
        void *waddr;

        if (curthread->td_vmm) {
                register_t gpa;
                vmm_vm_get_gpa(curproc, &gpa, (register_t)uap->ptr);
                uap->ptr = (const int *)gpa;
        }

        /*
         * WARNING! We can only use vm_fault_page*() for reading data.  We
         *          cannot use it for writing data because there is no pmap
         *          interlock to protect against flushes/pageouts.
         */
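
        /*
         * Make sure the caller's prior store to *ptr is globally
         * visible before any sleeper is woken and re-tests the value.
         */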
        cpu_mfence();
        if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
                return (EFAULT);
        m = vm_fault_page_quick((vm_offset_t)uap->ptr,
                                VM_PROT_READ | VM_PROT_WRITE, &error, NULL);
        if (m == NULL) {
                error = EFAULT;
                goto done;
        }
        offset = (vm_offset_t)uap->ptr & PAGE_MASK;
        waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);

#if 1
        if (uap->count == 1) {
                wakeup_domain_one(waddr, PDOMAIN_UMTX);
        } else {
                /* XXX wakes them all up for now */
                wakeup_domain(waddr, PDOMAIN_UMTX);
        }
#else
        wakeup_domain(waddr, PDOMAIN_UMTX);
#endif
        vm_page_unhold(m);
        error = 0;
done:
        return(error);
}