/*
 * (MPSAFE)
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and David Xu <davidxu@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module implements userland mutex helper functions.  umtx_sleep()
 * handles blocking and umtx_wakeup() handles wakeups.  The sleep/wakeup
 * functions operate on user addresses.
 */
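
/*
 * Illustrative userland sketch (an assumption for exposition, not part
 * of this file): a minimal mutex built on these helpers, using the
 * 0 = unlocked, 1 = locked, 2 = locked-contested encoding the comments
 * below assume.  The umutex_* names and the gcc/clang __sync/__atomic
 * builtins are illustrative; the umtx_sleep()/umtx_wakeup() prototypes
 * come from userland <unistd.h>.
 *
 *	static void
 *	umutex_lock(volatile int *mtx)
 *	{
 *		// fast path: 0 -> 1 acquires the uncontested mutex
 *		if (__sync_val_compare_and_swap(mtx, 0, 1) == 0)
 *			return;
 *		// slow path: advertise contention (2), then sleep for as
 *		// long as the word still reads 2
 *		while (__atomic_exchange_n(mtx, 2, __ATOMIC_ACQUIRE) != 0)
 *			umtx_sleep(mtx, 2, 0);
 *	}
 *
 *	static void
 *	umutex_unlock(volatile int *mtx)
 *	{
 *		// release; wake one sleeper only if contention was seen
 *		if (__atomic_exchange_n(mtx, 0, __ATOMIC_RELEASE) == 2)
 *			umtx_wakeup(mtx, 1);
 *	}
 */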
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>
#include <sys/sysent.h>
#include <sys/syscall.h>
#include <sys/module.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <vm/vm_page2.h>

#include <machine/vmm.h>

static void umtx_sleep_page_action_cow(vm_page_t m, vm_page_action_t action);

/*
 * If the contents of the userland-supplied pointer matches the specified
 * value enter an interruptible sleep for up to <timeout> microseconds.
 * If the contents do not match then return immediately.
 *
 * Returns 0 if we slept and were woken up, -1 and EWOULDBLOCK if we slept
 * and timed out, and EBUSY if the contents of the pointer already do
 * not match the specified value.  A timeout of 0 indicates an unlimited sleep.
 * EINTR is returned if the call was interrupted by a signal (even if
 * the signal specifies that the system call should restart).
 *
 * This function interlocks against calls to umtx_wakeup().  It does NOT
 * interlock against changes in *ptr.  However, it does not have to.  The
 * standard use of *ptr is to differentiate between an uncontested and a
 * contested mutex and to call umtx_wakeup() when releasing a contested
 * mutex.  Therefore we can safely race against changes in *ptr as long
 * as we are properly interlocked against the umtx_wakeup() call.
 *
 * The VM page associated with the mutex is held in an attempt to keep
 * the mutex's physical address consistent, allowing umtx_sleep() and
 * umtx_wakeup() to use the physical address as their rendezvous.  BUT
 * situations can arise where the physical address may change, particularly
 * if a threaded program fork()'s and the mutex's memory becomes
 * copy-on-write.  We register an event on the VM page to catch COWs.
 *
 * umtx_sleep { const int *ptr, int value, int timeout }
 */
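
/*
 * Hedged caller-side sketch of the return contract above (not part of
 * this file; wait_contested() and CONTESTED are hypothetical, and the
 * errno handling assumes the usual userland syscall stubs): wait up to
 * one second for *ptr to leave the contested state.
 *
 *	static int
 *	wait_contested(volatile const int *ptr)
 *	{
 *		for (;;) {
 *			if (umtx_sleep(ptr, CONTESTED, 1000000) == 0)
 *				return (0);		// woken up
 *			if (errno == EBUSY)
 *				return (0);		// already changed
 *			if (errno == EWOULDBLOCK)
 *				return (ETIMEDOUT);	// timed out
 *			if (errno != EINTR)
 *				return (errno);		// EFAULT, EINVAL
 *			// EINTR: always returned on a signal, just retry
 *		}
 *	}
 */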
int
sys_umtx_sleep(struct umtx_sleep_args *uap)
{
        struct lwbuf lwb_cache;
        struct lwbuf *lwb;
        struct vm_page_action action;
        vm_page_t m;
        void *waddr;
        int offset;
        int timeout;
        int error = EBUSY;

        if (uap->timeout < 0)
                return (EINVAL);

        if (curthread->td_vmm) {
                register_t gpa;
                vmm_vm_get_gpa(curproc, &gpa, (register_t) uap->ptr);
                uap->ptr = (const int *)gpa;
        }

        if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
                return (EFAULT);

        /*
         * When faulting in the page, force any COW pages to be resolved.
         * Otherwise the physical page we sleep on may not match the page
         * being woken up.
         */
        m = vm_fault_page_quick((vm_offset_t)uap->ptr,
                                VM_PROT_READ|VM_PROT_WRITE, &error);
        if (m == NULL) {
                error = EFAULT;
                goto done;
        }
        lwb = lwbuf_alloc(m, &lwb_cache);
        offset = (vm_offset_t)uap->ptr & PAGE_MASK;

        /*
         * The critical section is required to interlock the tsleep against
         * a wakeup from another cpu.  The lfence forces synchronization.
         */
        if (*(int *)(lwbuf_kva(lwb) + offset) == uap->value) {
                if ((timeout = uap->timeout) != 0) {
                        timeout = (timeout / 1000000) * hz +
                                  ((timeout % 1000000) * hz + 999999) / 1000000;
                }
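                /*
                 * Worked example of the conversion above, assuming the
                 * default hz of 100: a timeout of 1500000us yields
                 * (1500000 / 1000000) * 100 = 100 ticks for the whole
                 * seconds plus (500000 * 100 + 999999) / 1000000 = 50
                 * ticks for the remainder, i.e. 150 ticks = 1.5 seconds.
                 * The +999999 rounds partial ticks up so the sleep is
                 * never shorter than requested.
                 */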
                waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);
                crit_enter();
                tsleep_interlock(waddr, PCATCH | PDOMAIN_UMTX);
                if (*(int *)(lwbuf_kva(lwb) + offset) == uap->value) {
                        vm_page_init_action(m, &action,
                                            umtx_sleep_page_action_cow, waddr);
                        vm_page_register_action(&action, VMEVENT_COW);
                        if (*(int *)(lwbuf_kva(lwb) + offset) == uap->value) {
                                error = tsleep(waddr, PCATCH | PINTERLOCKED |
                                               PDOMAIN_UMTX, "umtxsl", timeout);
                        } else {
                                error = EBUSY;
                        }
                        vm_page_unregister_action(&action);
                } else {
                        error = EBUSY;
                }
                crit_exit();
                /* Always break out in case of signal, even if restartable */
                if (error == ERESTART)
                        error = EINTR;
        } else {
                error = EBUSY;
        }

        lwbuf_free(lwb);
        /*vm_page_dirty(m); we don't actually dirty the page */
        vm_page_unhold(m);
done:
        return(error);
}

/*
 * If this page is being copied it may no longer represent the page
 * underlying our virtual address.  Wake up any umtx_sleep()'s
 * that were waiting on its physical address to force them to retry.
 */
static void
umtx_sleep_page_action_cow(vm_page_t m, vm_page_action_t action)
{
        wakeup_domain(action->data, PDOMAIN_UMTX);
}

/*
 * umtx_wakeup { const int *ptr, int count }
 *
 * Wake up the specified number of processes held in umtx_sleep() on the
 * specified user address.  A count of 0 wakes up all waiting processes.
 *
 * XXX assumes that the physical address space does not exceed the virtual
 * address space.
 */
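
/*
 * Hedged userland sketch of the count == 0 broadcast case (not part of
 * this file; the volatile int gate variable and its protocol are
 * illustrative assumptions):
 *
 *	// waiters block while the gate still reads 0
 *	while (gate == 0)
 *		umtx_sleep(&gate, 0, 0);
 *
 *	// opener publishes the new state, then wakes all waiters
 *	__atomic_store_n(&gate, 1, __ATOMIC_RELEASE);
 *	umtx_wakeup(&gate, 0);
 */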
int
sys_umtx_wakeup(struct umtx_wakeup_args *uap)
{
        vm_page_t m;
        int offset;
        int error;
        void *waddr;

        if (curthread->td_vmm) {
                register_t gpa;
                vmm_vm_get_gpa(curproc, &gpa, (register_t) uap->ptr);
                uap->ptr = (const int *)gpa;
        }

        cpu_mfence();
        if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
                return (EFAULT);
        m = vm_fault_page_quick((vm_offset_t)uap->ptr, VM_PROT_READ, &error);
        if (m == NULL) {
                error = EFAULT;
                goto done;
        }
        offset = (vm_offset_t)uap->ptr & PAGE_MASK;
        waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);

        if (uap->count == 1) {
                wakeup_domain_one(waddr, PDOMAIN_UMTX);
        } else {
                /* XXX wakes them all up for now */
                wakeup_domain(waddr, PDOMAIN_UMTX);
        }
        vm_page_unhold(m);
        error = 0;
done:
        return(error);
}