[dragonfly.git] / sys / kern / kern_umtx.c
/*
 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and David Xu <davidxu@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_umtx.c,v 1.7 2007/07/02 01:30:07 dillon Exp $
 */

/*
 * This module implements userland mutex helper functions.  umtx_sleep()
 * handles blocking and umtx_wakeup() handles wakeups.  The sleep/wakeup
 * functions operate on user addresses.
 */
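
/*
 * Illustrative userland usage (a sketch, not compiled into the kernel):
 * a minimal contested/uncontested mutex layered on these two system
 * calls.  The userland prototypes and the GCC __sync atomic builtins
 * used below are assumptions made for the example; the lock word
 * protocol is 0 = unlocked, 1 = locked, 2 = locked with waiters.
 *
 *      int umtx_sleep(volatile const int *ptr, int value, int timeout);
 *      int umtx_wakeup(volatile const int *ptr, int count);
 *
 *      static void
 *      mtx_lock(volatile int *mtx)
 *      {
 *              int c;
 *
 *              // fast path: 0 -> 1, no system call needed
 *              c = __sync_val_compare_and_swap(mtx, 0, 1);
 *              while (c != 0) {
 *                      // mark the mutex contested, then sleep while it
 *                      // still reads 2 (timeout 0 = sleep until woken)
 *                      if (c == 2 ||
 *                          __sync_val_compare_and_swap(mtx, 1, 2) != 0)
 *                              umtx_sleep(mtx, 2, 0);
 *                      c = __sync_val_compare_and_swap(mtx, 0, 2);
 *              }
 *      }
 *
 *      static void
 *      mtx_unlock(volatile int *mtx)
 *      {
 *              // atomic exchange; a real implementation would want
 *              // release ordering here
 *              if (__sync_lock_test_and_set(mtx, 0) == 2)
 *                      umtx_wakeup(mtx, 1);    // wake one waiter
 *      }
 */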

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>
#include <sys/sysent.h>
#include <sys/syscall.h>
#include <sys/sfbuf.h>
#include <sys/module.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

/*
 * If the contents of the userland-supplied pointer matches the specified
 * value enter an interruptible sleep for up to <timeout> microseconds.
 * If the contents do not match then return immediately.
 *
 * The specified timeout may not exceed 1 second.
 *
 * Returns 0 if we slept and were woken up, -1 and EWOULDBLOCK if we slept
 * and timed out, and EBUSY if the contents of the pointer did not match
 * the specified value.  A timeout of 0 indicates an unlimited sleep.
 * EINTR is returned if the call was interrupted by a signal (even if
 * the signal specifies that the system call should restart).
 *
 * This function interlocks against calls to umtx_wakeup().  It does NOT
 * interlock against changes in *ptr.  However, it does not have to.  The
 * standard use of *ptr is to differentiate between an uncontested and a
 * contested mutex and to call umtx_wakeup() when releasing a contested
 * mutex.  Therefore we can safely race against changes in *ptr as long as
 * we are properly interlocked against the umtx_wakeup() call.
 *
 * The VM page associated with the mutex is held to prevent reuse in order
 * to guarantee that the physical address remains consistent.
 *
 * umtx_sleep { const int *ptr, int value, int timeout }
 */
int
sys_umtx_sleep(struct umtx_sleep_args *uap)
{
        int error = EBUSY;
        struct sf_buf *sf;
        vm_page_t m;
        void *waddr;
        int offset;
        int timeout;

        if ((unsigned int)uap->timeout > 1000000)
                return (EINVAL);
        if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
                return (EFAULT);
        m = vm_fault_page_quick((vm_offset_t)uap->ptr, VM_PROT_READ, &error);
        if (m == NULL)
                return (EFAULT);
        sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
        offset = (vm_offset_t)uap->ptr & PAGE_MASK;

        if (*(int *)(sf_buf_kva(sf) + offset) == uap->value) {
                if ((timeout = uap->timeout) != 0)
                        timeout = (timeout * hz + 999999) / 1000000;
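                /*
                 * The conversion above rounds the microsecond count up,
                 * so a small non-zero timeout never truncates to 0 ticks
                 * (a timo of 0 would tell tsleep() to sleep with no time
                 * limit).
                 */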
                waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);
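                /*
                 * The physical address doubles as the tsleep()/wakeup()
                 * identifier, so sleepers and wakers in different
                 * processes that map the same page end up on the same
                 * wait channel (umtx_wakeup() computes the identical
                 * address below).
                 */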
                error = tsleep(waddr, PCATCH|PDOMAIN_UMTX, "umtxsl", timeout);
                /* Always break out in case of signal, even if restartable */
                if (error == ERESTART)
                        error = EINTR;
        } else {
                error = EBUSY;
        }

        sf_buf_free(sf);
        vm_page_unhold(m);
        return(error);
}
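
/*
 * Illustrative caller-side sketch (not compiled into the kernel): because
 * a single umtx_sleep() is capped at one second, a userland wait with a
 * longer bound simply loops in one-second slices and re-checks the word
 * after every return, treating EBUSY, EINTR and a timeout the same way.
 * The umtx_sleep() prototype is the same assumption as in the sketch near
 * the top of this file.
 *
 *      static int
 *      wait_while_equal(volatile const int *ptr, int value, long usec)
 *      {
 *              while (*ptr == value) {
 *                      if (usec <= 0)
 *                              return (-1);            // gave up
 *                      int slice = usec > 1000000 ? 1000000 : (int)usec;
 *                      umtx_sleep(ptr, value, slice);
 *                      usec -= slice;
 *              }
 *              return (0);                             // word changed
 *      }
 */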

/*
 * umtx_wakeup { const int *ptr, int count }
 *
 * Wakeup the specified number of processes held in umtx_sleep() on the
 * specified user address.  A count of 0 wakes up all waiting processes.
 *
 * XXX assumes that the physical address space does not exceed the virtual
 * address space.
 */
int
sys_umtx_wakeup(struct umtx_wakeup_args *uap)
{
        vm_page_t m;
        int offset;
        int error;
        void *waddr;

        cpu_mfence();
        if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
                return (EFAULT);
        m = vm_fault_page_quick((vm_offset_t)uap->ptr, VM_PROT_READ, &error);
        if (m == NULL)
                return (EFAULT);
        offset = (vm_offset_t)uap->ptr & PAGE_MASK;
        waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);

        if (uap->count == 1) {
                wakeup_domain_one(waddr, PDOMAIN_UMTX);
        } else {
                /* XXX wakes them all up for now */
                wakeup_domain(waddr, PDOMAIN_UMTX);
        }
        vm_page_unhold(m);
        return(0);
}
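
/*
 * Illustrative sketch (not compiled into the kernel): a one-shot userland
 * "gate" that releases every waiter at once, relying on the behaviour
 * documented above that a count of 0 wakes all sleepers.  The userland
 * prototypes are the same assumptions as in the sketch near the top of
 * this file.
 *
 *      static void
 *      gate_wait(volatile int *gate)
 *      {
 *              while (*gate == 0)
 *                      umtx_sleep(gate, 0, 0); // sleep only while still 0
 *      }
 *
 *      static void
 *      gate_open(volatile int *gate)
 *      {
 *              *gate = 1;              // publish the new value first
 *              umtx_wakeup(gate, 0);   // count 0 == wake all waiters
 *      }
 */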