/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and David Xu <davidxu@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_umtx.c,v 1.6 2007/01/11 21:53:41 dillon Exp $
 */
/*
 * This module implements userland mutex helper functions.  umtx_sleep()
 * handles blocking and umtx_wakeup() handles wakeups.  The sleep/wakeup
 * functions operate on user addresses.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysproto.h>
#include <sys/sysunion.h>
#include <sys/sysent.h>
#include <sys/syscall.h>
#include <sys/sfbuf.h>
#include <sys/module.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
/*
 * If the contents of the userland-supplied pointer match the specified
 * value, enter an interruptible sleep for up to <timeout> microseconds.
 * If the contents do not match, return immediately.
 *
 * The specified timeout may not exceed 1 second.
 *
 * Returns 0 if we slept and were woken up, -1 and ETIMEDOUT if we slept
 * and timed out, and EBUSY if the contents of the pointer did not match
 * the specified value.  A timeout of 0 indicates an unlimited sleep.
 * EINTR is returned if the call was interrupted by a signal.
 *
 * This function interlocks against calls to umtx_wakeup().  It does NOT
 * interlock against changes in *ptr.  However, it does not have to.  The
 * standard use of *ptr is to differentiate between an uncontested and a
 * contested mutex and to call umtx_wakeup() when releasing a contested
 * mutex.  Therefore we can safely race against changes in *ptr as long
 * as we are properly interlocked against the umtx_wakeup() call.
 *
 * The VM page associated with the mutex is held to prevent reuse in order
 * to guarantee that the physical address remains consistent.
 *
 * umtx_sleep { const int *ptr, int value, int timeout }
 */
int
sys_umtx_sleep(struct umtx_sleep_args *uap)
{
    int error = EBUSY;
    struct sf_buf *sf;
    vm_page_t m;
    void *waddr;
    int offset;
    int timeout;

    if ((unsigned int)uap->timeout > 1000000)
        return (EINVAL);
    if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
        return (EFAULT);
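
    /*
     * Fault in and hold the page containing the user lock word, then map
     * it with a cpu-private sf_buf so its contents can be compared against
     * the requested value from kernel context.  Holding the page keeps its
     * physical address stable for the duration of the call.
     */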
    m = vm_fault_page_quick((vm_offset_t)uap->ptr, VM_PROT_READ, &error);
    if (m == NULL)
        return (EFAULT);
    sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
    offset = (vm_offset_t)uap->ptr & PAGE_MASK;

    if (*(int *)(sf_buf_kva(sf) + offset) == uap->value) {
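        /*
         * Convert the timeout from microseconds to ticks, rounding up so
         * that a small non-zero request (e.g. 1us with hz=100) becomes at
         * least 1 tick instead of rounding down to 0, which tsleep() would
         * treat as an unlimited sleep.
         */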
        if ((timeout = uap->timeout) != 0)
            timeout = (timeout * hz + 999999) / 1000000;
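        /*
         * Use the physical address of the lock word as the tsleep() wait
         * channel so sleepers and wakers rendezvous on the same token no
         * matter how the page is mapped.  The page hold above keeps that
         * physical address from changing while we sleep.
         */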
        waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);
        error = tsleep(waddr, PCATCH|PDOMAIN_UMTX, "umtxsl", timeout);
        /* A timed wait cannot be restarted after a signal; use EINTR. */
        if (timeout != 0 && error == ERESTART)
            error = EINTR;
    } else {
        error = EBUSY;
    }

    sf_buf_free(sf);
    vm_page_unhold(m);
    return (error);
}
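
/*
 * Illustrative userland sketch (not part of this module) of the wait side
 * described above: take the mutex uncontested if possible, otherwise mark
 * it contested and block in umtx_sleep() only while it still holds the
 * contested value.  The atomic_cmpset_int()/atomic_swap_int() helpers and
 * the 0 = unlocked, 1 = locked, 2 = contested encoding are assumptions for
 * the example, not part of the umtx API; memory barriers are glossed over.
 *
 *	static void
 *	example_lock(volatile int *lk)
 *	{
 *		if (atomic_cmpset_int(lk, 0, 1))
 *			return;			(uncontested fast path)
 *		while (atomic_swap_int(lk, 2) != 0)
 *			umtx_sleep(lk, 2, 0);	(sleep only while contested)
 *	}
 *
 * If the owner releases the mutex between the swap and the umtx_sleep()
 * call, the kernel's value test above fails with EBUSY and the loop simply
 * retries, which is exactly the interlock the comment describes.
 */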

/*
 * umtx_wakeup { const int *ptr, int count }
 *
 * Wake up the specified number of processes held in umtx_sleep() on the
 * specified user address.  A count of 0 wakes up all waiting processes.
 *
 * XXX assumes that the physical address space does not exceed the virtual
 * address space.
 */
int
sys_umtx_wakeup(struct umtx_wakeup_args *uap)
{
    vm_page_t m;
    int offset;
    int error;
    void *waddr;

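    /*
     * Fence so the caller's prior store to *ptr (the release of the mutex)
     * is globally visible before we issue the wakeup; this is the interlock
     * against the value test in umtx_sleep().
     */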
    cpu_mfence();
    if ((vm_offset_t)uap->ptr & (sizeof(int) - 1))
        return (EFAULT);
    m = vm_fault_page_quick((vm_offset_t)uap->ptr, VM_PROT_READ, &error);
    if (m == NULL)
        return (EFAULT);
    offset = (vm_offset_t)uap->ptr & PAGE_MASK;
    waddr = (void *)((intptr_t)VM_PAGE_TO_PHYS(m) + offset);

    if (uap->count == 1) {
        wakeup_domain_one(waddr, PDOMAIN_UMTX);
    } else {
        /* XXX wakes them all up for now */
        wakeup_domain(waddr, PDOMAIN_UMTX);
    }
    vm_page_unhold(m);
    return (0);
}
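
/*
 * Illustrative userland sketch (not part of this module) of the release
 * side: clear the lock word and call umtx_wakeup() only when the old value
 * showed the mutex was contested, matching the usage described in the
 * umtx_sleep() comment above.  atomic_swap_int() and the 0/1/2 encoding
 * are assumptions for the example.
 *
 *	static void
 *	example_unlock(volatile int *lk)
 *	{
 *		if (atomic_swap_int(lk, 0) == 2)
 *			umtx_wakeup(lk, 1);	(wake one waiter if contested)
 *	}
 */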