Fix the __BSD_VISIBLE check in a few headers.
dragonfly.git: lib/libc/sysvipc/lock_generic.c
blob 7f2eeccbe4864a6d017d4d841dfd14848f064065
/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2005 Matthew Dillon <dillon@backplane.com>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/lib/libthread_xu/thread/thr_umtx.c,v 1.4 2008/04/14 20:12:41 dillon Exp $
 */
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <sys/time.h>

#include "sysvipc_lock_generic.h"
/*
 * This function is used to acquire a contested lock.
 */
int
__sysv_umtx_lock(volatile umtx_t *mtx, int timo)
{
        int v, errval, ret = 0;

        /* contested */
        do {
                v = *mtx;
                if (v == 2 || atomic_cmpset_acq_int(mtx, 1, 2)) {
                        if (timo == 0)
                                umtx_sleep(mtx, 2, timo);       /* sleep until woken */
                        else if ((errval = umtx_sleep(mtx, 2, timo)) > 0) {
                                if (errval == EAGAIN) {
                                        /* timed out; one last try before giving up */
                                        if (atomic_cmpset_acq_int(mtx, 0, 2))
                                                ret = 0;
                                        else
                                                ret = ETIMEDOUT;
                                        break;
                                }
                        }
                }
        } while (!atomic_cmpset_acq_int(mtx, 0, 2));    /* acquire 0 -> 2 */

        return (ret);
}
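The compare-and-set calls above imply the usual three-state umtx encoding: 0 is unlocked, 1 is locked with no waiters, and 2 is locked with waiters. __sysv_umtx_lock() is only the contested slow path; a caller is expected to attempt the uncontested 0 -> 1 transition itself and fall back to this routine when that fails. A minimal sketch of such a fast-path wrapper follows, assuming the same sysvipc_lock_generic.h environment as this file; the name sysv_lock() is hypothetical and does not appear here.

static __inline void
sysv_lock(volatile umtx_t *mtx)
{
        /* Fast path: uncontested 0 -> 1 acquire. */
        if (atomic_cmpset_acq_int(mtx, 0, 1))
                return;
        /* Contested: park in the slow path; timo == 0 means wait indefinitely. */
        __sysv_umtx_lock(mtx, 0);
}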
void
__sysv_umtx_unlock(volatile umtx_t *mtx)
{
        int v;

        for (;;) {
                v = *mtx;
                if (atomic_cmpset_acq_int(mtx, v, v - 1)) {
                        if (v != 1) {
                                /* there were waiters: clear and wake one */
                                *mtx = 0;
                                umtx_wakeup(mtx, 1);
                        }
                        break;
                }
        }
}
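Unlocking is the mirror image: the atomic decrement releases an uncontested lock directly (1 -> 0), while a contested lock (v == 2) is forced back to 0 and one sleeper is woken so it can retry the 0 -> 2 acquisition in __sysv_umtx_lock(). A hedged usage sketch, assuming a umtx_t embedded in memory shared between the cooperating processes and the hypothetical sysv_lock() wrapper above:

struct shared_counter {
        volatile umtx_t lock;           /* must start out as 0 (unlocked) */
        int             value;
};

static void
shared_counter_bump(struct shared_counter *sc)
{
        sysv_lock(&sc->lock);           /* 0 -> 1, or slow path on contention */
        sc->value++;                    /* critical section */
        __sysv_umtx_unlock(&sc->lock);  /* release; wakes a waiter if contested */
}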