 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2005 Matthew Dillon <dillon@backplane.com>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include "thr_private.h"
 * This function is used to acquire a contested lock.
41 __thr_umtx_lock(volatile umtx_t
*mtx
, int timo
)
43 int v
, errval
, ret
= 0;
48 if (v
== 2 || atomic_cmpset_acq_int(mtx
, 1, 2)) {
50 _umtx_sleep_err(mtx
, 2, timo
);
51 else if ( (errval
= _umtx_sleep_err(mtx
, 2, timo
)) > 0) {
52 if (errval
== EAGAIN
) {
53 if (atomic_cmpset_acq_int(mtx
, 0, 2))
61 } while (!atomic_cmpset_acq_int(mtx
, 0, 2));
67 __thr_umtx_unlock(volatile umtx_t
*mtx
)
73 if (atomic_cmpset_acq_int(mtx
, v
, v
-1)) {
76 _umtx_wakeup_err(mtx
, 1);
 * Low level timed umtx lock.  This function must never return
88 __thr_umtx_timedlock(volatile umtx_t
*mtx
, const struct timespec
*timeout
)
90 struct timespec ts
, ts2
, ts3
;
93 if ((timeout
->tv_sec
< 0) ||
94 (timeout
->tv_sec
== 0 && timeout
->tv_nsec
<= 0))
97 /* XXX there should have MONO timer! */
98 clock_gettime(CLOCK_REALTIME
, &ts
);
99 TIMESPEC_ADD(&ts
, &ts
, timeout
);
104 timo
= (int)(ts2
.tv_nsec
/ 1000);
110 ret
= __thr_umtx_lock(mtx
, timo
);
111 if (ret
!= EINTR
&& ret
!= ETIMEDOUT
)
113 clock_gettime(CLOCK_REALTIME
, &ts3
);
114 TIMESPEC_SUB(&ts2
, &ts
, &ts3
);
115 if (ts2
.tv_sec
< 0 ||
116 (ts2
.tv_sec
== 0 && ts2
.tv_nsec
<= 0)) {
125 _thr_umtx_wait(volatile umtx_t
*mtx
, int exp
, const struct timespec
*timeout
,
128 struct timespec ts
, ts2
, ts3
;
129 int timo
, errval
, ret
= 0;
134 if (timeout
== NULL
) {
135 while ( (errval
= _umtx_sleep_err(mtx
, exp
, 10000000)) > 0) {
138 if (errval
== EINTR
) {
143 if (errval
== ETIMEDOUT
|| errval
== EWOULDBLOCK
) {
146 "thr_umtx_wait: FAULT VALUE CHANGE "
147 "%d -> %d oncond %p\n",
158 if ((timeout
->tv_sec
< 0) ||
159 (timeout
->tv_sec
== 0 && timeout
->tv_nsec
<= 0))
162 clock_gettime(clockid
, &ts
);
163 TIMESPEC_ADD(&ts
, &ts
, timeout
);
168 timo
= (int)(ts2
.tv_nsec
/ 1000);
175 if ( (errval
= _umtx_sleep_err(mtx
, exp
, timo
)) > 0) {
176 if (errval
== EBUSY
) {
179 } else if (errval
== EINTR
) {
185 clock_gettime(clockid
, &ts3
);
186 TIMESPEC_SUB(&ts2
, &ts
, &ts3
);
187 if (ts2
.tv_sec
< 0 || (ts2
.tv_sec
== 0 && ts2
.tv_nsec
<= 0)) {
196 _thr_umtx_wake(volatile umtx_t
*mtx
, int count
)
198 _umtx_wakeup_err(mtx
, count
);