libthread_xu - Improve contended mutex code
[dragonfly.git] / lib / libthread_xu / thread / thr_umtx.c
blob 0ef4935d8846e53d0f5cf45419057b4278d6c671
/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2005 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <sys/time.h>

#include "thr_private.h"
/*
 * Compiler-only memory fence (no CPU fence instruction is emitted):
 * prevents the compiler from reordering or caching memory accesses
 * across this point.
 */
#define cpu_ccfence()	__asm __volatile("" : : : "memory")
/*
 * This function is used to acquire a contested lock.
 *
 * A non-zero *mtx holds the owner's id in the low 30 bits.
 * Bit 0x40000000 indicates that the lock is contested (other
 * threads may be sleeping on it and need a wakeup on release).
 */
int
__thr_umtx_lock(volatile umtx_t *mtx, int id, int timo)
{
	int v;
	int errval;
	int ret = 0;
	int retry = 4;

	v = *mtx;
	cpu_ccfence();
	id &= 0x3FFFFFFF;

	for (;;) {
		cpu_pause();
		if (v == 0) {
			if (atomic_fcmpset_int(mtx, &v, id))
				break;
			continue;
		}
		if (--retry) {
			sched_yield();
			v = *mtx;
			continue;
		}

		/*
		 * Set the waiting bit.  If the fcmpset fails v is loaded
		 * with the current content of the mutex, and if the waiting
		 * bit is already set, we can also sleep.
		 */
		if (atomic_fcmpset_int(mtx, &v, v|0x40000000) ||
		    (v & 0x40000000)) {
			if (timo == 0) {
				_umtx_sleep_err(mtx, v|0x40000000, timo);
			} else if ((errval = _umtx_sleep_err(mtx, v|0x40000000, timo)) > 0) {
				if (errval == EAGAIN) {
					if (atomic_cmpset_acq_int(mtx, 0, id))
						ret = 0;
					else
						ret = ETIMEDOUT;
					break;
				}
			}
		}
		retry = 4;
	}
	return (ret);
}
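
/*
 * Example (illustrative sketch only, not part of the original source):
 * the expected caller-side fast path.  An uncontested lock is a single
 * atomic 0 -> id transition; __thr_umtx_lock() is only the contested
 * fallback.  The helper name example_lock() is made up for this sketch;
 * the real fast path lives in the libthread_xu headers.
 */
#if 0
static int
example_lock(volatile umtx_t *mtx, int id)
{
	/* Fast path: grab an uncontested lock with one atomic op. */
	if (atomic_cmpset_acq_int(mtx, 0, id))
		return (0);

	/* Slow path: spin, then sleep.  A timo of 0 waits indefinitely. */
	return (__thr_umtx_lock(mtx, id, 0));
}
#endif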
/*
 * Inline followup when releasing a mutex.  The mutex has been released
 * but 'v' either doesn't match id or needs a wakeup.
 */
void
__thr_umtx_unlock(volatile umtx_t *mtx, int v, int id)
{
	if (v & 0x40000000) {
		_umtx_wakeup_err(mtx, 0);
		v &= 0x3FFFFFFF;
	}
	THR_ASSERT(v == id, "thr_umtx_unlock: wrong owner");
}
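
/*
 * Example (illustrative sketch only): the release-side fast path that
 * would lead into the followup above.  The owner swaps the lock word
 * to 0; if the previous value is not exactly its own id, either the
 * waiting bit was set or the owner is wrong, and __thr_umtx_unlock()
 * sorts it out.  The helper name and the use of atomic_swap_int() are
 * assumptions; the real inline lives in the libthread_xu headers.
 */
#if 0
static void
example_unlock(volatile umtx_t *mtx, int id)
{
	int v;

	v = atomic_swap_int(mtx, 0);	/* release, fetch previous value */
	if (v != id)
		__thr_umtx_unlock(mtx, v, id);
}
#endif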
/*
 * Low level timed umtx lock.  This function must never return
 * EINTR.
 */
int
__thr_umtx_timedlock(volatile umtx_t *mtx, int id,
    const struct timespec *timeout)
{
	struct timespec ts, ts2, ts3;
	int timo, ret;

	if ((timeout->tv_sec < 0) ||
	    (timeout->tv_sec == 0 && timeout->tv_nsec <= 0)) {
		return (ETIMEDOUT);
	}

	/* XXX we should be using a monotonic timer here! */
	clock_gettime(CLOCK_REALTIME, &ts);
	TIMESPEC_ADD(&ts, &ts, timeout);
	ts2 = *timeout;

	id &= 0x3FFFFFFF;

	for (;;) {
		/*
		 * Sleep in chunks of at most 1 second; timo is in
		 * microseconds.
		 */
		if (ts2.tv_nsec) {
			timo = (int)(ts2.tv_nsec / 1000);
			if (timo == 0)
				timo = 1;
		} else {
			timo = 1000000;
		}
		ret = __thr_umtx_lock(mtx, id, timo);
		if (ret != EINTR && ret != ETIMEDOUT)
			break;
		clock_gettime(CLOCK_REALTIME, &ts3);
		TIMESPEC_SUB(&ts2, &ts, &ts3);
		if (ts2.tv_sec < 0 ||
		    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
			ret = ETIMEDOUT;
			break;
		}
	}
	return (ret);
}
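
/*
 * Example (illustrative sketch only): calling __thr_umtx_timedlock()
 * with a relative 50ms timeout.  The function slices the remaining time
 * into microsecond-granularity sleeps and returns 0 or ETIMEDOUT, never
 * EINTR.  The helper name is made up for this sketch.
 */
#if 0
static int
example_timedlock_50ms(volatile umtx_t *mtx, int id)
{
	struct timespec rel;

	rel.tv_sec = 0;
	rel.tv_nsec = 50 * 1000 * 1000;		/* 50ms, relative */
	return (__thr_umtx_timedlock(mtx, id, &rel));
}
#endif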
/*
 * Regular umtx wait that cannot return EINTR.
 */
int
_thr_umtx_wait(volatile umtx_t *mtx, int exp, const struct timespec *timeout,
    int clockid)
{
	struct timespec ts, ts2, ts3;
	int timo, errval, ret = 0;

	cpu_ccfence();
	if (*mtx != exp)
		return (0);

	if (timeout == NULL) {
		/*
		 * NOTE: If no timeout, EINTR cannot be returned.  Ignore
		 *	 EINTR.
		 */
		while ((errval = _umtx_sleep_err(mtx, exp, 10000000)) > 0) {
			if (errval == EBUSY)
				break;
#if 0
			if (errval == ETIMEDOUT || errval == EWOULDBLOCK) {
				if (*mtx != exp) {
					fprintf(stderr,
					    "thr_umtx_wait: FAULT VALUE CHANGE "
					    "%d -> %d oncond %p\n",
					    exp, *mtx, mtx);
				}
			}
#endif
			if (*mtx != exp)
				return (0);
		}
		return (ret);
	}

	/*
	 * Timed waits can return EINTR.
	 */
	if ((timeout->tv_sec < 0) ||
	    (timeout->tv_sec == 0 && timeout->tv_nsec <= 0))
		return (ETIMEDOUT);

	clock_gettime(clockid, &ts);
	TIMESPEC_ADD(&ts, &ts, timeout);
	ts2 = *timeout;

	for (;;) {
		if (ts2.tv_nsec) {
			timo = (int)(ts2.tv_nsec / 1000);
			if (timo == 0)
				timo = 1;
		} else {
			timo = 1000000;
		}

		if ((errval = _umtx_sleep_err(mtx, exp, timo)) > 0) {
			if (errval == EBUSY) {
				ret = 0;
				break;
			}
			if (errval == EINTR) {
				ret = EINTR;
				break;
			}
		}

		clock_gettime(clockid, &ts3);
		TIMESPEC_SUB(&ts2, &ts, &ts3);
		if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
			ret = ETIMEDOUT;
			break;
		}
	}
	return (ret);
}
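
/*
 * Example (illustrative sketch only): a sequence-style consumer built
 * on _thr_umtx_wait().  It sleeps until *seq no longer equals the
 * observed value.  With a NULL timeout the wait is sliced into
 * 10-second kernel sleeps, cannot return EINTR, and the clockid is
 * unused.  The helper name is made up for this sketch.
 */
#if 0
static void
example_wait_for_change(volatile umtx_t *seq, int observed)
{
	while (*seq == observed)
		_thr_umtx_wait(seq, observed, NULL, CLOCK_REALTIME);
}
#endif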
/*
 * Simple version without a timeout which can also return EINTR.
 */
int
_thr_umtx_wait_intr(volatile umtx_t *mtx, int exp)
{
	int ret = 0;
	int errval;

	cpu_ccfence();
	for (;;) {
		if (*mtx != exp)
			return (0);
		errval = _umtx_sleep_err(mtx, exp, 10000000);
		if (errval == 0)
			break;
		if (errval == EBUSY)
			break;
		if (errval == EINTR) {
			ret = errval;
			break;
		}
		cpu_ccfence();
	}
	return (ret);
}
void
_thr_umtx_wake(volatile umtx_t *mtx, int count)
{
	_umtx_wakeup_err(mtx, count);
}
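
/*
 * Example (illustrative sketch only): the producer side matching the
 * wait sketch above.  It publishes a state change and then wakes
 * sleepers.  Passing a count of 0 here assumes the umtx_wakeup()
 * convention that 0 wakes all waiters; the helper name is made up.
 */
#if 0
static void
example_signal_change(volatile umtx_t *seq)
{
	atomic_add_int(seq, 1);		/* bump the sequence word */
	_thr_umtx_wake(seq, 0);		/* wake everyone sleeping on it */
}
#endif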