/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_MUTEX2_H_
#define _SYS_MUTEX2_H_

#ifndef _SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t *mtx, const char *ident)
{
	mtx->mtx_lock = 0;
	mtx->mtx_flags = 0;
	mtx->mtx_owner = NULL;
	mtx->mtx_exlink = NULL;
	mtx->mtx_shlink = NULL;
	mtx->mtx_ident = ident;
}

static __inline void
mtx_init_flags(mtx_t *mtx, const char *ident, uint32_t flags)
{
	mtx->mtx_lock = 0;
	mtx->mtx_flags = flags;
	mtx->mtx_owner = NULL;
	mtx->mtx_exlink = NULL;
	mtx->mtx_shlink = NULL;
	mtx->mtx_ident = ident;
}
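
/*
 * Usage sketch (illustrative only, not part of this header; the
 * example_* names are hypothetical): typical initialization followed
 * by the short-form blocking lock/unlock pair defined further below.
 */
#if 0
static mtx_t example_mtx;

static void
example_basic(void)
{
	mtx_init(&example_mtx, "exmtx");	/* unlocked, no refs */
	mtx_lock(&example_mtx);			/* block for exclusive lock */
	/* ... critical section ... */
	mtx_unlock(&example_mtx);
	mtx_uninit(&example_mtx);
}
#endif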
/*
 * Initialize a mtx link structure for deeper control over the mutex
 * operation.
 */
static __inline void
mtx_link_init(mtx_link_t *link)
{
	link->state = MTX_LINK_IDLE;
	link->callback = NULL;
}

/*
 * A link structure initialized this way causes mutex operations to not block,
 * caller must specify a callback.  Caller may still abort the mutex via
 * mtx_abort_ex_link().
 */
static __inline void
mtx_link_init_async(mtx_link_t *link,
		    void (*callback)(mtx_link_t *link, void *arg, int error),
		    void *arg)
{
	link->state = MTX_LINK_IDLE;
	link->callback = callback;
	link->arg = arg;
}
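
/*
 * Usage sketch (illustrative only; the example_* names are hypothetical):
 * an asynchronous exclusive-lock request.  With an async link the lock
 * call does not block; the callback fires with error == 0 once the lock
 * is acquired, or with an error if the request is aborted via
 * mtx_abort_ex_link().
 */
#if 0
static void
example_callback(mtx_link_t *link, void *arg, int error)
{
	if (error == 0) {
		/* we now hold the mutex exclusively */
	} else {
		/* request failed or was aborted; do not unlock */
	}
}

static void
example_async_request(mtx_t *mtx, mtx_link_t *link)
{
	mtx_link_init_async(link, example_callback, NULL);
	mtx_lock_ex_link(mtx, link, 0, 0);
}
#endif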
/*
 * Deinitialize a mutex.
 */
static __inline void
mtx_uninit(mtx_t *mtx)
{
	/* empty */
}

/*
 * Exclusive-lock a mutex, block until acquired or aborted.  Recursion
 * is allowed.
 *
 * This version of the function allows the mtx_link to be passed in, thus
 * giving the caller visibility for the link structure which is required
 * when calling mtx_abort_ex_link() or when requesting an asynchronous lock.
 *
 * The mutex may be aborted at any time while the passed link structure
 * is valid.
 */
static __inline int
mtx_lock_ex_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_link(mtx, link, flags, to));
	mtx->mtx_owner = curthread;
	link->state = MTX_LINK_ACQUIRED;

	return(0);
}
/*
 * Short-form exclusive-lock a mutex, block until acquired.  Recursion is
 * allowed.  This is equivalent to mtx_lock_ex(mtx, 0, 0).
 */
static __inline void
mtx_lock(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0) {
		_mtx_lock_ex(mtx, 0, 0);
		return;
	}
	mtx->mtx_owner = curthread;
}
/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_ex(mtx_t *mtx, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex(mtx, flags, to));
	mtx->mtx_owner = curthread;

	return(0);
}
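
/*
 * Usage sketch (illustrative only; example_lock_interruptible is
 * hypothetical): a blocking exclusive lock which can be interrupted by
 * a signal.  The tsleep() error code propagates out, so the lock must
 * not be released on failure.  PCATCH and hz come from the usual
 * kernel headers.
 */
#if 0
static int
example_lock_interruptible(mtx_t *mtx)
{
	int error;

	error = mtx_lock_ex(mtx, PCATCH, hz);
	if (error)
		return(error);		/* lock was never acquired */
	/* ... critical section ... */
	mtx_unlock_ex(mtx);
	return(0);
}
#endif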
static __inline int
mtx_lock_ex_quick(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_quick(mtx));
	mtx->mtx_owner = curthread;

	return(0);
}
static __inline int
mtx_lock_sh_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_link(mtx, link, flags, to));
	link->state = MTX_LINK_ACQUIRED;

	return(0);
}
/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_sh(mtx_t *mtx, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh(mtx, flags, to));

	return(0);
}

static __inline int
mtx_lock_sh_quick(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_quick(mtx));

	return(0);
}
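
/*
 * Usage sketch (illustrative only; example_reader is hypothetical):
 * shared (read) locking.  Multiple readers can hold the mutex at once;
 * writers use the exclusive forms.
 */
#if 0
static void
example_reader(mtx_t *mtx)
{
	mtx_lock_sh_quick(mtx);		/* block for shared lock */
	/* ... read, but do not modify, the protected data ... */
	mtx_unlock_sh(mtx);
}
#endif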
/*
 * Adds a shared lock reference to a lock already locked shared,
 * does not block on pending exclusive request.
 */
static __inline void
mtx_lock_sh_again(mtx_t *mtx)
{
	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 &&
		 (mtx->mtx_lock & MTX_MASK) > 0);
	atomic_add_int(&mtx->mtx_lock, 1);
}
/*
 * Short-form exclusive spinlock a mutex.  Must be paired with
 * mtx_spinunlock().
 */
static __inline void
mtx_spinlock(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section
	 */
	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	/*
	 * If we cannot get it trivially get it the hard way.
	 *
	 * Note that mtx_owner will be set twice if we fail to get it
	 * trivially, but there's no point conditionalizing it as a
	 * conditional will be slower.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		_mtx_spinlock(mtx);
	mtx->mtx_owner = gd->gd_curthread;
}
static __inline int
mtx_spinlock_try(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section
	 */
	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	/*
	 * If we cannot get it trivially call _mtx_spinlock_try().  This
	 * function will clean up the hard critical section if it fails.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_spinlock_try(mtx));
	mtx->mtx_owner = gd->gd_curthread;

	return(0);
}
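
/*
 * Usage sketch (illustrative only; example_spin is hypothetical): the
 * spinlock forms enter a hard critical section and busy-wait instead
 * of sleeping, so the protected section must be short and must not
 * block.
 */
#if 0
static void
example_spin(mtx_t *mtx)
{
	mtx_spinlock(mtx);
	/* ... short, non-blocking critical section ... */
	mtx_spinunlock(mtx);
}
#endif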
/*
 * Attempt to exclusive-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_ex_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return (_mtx_lock_ex_try(mtx));
	mtx->mtx_owner = curthread;

	return (0);
}
/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return (_mtx_lock_sh_try(mtx));

	return (0);
}
/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t *mtx)
{
	globaldata_t gd __debugvar = mycpu;

	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) &&
		 mtx->mtx_owner == gd->gd_curthread);
	mtx->mtx_owner = NULL;
	if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 1) == 0)
		_mtx_downgrade(mtx);
}
/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = curthread;
		return(0);
	}
	return (_mtx_upgrade_try(mtx));
}
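
/*
 * Usage sketch (illustrative only; example_upgrade is hypothetical):
 * attempt an in-place upgrade from shared to exclusive and fall back
 * to relocking on EDEADLK.  This sketch assumes the shared hold is
 * retained when the upgrade fails, so the caller drops it explicitly;
 * any state inspected under the shared lock must then be revalidated.
 */
#if 0
static void
example_upgrade(mtx_t *mtx)
{
	mtx_lock_sh_quick(mtx);
	/* ... decide the data must be modified ... */
	if (mtx_upgrade_try(mtx) != 0) {
		mtx_unlock_sh(mtx);
		mtx_lock_ex_quick(mtx);
		/* ... revalidate state ... */
	}
	/* ... modify the protected data ... */
	mtx_unlock_ex(mtx);
}
#endif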
/*
 * Optimized unlock cases.
 *
 * NOTE: mtx_unlock() handles any type of mutex: exclusive, shared, and
 *	 both blocking and spin methods.
 *
 *	 The mtx_unlock_ex/sh() forms are optimized for exclusive or shared
 *	 mutexes and produce less code, but it is ok for code to just use
 *	 mtx_unlock() and, in fact, if code uses the short-form mtx_lock()
 *	 or mtx_spinlock() to lock it should also use mtx_unlock() to unlock.
 */
static __inline void
mtx_unlock(mtx_t *mtx)
{
	globaldata_t gd __debugvar = mycpu;
	u_int lock = mtx->mtx_lock;

	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
		 mtx->mtx_owner == gd->gd_curthread);
	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else if (lock == 1) {
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}
static __inline void
mtx_unlock_ex(mtx_t *mtx)
{
	globaldata_t gd __debugvar = mycpu;
	u_int lock = mtx->mtx_lock;

	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
		 mtx->mtx_owner == gd->gd_curthread);
	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}
static __inline void
mtx_unlock_sh(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
		_mtx_unlock(mtx);
}
/*
 * NOTE: spinlocks are exclusive-only
 */
static __inline void
mtx_spinunlock(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	mtx_unlock(mtx);

	cpu_ccfence();
	--gd->gd_spinlocks;
	crit_exit_raw(gd->gd_curthread);
}
/*
 * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
 * anyone, including the owner.
 */
static __inline int
mtx_islocked(mtx_t *mtx)
{
	return(mtx->mtx_lock != 0);
}
/*
 * Return TRUE (non-zero) if the mutex is locked exclusively by anyone,
 * including the owner.  Returns FALSE (0) if the mutex is unlocked or
 * if it is locked shared by one or more entities.
 *
 * A caller wishing to check whether a lock is owned exclusively by it
 * should use mtx_owned().
 */
static __inline int
mtx_islocked_ex(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) != 0);
}
/*
 * Return TRUE (non-zero) if the mutex is not locked.
 */
static __inline int
mtx_notlocked(mtx_t *mtx)
{
	return(mtx->mtx_lock == 0);
}
/*
 * Return TRUE (non-zero) if the mutex is not locked exclusively.
 * The mutex may be in an unlocked or shared lock state.
 */
static __inline int
mtx_notlocked_ex(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0);
}
/*
 * Return TRUE (non-zero) if the mutex is exclusively locked by
 * the caller.
 */
static __inline int
mtx_owned(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread);
}
/*
 * Return TRUE (non-zero) if the mutex is not exclusively locked by
 * the caller.
 */
static __inline int
mtx_notowned(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
	       mtx->mtx_owner != curthread);
}
/*
 * Return the shared or exclusive lock count.  A return value of 0
 * indicates that the mutex is not locked.
 *
 * NOTE: If the mutex is held exclusively by someone other than the
 *	 caller the lock count for the other owner is still returned.
 */
static __inline int
mtx_lockrefs(mtx_t *mtx)
{
	return(mtx->mtx_lock & MTX_MASK);
}
/*
 * Lock must be held and will be released on return.  Returns state
 * which can be passed to mtx_lock_temp_restore() to return the
 * lock to its previous state.
 */
static __inline mtx_state_t
mtx_lock_temp_release(mtx_t *mtx)
{
	mtx_state_t state;

	state = (mtx->mtx_lock & MTX_EXCLUSIVE);
	mtx_unlock(mtx);

	return state;
}
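
/*
 * Usage sketch (illustrative only; example_temp_drop is hypothetical):
 * temporarily drop a held lock around an operation which must not hold
 * it, then restore it in its prior shared or exclusive mode via
 * mtx_lock_temp_restore() below.
 */
#if 0
static void
example_temp_drop(mtx_t *mtx)
{
	mtx_state_t state;

	state = mtx_lock_temp_release(mtx);	/* lock is now released */
	/* ... blocking operation performed without the mutex ... */
	mtx_lock_temp_restore(mtx, state);	/* reacquired in prior mode */
}
#endif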
/*
 * Restore the previous state of a lock released with
 * mtx_lock_temp_release() or mtx_lock_upgrade().
 */
static __inline void
mtx_lock_temp_restore(mtx_t *mtx, mtx_state_t state)
{
	if (state & MTX_EXCLUSIVE)
		mtx_lock_ex_quick(mtx);
	else
		mtx_lock_sh_quick(mtx);
}

#endif /* _SYS_MUTEX2_H_ */