Remove an unused include file.
[dragonfly.git] / sys / sys / mutex2.h
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef	_SYS_MUTEX2_H_
#define	_SYS_MUTEX2_H_

#ifndef	_SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef	_SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef	_SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>

/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t *mtx, const char *ident)
{
	mtx->mtx_lock = 0;
	mtx->mtx_flags = 0;
	mtx->mtx_owner = NULL;
	mtx->mtx_exlink = NULL;
	mtx->mtx_shlink = NULL;
	mtx->mtx_ident = ident;
}

static __inline void
mtx_init_flags(mtx_t *mtx, const char *ident, uint32_t flags)
{
	mtx->mtx_lock = 0;
	mtx->mtx_flags = flags;
	mtx->mtx_owner = NULL;
	mtx->mtx_exlink = NULL;
	mtx->mtx_shlink = NULL;
	mtx->mtx_ident = ident;
}

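/*
 * Example (illustrative sketch, not part of the original header; the
 * structure and ident string are hypothetical): initializing a mutex
 * embedded in a larger structure.
 *
 *	struct mydata {
 *		mtx_t	md_lock;
 *	};
 *
 *	static void
 *	mydata_init(struct mydata *md)
 *	{
 *		mtx_init(&md->md_lock, "mydata");
 *	}
 */
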
/*
 * Initialize a mtx link structure for deeper control over the mutex
 * operation.
 */
static __inline void
mtx_link_init(mtx_link_t *link)
{
	link->state = MTX_LINK_IDLE;
	link->callback = NULL;
	link->arg = NULL;
}

/*
 * A link structure initialized this way causes mutex operations not to
 * block; the caller must specify a callback.  The caller may still abort
 * the mutex via the link.
 */
static __inline void
mtx_link_init_async(mtx_link_t *link,
		    void (*callback)(mtx_link_t *link, void *arg, int error),
		    void *arg)
{
	link->state = MTX_LINK_IDLE;
	link->callback = callback;
	link->arg = arg;
}

/*
 * Deinitialize a mutex.
 */
static __inline void
mtx_uninit(mtx_t *mtx)
{
	/* empty */
}

/*
 * Exclusive-lock a mutex, block until acquired or aborted.  Recursion
 * is allowed.
 *
 * This version of the function allows the mtx_link to be passed in, thus
 * giving the caller visibility for the link structure which is required
 * when calling mtx_abort_ex_link() or when requesting an asynchronous lock.
 *
 * The mutex may be aborted at any time while the passed link structure
 * is valid.
 */
static __inline int
mtx_lock_ex_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_link(mtx, link, flags, to));
	mtx->mtx_owner = curthread;
	link->state = MTX_LINK_ACQUIRED;

	return(0);
}

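/*
 * Example (illustrative sketch; some_mtx and the surrounding code are
 * hypothetical): acquiring an exclusive lock through a caller-visible
 * link so that another thread holding a pointer to the link can abort
 * the blocked request with mtx_abort_ex_link().
 *
 *	mtx_link_t link;
 *	int error;
 *
 *	mtx_link_init(&link);
 *	error = mtx_lock_ex_link(&some_mtx, &link, PCATCH, 0);
 *	if (error == 0) {
 *		...exclusive critical section...
 *		mtx_unlock(&some_mtx);
 *	}
 */
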
/*
 * Short-form exclusive-lock a mutex, block until acquired.  Recursion is
 * allowed.  This is equivalent to mtx_lock_ex(mtx, 0, 0).
 */
static __inline void
mtx_lock(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0) {
		_mtx_lock_ex(mtx, 0, 0);
		return;
	}
	mtx->mtx_owner = curthread;
}

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_ex(mtx_t *mtx, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex(mtx, flags, to));
	mtx->mtx_owner = curthread;
	return(0);
}

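/*
 * Example (illustrative sketch; some_mtx is hypothetical): blocking
 * exclusively with signal catching and a one-second timeout, then
 * checking the tsleep() error code.
 *
 *	int error;
 *
 *	error = mtx_lock_ex(&some_mtx, PCATCH, hz);
 *	if (error)
 *		return (error);		(interrupted or timed out)
 *	...exclusive critical section...
 *	mtx_unlock_ex(&some_mtx);
 */
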
static __inline int
mtx_lock_ex_quick(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_lock_ex_quick(mtx));
	mtx->mtx_owner = curthread;
	return(0);
}

static __inline int
mtx_lock_sh_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_link(mtx, link, flags, to));
	link->state = MTX_LINK_ACQUIRED;
	return(0);
}

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_sh(mtx_t *mtx, int flags, int to)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh(mtx, flags, to));
	return(0);
}

static __inline int
mtx_lock_sh_quick(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return(_mtx_lock_sh_quick(mtx));
	return(0);
}

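/*
 * Example (illustrative sketch; some_mtx is hypothetical): a reader
 * taking and dropping a shared lock around a read-only lookup.
 *
 *	mtx_lock_sh_quick(&some_mtx);
 *	...read-only access to the protected data...
 *	mtx_unlock_sh(&some_mtx);
 */
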
/*
 * Adds a shared lock reference to a lock already locked shared; does
 * not block on a pending exclusive request.
 */
static __inline void
mtx_lock_sh_again(mtx_t *mtx)
{
	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 &&
		 (mtx->mtx_lock & MTX_MASK) > 0);
	atomic_add_int(&mtx->mtx_lock, 1);
}

/*
 * Short-form exclusive spinlock a mutex.  Must be paired with
 * mtx_spinunlock().
 */
static __inline void
mtx_spinlock(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section
	 */
	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	/*
	 * If we cannot get it trivially, get it the hard way.
	 *
	 * Note that mtx_owner will be set twice if we fail to get it
	 * trivially, but there's no point conditionalizing it as a
	 * conditional will be slower.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		_mtx_spinlock(mtx);
	mtx->mtx_owner = gd->gd_curthread;
}

static __inline int
mtx_spinlock_try(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	/*
	 * Predispose a hard critical section
	 */
	crit_enter_raw(gd->gd_curthread);
	++gd->gd_spinlocks;
	cpu_ccfence();

	/*
	 * If we cannot get it trivially, call _mtx_spinlock_try().  This
	 * function will clean up the hard critical section if it fails.
	 */
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return(_mtx_spinlock_try(mtx));
	mtx->mtx_owner = gd->gd_curthread;
	return (0);
}

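/*
 * Example (illustrative sketch; some_mtx is hypothetical): a short,
 * non-blocking critical section using the spin forms, paired with
 * mtx_spinunlock(), plus a non-blocking attempt via mtx_spinlock_try().
 *
 *	mtx_spinlock(&some_mtx);
 *	...short critical section, no blocking operations...
 *	mtx_spinunlock(&some_mtx);
 *
 *	if (mtx_spinlock_try(&some_mtx) == 0) {
 *		...got the lock...
 *		mtx_spinunlock(&some_mtx);
 *	}
 */
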
/*
 * Attempt to exclusive-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_ex_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
		return (_mtx_lock_ex_try(mtx));
	mtx->mtx_owner = curthread;
	return (0);
}

/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
		return (_mtx_lock_sh_try(mtx));
	return (0);
}

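/*
 * Example (illustrative sketch; some_mtx is hypothetical): falling back
 * to a deferred path when a try-lock fails with EAGAIN.
 *
 *	if (mtx_lock_ex_try(&some_mtx) == 0) {
 *		...exclusive critical section...
 *		mtx_unlock(&some_mtx);
 *	} else {
 *		...EAGAIN: defer the work or retry later without blocking...
 *	}
 */
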
/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t *mtx)
{
	globaldata_t gd __debugvar = mycpu;

	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) &&
		 mtx->mtx_owner == gd->gd_curthread);
	mtx->mtx_owner = NULL;
	if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 1) == 0)
		_mtx_downgrade(mtx);
}

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = curthread;
		return(0);
	}
	return (_mtx_upgrade_try(mtx));
}

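/*
 * Example (illustrative sketch; some_mtx is hypothetical): upgrading a
 * shared lock to exclusive for a modification, falling back to a full
 * relock on EDEADLK (state must then be revalidated), then downgrading
 * back to shared.
 *
 *	mtx_lock_sh_quick(&some_mtx);
 *	if (mtx_upgrade_try(&some_mtx) != 0) {
 *		mtx_unlock_sh(&some_mtx);
 *		mtx_lock_ex_quick(&some_mtx);
 *		...revalidate state, the lock was dropped...
 *	}
 *	...modify the protected data...
 *	mtx_downgrade(&some_mtx);
 *	...continue with shared access...
 *	mtx_unlock_sh(&some_mtx);
 */
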
/*
 * Optimized unlock cases.
 *
 * NOTE: mtx_unlock() handles any type of blocking mutex: exclusive or
 *	 shared.
 *
 *	 The mtx_unlock_ex/sh() forms are optimized for exclusive or shared
 *	 mutexes and produce less code, but it is ok for code to just use
 *	 mtx_unlock() and, in fact, if code uses the short-form mtx_lock()
 *	 to lock it should also use mtx_unlock() to unlock.  mtx_spinlock()
 *	 must instead be paired with mtx_spinunlock().
 */
static __inline void
mtx_unlock(mtx_t *mtx)
{
	globaldata_t gd __debugvar = mycpu;
	u_int lock = mtx->mtx_lock;

	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
		 mtx->mtx_owner == gd->gd_curthread);
	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else if (lock == 1) {
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

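/*
 * Example (illustrative sketch; some_mtx is hypothetical): the generic
 * mtx_unlock() releasing a lock taken with the short form mtx_lock().
 *
 *	mtx_lock(&some_mtx);
 *	...exclusive critical section...
 *	mtx_unlock(&some_mtx);
 */
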
static __inline void
mtx_unlock_ex(mtx_t *mtx)
{
	globaldata_t gd __debugvar = mycpu;
	u_int lock = mtx->mtx_lock;

	KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
		 mtx->mtx_owner == gd->gd_curthread);
	if (lock == (MTX_EXCLUSIVE | 1)) {
		mtx->mtx_owner = NULL;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
			_mtx_unlock(mtx);
	} else {
		_mtx_unlock(mtx);
	}
}

static __inline void
mtx_unlock_sh(mtx_t *mtx)
{
	if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
		_mtx_unlock(mtx);
}

/*
 * NOTE: spinlocks are exclusive-only
 */
static __inline void
mtx_spinunlock(mtx_t *mtx)
{
	globaldata_t gd = mycpu;

	mtx_unlock(mtx);

	cpu_ccfence();
	--gd->gd_spinlocks;
	crit_exit_raw(gd->gd_curthread);
}

/*
 * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
 * anyone, including the owner.
 */
static __inline int
mtx_islocked(mtx_t *mtx)
{
	return(mtx->mtx_lock != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is locked exclusively by anyone,
 * including the owner.  Returns FALSE (0) if the mutex is unlocked or
 * if it is locked shared by one or more entities.
 *
 * A caller wishing to check whether a lock is owned exclusively by it
 * should use mtx_owned().
 */
static __inline int
mtx_islocked_ex(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked.
 */
static __inline int
mtx_notlocked(mtx_t *mtx)
{
	return(mtx->mtx_lock == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked exclusively.
 * The mutex may be in an unlocked or shared lock state.
 */
static __inline int
mtx_notlocked_ex(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is exclusively locked by
 * the caller.
 */
static __inline int
mtx_owned(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread);
}

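/*
 * Example (illustrative sketch; the structure is hypothetical): asserting
 * that the caller holds the exclusive lock before touching protected
 * fields.
 *
 *	static void
 *	mydata_modify(struct mydata *md)
 *	{
 *		KKASSERT(mtx_owned(&md->md_lock));
 *		...modify fields protected by md_lock...
 *	}
 */
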
/*
 * Return TRUE (non-zero) if the mutex is not exclusively locked by
 * the caller.
 */
static __inline int
mtx_notowned(mtx_t *mtx)
{
	return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
	       mtx->mtx_owner != curthread);
}

/*
 * Return the shared or exclusive lock count.  A return value of 0
 * indicates that the mutex is not locked.
 *
 * NOTE: If the mutex is held exclusively by someone other than the
 *	 caller the lock count for the other owner is still returned.
 */
static __inline
int
mtx_lockrefs(mtx_t *mtx)
{
	return(mtx->mtx_lock & MTX_MASK);
}

/*
 * Lock must be held and will be released on return.  Returns state
 * which can be passed to mtx_lock_temp_restore() to return the
 * lock to its previous state.
 */
static __inline
mtx_state_t
mtx_lock_temp_release(mtx_t *mtx)
{
	mtx_state_t state;

	state = (mtx->mtx_lock & MTX_EXCLUSIVE);
	mtx_unlock(mtx);

	return state;
}

/*
 * Restore the previous state of a lock released with
 * mtx_lock_temp_release() or mtx_lock_upgrade().
 */
static __inline
void
mtx_lock_temp_restore(mtx_t *mtx, mtx_state_t state)
{
	if (state & MTX_EXCLUSIVE)
		mtx_lock_ex_quick(mtx);
	else
		mtx_lock_sh_quick(mtx);
}

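/*
 * Example (illustrative sketch; some_mtx is hypothetical): temporarily
 * releasing a held lock around a blocking operation and then restoring
 * it to its previous shared or exclusive state.
 *
 *	mtx_state_t state;
 *
 *	state = mtx_lock_temp_release(&some_mtx);
 *	...blocking operation that must not hold the lock...
 *	mtx_lock_temp_restore(&some_mtx, state);
 */
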
#endif