[dragonfly.git] / sys / sys / mutex2.h
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _SYS_MUTEX2_H_
#define _SYS_MUTEX2_H_

#ifndef _SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>
/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t *mtx, const char *ident)
{
        mtx->mtx_lock = 0;
        mtx->mtx_owner = NULL;
        mtx->mtx_exlink = NULL;
        mtx->mtx_shlink = NULL;
        mtx->mtx_ident = ident;
}

/*
 * Initialize a mtx link structure for deeper control over the mutex
 * operation.
 */
static __inline void
mtx_link_init(mtx_link_t *link)
{
        link->state = MTX_LINK_IDLE;
        link->callback = NULL;
        link->arg = NULL;
}
/*
 * A link structure initialized this way causes mutex operations to not
 * block; the caller must specify a callback.  The caller may still abort
 * the mutex via the link.
 */
static __inline void
mtx_link_init_async(mtx_link_t *link,
                    void (*callback)(mtx_link_t *link, void *arg, int error),
                    void *arg)
{
        link->state = MTX_LINK_IDLE;
        link->callback = callback;
        link->arg = arg;
}
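/*
 * Illustrative usage sketch (assumption, not part of this header): one way
 * a caller might wire up an asynchronous exclusive lock request.  The
 * callback names and the interpretation of the error argument below are
 * assumptions based on the comments in this file, not a definitive pattern.
 *
 *      static void
 *      my_lock_callback(mtx_link_t *link, void *arg, int error)
 *      {
 *              an error of 0 presumably means the lock was acquired;
 *              a non-zero error would indicate an abort or failure.
 *      }
 *
 *      static void
 *      my_start_async_lock(mtx_t *mtx, mtx_link_t *link, void *arg)
 *      {
 *              mtx_link_init_async(link, my_lock_callback, arg);
 *              mtx_lock_ex_link(mtx, link, 0, 0);
 *      }
 */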
/*
 * Deinitialize a mutex
 */
static __inline void
mtx_uninit(mtx_t *mtx)
{
        /* empty */
}
/*
 * Exclusive-lock a mutex, block until acquired or aborted.  Recursion
 * is allowed.
 *
 * This version of the function allows the mtx_link to be passed in, giving
 * the caller visibility into the link structure, which is required when
 * calling mtx_abort_ex_link() or when requesting an asynchronous lock.
 *
 * The mutex may be aborted at any time while the passed link structure
 * is valid.
 */
static __inline int
mtx_lock_ex_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_lock_ex_link(mtx, link, flags, to));
        mtx->mtx_owner = curthread;
        link->state = MTX_LINK_ACQUIRED;

        return(0);
}
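/*
 * Illustrative usage sketch (assumption, not part of this header): a
 * blocking exclusive lock taken through a caller-visible link, so that
 * another context holding a pointer to the link could abort the wait with
 * mtx_abort_ex_link(), which is only referenced by name in the comments
 * above and declared elsewhere.
 *
 *      static int
 *      my_lock_with_link(mtx_t *mtx, mtx_link_t *link)
 *      {
 *              int error;
 *
 *              mtx_link_init(link);
 *              error = mtx_lock_ex_link(mtx, link, 0, 0);
 *              if (error == 0) {
 *                      ... critical section ...
 *                      mtx_unlock(mtx);
 *              }
 *              return (error);
 *      }
 */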
/*
 * Short-form exclusive-lock a mutex, block until acquired.  Recursion is
 * allowed.  This is equivalent to mtx_lock_ex(mtx, 0, 0).
 */
static __inline void
mtx_lock(mtx_t *mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0) {
                _mtx_lock_ex(mtx, 0, 0);
                return;
        }
        mtx->mtx_owner = curthread;
}
/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_ex(mtx_t *mtx, int flags, int to)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_lock_ex(mtx, flags, to));
        mtx->mtx_owner = curthread;
        return(0);
}
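/*
 * Illustrative usage sketch (assumption, not part of this header): an
 * interruptible exclusive lock.  Per the comment above, passing PCATCH in
 * the flags lets a signal surface as a non-zero tsleep() return code.
 *
 *      static int
 *      my_lock_interruptible(mtx_t *mtx)
 *      {
 *              int error;
 *
 *              error = mtx_lock_ex(mtx, PCATCH, 0);
 *              if (error)
 *                      return (error);
 *              ... critical section ...
 *              mtx_unlock_ex(mtx);
 *              return (0);
 *      }
 */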
static __inline int
mtx_lock_ex_quick(mtx_t *mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_lock_ex_quick(mtx));
        mtx->mtx_owner = curthread;
        return(0);
}

static __inline int
mtx_lock_sh_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
                return(_mtx_lock_sh_link(mtx, link, flags, to));
        link->state = MTX_LINK_ACQUIRED;
        return(0);
}
/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_sh(mtx_t *mtx, int flags, int to)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
                return(_mtx_lock_sh(mtx, flags, to));
        return(0);
}

static __inline int
mtx_lock_sh_quick(mtx_t *mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
                return(_mtx_lock_sh_quick(mtx));
        return(0);
}
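/*
 * Illustrative usage sketch (assumption, not part of this header): the
 * usual reader/writer pairing.  Readers take the shared form, writers take
 * the exclusive form, and both release with mtx_unlock(), which the NOTE
 * further below says handles any lock type.
 *
 *      static void
 *      my_reader(mtx_t *mtx)
 *      {
 *              mtx_lock_sh_quick(mtx);
 *              ... read data protected by mtx ...
 *              mtx_unlock(mtx);
 *      }
 *
 *      static void
 *      my_writer(mtx_t *mtx)
 *      {
 *              mtx_lock(mtx);
 *              ... modify data protected by mtx ...
 *              mtx_unlock(mtx);
 *      }
 */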
/*
 * Adds a shared lock reference to a lock already locked shared, and does
 * not block on a pending exclusive request.
 */
static __inline void
mtx_lock_sh_again(mtx_t *mtx)
{
        KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 &&
                 (mtx->mtx_lock & MTX_MASK) > 0);
        atomic_add_int(&mtx->mtx_lock, 1);
}
/*
 * Short-form exclusive-spinlock a mutex.  Must be paired with
 * mtx_spinunlock().
 */
static __inline void
mtx_spinlock(mtx_t *mtx)
{
        globaldata_t gd = mycpu;

        /*
         * Predispose a hard critical section
         */
        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks;

        /*
         * If we cannot get it trivially, get it the hard way.
         *
         * Note that mtx_owner will be set twice if we fail to get it
         * trivially, but there's no point conditionalizing it as a
         * conditional will be slower.
         */
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                _mtx_spinlock(mtx);
        mtx->mtx_owner = gd->gd_curthread;
}
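/*
 * Illustrative usage sketch (assumption, not part of this header): a short
 * critical section under the spin variant.  mtx_spinlock() enters a hard
 * critical section and bumps gd_spinlocks, so the section should be short,
 * must not block, and must be released with mtx_spinunlock() rather than
 * mtx_unlock() so that the accounting is undone.
 *
 *      static void
 *      my_bump_counter(mtx_t *mtx, long *counter)
 *      {
 *              mtx_spinlock(mtx);
 *              ++*counter;
 *              mtx_spinunlock(mtx);
 *      }
 */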
static __inline int
mtx_spinlock_try(mtx_t *mtx)
{
        globaldata_t gd = mycpu;

        /*
         * Predispose a hard critical section
         */
        ++gd->gd_curthread->td_critcount;
        cpu_ccfence();
        ++gd->gd_spinlocks;

        /*
         * If we cannot get it trivially, call _mtx_spinlock_try().  This
         * function will clean up the hard critical section if it fails.
         */
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_spinlock_try(mtx));
        mtx->mtx_owner = gd->gd_curthread;
        return (0);
}
/*
 * Attempt to exclusive-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_ex_try(mtx_t *mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return (_mtx_lock_ex_try(mtx));
        mtx->mtx_owner = curthread;
        return (0);
}
/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t *mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
                return (_mtx_lock_sh_try(mtx));
        return (0);
}
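/*
 * Illustrative usage sketch (assumption, not part of this header):
 * opportunistic locking with the try forms.  A non-zero return (EAGAIN)
 * means the lock could not be obtained without blocking, so the caller
 * backs off instead of sleeping.
 *
 *      static int
 *      my_try_work(mtx_t *mtx)
 *      {
 *              if (mtx_lock_ex_try(mtx) != 0)
 *                      return (EAGAIN);
 *              ... critical section ...
 *              mtx_unlock(mtx);
 *              return (0);
 *      }
 */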
/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t *mtx)
{
        mtx->mtx_owner = NULL;
        if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 1) == 0)
                _mtx_downgrade(mtx);
}
/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t *mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1)) {
                mtx->mtx_owner = curthread;
                return(0);
        }
        return (_mtx_upgrade_try(mtx));
}
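/*
 * Illustrative usage sketch (assumption, not part of this header): promote
 * a shared hold to exclusive only when the data actually needs modification
 * and drop back to shared afterwards.  This sketch assumes the shared hold
 * remains intact when the upgrade fails with EDEADLK, so the final
 * mtx_unlock() releases whichever mode is currently held.
 *
 *      static void
 *      my_read_then_maybe_modify(mtx_t *mtx, int need_modify)
 *      {
 *              mtx_lock_sh_quick(mtx);
 *              ... read data under the shared hold ...
 *              if (need_modify && mtx_upgrade_try(mtx) == 0) {
 *                      ... modify data under the exclusive hold ...
 *                      mtx_downgrade(mtx);
 *              }
 *              mtx_unlock(mtx);
 *      }
 */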
/*
 * Optimized unlock cases.
 *
 * NOTE: mtx_unlock() handles any type of mutex: exclusive, shared, and
 *       both blocking and spin methods.
 *
 *       The mtx_unlock_ex/sh() forms are optimized for exclusive or shared
 *       mutexes and produce less code, but it is ok for code to just use
 *       mtx_unlock().  In fact, if code uses the short-form mtx_lock()
 *       or mtx_spinlock() to lock, it should also use mtx_unlock() to
 *       unlock.
 */
static __inline void
mtx_unlock(mtx_t *mtx)
{
        u_int lock = mtx->mtx_lock;

        if (lock == (MTX_EXCLUSIVE | 1)) {
                mtx->mtx_owner = NULL;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
                        _mtx_unlock(mtx);
        } else if (lock == 1) {
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
                        _mtx_unlock(mtx);
        } else {
                _mtx_unlock(mtx);
        }
}
static __inline void
mtx_unlock_ex(mtx_t *mtx)
{
        u_int lock = mtx->mtx_lock;

        if (lock == (MTX_EXCLUSIVE | 1)) {
                mtx->mtx_owner = NULL;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
                        _mtx_unlock(mtx);
        } else {
                _mtx_unlock(mtx);
        }
}

static __inline void
mtx_unlock_sh(mtx_t *mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
                _mtx_unlock(mtx);
}
/*
 * NOTE: spinlocks are exclusive-only
 */
static __inline void
mtx_spinunlock(mtx_t *mtx)
{
        globaldata_t gd = mycpu;

        mtx_unlock(mtx);

        --gd->gd_spinlocks;
        cpu_ccfence();
        --gd->gd_curthread->td_critcount;
}
/*
 * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
 * anyone, including the owner.
 */
static __inline int
mtx_islocked(mtx_t *mtx)
{
        return(mtx->mtx_lock != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is locked exclusively by anyone,
 * including the owner.  Returns FALSE (0) if the mutex is unlocked or
 * if it is locked shared by one or more entities.
 *
 * A caller wishing to check whether a lock is owned exclusively by it
 * should use mtx_owned().
 */
static __inline int
mtx_islocked_ex(mtx_t *mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) != 0);
}
/*
 * Return TRUE (non-zero) if the mutex is not locked.
 */
static __inline int
mtx_notlocked(mtx_t *mtx)
{
        return(mtx->mtx_lock == 0);
}
/*
 * Return TRUE (non-zero) if the mutex is not locked exclusively.
 * The mutex may be in an unlocked or shared lock state.
 */
static __inline int
mtx_notlocked_ex(mtx_t *mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0);
}
/*
 * Return TRUE (non-zero) if the mutex is exclusively locked by
 * the caller.
 */
static __inline int
mtx_owned(mtx_t *mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread);
}

/*
 * Return TRUE (non-zero) if the mutex is not exclusively locked by
 * the caller.
 */
static __inline int
mtx_notowned(mtx_t *mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
               mtx->mtx_owner != curthread);
}
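/*
 * Illustrative usage sketch (assumption, not part of this header): the
 * ownership predicates are typically useful as sanity assertions at the
 * top of functions that require the caller to hold the lock exclusively.
 *
 *      static void
 *      my_requires_exclusive(mtx_t *mtx)
 *      {
 *              KKASSERT(mtx_owned(mtx));
 *              ... manipulate data protected by mtx ...
 *      }
 */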
/*
 * Return the shared or exclusive lock count.  A return value of 0
 * indicates that the mutex is not locked.
 *
 * NOTE: If the mutex is held exclusively by someone other than the
 *       caller the lock count for the other owner is still returned.
 */
static __inline
int
mtx_lockrefs(mtx_t *mtx)
{
        return(mtx->mtx_lock & MTX_MASK);
}
/*
 * Lock must be held and will be released on return.  Returns state
 * which can be passed to mtx_lock_temp_restore() to return the
 * lock to its previous state.
 */
static __inline
mtx_state_t
mtx_lock_temp_release(mtx_t *mtx)
{
        mtx_state_t state;

        state = (mtx->mtx_lock & MTX_EXCLUSIVE);
        mtx_unlock(mtx);

        return state;
}
/*
 * Restore the previous state of a lock released with
 * mtx_lock_temp_release() or mtx_lock_upgrade().
 */
static __inline
void
mtx_lock_temp_restore(mtx_t *mtx, mtx_state_t state)
{
        if (state & MTX_EXCLUSIVE)
                mtx_lock_ex_quick(mtx);
        else
                mtx_lock_sh_quick(mtx);
}
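/*
 * Illustrative usage sketch (assumption, not part of this header):
 * temporarily dropping a lock around an operation that must run without
 * it held, then reacquiring it in the same (shared or exclusive) mode.
 * The hypothetical do_unlocked_operation() stands in for whatever work
 * requires the lock to be released.
 *
 *      static void
 *      my_call_unlocked(mtx_t *mtx)
 *      {
 *              mtx_state_t state;
 *
 *              state = mtx_lock_temp_release(mtx);
 *              do_unlocked_operation();
 *              mtx_lock_temp_restore(mtx, state);
 *      }
 */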
#endif