sys/sys/mutex2.h (dragonfly.git)

/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SYS_MUTEX2_H_
#define _SYS_MUTEX2_H_

#ifndef _SYS_MUTEX_H_
#include <sys/mutex.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_GLOBALDATA_H_
#include <sys/globaldata.h>
#endif
#include <machine/atomic.h>

/*
 * Initialize a new mutex, placing it in an unlocked state with no refs.
 */
static __inline void
mtx_init(mtx_t *mtx, const char *ident)
{
        mtx->mtx_lock = 0;
        mtx->mtx_flags = 0;
        mtx->mtx_owner = NULL;
        mtx->mtx_exlink = NULL;
        mtx->mtx_shlink = NULL;
        mtx->mtx_ident = ident;
}

static __inline void
mtx_init_flags(mtx_t *mtx, const char *ident, uint32_t flags)
{
        mtx->mtx_lock = 0;
        mtx->mtx_flags = flags;
        mtx->mtx_owner = NULL;
        mtx->mtx_exlink = NULL;
        mtx->mtx_shlink = NULL;
        mtx->mtx_ident = ident;
}

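/*
 * Usage sketch (editor's illustration, not part of the original header):
 * a consumer typically embeds the mutex in its own structure and
 * initializes it once before first use.  The structure and identifier
 * names below ("mysoftc", "sc_lock", "mylk") are hypothetical.
 *
 *      struct mysoftc {
 *              mtx_t   sc_lock;
 *      };
 *
 *      static void
 *      mysoftc_init(struct mysoftc *sc)
 *      {
 *              mtx_init(&sc->sc_lock, "mylk");
 *      }
 */
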
/*
 * Initialize a mtx link structure for deeper control over the mutex
 * operation.
 */
static __inline void
mtx_link_init(mtx_link_t *link)
{
        link->state = MTX_LINK_IDLE;
        link->callback = NULL;
        link->arg = NULL;
}

/*
 * A link structure initialized this way causes mutex operations not to
 * block; the caller must specify a callback.  The caller may still abort
 * the mutex operation via the link.
 */
static __inline void
mtx_link_init_async(mtx_link_t *link,
                    void (*callback)(mtx_link_t *link, void *arg, int error),
                    void *arg)
{
        link->state = MTX_LINK_IDLE;
        link->callback = callback;
        link->arg = arg;
}

/*
 * Deinitialize a mutex.
 */
static __inline void
mtx_uninit(mtx_t *mtx)
{
        /* empty */
}

/*
 * Exclusive-lock a mutex, block until acquired or aborted.  Recursion
 * is allowed.
 *
 * This version of the function allows the mtx_link to be passed in, thus
 * giving the caller visibility for the link structure which is required
 * when calling mtx_abort_ex_link() or when requesting an asynchronous lock.
 *
 * The mutex may be aborted at any time while the passed link structure
 * is valid.
 */
static __inline int
mtx_lock_ex_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_lock_ex_link(mtx, link, flags, to));
        mtx->mtx_owner = curthread;
        link->state = MTX_LINK_ACQUIRED;

        return(0);
}

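/*
 * Usage sketch (editor's illustration): requesting the exclusive lock
 * through an explicit link so the operation can complete asynchronously
 * via the callback, or be cancelled with mtx_abort_ex_link().  The
 * callback, argument, and structure names are hypothetical, and the
 * exact error codes delivered to the callback are not spelled out here.
 *
 *      static void
 *      my_lock_done(mtx_link_t *link, void *arg, int error)
 *      {
 *              ...     (error == 0 would indicate the lock was acquired)
 *      }
 *
 *      mtx_link_t link;
 *
 *      mtx_link_init_async(&link, my_lock_done, sc);
 *      mtx_lock_ex_link(&sc->sc_lock, &link, 0, 0);
 *      ...
 *      mtx_abort_ex_link(&sc->sc_lock, &link);     (optional cancellation)
 */
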
/*
 * Short-form exclusive-lock a mutex, block until acquired.  Recursion is
 * allowed.  This is equivalent to mtx_lock_ex(mtx, 0, 0).
 */
static __inline void
mtx_lock(mtx_t *mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0) {
                _mtx_lock_ex(mtx, 0, 0);
                return;
        }
        mtx->mtx_owner = curthread;
}

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_ex(mtx_t *mtx, int flags, int to)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_lock_ex(mtx, flags, to));
        mtx->mtx_owner = curthread;
        return(0);
}

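/*
 * Usage sketch (editor's illustration): a blocking exclusive acquisition
 * that can be interrupted by a signal.  PCATCH is the standard tsleep()
 * flag; a timeout of 0 follows the tsleep() convention of no timeout.
 * The variable and structure names are hypothetical.
 *
 *      int error;
 *
 *      error = mtx_lock_ex(&sc->sc_lock, PCATCH, 0);
 *      if (error)
 *              return (error);         (interrupted; lock not held)
 *      ...critical section...
 *      mtx_unlock(&sc->sc_lock);
 */
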
static __inline int
mtx_lock_ex_quick(mtx_t *mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_lock_ex_quick(mtx));
        mtx->mtx_owner = curthread;
        return(0);
}

static __inline int
mtx_lock_sh_link(mtx_t *mtx, mtx_link_t *link, int flags, int to)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
                return(_mtx_lock_sh_link(mtx, link, flags, to));
        link->state = MTX_LINK_ACQUIRED;
        return(0);
}

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
mtx_lock_sh(mtx_t *mtx, int flags, int to)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
                return(_mtx_lock_sh(mtx, flags, to));
        return(0);
}

static __inline int
mtx_lock_sh_quick(mtx_t *mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
                return(_mtx_lock_sh_quick(mtx));
        return(0);
}

/*
 * Add a shared lock reference to a lock that is already locked shared;
 * does not block on a pending exclusive request.
 */
static __inline void
mtx_lock_sh_again(mtx_t *mtx)
{
        KKASSERT((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 &&
                 (mtx->mtx_lock & MTX_MASK) > 0);
        atomic_add_int(&mtx->mtx_lock, 1);
}

/*
 * Short-form exclusive spinlock a mutex.  Must be paired with
 * mtx_spinunlock().
 */
static __inline void
mtx_spinlock(mtx_t *mtx)
{
        globaldata_t gd = mycpu;

        /*
         * Predispose a hard critical section
         */
        crit_enter_raw(gd->gd_curthread);
        ++gd->gd_spinlocks;
        cpu_ccfence();

        /*
         * If we cannot get it trivially, get it the hard way.
         *
         * Note that mtx_owner will be set twice if we fail to get it
         * trivially, but there's no point conditionalizing it as a
         * conditional will be slower.
         */
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                _mtx_spinlock(mtx);
        mtx->mtx_owner = gd->gd_curthread;
}

static __inline int
mtx_spinlock_try(mtx_t *mtx)
{
        globaldata_t gd = mycpu;

        /*
         * Predispose a hard critical section
         */
        crit_enter_raw(gd->gd_curthread);
        ++gd->gd_spinlocks;
        cpu_ccfence();

        /*
         * If we cannot get it trivially, call _mtx_spinlock_try().  This
         * function will clean up the hard critical section if it fails.
         */
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return(_mtx_spinlock_try(mtx));
        mtx->mtx_owner = gd->gd_curthread;
        return (0);
}

/*
 * Attempt to exclusive-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_ex_try(mtx_t *mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, MTX_EXCLUSIVE | 1) == 0)
                return (_mtx_lock_ex_try(mtx));
        mtx->mtx_owner = curthread;
        return (0);
}

/*
 * Attempt to share-lock a mutex, return 0 on success and
 * EAGAIN on failure.
 */
static __inline int
mtx_lock_sh_try(mtx_t *mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 0, 1) == 0)
                return (_mtx_lock_sh_try(mtx));
        return (0);
}

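/*
 * Usage sketch (editor's illustration): the try forms return 0 when the
 * lock is obtained and EAGAIN when it is not, so a caller can back off
 * instead of sleeping.  Names are hypothetical.
 *
 *      if (mtx_lock_ex_try(&sc->sc_lock) == 0) {
 *              ...update under the exclusive lock...
 *              mtx_unlock(&sc->sc_lock);
 *      } else {
 *              ...lock was busy (EAGAIN); defer the work...
 *      }
 */
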
/*
 * Downgrade an exclusive lock to a shared lock.
 *
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
static __inline void
mtx_downgrade(mtx_t *mtx)
{
        mtx->mtx_owner = NULL;
        if (atomic_cmpset_int(&mtx->mtx_lock, MTX_EXCLUSIVE | 1, 1) == 0)
                _mtx_downgrade(mtx);
}

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
static __inline int
mtx_upgrade_try(mtx_t *mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 1, MTX_EXCLUSIVE | 1)) {
                mtx->mtx_owner = curthread;
                return(0);
        }
        return (_mtx_upgrade_try(mtx));
}

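/*
 * Usage sketch (editor's illustration): a read-mostly path can take the
 * lock shared, attempt an in-place upgrade when a modification becomes
 * necessary, and fall back to relocking exclusively if the upgrade fails
 * with EDEADLK.  The helper need_modify() and the other names are
 * hypothetical.
 *
 *      mtx_lock_sh_quick(&sc->sc_lock);
 *      if (need_modify(sc)) {
 *              if (mtx_upgrade_try(&sc->sc_lock) != 0) {
 *                      mtx_unlock_sh(&sc->sc_lock);
 *                      mtx_lock_ex_quick(&sc->sc_lock);
 *                      ...revalidate; state may have changed...
 *              }
 *              ...modify...
 *              mtx_downgrade(&sc->sc_lock);
 *      }
 *      mtx_unlock_sh(&sc->sc_lock);
 */
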
/*
 * Optimized unlock cases.
 *
 * NOTE: mtx_unlock() handles any type of mutex: exclusive, shared, and
 *       both blocking and spin methods.
 *
 *       The mtx_unlock_ex/sh() forms are optimized for exclusive or shared
 *       mutexes and produce less code, but it is ok for code to just use
 *       mtx_unlock() and, in fact, if code uses the short-form mtx_lock()
 *       or mtx_spinlock() to lock it should also use mtx_unlock() to unlock.
 */
static __inline void
mtx_unlock(mtx_t *mtx)
{
        u_int lock = mtx->mtx_lock;

        if (lock == (MTX_EXCLUSIVE | 1)) {
                mtx->mtx_owner = NULL;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
                        _mtx_unlock(mtx);
        } else if (lock == 1) {
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
                        _mtx_unlock(mtx);
        } else {
                _mtx_unlock(mtx);
        }
}

static __inline void
mtx_unlock_ex(mtx_t *mtx)
{
        u_int lock = mtx->mtx_lock;

        if (lock == (MTX_EXCLUSIVE | 1)) {
                mtx->mtx_owner = NULL;
                if (atomic_cmpset_int(&mtx->mtx_lock, lock, 0) == 0)
                        _mtx_unlock(mtx);
        } else {
                _mtx_unlock(mtx);
        }
}

static __inline void
mtx_unlock_sh(mtx_t *mtx)
{
        if (atomic_cmpset_int(&mtx->mtx_lock, 1, 0) == 0)
                _mtx_unlock(mtx);
}

/*
 * NOTE: spinlocks are exclusive-only
 */
static __inline void
mtx_spinunlock(mtx_t *mtx)
{
        globaldata_t gd = mycpu;

        mtx_unlock(mtx);

        cpu_ccfence();
        --gd->gd_spinlocks;
        crit_exit_raw(gd->gd_curthread);
}

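/*
 * Usage sketch (editor's illustration): the spin forms enter a hard
 * critical section, so they must always be paired and the protected
 * region must stay short and must not block.  Names are hypothetical.
 *
 *      mtx_spinlock(&sc->sc_lock);
 *      ...short, non-blocking critical section...
 *      mtx_spinunlock(&sc->sc_lock);
 */
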
/*
 * Return TRUE (non-zero) if the mutex is locked shared or exclusive by
 * anyone, including the owner.
 */
static __inline int
mtx_islocked(mtx_t *mtx)
{
        return(mtx->mtx_lock != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is locked exclusively by anyone,
 * including the owner.  Returns FALSE (0) if the mutex is unlocked or
 * if it is locked shared by one or more entities.
 *
 * A caller wishing to check whether a lock is owned exclusively by it
 * should use mtx_owned().
 */
static __inline int
mtx_islocked_ex(mtx_t *mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) != 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked.
 */
static __inline int
mtx_notlocked(mtx_t *mtx)
{
        return(mtx->mtx_lock == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is not locked exclusively.
 * The mutex may be in an unlocked or shared lock state.
 */
static __inline int
mtx_notlocked_ex(mtx_t *mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0);
}

/*
 * Return TRUE (non-zero) if the mutex is exclusively locked by
 * the caller.
 */
static __inline int
mtx_owned(mtx_t *mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) && mtx->mtx_owner == curthread);
}

/*
 * Return TRUE (non-zero) if the mutex is not exclusively locked by
 * the caller.
 */
static __inline int
mtx_notowned(mtx_t *mtx)
{
        return((mtx->mtx_lock & MTX_EXCLUSIVE) == 0 ||
               mtx->mtx_owner != curthread);
}

/*
 * Return the shared or exclusive lock count.  A return value of 0
 * indicates that the mutex is not locked.
 *
 * NOTE: If the mutex is held exclusively by someone other than the
 *       caller the lock count for the other owner is still returned.
 */
static __inline
int
mtx_lockrefs(mtx_t *mtx)
{
        return(mtx->mtx_lock & MTX_MASK);
}

/*
 * Lock must be held and will be released on return.  Returns state
 * which can be passed to mtx_lock_temp_restore() to return the
 * lock to its previous state.
 */
static __inline
mtx_state_t
mtx_lock_temp_release(mtx_t *mtx)
{
        mtx_state_t state;

        state = (mtx->mtx_lock & MTX_EXCLUSIVE);
        mtx_unlock(mtx);

        return state;
}

/*
 * Restore the previous state of a lock released with
 * mtx_lock_temp_release() or mtx_lock_upgrade().
 */
static __inline
void
mtx_lock_temp_restore(mtx_t *mtx, mtx_state_t state)
{
        if (state & MTX_EXCLUSIVE)
                mtx_lock_ex_quick(mtx);
        else
                mtx_lock_sh_quick(mtx);
}

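/*
 * Usage sketch (editor's illustration): temporarily dropping a held lock
 * around a call that must not be made with it held, then reacquiring it
 * in the same (shared or exclusive) mode.  Names are hypothetical.
 *
 *      mtx_state_t state;
 *
 *      state = mtx_lock_temp_release(&sc->sc_lock);
 *      ...call out without the lock held...
 *      mtx_lock_temp_restore(&sc->sc_lock, state);
 */
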
#endif