2 * Copyright (c) 2003-2011 The DragonFly Project. All rights reserved.
3 * Copyright (c) 2015 Michael Neumann <mneumann@ntecs.de>. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
6 * by Matthew Dillon <dillon@backplane.com> and
7 * Michael Neumann <mneumann@ntecs.de>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice unmodified, this list of conditions, and the following
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include <sys/types.h>
32 #include <sys/errno.h>
33 #include <sys/kernel.h>
34 #include <sys/spinlock.h>
35 #include <sys/spinlock2.h>
37 #include <machine/atomic.h>
39 #include <linux/ww_mutex.h>
42 ww_acquire_init(struct ww_acquire_ctx
*ctx
, struct ww_class
*ww_class
)
44 ctx
->stamp
= atomic_fetchadd_long(&ww_class
->stamp
, 1);
46 ctx
->ww_class
= ww_class
;
50 ww_acquire_done(struct ww_acquire_ctx
*ctx __unused
)
55 ww_acquire_fini(struct ww_acquire_ctx
*ctx __unused
)
60 ww_mutex_init(struct ww_mutex
*ww
, struct ww_class
*ww_class
)
62 lockinit(&ww
->base
, ww_class
->name
, 0, LK_CANRECURSE
);
64 ww
->stamp
= 0xFFFFFFFFFFFFFFFFLU
;
69 ww_mutex_destroy(struct ww_mutex
*ww
)
71 lockuninit(&ww
->base
);
75 * Optimized lock path.
77 * (slow) is optional as long as we block normally on the initial lock.
78 * Currently not implemented.
82 __wwlock(struct ww_mutex
*ww
, struct ww_acquire_ctx
*ctx
,
83 bool slow __unused
, bool intr
)
85 int flags
= LK_EXCLUSIVE
;
92 * Normal mutex if ctx is NULL
95 error
= lockmgr(&ww
->base
, flags
);
102 * A normal blocking lock can be used when ctx->acquired is 0 (no
103 * prior locks are held). If prior locks are held then we cannot
106 * In the non-blocking case setup our tsleep interlock using
110 if (ctx
->acquired
!= 0) {
111 atomic_swap_int(&ww
->blocked
, 1);
113 tsleep_interlock(ww
, (intr
? PCATCH
: 0));
115 error
= lockmgr(&ww
->base
, flags
);
118 ww
->stamp
= ctx
->stamp
;
124 * EINTR or ERESTART returns -EINTR. ENOLCK and EWOULDBLOCK
125 * cannot happen (LK_SLEEPFAIL not set, timeout is not set).
131 * acquired can only be non-zero in this path.
132 * NOTE: ww->ctx is not MPSAFE.
133 * NOTE: ww->stamp is heuristical, a race is possible.
135 KKASSERT(ctx
->acquired
> 0);
138 * Unwind if we aren't the oldest.
140 if (ctx
->stamp
> ww
->stamp
)
144 * We have priority over the currently held lock. We have
145 * already setup the interlock so we can tsleep() until the
146 * remote wakes us up (which may have already happened).
148 * error is zero if woken up
149 * EINTR / ERESTART - signal
150 * EWOULDBLOCK - timeout expired (if not 0)
152 if (flags
& LK_NOWAIT
) {
153 error
= tsleep(ww
, PINTERLOCKED
| (intr
? PCATCH
: 0),
154 ctx
->ww_class
->name
, 0);
155 if (intr
&& (error
== EINTR
|| error
== ERESTART
))
/*
 * Acquire a ww_mutex, blocking, not interruptible by signals.
 * See __wwlock() for the return conventions.
 */
int
ww_mutex_lock(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 0, 0);
}
/*
 * Slow-path acquire, not interruptible.  The slow flag is currently
 * ignored by __wwlock() (see comment there); behavior matches
 * ww_mutex_lock().
 */
int
ww_mutex_lock_slow(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 1, 0);
}
/*
 * Acquire a ww_mutex, interruptible by signals (returns -EINTR on
 * EINTR/ERESTART).  See __wwlock() for the return conventions.
 */
int
ww_mutex_lock_interruptible(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 0, 1);
}
/*
 * Slow-path acquire, interruptible by signals.  The slow flag is
 * currently ignored by __wwlock(); behavior matches
 * ww_mutex_lock_interruptible().
 */
int
ww_mutex_lock_slow_interruptible(struct ww_mutex *ww,
    struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 1, 1);
}
189 ww_mutex_unlock(struct ww_mutex
*ww
)
191 struct ww_acquire_ctx
*ctx
;
195 KKASSERT(ctx
->acquired
> 0);
198 ww
->stamp
= 0xFFFFFFFFFFFFFFFFLU
;
200 lockmgr(&ww
->base
, LK_RELEASE
);
201 if (atomic_swap_int(&ww
->blocked
, 0))