sys/dev/drm/linux_wwmutex.c

/*-
 * Copyright (c) 2003-2011 The DragonFly Project.  All rights reserved.
 * Copyright (c) 2015 Michael Neumann <mneumann@ntecs.de>.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and
 * Michael Neumann <mneumann@ntecs.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>

#include <machine/atomic.h>

#include <linux/ww_mutex.h>
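
/*
 * This file implements the Linux ww_mutex ("wound/wait" mutex) API on
 * top of DragonFly lockmgr locks.  Each acquire context draws a
 * monotonically increasing stamp from its ww_class; on contention the
 * younger context (larger stamp) is backed off with -EDEADLK while the
 * older context is allowed to wait, preventing lock-order deadlocks.
 */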

void
ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class)
{
	ctx->stamp = atomic_fetchadd_long(&ww_class->stamp, 1);
	ctx->acquired = 0;
	ctx->ww_class = ww_class;
}

/*
 * done/fini require no teardown in this implementation; they are no-ops
 * kept for Linux API compatibility.
 */
void
ww_acquire_done(struct ww_acquire_ctx *ctx __unused)
{
}

void
ww_acquire_fini(struct ww_acquire_ctx *ctx __unused)
{
}

void
ww_mutex_init(struct ww_mutex *ww, struct ww_class *ww_class)
{
	lockinit(&ww->base, ww_class->name, 0, LK_CANRECURSE);
	ww->ctx = NULL;
	ww->stamp = 0xFFFFFFFFFFFFFFFFLU;	/* max stamp: no owning context */
	ww->blocked = 0;
}

void
ww_mutex_destroy(struct ww_mutex *ww)
{
	lockuninit(&ww->base);
}

/*
 * Optimized lock path.
 *
 * (slow) is optional as long as we block normally on the initial lock.
 *	  Currently not implemented.
 */
static __inline
int
__wwlock(struct ww_mutex *ww, struct ww_acquire_ctx *ctx,
	 bool slow __unused, bool intr)
{
	int flags = LK_EXCLUSIVE;
	int error;

	if (intr)
		flags |= LK_PCATCH;

	/*
	 * Normal mutex if ctx is NULL
	 */
	if (ctx == NULL) {
		error = lockmgr(&ww->base, flags);
		if (error)
			error = -EINTR;
		return error;
	}

	/*
	 * A normal blocking lock can be used when ctx->acquired is 0 (no
	 * prior locks are held).  If prior locks are held then we cannot
	 * block here.
	 *
	 * In the non-blocking case set up our tsleep interlock using
	 * ww->blocked first.
	 */
	for (;;) {
		if (ctx->acquired != 0) {
			atomic_swap_int(&ww->blocked, 1);
			flags |= LK_NOWAIT;
			tsleep_interlock(ww, (intr ? PCATCH : 0));
		}
		error = lockmgr(&ww->base, flags);
		if (error == 0) {
			ww->ctx = ctx;
			ww->stamp = ctx->stamp;
			++ctx->acquired;
			return 0;
		}

		/*
		 * EINTR or ERESTART returns -EINTR.  ENOLCK and EWOULDBLOCK
		 * cannot happen (LK_SLEEPFAIL not set, timeout is not set).
		 */
		if (error != EBUSY)
			return -EINTR;

		/*
		 * acquired can only be non-zero in this path.
		 * NOTE: ww->ctx is not MPSAFE.
		 * NOTE: ww->stamp is heuristic, a race is possible.
		 */
		KKASSERT(ctx->acquired > 0);

		/*
		 * Unwind if we aren't the oldest.
		 */
		if (ctx->stamp > ww->stamp)
			return -EDEADLK;

		/*
		 * We have priority over the currently held lock.  We have
		 * already set up the interlock so we can tsleep() until the
		 * remote wakes us up (which may have already happened).
		 *
		 * error is zero if woken up
		 *	 EINTR / ERESTART - signal
		 *	 EWOULDBLOCK	  - timeout expired (if not 0)
		 */
		if (flags & LK_NOWAIT) {
			error = tsleep(ww, PINTERLOCKED | (intr ? PCATCH : 0),
				       ctx->ww_class->name, 0);
			if (intr && (error == EINTR || error == ERESTART))
				return -EINTR;
			flags &= ~LK_NOWAIT;
		}
		/* retry */
	}
}
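
/*
 * An illustrative scenario for the rules above: thread A (stamp 1) holds
 * M1 and wants M2 while thread B (stamp 2) holds M2 and wants M1.  B is
 * younger, so its attempt on M1 fails the stamp test and returns
 * -EDEADLK; A's attempt on M2 passes it, sleeps on the interlock, and is
 * woken when B backs off via ww_mutex_unlock().  Exactly one side makes
 * progress.
 */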

int
ww_mutex_lock(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 0, 0);
}

int
ww_mutex_lock_slow(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 1, 0);
}

int
ww_mutex_lock_interruptible(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 0, 1);
}

int
ww_mutex_lock_slow_interruptible(struct ww_mutex *ww,
				 struct ww_acquire_ctx *ctx)
{
	return __wwlock(ww, ctx, 1, 1);
}

void
ww_mutex_unlock(struct ww_mutex *ww)
{
	struct ww_acquire_ctx *ctx;

	ctx = ww->ctx;
	if (ctx) {
		KKASSERT(ctx->acquired > 0);
		--ctx->acquired;
		ww->ctx = NULL;
		ww->stamp = 0xFFFFFFFFFFFFFFFFLU;
	}
	lockmgr(&ww->base, LK_RELEASE);

	/*
	 * Wake up anyone who set the tsleep interlock while waiting for
	 * this lock.
	 */
	if (atomic_swap_int(&ww->blocked, 0))
		wakeup(ww);
}
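
/*
 * Usage sketch (illustrative only, compiled out): the standard wound/wait
 * pattern for acquiring two mutexes that other threads may take in the
 * opposite order.  lock_pair() and its parameters are hypothetical and
 * not part of this file's API; a real caller supplies its own ww_class.
 */
#if 0
static int
lock_pair(struct ww_mutex *a, struct ww_mutex *b, struct ww_class *class)
{
	struct ww_acquire_ctx ctx;
	struct ww_mutex *tmp;
	int error;

	ww_acquire_init(&ctx, class);
	ww_mutex_lock(a, &ctx);		/* first lock cannot be wound */
	for (;;) {
		error = ww_mutex_lock(b, &ctx);
		if (error != -EDEADLK)
			break;

		/*
		 * Wound by an older context: release everything, sleep
		 * on the contended mutex (safe with nothing held), then
		 * retry with the two roles swapped.
		 */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		tmp = a;
		a = b;
		b = tmp;
	}
	if (error == 0) {
		ww_acquire_done(&ctx);
		/* ... critical section with both mutexes held ... */
		ww_mutex_unlock(b);
		ww_mutex_unlock(a);
	}
	ww_acquire_fini(&ctx);
	return error;
}
#endif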