/* sys/dev/drm/include/linux/ww_mutex.h */
/*
 * Copyright (c) 2015 Michael Neumann <mneumann@ntecs.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _LINUX_WW_MUTEX_H_
#define _LINUX_WW_MUTEX_H_

/*
 * A basic, unoptimized implementation of wound/wait mutexes for DragonFly
 * modelled after the Linux API [1].
 *
 * [1]: http://lxr.free-electrons.com/source/include/linux/ww_mutex.h
 */

#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/stdbool.h>
44 struct ww_class {
45 volatile u_long stamp;
46 const char *name;
49 struct ww_acquire_ctx {
50 u_long stamp;
51 struct ww_class *ww_class;
54 struct ww_mutex {
55 struct spinlock lock;
56 volatile int acquired;
57 volatile struct ww_acquire_ctx *ctx;
58 volatile struct thread *owner;
/*
 * Define and initialize a ww_class with a zero stamp counter and a name
 * derived from the variable identifier.
 */
#define DEFINE_WW_CLASS(classname)	\
	struct ww_class classname = {	\
		.stamp = 0,		\
		.name = #classname	\
	}
67 static inline void
68 ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class) {
69 ctx->stamp = atomic_fetchadd_long(&ww_class->stamp, 1);
70 ctx->ww_class = ww_class;
73 static inline void
74 ww_acquire_done(__unused struct ww_acquire_ctx *ctx) {
77 static inline void
78 ww_acquire_fini(__unused struct ww_acquire_ctx *ctx) {
81 static inline void
82 ww_mutex_init(struct ww_mutex *lock, struct ww_class *ww_class) {
83 spin_init(&lock->lock, ww_class->name);
84 lock->acquired = 0;
85 lock->ctx = NULL;
86 lock->owner = NULL;
89 static inline bool
90 ww_mutex_is_locked(struct ww_mutex *lock) {
91 bool res = false;
92 spin_lock(&lock->lock);
93 if (lock->acquired > 0) res = true;
94 spin_unlock(&lock->lock);
95 return res;
99 * Return 1 if lock could be acquired, else 0 (contended).
101 static inline int
102 ww_mutex_trylock(struct ww_mutex *lock) {
103 int res = 1;
104 KKASSERT(curthread);
106 spin_lock(&lock->lock);
108 * In case no one holds the ww_mutex yet, we acquire it.
110 if (lock->acquired == 0) {
111 KKASSERT(lock->ctx == NULL);
112 lock->acquired += 1;
113 lock->owner = curthread;
116 * In case we already hold the ww_mutex, increase a count.
118 else if (lock->owner == curthread) {
119 lock->acquired += 1;
121 else {
122 res = 0;
124 spin_unlock(&lock->lock);
125 return res;
129 * When `slow` is `true`, it will always block if the ww_mutex is contended.
130 * It is assumed that the called will not hold any (ww_mutex) resources when
131 * calling the slow path as this could lead to deadlocks.
133 * When `intr` is `true`, the ssleep will be interruptable.
135 static inline int
136 __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx, bool slow, bool intr) {
137 int err;
139 KKASSERT(curthread);
141 spin_lock(&lock->lock);
142 for (;;) {
144 * In case no one holds the ww_mutex yet, we acquire it.
146 if (lock->acquired == 0) {
147 KKASSERT(lock->ctx == NULL);
148 lock->acquired += 1;
149 lock->ctx = ctx;
150 lock->owner = curthread;
151 err = 0;
152 break;
155 * In case we already hold the ww_mutex, simply increase
156 * a count and return -ALREADY.
158 else if (lock->owner == curthread) {
159 KKASSERT(lock->ctx == ctx);
160 lock->acquired += 1;
161 err = -EALREADY;
162 break;
165 * This is the contention case where the ww_mutex is
166 * already held by another context.
168 else {
170 * Three cases:
172 * - We are in the slow-path (first lock to obtain).
174 * - No context was specified. We assume a single
175 * resouce, so there is no danger of a deadlock.
177 * - An `older` process (`ctx`) tries to acquire a
178 * lock already held by a `younger` process.
179 * We put the `older` process to sleep until
180 * the `younger` process gives up all it's
181 * resources.
183 if (slow || ctx == NULL || ctx->stamp < lock->ctx->stamp) {
184 int s = ssleep(lock, &lock->lock,
185 intr ? PCATCH : 0,
186 ctx ? ctx->ww_class->name : "ww_mutex_lock", 0);
187 if (intr && (s == EINTR || s == ERESTART)) {
188 // XXX: Should we handle ERESTART?
189 err = -EINTR;
190 break;
194 * If a `younger` process tries to acquire a lock
195 * already held by an `older` process, we `wound` it,
196 * i.e. we return -EDEADLK because there is a potential
197 * risk for a deadlock. The `younger` process then
198 * should give up all it's resources and try again to
199 * acquire the lock in question, this time in a
200 * blocking manner.
202 else {
203 err = -EDEADLK;
204 break;
208 } /* for */
209 spin_unlock(&lock->lock);
210 return err;
213 static inline int
214 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
215 return __ww_mutex_lock(lock, ctx, false, false);
218 static inline void
219 ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
220 (void)__ww_mutex_lock(lock, ctx, true, false);
223 static inline int
224 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
225 return __ww_mutex_lock(lock, ctx, false, true);
228 static inline int __must_check
229 ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) {
230 return __ww_mutex_lock(lock, ctx, true, true);
233 static inline void
234 ww_mutex_unlock(struct ww_mutex *lock) {
235 spin_lock(&lock->lock);
236 KKASSERT(lock->owner == curthread);
237 KKASSERT(lock->acquired > 0);
239 --lock->acquired;
240 if (lock->acquired > 0) {
241 spin_unlock(&lock->lock);
242 return;
245 KKASSERT(lock->acquired == 0);
246 lock->ctx = NULL;
247 lock->owner = NULL;
248 spin_unlock(&lock->lock);
249 wakeup(lock);
252 static inline void
253 ww_mutex_destroy(struct ww_mutex *lock) {
254 KKASSERT(lock->acquired == 0);
255 KKASSERT(lock->ctx == NULL);
256 KKASSERT(lock->owner == NULL);
257 spin_uninit(&lock->lock);
#endif	/* _LINUX_WW_MUTEX_H_ */