hammer2 - More involved refactoring of chain_repparent, cleanup
[dragonfly.git] / sys / vfs / hammer2 / hammer2_ccms.c
/*
 * Copyright (c) 2006,2012-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * The Cache Coherency Management System (CCMS)
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/objcache.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <machine/limits.h>

#include <sys/spinlock2.h>

#include "hammer2_ccms.h"
#include "hammer2.h"

int ccms_debug = 0;
void
ccms_cst_init(ccms_cst_t *cst)
{
        bzero(cst, sizeof(*cst));
        spin_init(&cst->spin, "ccmscst");
}

void
ccms_cst_uninit(ccms_cst_t *cst)
{
        KKASSERT(cst->count == 0);
        if (cst->state != CCMS_STATE_INVALID) {
                /* XXX */
        }
}

/************************************************************************
 *                        CST SUPPORT FUNCTIONS                         *
 ************************************************************************/

/*
 * Acquire local cache state & lock.  If the current thread already holds
 * the lock exclusively we bump the exclusive count, even if the thread is
 * trying to get a shared lock.
 */
void
ccms_thread_lock(ccms_cst_t *cst, ccms_state_t state)
{
        /*
         * Regardless of the type of lock requested, if the current thread
         * already holds an exclusive lock we bump the exclusive count and
         * return.  This requires no spinlock.
         */
        LOCKENTER;
        if (cst->count < 0 && cst->td == curthread) {
                --cst->count;
                return;
        }

        /*
         * Otherwise use the spinlock to interlock the operation and sleep
         * as necessary.
         */
        hammer2_spin_ex(&cst->spin);
        if (state == CCMS_STATE_SHARED) {
                while (cst->count < 0 || cst->upgrade) {
                        cst->blocked = 1;
                        ssleep(cst, &cst->spin, 0, "ccmslck", hz);
                }
                ++cst->count;
                KKASSERT(cst->td == NULL);
        } else if (state == CCMS_STATE_EXCLUSIVE) {
                while (cst->count != 0 || cst->upgrade) {
                        cst->blocked = 1;
                        ssleep(cst, &cst->spin, 0, "ccmslck", hz);
                }
                cst->count = -1;
                cst->td = curthread;
        } else {
                hammer2_spin_unex(&cst->spin);
                panic("ccms_thread_lock: bad state %d\n", state);
        }
        hammer2_spin_unex(&cst->spin);
}
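
/*
 * Hypothetical usage sketch (illustration only, not part of the original
 * file): because an exclusive holder just bumps the exclusive count, the
 * owning thread may re-acquire the lock recursively, even if the nested
 * request asks for SHARED.  Every acquisition must still be paired with a
 * ccms_thread_unlock().  The function name below is made up.
 */
#if 0
static void
example_recursive_lock(ccms_cst_t *cst)
{
        ccms_thread_lock(cst, CCMS_STATE_EXCLUSIVE);
        ccms_thread_lock(cst, CCMS_STATE_SHARED);   /* recurses, stays excl */
        /* ... modify the structure protected by cst ... */
        ccms_thread_unlock(cst);
        ccms_thread_unlock(cst);
}
#endif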
/*
 * Same as ccms_thread_lock() but acquires the lock non-blocking.  Returns
 * 0 on success, EBUSY on failure.
 */
int
ccms_thread_lock_nonblock(ccms_cst_t *cst, ccms_state_t state)
{
        if (cst->count < 0 && cst->td == curthread) {
                --cst->count;
                LOCKENTER;
                return(0);
        }

        hammer2_spin_ex(&cst->spin);
        if (state == CCMS_STATE_SHARED) {
                if (cst->count < 0 || cst->upgrade) {
                        hammer2_spin_unex(&cst->spin);
                        return (EBUSY);
                }
                ++cst->count;
                KKASSERT(cst->td == NULL);
        } else if (state == CCMS_STATE_EXCLUSIVE) {
                if (cst->count != 0 || cst->upgrade) {
                        hammer2_spin_unex(&cst->spin);
                        return (EBUSY);
                }
                cst->count = -1;
                cst->td = curthread;
        } else {
                hammer2_spin_unex(&cst->spin);
                panic("ccms_thread_lock_nonblock: bad state %d\n", state);
        }
        hammer2_spin_unex(&cst->spin);
        LOCKENTER;
        return(0);
}
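
/*
 * Hypothetical usage sketch (illustration only): the non-blocking variant
 * returns EBUSY instead of sleeping, so a caller typically retries or
 * falls back to a slower path.  The function name below is made up.
 */
#if 0
static int
example_trylock(ccms_cst_t *cst)
{
        if (ccms_thread_lock_nonblock(cst, CCMS_STATE_SHARED) == EBUSY)
                return (EBUSY);         /* caller retries or blocks elsewhere */
        /* ... read the structure protected by cst ... */
        ccms_thread_unlock(cst);
        return (0);
}
#endif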
ccms_state_t
ccms_thread_lock_temp_release(ccms_cst_t *cst)
{
        if (cst->count < 0) {
                ccms_thread_unlock(cst);
                return(CCMS_STATE_EXCLUSIVE);
        }
        if (cst->count > 0) {
                ccms_thread_unlock(cst);
                return(CCMS_STATE_SHARED);
        }
        return (CCMS_STATE_INVALID);
}

void
ccms_thread_lock_temp_restore(ccms_cst_t *cst, ccms_state_t ostate)
{
        ccms_thread_lock(cst, ostate);
}
/*
 * Temporarily upgrade a thread lock for making local structural changes.
 * No new shared or exclusive locks can be acquired by others while we are
 * upgrading, but other upgraders are allowed.
 */
ccms_state_t
ccms_thread_lock_upgrade(ccms_cst_t *cst)
{
        /*
         * Nothing to do if already exclusive
         */
        if (cst->count < 0) {
                KKASSERT(cst->td == curthread);
                return(CCMS_STATE_EXCLUSIVE);
        }

        /*
         * Convert a shared lock to exclusive.
         */
        if (cst->count > 0) {
                hammer2_spin_ex(&cst->spin);
                ++cst->upgrade;
                --cst->count;
                while (cst->count) {
                        cst->blocked = 1;
                        ssleep(cst, &cst->spin, 0, "ccmsupg", hz);
                }
                cst->count = -1;
                cst->td = curthread;
                hammer2_spin_unex(&cst->spin);
                return(CCMS_STATE_SHARED);
        }
        panic("ccms_thread_lock_upgrade: not locked");
        /* NOT REACHED */
        return(0);
}
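
/*
 * Hypothetical usage sketch (illustration only): a shared holder upgrades
 * to exclusive around a structural change, then restores its original
 * state with ccms_thread_lock_downgrade() before releasing the lock.
 * The function name below is made up.
 */
#if 0
static void
example_upgrade(ccms_cst_t *cst)
{
        ccms_state_t ostate;

        ccms_thread_lock(cst, CCMS_STATE_SHARED);
        ostate = ccms_thread_lock_upgrade(cst);         /* now exclusive */
        /* ... make local structural changes ... */
        ccms_thread_lock_downgrade(cst, ostate);        /* back to shared */
        ccms_thread_unlock(cst);
}
#endif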
void
ccms_thread_lock_downgrade(ccms_cst_t *cst, ccms_state_t ostate)
{
        if (ostate == CCMS_STATE_SHARED) {
                KKASSERT(cst->td == curthread);
                KKASSERT(cst->count == -1);
                hammer2_spin_ex(&cst->spin);
                --cst->upgrade;
                cst->count = 1;
                cst->td = NULL;
                if (cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                } else {
                        hammer2_spin_unex(&cst->spin);
                }
        }
        /* else nothing to do if excl->excl */
}
/*
 * Release a local thread lock
 */
void
ccms_thread_unlock(ccms_cst_t *cst)
{
        LOCKEXIT;
        if (cst->count < 0) {
                /*
                 * Exclusive
                 */
                KKASSERT(cst->td == curthread);
                if (cst->count < -1) {
                        ++cst->count;
                        return;
                }
                hammer2_spin_ex(&cst->spin);
                KKASSERT(cst->count == -1);
                cst->count = 0;
                cst->td = NULL;
                if (cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                        return;
                }
                hammer2_spin_unex(&cst->spin);
        } else if (cst->count > 0) {
                /*
                 * Shared
                 */
                hammer2_spin_ex(&cst->spin);
                if (--cst->count == 0 && cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                        return;
                }
                hammer2_spin_unex(&cst->spin);
        } else {
                panic("ccms_thread_unlock: bad zero count\n");
        }
}
void
ccms_thread_lock_setown(ccms_cst_t *cst)
{
        KKASSERT(cst->count < 0);
        cst->td = curthread;
}
/*
 * Release a previously upgraded local thread lock
 */
void
ccms_thread_unlock_upgraded(ccms_cst_t *cst, ccms_state_t ostate)
{
        if (ostate == CCMS_STATE_SHARED) {
                LOCKEXIT;
                KKASSERT(cst->td == curthread);
                KKASSERT(cst->count == -1);
                hammer2_spin_ex(&cst->spin);
                --cst->upgrade;
                cst->count = 0;
                cst->td = NULL;
                if (cst->blocked) {
                        cst->blocked = 0;
                        hammer2_spin_unex(&cst->spin);
                        wakeup(cst);
                } else {
                        hammer2_spin_unex(&cst->spin);
                }
        } else {
                ccms_thread_unlock(cst);
        }
}
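
/*
 * Hypothetical usage sketch (illustration only): instead of downgrading,
 * an upgraded lock can be dropped in one step with
 * ccms_thread_unlock_upgraded(), which releases both the lock and the
 * upgrade count.  The function name below is made up.
 */
#if 0
static void
example_unlock_upgraded(ccms_cst_t *cst)
{
        ccms_state_t ostate;

        ccms_thread_lock(cst, CCMS_STATE_SHARED);
        ostate = ccms_thread_lock_upgrade(cst);
        /* ... structural changes ... */
        ccms_thread_unlock_upgraded(cst, ostate);
}
#endif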
int
ccms_thread_lock_owned(ccms_cst_t *cst)
{
        return(cst->count < 0 && cst->td == curthread);
}