/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/refcount.h>
#include <sys/rrwlock.h>

/*
 * This file contains the implementation of a re-entrant read
 * reader/writer lock (aka "rrwlock").
 *
 * This is a normal reader/writer lock with the additional feature
 * of allowing threads who have already obtained a read lock to
 * re-enter another read lock (re-entrant read) - even if there are
 * waiting writers.
 *
 * Callers who have not obtained a read lock give waiting writers priority.
 *
 * The rrwlock_t lock does not allow re-entrant writers, nor does it
 * allow a re-entrant mix of reads and writes (that is, it does not
 * allow a caller who has already obtained a read lock to be able to
 * then grab a write lock without first dropping all read locks, and
 * vice versa).
 *
 * The rrwlock_t uses tsd (thread specific data) to keep a list of
 * nodes (rrw_node_t), where each node keeps track of which specific
 * lock (rrw_node_t::rn_rrl) the thread has grabbed. Since re-entering
 * should be rare, a thread that grabs multiple reads on the same rrwlock_t
 * will store multiple rrw_node_ts of the same 'rn_rrl'. Nodes on the
 * tsd list can represent a different rrwlock_t. This allows a thread
 * to enter multiple and unique rrwlock_ts for read locks at the same time.
 *
 * Since using tsd exposes some overhead, the rrwlock_t only needs to
 * keep tsd data when writers are waiting. If no writers are waiting, then
 * a reader just bumps the anonymous read count (rr_anon_rcount) - no tsd
 * data is needed. Once a writer attempts to grab the lock, readers then
 * keep tsd data and bump the linked readers count (rr_linked_rcount).
 *
 * If there are waiting writers and there are anonymous readers, then a
 * reader doesn't know if it is a re-entrant lock. But since it may be one,
 * we allow the read to proceed (otherwise it could deadlock). Since once
 * waiting writers are active, readers no longer bump the anonymous count,
 * the anonymous readers will eventually flush themselves out. At this point,
 * readers will be able to tell if they are a re-entrant lock (have a
 * rrw_node_t entry for the lock) or not. If they are a re-entrant lock, then
 * we must let them proceed. If they are not, then the reader blocks for the
 * waiting writers. Hence, we do not starve writers.
 */
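
/*
 * Illustrative sketch (not part of the original file; the function name is
 * hypothetical): how a caller relies on the re-entrant read behavior
 * described above. The 'tag' identifies a hold and must match between the
 * enter and its corresponding exit.
 */
static void
rrw_reenter_example(rrwlock_t *rrl, void *tag)
{
	rrw_enter(rrl, RW_READER, tag);
	/*
	 * A nested read by the same thread never blocks, even if a
	 * writer started waiting after the first enter.
	 */
	rrw_enter(rrl, RW_READER, tag);
	rrw_exit(rrl, tag);
	rrw_exit(rrl, tag);

	/* A write enter is only legal once all reads have been dropped. */
	rrw_enter(rrl, RW_WRITER, tag);
	rrw_exit(rrl, tag);
}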
/* global key for TSD */
uint_t rrw_tsd_key;

typedef struct rrw_node {
	struct rrw_node *rn_next;
	rrwlock_t *rn_rrl;
	void *rn_tag;
} rrw_node_t;

static rrw_node_t *
rrn_find(rrwlock_t *rrl)
{
	rrw_node_t *rn;

	if (refcount_count(&rrl->rr_linked_rcount) == 0)
		return (NULL);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl)
			return (rn);
	}
	return (NULL);
}

/*
 * Add a node to the head of the singly linked list.
 */
static void
rrn_add(rrwlock_t *rrl, void *tag)
{
	rrw_node_t *rn;

	rn = kmem_alloc(sizeof (*rn), KM_SLEEP);
	rn->rn_rrl = rrl;
	rn->rn_next = tsd_get(rrw_tsd_key);
	rn->rn_tag = tag;
	VERIFY(tsd_set(rrw_tsd_key, rn) == 0);
}

/*
 * If a node is found for 'rrl', then remove the node from this
 * thread's list and return TRUE; otherwise return FALSE.
 */
static boolean_t
rrn_find_and_remove(rrwlock_t *rrl, void *tag)
{
	rrw_node_t *rn;
	rrw_node_t *prev = NULL;

	if (refcount_count(&rrl->rr_linked_rcount) == 0)
		return (B_FALSE);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl && rn->rn_tag == tag) {
			if (prev)
				prev->rn_next = rn->rn_next;
			else
				VERIFY(tsd_set(rrw_tsd_key, rn->rn_next) == 0);
			kmem_free(rn, sizeof (*rn));
			return (B_TRUE);
		}
		prev = rn;
	}
	return (B_FALSE);
}

void
rrw_init(rrwlock_t *rrl, boolean_t track_all)
{
	mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL);
	rrl->rr_writer = NULL;
	refcount_create(&rrl->rr_anon_rcount);
	refcount_create(&rrl->rr_linked_rcount);
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_track_all = track_all;
}

void
rrw_destroy(rrwlock_t *rrl)
{
	mutex_destroy(&rrl->rr_lock);
	cv_destroy(&rrl->rr_cv);
	ASSERT(rrl->rr_writer == NULL);
	refcount_destroy(&rrl->rr_anon_rcount);
	refcount_destroy(&rrl->rr_linked_rcount);
}

void
rrw_enter_read(rrwlock_t *rrl, void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL)
	if (rrl->rr_writer == NULL && !rrl->rr_writer_wanted &&
	    !rrl->rr_track_all) {
		rrl->rr_anon_rcount.rc_count++;
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
#endif
	ASSERT(rrl->rr_writer != curthread);
	ASSERT(refcount_count(&rrl->rr_anon_rcount) >= 0);

	while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
	    refcount_is_zero(&rrl->rr_anon_rcount) &&
	    rrn_find(rrl) == NULL))
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);

	if (rrl->rr_writer_wanted || rrl->rr_track_all) {
		/* may or may not be a re-entrant enter */
		rrn_add(rrl, tag);
		(void) refcount_add(&rrl->rr_linked_rcount, tag);
	} else {
		(void) refcount_add(&rrl->rr_anon_rcount, tag);
	}
	ASSERT(rrl->rr_writer == NULL);
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter_write(rrwlock_t *rrl)
{
	mutex_enter(&rrl->rr_lock);
	ASSERT(rrl->rr_writer != curthread);

	while (refcount_count(&rrl->rr_anon_rcount) > 0 ||
	    refcount_count(&rrl->rr_linked_rcount) > 0 ||
	    rrl->rr_writer != NULL) {
		rrl->rr_writer_wanted = B_TRUE;
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);
	}
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_writer = curthread;
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter(rrwlock_t *rrl, krw_t rw, void *tag)
{
	if (rw == RW_READER)
		rrw_enter_read(rrl, tag);
	else
		rrw_enter_write(rrl);
}

void
rrw_exit(rrwlock_t *rrl, void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL)
	if (!rrl->rr_writer && rrl->rr_linked_rcount.rc_count == 0) {
		rrl->rr_anon_rcount.rc_count--;
		if (rrl->rr_anon_rcount.rc_count == 0)
			cv_broadcast(&rrl->rr_cv);
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__exitmiss);
#endif
	ASSERT(!refcount_is_zero(&rrl->rr_anon_rcount) ||
	    !refcount_is_zero(&rrl->rr_linked_rcount) ||
	    rrl->rr_writer != NULL);

	if (rrl->rr_writer == NULL) {
		int64_t count;
		if (rrn_find_and_remove(rrl, tag)) {
			count = refcount_remove(&rrl->rr_linked_rcount, tag);
		} else {
			ASSERT(!rrl->rr_track_all);
			count = refcount_remove(&rrl->rr_anon_rcount, tag);
		}
		if (count == 0)
			cv_broadcast(&rrl->rr_cv);
	} else {
		ASSERT(rrl->rr_writer == curthread);
		ASSERT(refcount_is_zero(&rrl->rr_anon_rcount) &&
		    refcount_is_zero(&rrl->rr_linked_rcount));
		rrl->rr_writer = NULL;
		cv_broadcast(&rrl->rr_cv);
	}
	mutex_exit(&rrl->rr_lock);
}

/*
 * If the lock was created with track_all, rrw_held(RW_READER) will return
 * B_TRUE iff the current thread has the lock for reader. Otherwise it may
 * return B_TRUE if any thread has the lock for reader.
 */
boolean_t
rrw_held(rrwlock_t *rrl, krw_t rw)
{
	boolean_t held;

	mutex_enter(&rrl->rr_lock);
	if (rw == RW_WRITER) {
		held = (rrl->rr_writer == curthread);
	} else {
		held = (!refcount_is_zero(&rrl->rr_anon_rcount) ||
		    rrn_find(rrl) != NULL);
	}
	mutex_exit(&rrl->rr_lock);

	return (held);
}

void
rrw_tsd_destroy(void *arg)
{
	rrw_node_t *rn = arg;
	if (rn != NULL) {
		panic("thread %p terminating with rrw lock %p held",
		    (void *)curthread, (void *)rn->rn_rrl);
	}
}

/*
 * A reader-mostly lock implementation, tuning the above reader/writer lock
 * for highly parallel read acquisitions, while pessimizing writes.
 *
 * The idea is to split a single busy lock into an array of locks, so that
 * each reader can lock only one of them for read, depending on the result
 * of a simple hash function. That proportionally reduces lock congestion.
 * A writer, at the same time, has to sequentially acquire write on all the
 * locks. That makes write acquisition proportionally slower, but in places
 * where it is used (filesystem unmount) performance is not critical.
 *
 * All the functions below are direct wrappers around the functions above.
 */
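
/*
 * Illustrative sketch (not part of the original file; the function name is
 * hypothetical): a caller of the reader-mostly wrapper. A reader locks only
 * the one rrwlock_t stripe selected by RRM_TD_LOCK() below, while a writer
 * must sweep all RRM_NUM_LOCKS stripes.
 */
static void
rrm_usage_example(rrmlock_t *rrm, void *tag)
{
	rrm_enter(rrm, RW_READER, tag);		/* takes a single stripe */
	/* ... read-side critical section ... */
	rrm_exit(rrm, tag);			/* must run on the same thread */

	rrm_enter(rrm, RW_WRITER, tag);		/* takes every stripe in order */
	/* ... exclusive section, e.g. unmount ... */
	rrm_exit(rrm, tag);
}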
void
rrm_init(rrmlock_t *rrl, boolean_t track_all)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_init(&rrl->locks[i], track_all);
}

void
rrm_destroy(rrmlock_t *rrl)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_destroy(&rrl->locks[i]);
}

void
rrm_enter(rrmlock_t *rrl, krw_t rw, void *tag)
{
	if (rw == RW_READER)
		rrm_enter_read(rrl, tag);
	else
		rrm_enter_write(rrl);
}

/*
 * This maps the current thread to a specific lock. Note that the lock
 * must be released by the same thread that acquired it. We do this
 * mapping by taking the thread pointer mod a prime number. We examine
 * only the low 32 bits of the thread pointer, because 32-bit division
 * is faster than 64-bit division, and the high 32 bits have little
 * entropy anyway.
 */
#define	RRM_TD_LOCK()	(((uint32_t)(uintptr_t)(curthread)) % RRM_NUM_LOCKS)
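
/*
 * Illustrative sketch (not part of the original file; the function name is
 * hypothetical): RRM_TD_LOCK() depends only on curthread, so it yields the
 * same in-range stripe index every time a given thread evaluates it. That
 * is why the stripe taken in rrm_enter_read() is the one dropped in
 * rrm_exit(), provided the same thread performs both calls.
 */
static uint32_t
rrm_example_stripe(void)
{
	uint32_t idx = RRM_TD_LOCK();

	ASSERT3U(idx, <, RRM_NUM_LOCKS);	/* valid index into locks[] */
	ASSERT3U(idx, ==, RRM_TD_LOCK());	/* stable for this thread */
	return (idx);
}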
void
rrm_enter_read(rrmlock_t *rrl, void *tag)
{
	rrw_enter_read(&rrl->locks[RRM_TD_LOCK()], tag);
}

void
rrm_enter_write(rrmlock_t *rrl)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_enter_write(&rrl->locks[i]);
}

void
rrm_exit(rrmlock_t *rrl, void *tag)
{
	int i;

	if (rrl->locks[0].rr_writer == curthread) {
		for (i = 0; i < RRM_NUM_LOCKS; i++)
			rrw_exit(&rrl->locks[i], tag);
	} else {
		rrw_exit(&rrl->locks[RRM_TD_LOCK()], tag);
	}
}

boolean_t
rrm_held(rrmlock_t *rrl, krw_t rw)
{
	if (rw == RW_WRITER) {
		return (rrw_held(&rrl->locks[0], rw));
	} else {
		return (rrw_held(&rrl->locks[RRM_TD_LOCK()], rw));
	}
}