/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains the code to implement file range locking in
 * ZFS, although there isn't much specific to ZFS (all that comes to
 * mind is support for growing the blocksize).
 *
 * The interface is defined in zfs_rlock.h, but essentially:
 *	rl = zfs_range_lock(zp, off, len, lock_type);
 *	zfs_range_unlock(rl);
 *	zfs_range_reduce(rl, off, len);
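 *
 * For example, a caller protecting a read might do the following
 * (a minimal sketch; "woff" and "nbytes" are illustrative names,
 * not identifiers from this file):
 *
 *	rl_t *rl = zfs_range_lock(zp, woff, nbytes, RL_READER);
 *	... read the data in [woff, woff + nbytes) ...
 *	zfs_range_unlock(rl);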
 *
 * An AVL tree is used to maintain the state of the existing ranges
 * that are locked for exclusive (writer) or shared (reader) use.
 * The starting range offset is used for searching and sorting the tree.
 *
 * The (hopefully) usual case is of no overlaps or contention for locks.
 * On entry to zfs_range_lock() an rl_t is allocated; the tree is
 * searched, no overlap is found, and *this* rl_t is placed in the tree.
 *
 * Overlaps/Reference counting/Proxy locks
 * ---------------------------------------
 * The AVL code only allows one node at a particular offset. It is also
 * very inefficient to search through all previous entries looking for
 * overlaps (because the very first entry in the ordered list might be
 * at offset 0 but cover the whole file).
 * So this implementation uses reference counts and proxy range locks.
 * Firstly, only reader locks use reference counts and proxy locks,
 * because writer locks are exclusive.
 * When a reader lock overlaps with another then a proxy lock is created
 * for that range and replaces the original lock. If the overlap
 * is exact then the reference count of the proxy is simply incremented.
 * Otherwise, the proxy lock is split into smaller lock ranges and
 * new proxy locks are created for the non-overlapping ranges.
 * The reference counts are adjusted accordingly.
 * Meanwhile, the original lock is kept around (this is the caller's
 * handle) and its offset and length are used when releasing the lock.
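 *
 * For example (offsets purely illustrative): if reader A holds
 * [0, 100) and reader B then locks [50, 150), A's entry is replaced
 * in the tree by proxies [0, 50) with r_cnt == 1 and [50, 100) with
 * r_cnt == 2, and a new proxy [100, 150) with r_cnt == 1 is added.
 * A's and B's original rl_t's (now with r_cnt == 0) remain the
 * handles their owners later pass to zfs_range_unlock().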
 *
 * In order to make wakeups efficient and to ensure multiple continuous
 * readers on a range don't starve a writer for the same range lock,
 * two condition variables are allocated in each rl_t.
 * If a writer (or reader) can't get a range it initialises the writer
 * (or reader) cv; sets a flag saying there's a writer (or reader) waiting;
 * and waits on that cv. When a thread unlocks that range it wakes up all
 * writers then all readers before destroying the lock.
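 *
 * Roughly, a blocked writer does the following against the overlapping
 * rl_t it found (see zfs_range_lock_writer() below for the real code):
 *
 *	if (!rl->r_write_wanted) {
 *		cv_init(&rl->r_wr_cv, NULL, CV_DEFAULT, NULL);
 *		rl->r_write_wanted = B_TRUE;
 *	}
 *	cv_wait(&rl->r_wr_cv, &zp->z_range_lock);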
 *
 * Append mode writes need to lock a range at the end of a file.
 * The offset of the end of the file is determined under the
 * range locking mutex, the lock type is converted from RL_APPEND to
 * RL_WRITER, and the range is locked.
 *
 * ZFS supports multiple block sizes, currently up to 128K. The smallest
 * block size is used for the file, which is grown as needed. During this
 * growth all other writers and readers must be excluded.
 * So if the block size needs to be grown then the whole file is
 * exclusively locked, and later the caller will reduce the lock
 * range to just the range to be written using zfs_range_reduce().
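 *
 * For example (a sketch of that path, not a verbatim caller): a write
 * that triggers block size growth ends up holding [0, UINT64_MAX) as
 * RL_WRITER, grows the block size, and then shrinks the lock before
 * copying the data:
 *
 *	rl = zfs_range_lock(zp, woff, n, RL_WRITER);
 *	... grow the block size while the whole file is locked ...
 *	zfs_range_reduce(rl, woff, n);
 *	... write the data ...
 *	zfs_range_unlock(rl);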
 */

#include <sys/zfs_rlock.h>

/*
 * Check if a write lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_writer(znode_t *zp, rl_t *new)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *rl;
	avl_index_t where;
	uint64_t end_size;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	for (;;) {
		/*
		 * Range locking is also used by zvol and uses a
		 * dummied up znode. However, for zvol, we don't need to
		 * append or grow blocksize, and besides we don't have
		 * "sa" data or a z_zfsvfs - so skip that processing.
		 *
		 * Yes, this is ugly, and would be solved by not handling
		 * grow or append in range lock code. If that was done then
		 * we could make the range locking code generically available
		 * to other non-zfs consumers.
		 */
		if (zp->z_vnode) { /* caller is ZPL */
			/*
			 * If in append mode pick up the current end of file.
			 * This is done under z_range_lock to avoid races.
			 */
			if (new->r_type == RL_APPEND)
				new->r_off = zp->z_size;

			/*
			 * If we need to grow the block size then grab the whole
			 * file range. This is also done under z_range_lock to
			 * avoid races.
			 */
			end_size = MAX(zp->z_size, new->r_off + len);
			if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
			    zp->z_blksz < zp->z_zfsvfs->z_max_blksz)) {
				new->r_off = 0;
				new->r_len = UINT64_MAX;
			}
		}

		/*
		 * First check for the usual case of no locks
		 */
		if (avl_numnodes(tree) == 0) {
			new->r_type = RL_WRITER; /* convert to writer */
			avl_add(tree, new);
			return;
		}

		/*
		 * Look for any locks in the range.
		 */
		rl = avl_find(tree, new, &where);
		if (rl)
			goto wait; /* already locked at same offset */

		rl = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
		if (rl && (rl->r_off < new->r_off + new->r_len))
			goto wait;

		rl = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);
		if (rl && rl->r_off + rl->r_len > new->r_off)
			goto wait;

		new->r_type = RL_WRITER; /* convert possible RL_APPEND */
		avl_insert(tree, new, where);
		return;
wait:
		if (!rl->r_write_wanted) {
			cv_init(&rl->r_wr_cv, NULL, CV_DEFAULT, NULL);
			rl->r_write_wanted = B_TRUE;
		}
		cv_wait(&rl->r_wr_cv, &zp->z_range_lock);

		/* reset to original */
		new->r_off = off;
		new->r_len = len;
	}
}

/*
 * If this is an original (non-proxy) lock then replace it by
 * a proxy and return the proxy.
 */
static rl_t *
zfs_range_proxify(avl_tree_t *tree, rl_t *rl)
{
	rl_t *proxy;

	if (rl->r_proxy)
		return (rl); /* already a proxy */

	ASSERT3U(rl->r_cnt, ==, 1);
	ASSERT(rl->r_write_wanted == B_FALSE);
	ASSERT(rl->r_read_wanted == B_FALSE);
	avl_remove(tree, rl);
	rl->r_cnt = 0;

	/* create a proxy range lock */
	proxy = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	proxy->r_off = rl->r_off;
	proxy->r_len = rl->r_len;
	proxy->r_cnt = 1;
	proxy->r_type = RL_READER;
	proxy->r_proxy = B_TRUE;
	proxy->r_write_wanted = B_FALSE;
	proxy->r_read_wanted = B_FALSE;
	avl_add(tree, proxy);

	return (proxy);
}

/*
 * Split the range lock at the supplied offset
 * returning the *front* proxy.
 */
static rl_t *
zfs_range_split(avl_tree_t *tree, rl_t *rl, uint64_t off)
{
	rl_t *front, *rear;

	ASSERT3U(rl->r_len, >, 1);
	ASSERT3U(off, >, rl->r_off);
	ASSERT3U(off, <, rl->r_off + rl->r_len);
	ASSERT(rl->r_write_wanted == B_FALSE);
	ASSERT(rl->r_read_wanted == B_FALSE);

	/* create the rear proxy range lock */
	rear = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	rear->r_off = off;
	rear->r_len = rl->r_off + rl->r_len - off;
	rear->r_cnt = rl->r_cnt;
	rear->r_type = RL_READER;
	rear->r_proxy = B_TRUE;
	rear->r_write_wanted = B_FALSE;
	rear->r_read_wanted = B_FALSE;

	front = zfs_range_proxify(tree, rl);
	front->r_len = off - rl->r_off;

	avl_insert_here(tree, rear, front, AVL_AFTER);

	return (front);
}

/*
 * Create and add a new proxy range lock for the supplied range.
 */
static void
zfs_range_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
{
	rl_t *rl;

	rl = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	rl->r_off = off;
	rl->r_len = len;
	rl->r_cnt = 1;
	rl->r_type = RL_READER;
	rl->r_proxy = B_TRUE;
	rl->r_write_wanted = B_FALSE;
	rl->r_read_wanted = B_FALSE;
	avl_add(tree, rl);
}

static void
zfs_range_add_reader(avl_tree_t *tree, rl_t *new, rl_t *prev, avl_index_t where)
{
	rl_t *next;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	/*
	 * prev arrives either:
	 * - pointing to an entry at the same offset
	 * - pointing to the entry with the closest previous offset whose
	 *   range may overlap with the new range
	 * - null, if there were no ranges starting before the new one
	 */
	if (prev) {
		if (prev->r_off + prev->r_len <= off) {
			prev = NULL;
		} else if (prev->r_off != off) {
			/*
			 * convert to proxy if needed then
			 * split this entry and bump ref count
			 */
			prev = zfs_range_split(tree, prev, off);
			prev = AVL_NEXT(tree, prev); /* move to rear range */
		}
	}
	ASSERT((prev == NULL) || (prev->r_off == off));

	if (prev)
		next = prev;
	else
		next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);

	if (next == NULL || off + len <= next->r_off) {
		/* no overlaps, use the original new rl_t in the tree */
		avl_insert(tree, new, where);
		return;
	}

	if (off < next->r_off) {
		/* Add a proxy for initial range before the overlap */
		zfs_range_new_proxy(tree, off, next->r_off - off);
	}

	new->r_cnt = 0; /* will use proxies in tree */
	/*
	 * We now search forward through the ranges, until we go past the end
	 * of the new range. For each entry we make it a proxy if it
	 * isn't already, then bump its reference count. If there are any
	 * gaps between the ranges then we create a new proxy range.
	 */
	for (prev = NULL; next; prev = next, next = AVL_NEXT(tree, next)) {
		if (off + len <= next->r_off)
			break;
		if (prev && prev->r_off + prev->r_len < next->r_off) {
			/* there's a gap */
			ASSERT3U(next->r_off, >, prev->r_off + prev->r_len);
			zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
			    next->r_off - (prev->r_off + prev->r_len));
		}
		if (off + len == next->r_off + next->r_len) {
			/* exact overlap with end */
			next = zfs_range_proxify(tree, next);
			next->r_cnt++;
			return;
		}
		if (off + len < next->r_off + next->r_len) {
			/* new range ends in the middle of this block */
			next = zfs_range_split(tree, next, off + len);
			next->r_cnt++;
			return;
		}
		ASSERT3U(off + len, >, next->r_off + next->r_len);
		next = zfs_range_proxify(tree, next);
		next->r_cnt++;
	}

	/* Add the remaining end range. */
	zfs_range_new_proxy(tree, prev->r_off + prev->r_len,
	    (off + len) - (prev->r_off + prev->r_len));
}

/*
 * Check if a reader lock can be grabbed, or wait and recheck until available.
 */
static void
zfs_range_lock_reader(znode_t *zp, rl_t *new)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *prev, *next;
	avl_index_t where;
	uint64_t off = new->r_off;
	uint64_t len = new->r_len;

	/*
	 * Look for any writer locks in the range.
	 */
retry:
	prev = avl_find(tree, new, &where);
	if (prev == NULL)
		prev = (rl_t *)avl_nearest(tree, where, AVL_BEFORE);

	/*
	 * Check the previous range for a writer lock overlap.
	 */
	if (prev && (off < prev->r_off + prev->r_len)) {
		if ((prev->r_type == RL_WRITER) || (prev->r_write_wanted)) {
			if (!prev->r_read_wanted) {
				cv_init(&prev->r_rd_cv, NULL, CV_DEFAULT, NULL);
				prev->r_read_wanted = B_TRUE;
			}
			cv_wait(&prev->r_rd_cv, &zp->z_range_lock);
			goto retry;
		}
		if (off + len < prev->r_off + prev->r_len)
			goto got_lock;
	}

	/*
	 * Search through the following ranges to see if there's
	 * any write lock overlap.
	 */
	if (prev)
		next = AVL_NEXT(tree, prev);
	else
		next = (rl_t *)avl_nearest(tree, where, AVL_AFTER);
	for (; next; next = AVL_NEXT(tree, next)) {
		if (off + len <= next->r_off)
			goto got_lock;
		if ((next->r_type == RL_WRITER) || (next->r_write_wanted)) {
			if (!next->r_read_wanted) {
				cv_init(&next->r_rd_cv, NULL, CV_DEFAULT, NULL);
				next->r_read_wanted = B_TRUE;
			}
			cv_wait(&next->r_rd_cv, &zp->z_range_lock);
			goto retry;
		}
		if (off + len <= next->r_off + next->r_len)
			goto got_lock;
	}

got_lock:
	/*
	 * Add the read lock, which may involve splitting existing
	 * locks and bumping ref counts (r_cnt).
	 */
	zfs_range_add_reader(tree, new, prev, where);
}

/*
 * Lock a range (offset, length) as either shared (RL_READER)
 * or exclusive (RL_WRITER). Returns the range lock structure
 * for later unlocking or reducing the range (if the entire file
 * was previously locked as RL_WRITER).
 */
rl_t *
zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type)
{
	rl_t *new;

	ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);

	new = kmem_alloc(sizeof (rl_t), KM_SLEEP);
	new->r_zp = zp;
	new->r_off = off;
	if (len + off < off)	/* overflow */
		len = UINT64_MAX - off;
	new->r_len = len;
	new->r_cnt = 1; /* assume it's going to be in the tree */
	new->r_type = type;
	new->r_proxy = B_FALSE;
	new->r_write_wanted = B_FALSE;
	new->r_read_wanted = B_FALSE;

	mutex_enter(&zp->z_range_lock);
	if (type == RL_READER) {
		/*
		 * First check for the usual case of no locks
		 */
		if (avl_numnodes(&zp->z_range_avl) == 0)
			avl_add(&zp->z_range_avl, new);
		else
			zfs_range_lock_reader(zp, new);
	} else
		zfs_range_lock_writer(zp, new); /* RL_WRITER or RL_APPEND */
	mutex_exit(&zp->z_range_lock);
	return (new);
}

/*
 * Unlock a reader lock
 */
static void
zfs_range_unlock_reader(znode_t *zp, rl_t *remove)
{
	avl_tree_t *tree = &zp->z_range_avl;
	rl_t *rl, *next;
	uint64_t len;

	/*
	 * The common case is when the remove entry is in the tree
	 * (cnt == 1) meaning there have been no other reader locks
	 * overlapping with this one. Otherwise the remove entry will have
	 * been removed from the tree and replaced by proxies (one or
	 * more ranges mapping to the entire range).
	 */
	if (remove->r_cnt == 1) {
		avl_remove(tree, remove);
		if (remove->r_write_wanted) {
			cv_broadcast(&remove->r_wr_cv);
			cv_destroy(&remove->r_wr_cv);
		}
		if (remove->r_read_wanted) {
			cv_broadcast(&remove->r_rd_cv);
			cv_destroy(&remove->r_rd_cv);
		}
	} else {
		ASSERT3U(remove->r_cnt, ==, 0);
		ASSERT3U(remove->r_write_wanted, ==, 0);
		ASSERT3U(remove->r_read_wanted, ==, 0);
		/*
		 * Find start proxy representing this reader lock,
		 * then decrement ref count on all proxies
		 * that make up this range, freeing them as needed.
		 */
		rl = avl_find(tree, remove, NULL);
		ASSERT(rl->r_type == RL_READER);
		for (len = remove->r_len; len != 0; rl = next) {
			len -= rl->r_len;
			if (len) {
				next = AVL_NEXT(tree, rl);
				ASSERT(rl->r_off + rl->r_len == next->r_off);
				ASSERT(next->r_type == RL_READER);
			}
			rl->r_cnt--;
			if (rl->r_cnt == 0) {
				avl_remove(tree, rl);
				if (rl->r_write_wanted) {
					cv_broadcast(&rl->r_wr_cv);
					cv_destroy(&rl->r_wr_cv);
				}
				if (rl->r_read_wanted) {
					cv_broadcast(&rl->r_rd_cv);
					cv_destroy(&rl->r_rd_cv);
				}
				kmem_free(rl, sizeof (rl_t));
			}
		}
	}
	kmem_free(remove, sizeof (rl_t));
}

/*
 * Unlock range and destroy range lock structure.
 */
void
zfs_range_unlock(rl_t *rl)
{
	znode_t *zp = rl->r_zp;

	ASSERT(rl->r_type == RL_WRITER || rl->r_type == RL_READER);
	ASSERT(rl->r_cnt == 1 || rl->r_cnt == 0);
	ASSERT(!rl->r_proxy);

	mutex_enter(&zp->z_range_lock);
	if (rl->r_type == RL_WRITER) {
		/* writer locks can't be shared or split */
		avl_remove(&zp->z_range_avl, rl);
		mutex_exit(&zp->z_range_lock);
		if (rl->r_write_wanted) {
			cv_broadcast(&rl->r_wr_cv);
			cv_destroy(&rl->r_wr_cv);
		}
		if (rl->r_read_wanted) {
			cv_broadcast(&rl->r_rd_cv);
			cv_destroy(&rl->r_rd_cv);
		}
		kmem_free(rl, sizeof (rl_t));
	} else {
		/*
		 * lock may be shared, let zfs_range_unlock_reader()
		 * release the lock and free the rl_t
		 */
		zfs_range_unlock_reader(zp, rl);
		mutex_exit(&zp->z_range_lock);
	}
}

/*
 * Reduce range locked as RL_WRITER from whole file to specified range.
 * Asserts the whole file is exclusively locked and so there's only one
 * entry in the tree.
 */
void
zfs_range_reduce(rl_t *rl, uint64_t off, uint64_t len)
{
	znode_t *zp = rl->r_zp;

	/* Ensure there are no other locks */
	ASSERT(avl_numnodes(&zp->z_range_avl) == 1);
	ASSERT(rl->r_off == 0);
	ASSERT(rl->r_type == RL_WRITER);
	ASSERT(!rl->r_proxy);
	ASSERT3U(rl->r_len, ==, UINT64_MAX);
	ASSERT3U(rl->r_cnt, ==, 1);

	mutex_enter(&zp->z_range_lock);
	rl->r_off = off;
	rl->r_len = len;
	mutex_exit(&zp->z_range_lock);
	if (rl->r_write_wanted)
		cv_broadcast(&rl->r_wr_cv);
	if (rl->r_read_wanted)
		cv_broadcast(&rl->r_rd_cv);
}

/*
 * AVL comparison function used to order range locks.
 * Locks are ordered on the start offset of the range.
 */
int
zfs_range_compare(const void *arg1, const void *arg2)
{
	const rl_t *rl1 = arg1;
	const rl_t *rl2 = arg2;

	if (rl1->r_off > rl2->r_off)
		return (1);
	if (rl1->r_off < rl2->r_off)
		return (-1);
	return (0);
}
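
/*
 * This comparator is registered by the znode code (outside this file)
 * when the range lock AVL tree is created, along the lines of the
 * following sketch (assuming the rl_t's AVL linkage field is r_node,
 * as declared in zfs_rlock.h):
 *
 *	avl_create(&zp->z_range_avl, zfs_range_compare,
 *	    sizeof (rl_t), offsetof(rl_t, r_node));
 */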