/*-
 * Copyright (c) 2008 Isilon Inc http://www.isilon.com/
 * Authors: Doug Rabson <dfr@rabson.org>
 * Developed with Red Inc: Alfred Perlstein <alfred@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_debug_lockf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/lockf.h>
#include <sys/taskqueue.h>
#include <sys/sysctl.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
static int	lockf_debug = 0;	/* control debug output */
SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW, &lockf_debug, 0, "");

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

struct owner_vertex_list;

#define NOLOCKF (struct lockf_entry *)0
static void	 lf_init(void *);
static int	 lf_hash_owner(caddr_t, struct flock *, int);
static int	 lf_owner_matches(struct lock_owner *, caddr_t, struct flock *,
    int);
static struct lockf_entry *
		 lf_alloc_lock(struct lock_owner *);
static int	 lf_free_lock(struct lockf_entry *);
static int	 lf_clearlock(struct lockf *, struct lockf_entry *);
static int	 lf_overlaps(struct lockf_entry *, struct lockf_entry *);
static int	 lf_blocks(struct lockf_entry *, struct lockf_entry *);
static void	 lf_free_edge(struct lockf_edge *);
static struct lockf_edge *
		 lf_alloc_edge(void);
static void	 lf_alloc_vertex(struct lockf_entry *);
static int	 lf_add_edge(struct lockf_entry *, struct lockf_entry *);
static void	 lf_remove_edge(struct lockf_edge *);
static void	 lf_remove_outgoing(struct lockf_entry *);
static void	 lf_remove_incoming(struct lockf_entry *);
static int	 lf_add_outgoing(struct lockf *, struct lockf_entry *);
static int	 lf_add_incoming(struct lockf *, struct lockf_entry *);
static int	 lf_findoverlap(struct lockf_entry **, struct lockf_entry *,
    int);
static struct lockf_entry *
		 lf_getblock(struct lockf *, struct lockf_entry *);
static int	 lf_getlock(struct lockf *, struct lockf_entry *, struct flock *);
static void	 lf_insert_lock(struct lockf *, struct lockf_entry *);
static void	 lf_wakeup_lock(struct lockf *, struct lockf_entry *);
static void	 lf_update_dependancies(struct lockf *, struct lockf_entry *,
    int all, struct lockf_entry_list *);
static void	 lf_set_start(struct lockf *, struct lockf_entry *, off_t,
    struct lockf_entry_list *);
static void	 lf_set_end(struct lockf *, struct lockf_entry *, off_t,
    struct lockf_entry_list *);
static int	 lf_setlock(struct lockf *, struct lockf_entry *,
    struct vnode *, void **cookiep);
static int	 lf_cancel(struct lockf *, struct lockf_entry *, void *);
static void	 lf_split(struct lockf *, struct lockf_entry *,
    struct lockf_entry *, struct lockf_entry_list *);
static int	 graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *path);
static void	 graph_check(struct owner_graph *g, int checkorder);
static void	 graph_print_vertices(struct owner_vertex_list *set);
static int	 graph_delta_forward(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *delta);
static int	 graph_delta_backward(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *delta);
static int	 graph_add_indices(int *indices, int n,
    struct owner_vertex_list *set);
static int	 graph_assign_indices(struct owner_graph *g, int *indices,
    int nextunused, struct owner_vertex_list *set);
static int	 graph_add_edge(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y);
static void	 graph_remove_edge(struct owner_graph *g,
    struct owner_vertex *x, struct owner_vertex *y);
static struct owner_vertex *graph_alloc_vertex(struct owner_graph *g,
    struct lock_owner *lo);
static void	 graph_free_vertex(struct owner_graph *g,
    struct owner_vertex *v);
static struct owner_graph * graph_init(struct owner_graph *g);
static void	 lf_print(char *, struct lockf_entry *);
static void	 lf_printlist(char *, struct lockf_entry *);
static void	 lf_print_owner(struct lock_owner *);
/*
 * This structure is used to keep track of both local and remote lock
 * owners. The lf_owner field of the struct lockf_entry points back at
 * the lock owner structure. Each possible lock owner (local proc for
 * POSIX fcntl locks, local file for BSD flock locks or <pid,sysid>
 * pair for remote locks) is represented by a unique instance of
 * struct lock_owner.
 *
 * If a lock owner has a lock that blocks some other lock or a lock
 * that is waiting for some other lock, it also has a vertex in the
 * owner graph below.
 *
 * Locks:
 * (s)		locked by state->ls_lock
 * (S)		locked by lf_lock_states_lock
 * (l)		locked by lf_lock_owners_lock
 * (g)		locked by lf_owner_graph_lock
 * (c)		const until freeing
 */
#define	LOCK_OWNER_HASH_SIZE	256
struct lock_owner {
	LIST_ENTRY(lock_owner) lo_link; /* (l) hash chain */
	int	lo_refs;	    /* (l) Number of locks referring to this */
	int	lo_flags;	    /* (c) Flags passed to lf_advlock */
	caddr_t	lo_id;		    /* (c) Id value passed to lf_advlock */
	pid_t	lo_pid;		    /* (c) Process Id of the lock owner */
	int	lo_sysid;	    /* (c) System Id of the lock owner */
	struct owner_vertex *lo_vertex; /* (g) entry in deadlock graph */
};

LIST_HEAD(lock_owner_list, lock_owner);

static struct sx		lf_lock_states_lock;
static struct lockf_list	lf_lock_states; /* (S) */
static struct sx		lf_lock_owners_lock;
static struct lock_owner_list	lf_lock_owners[LOCK_OWNER_HASH_SIZE]; /* (l) */
/*
 * Structures for deadlock detection.
 *
 * We have two types of directed graph, the first is the set of locks,
 * both active and pending on a vnode. Within this graph, active locks
 * are terminal nodes in the graph (i.e. have no out-going
 * edges). Pending locks have out-going edges to each blocking active
 * lock that prevents the lock from being granted and also to each
 * older pending lock that would block them if it was active. The
 * graph for each vnode is naturally acyclic; new edges are only ever
 * added to or from new nodes (either new pending locks which only add
 * out-going edges or new active locks which only add in-coming edges),
 * therefore they cannot create loops in the lock graph.
 *
 * The second graph is a global graph of lock owners. Each lock owner
 * is a vertex in that graph and an edge is added to the graph
 * whenever an edge is added to a vnode graph, with end points
 * corresponding to the owner of the new pending lock and the owner of
 * the lock upon which it waits. In order to prevent deadlock, we only
 * add an edge to this graph if the new edge would not create a cycle.
 *
 * The lock owner graph is topologically sorted, i.e. if a node has
 * any outgoing edges, then it has an order strictly less than any
 * node to which it has an outgoing edge. We preserve this ordering
 * (and detect cycles) on edge insertion using Algorithm PK from the
 * paper "A Dynamic Topological Sort Algorithm for Directed Acyclic
 * Graphs" (ACM Journal of Experimental Algorithms, Vol 11, Article
 * No. 1.7).
 */
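/*
 * Illustrative example (hypothetical owners and orders, added for
 * exposition): suppose owners A, B and C currently have topological
 * orders 0, 1 and 2.  Adding the edge A->C (A waits on C) already
 * respects the order, so nothing is re-numbered.  Adding C->A instead
 * violates it; the code collects the vertices in the affected region
 * [A..C] that A can reach and those that reach C.  If A can reach C,
 * the new edge would close a cycle and the lock attempt fails with
 * EDEADLK; otherwise the region is re-numbered so that C sorts before
 * A, e.g. C=0, B=1, A=2.
 */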
struct owner_edge {
	LIST_ENTRY(owner_edge) e_outlink; /* (g) link from's out-edge list */
	LIST_ENTRY(owner_edge) e_inlink;  /* (g) link to's in-edge list */
	int		e_refs;		  /* (g) number of times added */
	struct owner_vertex *e_from;	  /* (c) out-going from here */
	struct owner_vertex *e_to;	  /* (c) in-coming to here */
};
LIST_HEAD(owner_edge_list, owner_edge);

struct owner_vertex {
	TAILQ_ENTRY(owner_vertex) v_link; /* (g) workspace for edge insertion */
	uint32_t	v_gen;		  /* (g) workspace for edge insertion */
	int		v_order;	  /* (g) order of vertex in graph */
	struct owner_edge_list v_outedges;/* (g) list of out-edges */
	struct owner_edge_list v_inedges; /* (g) list of in-edges */
	struct lock_owner *v_owner;	  /* (c) corresponding lock owner */
};
TAILQ_HEAD(owner_vertex_list, owner_vertex);

struct owner_graph {
	struct owner_vertex **g_vertices; /* (g) pointers to vertices */
	int		g_size;		  /* (g) number of vertices */
	int		g_space;	  /* (g) space allocated for vertices */
	int		*g_indexbuf;	  /* (g) workspace for loop detection */
	uint32_t	g_gen;		  /* (g) increment when re-ordering */
};

static struct sx		lf_owner_graph_lock;
static struct owner_graph	lf_owner_graph;
/*
 * Initialise various structures and locks.
 */
static void
lf_init(void *dummy)
{
	int i;

	sx_init(&lf_lock_states_lock, "lock states lock");
	LIST_INIT(&lf_lock_states);

	sx_init(&lf_lock_owners_lock, "lock owners lock");
	for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
		LIST_INIT(&lf_lock_owners[i]);

	sx_init(&lf_owner_graph_lock, "owner graph lock");
	graph_init(&lf_owner_graph);
}
SYSINIT(lf_init, SI_SUB_LOCK, SI_ORDER_FIRST, lf_init, NULL);
/*
 * Generate a hash value for a lock owner.
 */
static int
lf_hash_owner(caddr_t id, struct flock *fl, int flags)
{
	uint32_t h;

	if (flags & F_REMOTE) {
		h = HASHSTEP(0, fl->l_pid);
		h = HASHSTEP(h, fl->l_sysid);
	} else if (flags & F_FLOCK) {
		h = ((uintptr_t) id) >> 7;
	} else {
		struct proc *p = (struct proc *) id;
		h = HASHSTEP(0, p->p_pid);
	}

	return (h % LOCK_OWNER_HASH_SIZE);
}
/*
 * Return true if a lock owner matches the details passed to
 * lf_advlock.
 */
static int
lf_owner_matches(struct lock_owner *lo, caddr_t id, struct flock *fl,
    int flags)
{

	if (flags & F_REMOTE) {
		return lo->lo_pid == fl->l_pid
			&& lo->lo_sysid == fl->l_sysid;
	} else {
		return lo->lo_id == id;
	}
}
static struct lockf_entry *
lf_alloc_lock(struct lock_owner *lo)
{
	struct lockf_entry *lf;

	lf = malloc(sizeof(struct lockf_entry), M_LOCKF, M_WAITOK|M_ZERO);

#ifdef LOCKF_DEBUG
	if (lockf_debug & 4)
		printf("Allocated lock %p\n", lf);
#endif
	if (lo) {
		sx_xlock(&lf_lock_owners_lock);
		lo->lo_refs++;
		sx_xunlock(&lf_lock_owners_lock);
		lf->lf_owner = lo;
	}

	return (lf);
}
static int
lf_free_lock(struct lockf_entry *lock)
{

	KASSERT(lock->lf_refs > 0, ("lockf_entry negative ref count %p", lock));
	if (--lock->lf_refs > 0)
		return (0);
	/*
	 * Adjust the lock_owner reference count and
	 * reclaim the entry if this is the last lock
	 * for that owner.
	 */
	struct lock_owner *lo = lock->lf_owner;
	if (lo) {
		KASSERT(LIST_EMPTY(&lock->lf_outedges),
		    ("freeing lock with dependencies"));
		KASSERT(LIST_EMPTY(&lock->lf_inedges),
		    ("freeing lock with dependants"));
		sx_xlock(&lf_lock_owners_lock);
		KASSERT(lo->lo_refs > 0, ("lock owner refcount"));
		lo->lo_refs--;
		if (lo->lo_refs == 0) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 1)
				printf("lf_free_lock: freeing lock owner %p\n",
				    lo);
#endif
			if (lo->lo_vertex) {
				sx_xlock(&lf_owner_graph_lock);
				graph_free_vertex(&lf_owner_graph,
				    lo->lo_vertex);
				sx_xunlock(&lf_owner_graph_lock);
			}
			LIST_REMOVE(lo, lo_link);
			free(lo, M_LOCKF);
#ifdef LOCKF_DEBUG
			if (lockf_debug & 4)
				printf("Freed lock owner %p\n", lo);
#endif
		}
		sx_unlock(&lf_lock_owners_lock);
	}
	if ((lock->lf_flags & F_REMOTE) && lock->lf_vnode) {
		vrele(lock->lf_vnode);
		lock->lf_vnode = NULL;
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 4)
		printf("Freed lock %p\n", lock);
#endif
	free(lock, M_LOCKF);
	return (1);
}
/*
 * Advisory record locking support
 */
int
lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep,
    u_quad_t size)
{
	struct lockf *state, *freestate = NULL;
	struct flock *fl = ap->a_fl;
	struct lockf_entry *lock;
	struct vnode *vp = ap->a_vp;
	caddr_t id = ap->a_id;
	int flags = ap->a_flags;
	int hash;
	struct lock_owner *lo;
	off_t start, end, oadd;
	int error;

	/*
	 * Handle the F_UNLCKSYS case first - no need to mess about
	 * creating a lock owner for this one.
	 */
	if (ap->a_op == F_UNLCKSYS) {
		lf_clearremotesys(fl->l_sysid);
		return (0);
	}
	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		if (size > OFF_MAX ||
		    (fl->l_start > 0 && size > OFF_MAX - fl->l_start))
			return (EOVERFLOW);
		start = size + fl->l_start;
		break;

	default:
		return (EINVAL);
	}
	if (fl->l_len < 0) {
		if (start == 0)
			return (EINVAL);
		end = start - 1;
		start += fl->l_len;
		if (start < 0)
			return (EINVAL);
	} else if (fl->l_len == 0) {
		end = OFF_MAX;
	} else {
		oadd = fl->l_len - 1;
		if (oadd > OFF_MAX - start)
			return (EOVERFLOW);
		end = start + oadd;
	}
	/*
	 * Avoid the common case of unlocking when inode has no locks.
	 */
	if ((*statep) == NULL) {
		if (ap->a_op != F_SETLK) {
			fl->l_type = F_UNLCK;
			return (0);
		}
	}
	/*
	 * Map our arguments to an existing lock owner or create one
	 * if this is the first time we have seen this owner.
	 */
	hash = lf_hash_owner(id, fl, flags);
	sx_xlock(&lf_lock_owners_lock);
	LIST_FOREACH(lo, &lf_lock_owners[hash], lo_link)
		if (lf_owner_matches(lo, id, fl, flags))
			break;
	if (lo == NULL) {
		/*
		 * We initialise the lock with a reference
		 * count which matches the new lockf_entry
		 * structure created below.
		 */
		lo = malloc(sizeof(struct lock_owner), M_LOCKF,
		    M_WAITOK|M_ZERO);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 4)
			printf("Allocated lock owner %p\n", lo);
#endif

		lo->lo_refs = 1;
		lo->lo_flags = flags;
		lo->lo_id = id;
		if (flags & F_REMOTE) {
			lo->lo_pid = fl->l_pid;
			lo->lo_sysid = fl->l_sysid;
		} else if (flags & F_FLOCK) {
			lo->lo_pid = -1;
			lo->lo_sysid = 0;
		} else {
			struct proc *p = (struct proc *) id;
			lo->lo_pid = p->p_pid;
			lo->lo_sysid = 0;
		}
		lo->lo_vertex = NULL;

#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			printf("lf_advlockasync: new lock owner %p ", lo);
			lf_print_owner(lo);
			printf("\n");
		}
#endif

		LIST_INSERT_HEAD(&lf_lock_owners[hash], lo, lo_link);
	} else {
		/*
		 * We have seen this lock owner before, increase its
		 * reference count to account for the new lockf_entry
		 * structure we create below.
		 */
		lo->lo_refs++;
	}
	sx_xunlock(&lf_lock_owners_lock);
	/*
	 * Create the lockf structure. We initialise the lf_owner
	 * field here instead of in lf_alloc_lock() to avoid paying
	 * the lf_lock_owners_lock tax twice.
	 */
	lock = lf_alloc_lock(NULL);
	lock->lf_start = start;
	lock->lf_end = end;
	lock->lf_owner = lo;
	lock->lf_vnode = vp;
	if (flags & F_REMOTE) {
		/*
		 * For remote locks, the caller may release its ref to
		 * the vnode at any time - we have to ref it here to
		 * prevent it from being recycled unexpectedly.
		 */
		vref(vp);
	}

	/*
	 * XXX The problem is that VTOI is ufs specific, so it will
	 * break LOCKF_DEBUG for all other FS's other than UFS because
	 * it casts the vnode->data ptr to struct inode *.
	 */
/*	lock->lf_inode = VTOI(ap->a_vp); */
	lock->lf_inode = (struct inode *)0;
	lock->lf_type = fl->l_type;
	LIST_INIT(&lock->lf_outedges);
	LIST_INIT(&lock->lf_inedges);
	lock->lf_async_task = ap->a_task;
	lock->lf_flags = ap->a_flags;
	/*
	 * Do the requested operation. First find our state structure
	 * and create a new one if necessary - the caller's *statep
	 * variable and the state's ls_threads count is protected by
	 * the vnode interlock.
	 */
	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED) {
		VI_UNLOCK(vp);
		lf_free_lock(lock);
		return (ENOENT);
	}

	/*
	 * Allocate a state structure if necessary.
	 */
	state = *statep;
	if (state == NULL) {
		struct lockf *ls;

		VI_UNLOCK(vp);

		ls = malloc(sizeof(struct lockf), M_LOCKF, M_WAITOK|M_ZERO);
		sx_init(&ls->ls_lock, "ls_lock");
		LIST_INIT(&ls->ls_active);
		LIST_INIT(&ls->ls_pending);

		sx_xlock(&lf_lock_states_lock);
		LIST_INSERT_HEAD(&lf_lock_states, ls, ls_link);
		sx_xunlock(&lf_lock_states_lock);

		/*
		 * Cope if we lost a race with some other thread while
		 * trying to allocate memory.
		 */
		VI_LOCK(vp);
		if (vp->v_iflag & VI_DOOMED) {
			VI_UNLOCK(vp);
			sx_xlock(&lf_lock_states_lock);
			LIST_REMOVE(ls, ls_link);
			sx_xunlock(&lf_lock_states_lock);
			sx_destroy(&ls->ls_lock);
			free(ls, M_LOCKF);
			lf_free_lock(lock);
			return (ENOENT);
		}
		if ((*statep) == NULL) {
			state = *statep = ls;
			VI_UNLOCK(vp);
		} else {
			state = *statep;
			VI_UNLOCK(vp);

			sx_xlock(&lf_lock_states_lock);
			LIST_REMOVE(ls, ls_link);
			sx_xunlock(&lf_lock_states_lock);
			sx_destroy(&ls->ls_lock);
			free(ls, M_LOCKF);
		}
	} else {
		VI_UNLOCK(vp);
	}

	sx_xlock(&state->ls_lock);
	/*
	 * Recheck the doomed vnode after state->ls_lock is
	 * locked. lf_purgelocks() requires that no new threads add
	 * pending locks when vnode is marked by VI_DOOMED flag.
	 */
	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED) {
		VI_UNLOCK(vp);
		sx_xunlock(&state->ls_lock);
		lf_free_lock(lock);
		return (ENOENT);
	}
	VI_UNLOCK(vp);
	switch (ap->a_op) {
	case F_SETLK:
		error = lf_setlock(state, lock, vp, ap->a_cookiep);
		break;

	case F_UNLCK:
		error = lf_clearlock(state, lock);
		break;

	case F_GETLK:
		error = lf_getlock(state, lock, fl);
		break;

	case F_CANCEL:
		error = lf_cancel(state, lock, *ap->a_cookiep);
		break;

	default:
		error = EINVAL;
		break;
	}
	/*
	 * Check for some can't happen stuff. In this case, the active
	 * lock list becoming disordered or containing mutually
	 * blocking locks. We also check the pending list for locks
	 * which should be active (i.e. have no out-going edges).
	 */
	LIST_FOREACH(lock, &state->ls_active, lf_link) {
		struct lockf_entry *lf;
		if (LIST_NEXT(lock, lf_link))
			KASSERT((lock->lf_start
				<= LIST_NEXT(lock, lf_link)->lf_start),
			    ("locks disordered"));
		LIST_FOREACH(lf, &state->ls_active, lf_link) {
			if (lock == lf)
				break;
			KASSERT(!lf_blocks(lock, lf),
			    ("two conflicting active locks"));
			if (lock->lf_owner == lf->lf_owner)
				KASSERT(!lf_overlaps(lock, lf),
				    ("two overlapping locks from same owner"));
		}
	}
	LIST_FOREACH(lock, &state->ls_pending, lf_link) {
		KASSERT(!LIST_EMPTY(&lock->lf_outedges),
		    ("pending lock which should be active"));
	}
	sx_xunlock(&state->ls_lock);
	/*
	 * If we have removed the last active lock on the vnode and
	 * this is the last thread that was in-progress, we can free
	 * the state structure. We update the caller's pointer inside
	 * the vnode interlock but call free outside.
	 *
	 * XXX alternatively, keep the state structure around until
	 * the filesystem recycles - requires a callback from the
	 * filesystem.
	 */
	VI_LOCK(vp);
	if (LIST_EMPTY(&state->ls_active) && state->ls_threads == 0) {
		KASSERT(LIST_EMPTY(&state->ls_pending),
		    ("freeing state with pending locks"));
		freestate = state;
		*statep = NULL;
	}
	VI_UNLOCK(vp);

	if (freestate != NULL) {
		sx_xlock(&lf_lock_states_lock);
		LIST_REMOVE(freestate, ls_link);
		sx_xunlock(&lf_lock_states_lock);
		sx_destroy(&freestate->ls_lock);
		free(freestate, M_LOCKF);
	}

	if (error == EDOOFUS) {
		KASSERT(ap->a_op == F_SETLK, ("EDOOFUS"));
		fl->l_type = F_UNLCK;
		error = 0;
	}
	return (error);
}
int
lf_advlock(struct vop_advlock_args *ap, struct lockf **statep, u_quad_t size)
{
	struct vop_advlockasync_args a;

	a.a_vp = ap->a_vp;
	a.a_id = ap->a_id;
	a.a_op = ap->a_op;
	a.a_fl = ap->a_fl;
	a.a_flags = ap->a_flags;
	a.a_task = NULL;
	a.a_cookiep = NULL;

	return (lf_advlockasync(&a, statep, size));
}
void
lf_purgelocks(struct vnode *vp, struct lockf **statep)
{
	struct lockf *state;
	struct lockf_entry *lock, *nlock;

	/*
	 * For this to work correctly, the caller must ensure that no
	 * other threads enter the locking system for this vnode,
	 * e.g. by checking VI_DOOMED. We wake up any threads that are
	 * sleeping waiting for locks on this vnode and then free all
	 * the remaining locks.
	 */
	KASSERT(vp->v_iflag & VI_DOOMED,
	    ("lf_purgelocks: vp %p has not vgone yet", vp));
	state = *statep;
	if (state) {
		sx_xlock(&state->ls_lock);
		sx_xlock(&lf_owner_graph_lock);
		LIST_FOREACH_SAFE(lock, &state->ls_pending, lf_link, nlock) {
			LIST_REMOVE(lock, lf_link);
			lf_remove_outgoing(lock);
			lf_remove_incoming(lock);

			/*
			 * If it's an async lock, we can just free it
			 * here, otherwise we let the sleeping thread
			 * free it.
			 */
			if (lock->lf_async_task) {
				lf_free_lock(lock);
			} else {
				lock->lf_flags |= F_INTR;
				wakeup(lock);
			}
		}
		sx_xunlock(&lf_owner_graph_lock);
		sx_xunlock(&state->ls_lock);

		/*
		 * Wait for all other threads, sleeping and otherwise,
		 * to leave this state structure.
		 */
		while (state->ls_threads > 1)
			msleep(state, VI_MTX(vp), 0, "purgelocks", 0);

		/*
		 * We can just free all the active locks since they
		 * will have no dependencies (we removed them all
		 * above). We don't need to bother locking since we
		 * are the last thread using this state structure.
		 */
		KASSERT(LIST_EMPTY(&state->ls_pending),
		    ("lock pending for %p", state));
		LIST_FOREACH_SAFE(lock, &state->ls_active, lf_link, nlock) {
			LIST_REMOVE(lock, lf_link);
			lf_free_lock(lock);
		}
		sx_xlock(&lf_lock_states_lock);
		LIST_REMOVE(state, ls_link);
		sx_xunlock(&lf_lock_states_lock);
		sx_destroy(&state->ls_lock);
		free(state, M_LOCKF);
	}
}
/*
 * Return non-zero if locks 'x' and 'y' overlap.
 */
static int
lf_overlaps(struct lockf_entry *x, struct lockf_entry *y)
{

	return (x->lf_start <= y->lf_end && x->lf_end >= y->lf_start);
}

/*
 * Return non-zero if lock 'x' is blocked by lock 'y' (or vice versa).
 */
static int
lf_blocks(struct lockf_entry *x, struct lockf_entry *y)
{

	return x->lf_owner != y->lf_owner
		&& (x->lf_type == F_WRLCK || y->lf_type == F_WRLCK)
		&& lf_overlaps(x, y);
}
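/*
 * Worked example (hypothetical ranges, added for exposition): with
 * x = [10..19] and y = [15..30], lf_overlaps() is true because
 * 10 <= 30 and 19 >= 15.  The pair blocks only if the owners differ
 * and at least one of the two locks is exclusive (F_WRLCK); two
 * shared (F_RDLCK) locks never block each other, and overlapping
 * locks from the same owner are merged or split by lf_activate_lock()
 * instead of blocking.
 */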
/*
 * Allocate a lock edge from the free list
 */
static struct lockf_edge *
lf_alloc_edge(void)
{

	return (malloc(sizeof(struct lockf_edge), M_LOCKF, M_WAITOK|M_ZERO));
}

static void
lf_free_edge(struct lockf_edge *e)
{

	free(e, M_LOCKF);
}

/*
 * Ensure that the lock's owner has a corresponding vertex in the
 * owner graph.
 */
static void
lf_alloc_vertex(struct lockf_entry *lock)
{
	struct owner_graph *g = &lf_owner_graph;

	if (!lock->lf_owner->lo_vertex)
		lock->lf_owner->lo_vertex =
			graph_alloc_vertex(g, lock->lf_owner);
}
/*
 * Attempt to record an edge from lock x to lock y. Return EDEADLK if
 * the new edge would cause a cycle in the owner graph.
 */
static int
lf_add_edge(struct lockf_entry *x, struct lockf_entry *y)
{
	struct owner_graph *g = &lf_owner_graph;
	struct lockf_edge *e;
	int error;

	LIST_FOREACH(e, &x->lf_outedges, le_outlink)
		KASSERT(e->le_to != y, ("adding lock edge twice"));

	/*
	 * Make sure the two owners have entries in the owner graph.
	 */
	lf_alloc_vertex(x);
	lf_alloc_vertex(y);

	error = graph_add_edge(g, x->lf_owner->lo_vertex,
	    y->lf_owner->lo_vertex);
	if (error)
		return (error);

	e = lf_alloc_edge();
	LIST_INSERT_HEAD(&x->lf_outedges, e, le_outlink);
	LIST_INSERT_HEAD(&y->lf_inedges, e, le_inlink);
	e->le_from = x;
	e->le_to = y;

	return (0);
}

/*
 * Remove an edge from the lock graph.
 */
static void
lf_remove_edge(struct lockf_edge *e)
{
	struct owner_graph *g = &lf_owner_graph;
	struct lockf_entry *x = e->le_from;
	struct lockf_entry *y = e->le_to;

	graph_remove_edge(g, x->lf_owner->lo_vertex, y->lf_owner->lo_vertex);
	LIST_REMOVE(e, le_outlink);
	LIST_REMOVE(e, le_inlink);
	lf_free_edge(e);
}

/*
 * Remove all out-going edges from lock x.
 */
static void
lf_remove_outgoing(struct lockf_entry *x)
{
	struct lockf_edge *e;

	while ((e = LIST_FIRST(&x->lf_outedges)) != NULL) {
		lf_remove_edge(e);
	}
}

/*
 * Remove all in-coming edges from lock x.
 */
static void
lf_remove_incoming(struct lockf_entry *x)
{
	struct lockf_edge *e;

	while ((e = LIST_FIRST(&x->lf_inedges)) != NULL) {
		lf_remove_edge(e);
	}
}
/*
 * Walk the list of locks for the file and create an out-going edge
 * from lock to each blocking lock.
 */
static int
lf_add_outgoing(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;
	int error;

	LIST_FOREACH(overlap, &state->ls_active, lf_link) {
		/*
		 * We may assume that the active list is sorted by
		 * lf_start.
		 */
		if (overlap->lf_start > lock->lf_end)
			break;
		if (!lf_blocks(lock, overlap))
			continue;

		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(lock, overlap);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_outgoing(lock);
			return (error);
		}
	}

	/*
	 * We also need to add edges to sleeping locks that block
	 * us. This ensures that lf_wakeup_lock cannot grant two
	 * mutually blocking locks simultaneously and also enforces a
	 * 'first come, first served' fairness model. Note that this
	 * only happens if we are blocked by at least one active lock
	 * due to the call to lf_getblock in lf_setlock below.
	 */
	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
		if (!lf_blocks(lock, overlap))
			continue;
		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(lock, overlap);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_outgoing(lock);
			return (error);
		}
	}

	return (0);
}
/*
 * Walk the list of pending locks for the file and create an in-coming
 * edge from lock to each blocking lock.
 */
static int
lf_add_incoming(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;
	int error;

	LIST_FOREACH(overlap, &state->ls_pending, lf_link) {
		if (!lf_blocks(lock, overlap))
			continue;

		/*
		 * We've found a blocking lock. Add the corresponding
		 * edge to the graphs and see if it would cause a
		 * deadlock.
		 */
		error = lf_add_edge(overlap, lock);

		/*
		 * The only error that lf_add_edge returns is EDEADLK.
		 * Remove any edges we added and return the error.
		 */
		if (error) {
			lf_remove_incoming(lock);
			return (error);
		}
	}
	return (0);
}
/*
 * Insert lock into the active list, keeping list entries ordered by
 * increasing values of lf_start.
 */
static void
lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *lf, *lfprev;

	if (LIST_EMPTY(&state->ls_active)) {
		LIST_INSERT_HEAD(&state->ls_active, lock, lf_link);
		return;
	}

	lfprev = NULL;
	LIST_FOREACH(lf, &state->ls_active, lf_link) {
		if (lf->lf_start > lock->lf_start) {
			LIST_INSERT_BEFORE(lf, lock, lf_link);
			return;
		}
		lfprev = lf;
	}
	LIST_INSERT_AFTER(lfprev, lock, lf_link);
}
/*
 * Wake up a sleeping lock and remove it from the pending list now
 * that all its dependencies have been resolved. The caller should
 * arrange for the lock to be added to the active list, adjusting any
 * existing locks for the same owner as needed.
 */
static void
lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
{

	/*
	 * Remove from ls_pending list and wake up the caller
	 * or start the async notification, as appropriate.
	 */
	LIST_REMOVE(wakelock, lf_link);
#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_wakeup_lock: awakening", wakelock);
#endif /* LOCKF_DEBUG */
	if (wakelock->lf_async_task) {
		taskqueue_enqueue(taskqueue_thread, wakelock->lf_async_task);
	} else {
		wakeup(wakelock);
	}
}
/*
 * Re-check all dependent locks and remove edges to locks that we no
 * longer block. If 'all' is non-zero, the lock has been removed and
 * we must remove all the dependencies, otherwise it has simply been
 * reduced but remains active. Any pending locks which have been
 * unblocked are added to 'granted'.
 */
static void
lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
    struct lockf_entry_list *granted)
{
	struct lockf_edge *e, *ne;
	struct lockf_entry *deplock;

	LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
		deplock = e->le_from;
		if (all || !lf_blocks(lock, deplock)) {
			sx_xlock(&lf_owner_graph_lock);
			lf_remove_edge(e);
			sx_xunlock(&lf_owner_graph_lock);
			if (LIST_EMPTY(&deplock->lf_outedges)) {
				lf_wakeup_lock(state, deplock);
				LIST_INSERT_HEAD(granted, deplock, lf_link);
			}
		}
	}
}
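/*
 * Illustrative example (hypothetical owners and ranges, added for
 * exposition): owner A holds an exclusive lock on [0..99] and owner B
 * is pending on [50..59], so the lock graph has the edge B->A.  If A
 * unlocks entirely ('all' is non-zero) or shrinks its range to
 * [0..49], lf_update_dependancies() drops the B->A edge; B then has
 * no remaining out-edges, so it is woken via lf_wakeup_lock() and
 * appended to 'granted' for lf_activate_lock() to move onto the
 * active list.
 */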
/*
 * Set the start of an existing active lock, updating dependencies and
 * adding any newly woken locks to 'granted'.
 */
static void
lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
    struct lockf_entry_list *granted)
{

	KASSERT(new_start >= lock->lf_start, ("can't increase lock"));
	lock->lf_start = new_start;
	LIST_REMOVE(lock, lf_link);
	lf_insert_lock(state, lock);
	lf_update_dependancies(state, lock, FALSE, granted);
}

/*
 * Set the end of an existing active lock, updating dependencies and
 * adding any newly woken locks to 'granted'.
 */
static void
lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
    struct lockf_entry_list *granted)
{

	KASSERT(new_end <= lock->lf_end, ("can't increase lock"));
	lock->lf_end = new_end;
	lf_update_dependancies(state, lock, FALSE, granted);
}
/*
 * Add a lock to the active list, updating or removing any current
 * locks owned by the same owner and processing any pending locks that
 * become unblocked as a result. This code is also used for unlock
 * since the logic for updating existing locks is identical.
 *
 * As a result of processing the new lock, we may unblock existing
 * pending locks as a result of downgrading/unlocking. We simply
 * activate the newly granted locks by looping.
 *
 * Since the new lock already has its dependencies set up, we always
 * add it to the list (unless it's an unlock request). This may
 * fragment the lock list in some pathological cases but it's probably
 * not a real problem.
 */
static void
lf_activate_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap, *lf;
	struct lockf_entry_list granted;
	int ovcase;

	LIST_INIT(&granted);
	LIST_INSERT_HEAD(&granted, lock, lf_link);

	while (!LIST_EMPTY(&granted)) {
		lock = LIST_FIRST(&granted);
		LIST_REMOVE(lock, lf_link);

		/*
		 * Skip over locks owned by other processes. Handle
		 * any locks that overlap and are owned by ourselves.
		 */
		overlap = LIST_FIRST(&state->ls_active);
		for (;;) {
			ovcase = lf_findoverlap(&overlap, lock, SELF);

#ifdef LOCKF_DEBUG
			if (ovcase && (lockf_debug & 2)) {
				printf("lf_setlock: overlap %d", ovcase);
				lf_print("", overlap);
			}
#endif
			/*
			 * Six cases:
			 *	0) no overlap
			 *	1) overlap == lock
			 *	2) overlap contains lock
			 *	3) lock contains overlap
			 *	4) overlap starts before lock
			 *	5) overlap ends after lock
			 */
			switch (ovcase) {
			case 0: /* no overlap */
				break;

			case 1: /* overlap == lock */
				/*
				 * We have already setup the
				 * dependants for the new lock, taking
				 * into account a possible downgrade
				 * or unlock. Remove the old lock.
				 */
				LIST_REMOVE(overlap, lf_link);
				lf_update_dependancies(state, overlap, TRUE,
					&granted);
				lf_free_lock(overlap);
				break;

			case 2: /* overlap contains lock */
				/*
				 * Just split the existing lock.
				 */
				lf_split(state, overlap, lock, &granted);
				break;

			case 3: /* lock contains overlap */
				/*
				 * Delete the overlap and advance to
				 * the next entry in the list.
				 */
				lf = LIST_NEXT(overlap, lf_link);
				LIST_REMOVE(overlap, lf_link);
				lf_update_dependancies(state, overlap, TRUE,
					&granted);
				lf_free_lock(overlap);
				overlap = lf;
				continue;

			case 4: /* overlap starts before lock */
				/*
				 * Just update the overlap end and
				 * move on.
				 */
				lf_set_end(state, overlap, lock->lf_start - 1,
				    &granted);
				overlap = LIST_NEXT(overlap, lf_link);
				continue;

			case 5: /* overlap ends after lock */
				/*
				 * Change the start of overlap and
				 * re-insert.
				 */
				lf_set_start(state, overlap, lock->lf_end + 1,
				    &granted);
				break;
			}
			break;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			if (lock->lf_type != F_UNLCK)
				lf_print("lf_activate_lock: activated", lock);
			else
				lf_print("lf_activate_lock: unlocked", lock);
			lf_printlist("lf_activate_lock", lock);
		}
#endif /* LOCKF_DEBUG */
		if (lock->lf_type != F_UNLCK)
			lf_insert_lock(state, lock);
	}
}
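/*
 * Illustrative example (hypothetical owners and ranges, added for
 * exposition): suppose owner A holds an exclusive lock on [0..99] and
 * now downgrades [40..49] to a shared lock.  lf_findoverlap() reports
 * case 2 (the old lock contains the new one), so lf_split() trims the
 * old lock to [0..39], creates a new entry for [50..99], and the
 * shared [40..49] entry is inserted between them.  A pending shared
 * lock on [45..45] from owner B no longer has any blocking out-edges
 * after the split, so it lands on 'granted' and is activated on a
 * later pass of the loop above.
 */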
/*
 * Cancel a pending lock request, either as a result of a signal or a
 * cancel request for an async lock.
 */
static void
lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry_list granted;

	/*
	 * Note it is theoretically possible that cancelling this lock
	 * may allow some other pending lock to become
	 * active. Consider this case:
	 *
	 * Owner	Action		Result		Dependencies
	 *
	 * A:		lock [0..0]	succeeds
	 * B:		lock [2..2]	succeeds
	 * C:		lock [1..2]	blocked		C->B
	 * D:		lock [0..1]	blocked		C->B,D->A,D->C
	 * A:		unlock [0..0]			C->B,D->C
	 */
	LIST_REMOVE(lock, lf_link);

	/*
	 * Removing out-going edges is simple.
	 */
	sx_xlock(&lf_owner_graph_lock);
	lf_remove_outgoing(lock);
	sx_xunlock(&lf_owner_graph_lock);

	/*
	 * Removing in-coming edges may allow some other lock to
	 * become active - we use lf_update_dependancies to figure
	 * out what needs to be done.
	 */
	LIST_INIT(&granted);
	lf_update_dependancies(state, lock, TRUE, &granted);

	/*
	 * Feed any newly active locks to lf_activate_lock.
	 */
	while (!LIST_EMPTY(&granted)) {
		lock = LIST_FIRST(&granted);
		LIST_REMOVE(lock, lf_link);
		lf_activate_lock(state, lock);
	}
}
/*
 * Set a byte-range lock.
 */
static int
lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp,
    void **cookiep)
{
	static char lockstr[] = "lockf";
	int priority, error;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_setlock", lock);
#endif /* LOCKF_DEBUG */

	/*
	 * Set the priority
	 */
	priority = PLOCK;
	if (lock->lf_type == F_WRLCK)
		priority += 4;
	if (!(lock->lf_flags & F_NOINTR))
		priority |= PCATCH;
	/*
	 * Scan lock list for this file looking for locks that would block us.
	 */
	if (lf_getblock(state, lock)) {
		/*
		 * Free the structure and return if nonblocking.
		 */
		if ((lock->lf_flags & F_WAIT) == 0
		    && lock->lf_async_task == NULL) {
			lf_free_lock(lock);
			error = EAGAIN;
			goto out;
		}

		/*
		 * For flock type locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((lock->lf_flags & F_FLOCK) &&
		    lock->lf_type == F_WRLCK) {
			lock->lf_type = F_UNLCK;
			lf_activate_lock(state, lock);
			lock->lf_type = F_WRLCK;
		}

		/*
		 * We are blocked. Create edges to each blocking lock,
		 * checking for deadlock using the owner graph. For
		 * simplicity, we run deadlock detection for all
		 * locks, posix and otherwise.
		 */
		sx_xlock(&lf_owner_graph_lock);
		error = lf_add_outgoing(state, lock);
		sx_xunlock(&lf_owner_graph_lock);

		if (error) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 1)
				lf_print("lf_setlock: deadlock", lock);
#endif
			lf_free_lock(lock);
			goto out;
		}

		/*
		 * We have added edges to everything that blocks
		 * us. Sleep until they all go away.
		 */
		LIST_INSERT_HEAD(&state->ls_pending, lock, lf_link);
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			struct lockf_edge *e;
			LIST_FOREACH(e, &lock->lf_outedges, le_outlink) {
				lf_print("lf_setlock: blocking on", e->le_to);
				lf_printlist("lf_setlock", e->le_to);
			}
		}
#endif /* LOCKF_DEBUG */

		if ((lock->lf_flags & F_WAIT) == 0) {
			/*
			 * The caller requested async notification -
			 * this callback happens when the blocking
			 * lock is released, allowing the caller to
			 * make another attempt to take the lock.
			 */
			*cookiep = (void *) lock;
			error = EINPROGRESS;
			goto out;
		}

		lock->lf_refs++;
		error = sx_sleep(lock, &state->ls_lock, priority, lockstr, 0);
		if (lf_free_lock(lock)) {
			error = EDOOFUS;
			goto out;
		}

		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which cases we must
		 * remove our lock graph edges) and/or by another
		 * process releasing a lock (in which case our edges
		 * have already been removed and we have been moved to
		 * the active list). We may also have been woken by
		 * lf_purgelocks which we report to the caller as
		 * EINTR. In that case, lf_purgelocks will have
		 * removed our lock graph edges.
		 *
		 * Note that it is possible to receive a signal after
		 * we were successfully woken (and moved to the active
		 * list) but before we resumed execution. In this
		 * case, our lf_outedges list will be clear. We
		 * pretend there was no error.
		 *
		 * Note also, if we have been sleeping long enough, we
		 * may now have incoming edges from some newer lock
		 * which is waiting behind us in the queue.
		 */
		if (lock->lf_flags & F_INTR) {
			error = EINTR;
			lf_free_lock(lock);
			goto out;
		}
		if (LIST_EMPTY(&lock->lf_outedges)) {
			error = 0;
		} else {
			lf_cancel_lock(state, lock);
			goto out;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1) {
			lf_print("lf_setlock: granted", lock);
		}
#endif
		goto out;
	}
	/*
	 * It looks like we are going to grant the lock. First add
	 * edges from any currently pending lock that the new lock
	 * would block.
	 */
	sx_xlock(&lf_owner_graph_lock);
	error = lf_add_incoming(state, lock);
	sx_xunlock(&lf_owner_graph_lock);
	if (error) {
#ifdef LOCKF_DEBUG
		if (lockf_debug & 1)
			lf_print("lf_setlock: deadlock", lock);
#endif
		lf_free_lock(lock);
		goto out;
	}

	/*
	 * No blocks!!  Add the lock.  Note that we will
	 * downgrade or upgrade any overlapping locks this
	 * process already owns.
	 */
	lf_activate_lock(state, lock);
	error = 0;
out:
	return (error);
}
/*
 * Remove a byte-range lock on an inode.
 *
 * Generally, find the lock (or an overlap to that lock)
 * and remove it (or shrink it), then wakeup anyone we can.
 */
static int
lf_clearlock(struct lockf *state, struct lockf_entry *unlock)
{
	struct lockf_entry *overlap;

	overlap = LIST_FIRST(&state->ls_active);

	if (overlap == NOLOCKF)
		return (0);
#ifdef LOCKF_DEBUG
	if (unlock->lf_type != F_UNLCK)
		panic("lf_clearlock: bad type");
	if (lockf_debug & 1)
		lf_print("lf_clearlock", unlock);
#endif /* LOCKF_DEBUG */

	lf_activate_lock(state, unlock);

	return (0);
}
/*
 * Check whether there is a blocking lock, and if so return its
 * details in '*fl'.
 */
static int
lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl)
{
	struct lockf_entry *block;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 1)
		lf_print("lf_getlock", lock);
#endif /* LOCKF_DEBUG */

	if ((block = lf_getblock(state, lock))) {
		fl->l_type = block->lf_type;
		fl->l_whence = SEEK_SET;
		fl->l_start = block->lf_start;
		if (block->lf_end == OFF_MAX)
			fl->l_len = 0;
		else
			fl->l_len = block->lf_end - block->lf_start + 1;
		fl->l_pid = block->lf_owner->lo_pid;
		fl->l_sysid = block->lf_owner->lo_sysid;
	} else {
		fl->l_type = F_UNLCK;
	}
	return (0);
}
/*
 * Cancel an async lock request.
 */
static int
lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie)
{
	struct lockf_entry *reallock;

	/*
	 * We need to match this request with an existing lock
	 * request.
	 */
	LIST_FOREACH(reallock, &state->ls_pending, lf_link) {
		if ((void *) reallock == cookie) {
			/*
			 * Double-check that this lock looks right
			 * (maybe use a rolling ID for the cancel
			 * cookie instead?)
			 */
			if (!(reallock->lf_vnode == lock->lf_vnode
				&& reallock->lf_start == lock->lf_start
				&& reallock->lf_end == lock->lf_end)) {
				return (ENOENT);
			}

			/*
			 * Make sure this lock was async and then just
			 * remove it from its wait lists.
			 */
			if (!reallock->lf_async_task) {
				return (ENOENT);
			}

			/*
			 * Note that since any other thread must take
			 * state->ls_lock before it can possibly
			 * trigger the async callback, we are safe
			 * from a race with lf_wakeup_lock, i.e. we
			 * can free the lock (actually our caller does
			 * this).
			 */
			lf_cancel_lock(state, reallock);
			return (0);
		}
	}

	/*
	 * We didn't find a matching lock - not much we can do here.
	 */
	return (ENOENT);
}
/*
 * Walk the list of locks for an inode and
 * return the first blocking lock.
 */
static struct lockf_entry *
lf_getblock(struct lockf *state, struct lockf_entry *lock)
{
	struct lockf_entry *overlap;

	LIST_FOREACH(overlap, &state->ls_active, lf_link) {
		/*
		 * We may assume that the active list is sorted by
		 * lf_start.
		 */
		if (overlap->lf_start > lock->lf_end)
			break;
		if (!lf_blocks(lock, overlap))
			continue;
		return (overlap);
	}

	return (NOLOCKF);
}
/*
 * Walk the list of locks for an inode to find an overlapping lock (if
 * any) and return a classification of that overlap.
 *
 * Arguments:
 *	*overlap	The place in the lock list to start looking
 *	lock		The lock which is being tested
 *	type		Pass 'SELF' to test only locks with the same
 *			owner as lock, or 'OTHER' to test only locks
 *			with a different owner
 *
 * Returns one of six values:
 *	0) no overlap
 *	1) overlap == lock
 *	2) overlap contains lock
 *	3) lock contains overlap
 *	4) overlap starts before lock
 *	5) overlap ends after lock
 *
 * If there is an overlapping lock, '*overlap' is set to point at the
 * overlapping lock.
 *
 * NOTE: this returns only the FIRST overlapping lock. There
 *	 may be more than one.
 */
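/*
 * Worked example (hypothetical ranges, added for exposition): if the
 * lock under test covers [10..19], then an entry at [10..19] is case
 * 1, [5..25] is case 2, [12..15] is case 3, [5..12] is case 4 and
 * [15..30] is case 5; an entry at [0..9] or [20..29] does not overlap
 * at all, and the scan either continues past it or stops once an
 * entry's lf_start passes the end of the range being tested.
 */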
static int
lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type)
{
	struct lockf_entry *lf;
	off_t start, end;
	int res;

	if ((*overlap) == NOLOCKF) {
		return (0);
	}
#ifdef LOCKF_DEBUG
	if (lockf_debug & 2)
		lf_print("lf_findoverlap: looking for overlap in", lock);
#endif /* LOCKF_DEBUG */
	start = lock->lf_start;
	end = lock->lf_end;
	res = 0;
	while (*overlap) {
		lf = *overlap;
		if (lf->lf_start > end)
			break;
		if (((type & SELF) && lf->lf_owner != lock->lf_owner) ||
		    ((type & OTHERS) && lf->lf_owner == lock->lf_owner)) {
			*overlap = LIST_NEXT(lf, lf_link);
			continue;
		}
#ifdef LOCKF_DEBUG
		if (lockf_debug & 2)
			lf_print("\tchecking", lf);
#endif /* LOCKF_DEBUG */
		/*
		 * OK, check for overlap
		 *
		 * Six cases:
		 *	0) no overlap
		 *	1) overlap == lock
		 *	2) overlap contains lock
		 *	3) lock contains overlap
		 *	4) overlap starts before lock
		 *	5) overlap ends after lock
		 */
		if (start > lf->lf_end) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("no overlap\n");
#endif /* LOCKF_DEBUG */
			*overlap = LIST_NEXT(lf, lf_link);
			continue;
		}
		if (lf->lf_start == start && lf->lf_end == end) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap == lock\n");
#endif /* LOCKF_DEBUG */
			res = 1;
			break;
		}
		if (lf->lf_start <= start && lf->lf_end >= end) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap contains lock\n");
#endif /* LOCKF_DEBUG */
			res = 2;
			break;
		}
		if (start <= lf->lf_start && end >= lf->lf_end) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("lock contains overlap\n");
#endif /* LOCKF_DEBUG */
			res = 3;
			break;
		}
		if (lf->lf_start < start && lf->lf_end >= start) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap starts before lock\n");
#endif /* LOCKF_DEBUG */
			res = 4;
			break;
		}
		if (lf->lf_start > start && lf->lf_end > end) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 2)
				printf("overlap ends after lock\n");
#endif /* LOCKF_DEBUG */
			res = 5;
			break;
		}
		panic("lf_findoverlap: default");
	}
	return (res);
}
/*
 * Split the existing 'lock1', based on the extent of the lock
 * described by 'lock2'. The existing lock should cover 'lock2'
 * entirely.
 *
 * Any pending locks which have been unblocked are added to
 * 'granted'.
 */
static void
lf_split(struct lockf *state, struct lockf_entry *lock1,
    struct lockf_entry *lock2, struct lockf_entry_list *granted)
{
	struct lockf_entry *splitlock;

#ifdef LOCKF_DEBUG
	if (lockf_debug & 2) {
		lf_print("lf_split", lock1);
		lf_print("splitting from", lock2);
	}
#endif /* LOCKF_DEBUG */
	/*
	 * Check to see if we don't need to split at all.
	 */
	if (lock1->lf_start == lock2->lf_start) {
		lf_set_start(state, lock1, lock2->lf_end + 1, granted);
		return;
	}
	if (lock1->lf_end == lock2->lf_end) {
		lf_set_end(state, lock1, lock2->lf_start - 1, granted);
		return;
	}

	/*
	 * Make a new lock consisting of the last part of
	 * the encompassing lock.
	 */
	splitlock = lf_alloc_lock(lock1->lf_owner);
	memcpy(splitlock, lock1, sizeof *splitlock);
	splitlock->lf_refs = 1;
	if (splitlock->lf_flags & F_REMOTE)
		vref(splitlock->lf_vnode);

	/*
	 * This cannot cause a deadlock since any edges we would add
	 * to splitlock already exist in lock1. We must be sure to add
	 * necessary dependencies to splitlock before we reduce lock1
	 * otherwise we may accidentally grant a pending lock that
	 * was blocked by the tail end of lock1.
	 */
	splitlock->lf_start = lock2->lf_end + 1;
	LIST_INIT(&splitlock->lf_outedges);
	LIST_INIT(&splitlock->lf_inedges);
	sx_xlock(&lf_owner_graph_lock);
	lf_add_incoming(state, splitlock);
	sx_xunlock(&lf_owner_graph_lock);

	lf_set_end(state, lock1, lock2->lf_start - 1, granted);

	/*
	 * OK, now link it in
	 */
	lf_insert_lock(state, splitlock);
}
struct lockdesc {
	STAILQ_ENTRY(lockdesc) link;
	struct vnode *vp;
	struct flock fl;
};
STAILQ_HEAD(lockdesclist, lockdesc);

int
lf_iteratelocks_sysid(int sysid, lf_iterator *fn, void *arg)
{
	struct lockf *ls;
	struct lockf_entry *lf;
	struct lockdesc *ldesc;
	struct lockdesclist locks;
	int error;

	/*
	 * In order to keep the locking simple, we iterate over the
	 * active lock lists to build a list of locks that need
	 * releasing. We then call the iterator for each one in turn.
	 *
	 * We take an extra reference to the vnode for the duration to
	 * make sure it doesn't go away before we are finished.
	 */
	STAILQ_INIT(&locks);
	sx_xlock(&lf_lock_states_lock);
	LIST_FOREACH(ls, &lf_lock_states, ls_link) {
		sx_xlock(&ls->ls_lock);
		LIST_FOREACH(lf, &ls->ls_active, lf_link) {
			if (lf->lf_owner->lo_sysid != sysid)
				continue;

			ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
			    M_WAITOK);
			ldesc->vp = lf->lf_vnode;
			vref(ldesc->vp);
			ldesc->fl.l_start = lf->lf_start;
			if (lf->lf_end == OFF_MAX)
				ldesc->fl.l_len = 0;
			else
				ldesc->fl.l_len =
					lf->lf_end - lf->lf_start + 1;
			ldesc->fl.l_whence = SEEK_SET;
			ldesc->fl.l_type = F_UNLCK;
			ldesc->fl.l_pid = lf->lf_owner->lo_pid;
			ldesc->fl.l_sysid = sysid;
			STAILQ_INSERT_TAIL(&locks, ldesc, link);
		}
		sx_xunlock(&ls->ls_lock);
	}
	sx_xunlock(&lf_lock_states_lock);

	/*
	 * Call the iterator function for each lock in turn. If the
	 * iterator returns an error code, just free the rest of the
	 * lockdesc structures.
	 */
	error = 0;
	while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
		STAILQ_REMOVE_HEAD(&locks, link);
		if (!error)
			error = fn(ldesc->vp, &ldesc->fl, arg);
		vrele(ldesc->vp);
		free(ldesc, M_LOCKF);
	}

	return (error);
}
int
lf_iteratelocks_vnode(struct vnode *vp, lf_iterator *fn, void *arg)
{
	struct lockf *ls;
	struct lockf_entry *lf;
	struct lockdesc *ldesc;
	struct lockdesclist locks;
	int error;

	/*
	 * In order to keep the locking simple, we iterate over the
	 * active lock lists to build a list of locks that need
	 * releasing. We then call the iterator for each one in turn.
	 *
	 * We take an extra reference to the vnode for the duration to
	 * make sure it doesn't go away before we are finished.
	 */
	STAILQ_INIT(&locks);
	ls = vp->v_lockf;
	if (!ls)
		return (0);

	sx_xlock(&ls->ls_lock);
	LIST_FOREACH(lf, &ls->ls_active, lf_link) {
		ldesc = malloc(sizeof(struct lockdesc), M_LOCKF,
		    M_WAITOK);
		ldesc->vp = lf->lf_vnode;
		vref(ldesc->vp);
		ldesc->fl.l_start = lf->lf_start;
		if (lf->lf_end == OFF_MAX)
			ldesc->fl.l_len = 0;
		else
			ldesc->fl.l_len =
				lf->lf_end - lf->lf_start + 1;
		ldesc->fl.l_whence = SEEK_SET;
		ldesc->fl.l_type = F_UNLCK;
		ldesc->fl.l_pid = lf->lf_owner->lo_pid;
		ldesc->fl.l_sysid = lf->lf_owner->lo_sysid;
		STAILQ_INSERT_TAIL(&locks, ldesc, link);
	}
	sx_xunlock(&ls->ls_lock);

	/*
	 * Call the iterator function for each lock in turn. If the
	 * iterator returns an error code, just free the rest of the
	 * lockdesc structures.
	 */
	error = 0;
	while ((ldesc = STAILQ_FIRST(&locks)) != NULL) {
		STAILQ_REMOVE_HEAD(&locks, link);
		if (!error)
			error = fn(ldesc->vp, &ldesc->fl, arg);
		vrele(ldesc->vp);
		free(ldesc, M_LOCKF);
	}

	return (error);
}
static int
lf_clearremotesys_iterator(struct vnode *vp, struct flock *fl, void *arg)
{

	VOP_ADVLOCK(vp, 0, F_UNLCK, fl, F_REMOTE);
	return (0);
}

void
lf_clearremotesys(int sysid)
{

	KASSERT(sysid != 0, ("Can't clear local locks with F_UNLCKSYS"));
	lf_iteratelocks_sysid(sysid, lf_clearremotesys_iterator, NULL);
}

int
lf_countlocks(int sysid)
{
	int i;
	struct lock_owner *lo;
	int count;

	count = 0;
	sx_xlock(&lf_lock_owners_lock);
	for (i = 0; i < LOCK_OWNER_HASH_SIZE; i++)
		LIST_FOREACH(lo, &lf_lock_owners[i], lo_link)
			if (lo->lo_sysid == sysid)
				count += lo->lo_refs;
	sx_xunlock(&lf_lock_owners_lock);

	return (count);
}
/*
 * Return non-zero if y is reachable from x using a brute force
 * search. If reachable and path is non-null, return the route taken
 * in *path.
 */
static int
graph_reaches(struct owner_vertex *x, struct owner_vertex *y,
    struct owner_vertex_list *path)
{
	struct owner_edge *e;

	if (x == y) {
		if (path)
			TAILQ_INSERT_HEAD(path, x, v_link);
		return (1);
	}

	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
		if (graph_reaches(e->e_to, y, path)) {
			if (path)
				TAILQ_INSERT_HEAD(path, x, v_link);
			return (1);
		}
	}
	return (0);
}
/*
 * Perform consistency checks on the graph. Make sure the values of
 * v_order are correct. If checkorder is non-zero, check no vertex can
 * reach any other vertex with a smaller order.
 */
static void
graph_check(struct owner_graph *g, int checkorder)
{
	int i, j;

	for (i = 0; i < g->g_size; i++) {
		if (!g->g_vertices[i]->v_owner)
			continue;
		KASSERT(g->g_vertices[i]->v_order == i,
		    ("lock graph vertices disordered"));
		if (checkorder) {
			for (j = 0; j < i; j++) {
				if (!g->g_vertices[j]->v_owner)
					continue;
				KASSERT(!graph_reaches(g->g_vertices[i],
					g->g_vertices[j], NULL),
				    ("lock graph vertices disordered"));
			}
		}
	}
}
static void
graph_print_vertices(struct owner_vertex_list *set)
{
	struct owner_vertex *v;

	printf("{ ");
	TAILQ_FOREACH(v, set, v_link) {
		printf("%d:", v->v_order);
		lf_print_owner(v->v_owner);
		if (TAILQ_NEXT(v, v_link))
			printf(", ");
	}
	printf(" }\n");
}
/*
 * Calculate the sub-set of vertices v from the affected region [y..x]
 * where v is reachable from y. Return -1 if a loop was detected
 * (i.e. x is reachable from y), otherwise the number of vertices in
 * this subset.
 */
static int
graph_delta_forward(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y, struct owner_vertex_list *delta)
{
	uint32_t gen;
	struct owner_vertex *v;
	struct owner_edge *e;
	int n;

	/*
	 * We start with a set containing just y. Then for each vertex
	 * v in the set so far unprocessed, we add each vertex that v
	 * has an out-edge to and that is within the affected region
	 * [y..x]. If we see the vertex x on our travels, stop
	 * immediately.
	 */
	TAILQ_INIT(delta);
	TAILQ_INSERT_TAIL(delta, y, v_link);
	v = y;
	n = 1;
	gen = g->g_gen;
	while (v) {
		LIST_FOREACH(e, &v->v_outedges, e_outlink) {
			if (e->e_to == x)
				return (-1);
			if (e->e_to->v_order < x->v_order
			    && e->e_to->v_gen != gen) {
				e->e_to->v_gen = gen;
				TAILQ_INSERT_TAIL(delta, e->e_to, v_link);
				n++;
			}
		}
		v = TAILQ_NEXT(v, v_link);
	}

	return (n);
}
/*
 * Calculate the sub-set of vertices v from the affected region [y..x]
 * where v reaches x. Return the number of vertices in this subset.
 */
static int
graph_delta_backward(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y, struct owner_vertex_list *delta)
{
	uint32_t gen;
	struct owner_vertex *v;
	struct owner_edge *e;
	int n;

	/*
	 * We start with a set containing just x. Then for each vertex
	 * v in the set so far unprocessed, we add each vertex that v
	 * has an in-edge from and that is within the affected region
	 * [y..x].
	 */
	TAILQ_INIT(delta);
	TAILQ_INSERT_TAIL(delta, x, v_link);
	v = x;
	n = 1;
	gen = g->g_gen;
	while (v) {
		LIST_FOREACH(e, &v->v_inedges, e_inlink) {
			if (e->e_from->v_order > y->v_order
			    && e->e_from->v_gen != gen) {
				e->e_from->v_gen = gen;
				TAILQ_INSERT_HEAD(delta, e->e_from, v_link);
				n++;
			}
		}
		v = TAILQ_PREV(v, owner_vertex_list, v_link);
	}

	return (n);
}
static int
graph_add_indices(int *indices, int n, struct owner_vertex_list *set)
{
	struct owner_vertex *v;
	int i, j;

	TAILQ_FOREACH(v, set, v_link) {
		for (i = n;
		     i > 0 && indices[i - 1] > v->v_order; i--)
			;
		for (j = n - 1; j >= i; j--)
			indices[j + 1] = indices[j];
		indices[i] = v->v_order;
		n++;
	}

	return (n);
}
static int
graph_assign_indices(struct owner_graph *g, int *indices, int nextunused,
    struct owner_vertex_list *set)
{
	struct owner_vertex *v, *vlowest;

	while (!TAILQ_EMPTY(set)) {
		vlowest = NULL;
		TAILQ_FOREACH(v, set, v_link) {
			if (!vlowest || v->v_order < vlowest->v_order)
				vlowest = v;
		}
		TAILQ_REMOVE(set, vlowest, v_link);
		vlowest->v_order = indices[nextunused];
		g->g_vertices[vlowest->v_order] = vlowest;
		nextunused++;
	}

	return (nextunused);
}
static int
graph_add_edge(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y)
{
	struct owner_edge *e;
	struct owner_vertex_list deltaF, deltaB;
	int nF, nB, n, vi, i;
	int *indices;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
		if (e->e_to == y) {
			e->e_refs++;
			return (0);
		}
	}

#ifdef LOCKF_DEBUG
	if (lockf_debug & 8) {
		printf("adding edge %d:", x->v_order);
		lf_print_owner(x->v_owner);
		printf(" -> %d:", y->v_order);
		lf_print_owner(y->v_owner);
		printf("\n");
	}
#endif
	if (y->v_order < x->v_order) {
		/*
		 * The new edge violates the order. First find the set
		 * of affected vertices reachable from y (deltaF) and
		 * the set of affected vertices that reach x
		 * (deltaB), using the graph generation number to
		 * detect whether we have visited a given vertex
		 * already. We re-order the graph so that each vertex
		 * in deltaB appears before each vertex in deltaF.
		 *
		 * If x is a member of deltaF, then the new edge would
		 * create a cycle. Otherwise, we may assume that
		 * deltaF and deltaB are disjoint.
		 */
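		/*
		 * Illustrative example (hypothetical orders, added for
		 * exposition): with orders w=1, a=2, b=3, x=4 and a
		 * new edge x->w, the affected region is [w..x].  If
		 * deltaF = {w, b} (reachable from w) and
		 * deltaB = {a, x} (reaching x), the four order values
		 * {1,2,3,4} are handed out to deltaB first and then to
		 * deltaF, giving a=1, x=2, w=3, b=4, which restores
		 * the topological order with x before w.
		 */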
		if (g->g_gen == 0) {
			/*
			 * The generation number has wrapped - reset
			 * all the generation numbers.
			 */
			for (vi = 0; vi < g->g_size; vi++) {
				g->g_vertices[vi]->v_gen = 0;
			}
		}
		g->g_gen++;

		nF = graph_delta_forward(g, x, y, &deltaF);
		if (nF < 0) {
#ifdef LOCKF_DEBUG
			if (lockf_debug & 8) {
				struct owner_vertex_list path;
				printf("deadlock: ");
				TAILQ_INIT(&path);
				graph_reaches(y, x, &path);
				graph_print_vertices(&path);
			}
#endif
			return (EDEADLK);
		}

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("re-ordering graph vertices\n");
			printf("deltaF = ");
			graph_print_vertices(&deltaF);
		}
#endif

		nB = graph_delta_backward(g, x, y, &deltaB);

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("deltaB = ");
			graph_print_vertices(&deltaB);
		}
#endif

		/*
		 * We first build a set of vertex indices (vertex
		 * order values) that we may use, then we re-assign
		 * orders first to those vertices in deltaB, then to
		 * deltaF. Note that the contents of deltaF and deltaB
		 * may be partially disordered - we perform an
		 * insertion sort while building our index set.
		 */
		indices = g->g_indexbuf;
		n = graph_add_indices(indices, 0, &deltaF);
		graph_add_indices(indices, n, &deltaB);

		/*
		 * We must also be sure to maintain the relative
		 * ordering of deltaF and deltaB when re-assigning
		 * vertices. We do this by iteratively removing the
		 * lowest ordered element from the set and assigning
		 * it the next value from our new ordering.
		 */
		i = graph_assign_indices(g, indices, 0, &deltaB);
		graph_assign_indices(g, indices, i, &deltaF);

#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			struct owner_vertex_list set;
			TAILQ_INIT(&set);
			for (i = 0; i < nB + nF; i++)
				TAILQ_INSERT_TAIL(&set,
				    g->g_vertices[indices[i]], v_link);
			printf("new ordering = ");
			graph_print_vertices(&set);
		}
#endif
	}

	KASSERT(x->v_order < y->v_order, ("Failed to re-order graph"));

#ifdef LOCKF_DEBUG
	if (lockf_debug & 8) {
		graph_check(g, TRUE);
	}
#endif

	e = malloc(sizeof(struct owner_edge), M_LOCKF, M_WAITOK);

	LIST_INSERT_HEAD(&x->v_outedges, e, e_outlink);
	LIST_INSERT_HEAD(&y->v_inedges, e, e_inlink);
	e->e_refs = 1;
	e->e_from = x;
	e->e_to = y;

	return (0);
}
/*
 * Remove an edge x->y from the graph.
 */
static void
graph_remove_edge(struct owner_graph *g, struct owner_vertex *x,
    struct owner_vertex *y)
{
	struct owner_edge *e;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	LIST_FOREACH(e, &x->v_outedges, e_outlink) {
		if (e->e_to == y)
			break;
	}
	KASSERT(e, ("Removing non-existent edge from deadlock graph"));

	e->e_refs--;
	if (e->e_refs == 0) {
#ifdef LOCKF_DEBUG
		if (lockf_debug & 8) {
			printf("removing edge %d:", x->v_order);
			lf_print_owner(x->v_owner);
			printf(" -> %d:", y->v_order);
			lf_print_owner(y->v_owner);
			printf("\n");
		}
#endif
		LIST_REMOVE(e, e_outlink);
		LIST_REMOVE(e, e_inlink);
		free(e, M_LOCKF);
	}
}
/*
 * Allocate a vertex from the free list. Return ENOMEM if there are
 * none.
 */
static struct owner_vertex *
graph_alloc_vertex(struct owner_graph *g, struct lock_owner *lo)
{
	struct owner_vertex *v;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	v = malloc(sizeof(struct owner_vertex), M_LOCKF, M_WAITOK);
	if (g->g_size == g->g_space) {
		g->g_vertices = realloc(g->g_vertices,
		    2 * g->g_space * sizeof(struct owner_vertex *),
		    M_LOCKF, M_WAITOK);
		free(g->g_indexbuf, M_LOCKF);
		g->g_indexbuf = malloc(2 * g->g_space * sizeof(int),
		    M_LOCKF, M_WAITOK);
		g->g_space = 2 * g->g_space;
	}
	v->v_order = g->g_size;
	v->v_gen = g->g_gen;
	g->g_vertices[g->g_size] = v;
	g->g_size++;

	LIST_INIT(&v->v_outedges);
	LIST_INIT(&v->v_inedges);
	v->v_owner = lo;

	return (v);
}
static void
graph_free_vertex(struct owner_graph *g, struct owner_vertex *v)
{
	struct owner_vertex *w;
	int i;

	sx_assert(&lf_owner_graph_lock, SX_XLOCKED);

	KASSERT(LIST_EMPTY(&v->v_outedges), ("Freeing vertex with edges"));
	KASSERT(LIST_EMPTY(&v->v_inedges), ("Freeing vertex with edges"));

	/*
	 * Remove from the graph's array and close up the gap,
	 * renumbering the other vertices.
	 */
	for (i = v->v_order + 1; i < g->g_size; i++) {
		w = g->g_vertices[i];
		w->v_order--;
		g->g_vertices[i - 1] = w;
	}
	g->g_size--;

	free(v, M_LOCKF);
}
static struct owner_graph *
graph_init(struct owner_graph *g)
{

	g->g_vertices = malloc(10 * sizeof(struct owner_vertex *),
	    M_LOCKF, M_WAITOK);
	g->g_size = 0;
	g->g_space = 10;
	g->g_indexbuf = malloc(g->g_space * sizeof(int), M_LOCKF, M_WAITOK);
	g->g_gen = 0;

	return (g);
}
#ifdef LOCKF_DEBUG
/*
 * Print description of a lock owner
 */
static void
lf_print_owner(struct lock_owner *lo)
{

	if (lo->lo_flags & F_REMOTE) {
		printf("remote pid %d, system %d",
		    lo->lo_pid, lo->lo_sysid);
	} else if (lo->lo_flags & F_FLOCK) {
		printf("file %p", lo->lo_id);
	} else {
		printf("local pid %d", lo->lo_pid);
	}
}
static void
lf_print(char *tag, struct lockf_entry *lock)
{

	printf("%s: lock %p for ", tag, (void *)lock);
	lf_print_owner(lock->lf_owner);
	if (lock->lf_inode != (struct inode *)0)
		printf(" in ino %ju on dev <%s>,",
		    (uintmax_t)lock->lf_inode->i_number,
		    devtoname(lock->lf_inode->i_dev));
	printf(" %s, start %jd, end ",
	    lock->lf_type == F_RDLCK ? "shared" :
	    lock->lf_type == F_WRLCK ? "exclusive" :
	    lock->lf_type == F_UNLCK ? "unlock" : "unknown",
	    (intmax_t)lock->lf_start);
	if (lock->lf_end == OFF_MAX)
		printf("EOF");
	else
		printf("%jd", (intmax_t)lock->lf_end);
	if (!LIST_EMPTY(&lock->lf_outedges))
		printf(" block %p\n",
		    (void *)LIST_FIRST(&lock->lf_outedges)->le_to);
	else
		printf("\n");
}
static void
lf_printlist(char *tag, struct lockf_entry *lock)
{
	struct lockf_entry *lf, *blk;
	struct lockf_edge *e;

	if (lock->lf_inode == (struct inode *)0)
		return;

	printf("%s: Lock list for ino %ju on dev <%s>:\n",
	    tag, (uintmax_t)lock->lf_inode->i_number,
	    devtoname(lock->lf_inode->i_dev));
	LIST_FOREACH(lf, &lock->lf_vnode->v_lockf->ls_active, lf_link) {
		printf("\tlock %p for ", (void *)lf);
		lf_print_owner(lock->lf_owner);
		printf(", %s, start %jd, end %jd",
		    lf->lf_type == F_RDLCK ? "shared" :
		    lf->lf_type == F_WRLCK ? "exclusive" :
		    lf->lf_type == F_UNLCK ? "unlock" :
		    "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end);
		LIST_FOREACH(e, &lf->lf_outedges, le_outlink) {
			blk = e->le_to;
			printf("\n\t\tlock request %p for ", (void *)blk);
			lf_print_owner(blk->lf_owner);
			printf(", %s, start %jd, end %jd",
			    blk->lf_type == F_RDLCK ? "shared" :
			    blk->lf_type == F_WRLCK ? "exclusive" :
			    blk->lf_type == F_UNLCK ? "unlock" :
			    "unknown", (intmax_t)blk->lf_start,
			    (intmax_t)blk->lf_end);
			if (!LIST_EMPTY(&blk->lf_inedges))
				panic("lf_printlist: bad list");
		}
		printf("\n");
	}
}
#endif /* LOCKF_DEBUG */