/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 * Copyright (c) 2006 Matthew Dillon <dillon@backplane.com>.  All rights reserved.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_lockf.c	8.3 (Berkeley) 1/6/94
 * $FreeBSD: src/sys/kern/kern_lockf.c,v 1.25 1999/11/16 16:28:56 phk Exp $
 * $DragonFly: src/sys/kern/kern_lockf.c,v 1.36 2006/12/23 01:35:04 swildner Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/resourcevar.h>

#include <sys/lockf.h>
#include <machine/limits.h>	/* for LLONG_MAX */
#include <machine/stdarg.h>
#ifdef INVARIANTS
int lf_global_counter = 0;
#endif

#ifdef LOCKF_DEBUG
int lf_print_ranges = 0;

static void _lf_print_lock(const struct lockf *);
static void _lf_printf(const char *, ...);

#define	lf_print_lock(lock)	if (lf_print_ranges) _lf_print_lock(lock)
#define	lf_printf(ctl, args...)	if (lf_print_ranges) _lf_printf(ctl, args)
#else
#define	lf_print_lock(lock)
#define	lf_printf(ctl, args...)
#endif

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
static void	lf_wakeup(struct lockf *, off_t, off_t);

static struct lockf_range *lf_alloc_range(void);
static void	lf_create_range(struct lockf_range *, struct proc *, int, int,
				off_t, off_t);
static void	lf_insert(struct lockf_range_list *list,
				struct lockf_range *elm,
				struct lockf_range *insert_point);
static void	lf_destroy_range(struct lockf_range *);

static int	lf_setlock(struct lockf *, struct proc *, int, int,
				off_t, off_t);
static int	lf_getlock(struct flock *, struct lockf *, struct proc *,
				int, int, off_t, off_t);

static int	lf_count_change(struct proc *, int);
/*
 * Return TRUE (non-zero) if the type and posix flags match.
 */
static __inline
int
lf_match(struct lockf_range *range, int type, int flags)
{
	if (range->lf_type != type)
		return(0);
	if ((range->lf_flags ^ flags) & F_POSIX)
		return(0);
	return(1);
}
/*
 * Check whether range and [start, end] overlap.
 */
static __inline
int
lf_overlap(const struct lockf_range *range, off_t start, off_t end)
{
	if (range->lf_start >= start && range->lf_start <= end)
		return(1);
	else if (start >= range->lf_start && start <= range->lf_end)
		return(1);
	else
		return(0);
}
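/*
 * Illustration of the two overlap tests above: for an existing range
 * [10,20] and a request [15,30], the first test fails (10 does not fall
 * within [15,30]) but the second succeeds (15 falls within [10,20]), so
 * the ranges overlap.  Disjoint ranges such as [10,20] and [25,30] fail
 * both tests.
 */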
/*
 * Change the POSIX lock accounting for the given process.
 */
void
lf_count_adjust(struct proc *p, int increase)
{
	struct uidinfo *uip;

	KKASSERT(p != NULL);

	uip = p->p_ucred->cr_uidinfo;

	if (increase)
		uip->ui_posixlocks += p->p_numposixlocks;
	else
		uip->ui_posixlocks -= p->p_numposixlocks;

	KASSERT(uip->ui_posixlocks >= 0,
		("Negative number of POSIX locks held by %s user: %d.",
		 increase ? "new" : "old", uip->ui_posixlocks));
}
static int
lf_count_change(struct proc *owner, int diff)
{
	struct uidinfo *uip;
	int max;

	/* we might actually not have a process context */
	if (owner == NULL)
		return(0);

	uip = owner->p_ucred->cr_uidinfo;

	max = MIN(owner->p_rlimit[RLIMIT_POSIXLOCKS].rlim_cur,
		  maxposixlocksperuid);
	if (diff > 0 && owner->p_ucred->cr_uid != 0 && max != -1 &&
	    uip->ui_posixlocks >= max) {
		return(1);
	}

	uip->ui_posixlocks += diff;
	owner->p_numposixlocks += diff;

	KASSERT(uip->ui_posixlocks >= 0,
		("Negative number of POSIX locks held by user: %d.",
		 uip->ui_posixlocks));
	KASSERT(owner->p_numposixlocks >= 0,
		("Negative number of POSIX locks held by proc: %d.",
		 owner->p_numposixlocks));

	return(0);
}
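/*
 * Illustration (values hypothetical): if the effective per-uid cap
 * MIN(rlim_cur, maxposixlocksperuid) works out to 100 and the uid already
 * holds 100 POSIX locks, a request to add one more (diff > 0) is refused
 * here and the caller reports ENOLCK.  Root (cr_uid == 0) and a cap of -1
 * bypass the check.
 */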
/*
 * Advisory record locking support
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf *lock, u_quad_t size)
{
	struct flock *fl = ap->a_fl;
	struct proc *owner;
	off_t start, end;
	int type, flags, error;
	lwkt_tokref ilock;
	/*
	 * Convert the flock structure into a start and end.
	 */
	switch (fl->l_whence) {
	case SEEK_SET:
	case SEEK_CUR:
		/*
		 * Caller is responsible for adding any necessary offset
		 * when SEEK_CUR is used.
		 */
		start = fl->l_start;
		break;

	case SEEK_END:
		start = size + fl->l_start;
		break;

	default:
		return(EINVAL);
	}
	if (start < 0)
		return(EINVAL);

	flags = ap->a_flags;
	if (fl->l_len == 0) {
		flags |= F_NOEND;
		end = LLONG_MAX;
	} else {
		end = start + fl->l_len - 1;
		if (end < start)
			return(EINVAL);
	}
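	/*
	 * Conversion example: l_whence = SEEK_SET, l_start = 100 and
	 * l_len = 50 yield the byte range [100,149]; l_len = 0 means
	 * "to end of file", which becomes [start, LLONG_MAX] with F_NOEND
	 * set so later clipping knows the range is open-ended.
	 */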
	type = fl->l_type;
	/*
	 * This isn't really correct for flock-style locks,
	 * but the current handling is somewhat broken anyway.
	 */
	owner = (struct proc *)ap->a_id;

	/*
	 * Do the requested operation.
	 */
	lwkt_gettoken(&ilock, lwkt_token_pool_get(lock));

	if (lock->init_done == 0) {
		TAILQ_INIT(&lock->lf_range);
		TAILQ_INIT(&lock->lf_blocked);
		lock->init_done = 1;
	}
	switch(ap->a_op) {
	case F_SETLK:
		/*
		 * NOTE: It is possible for both lf_range and lf_blocked to
		 * be empty if we block and get woken up, but another process
		 * then gets in and issues an unlock.  So VMAYHAVELOCKS must
		 * be set after the lf_setlock() operation completes rather
		 * than before.
		 */
		error = lf_setlock(lock, owner, type, flags, start, end);
		ap->a_vp->v_flag |= VMAYHAVELOCKS;
		break;

	case F_UNLCK:
		error = lf_setlock(lock, owner, type, flags, start, end);
		if (TAILQ_EMPTY(&lock->lf_range) &&
		    TAILQ_EMPTY(&lock->lf_blocked)) {
			ap->a_vp->v_flag &= ~VMAYHAVELOCKS;
		}
		break;

	case F_GETLK:
		error = lf_getlock(fl, lock, owner, type, flags, start, end);
		break;

	default:
		error = EINVAL;
		break;
	}
	lwkt_reltoken(&ilock);
	return(error);
}
static int
lf_setlock(struct lockf *lock, struct proc *owner, int type, int flags,
	   off_t start, off_t end)
{
	struct lockf_range *range;
	struct lockf_range *brange;
	struct lockf_range *next;
	struct lockf_range *first_match;
	struct lockf_range *last_match;
	struct lockf_range *insert_point;
	struct lockf_range *new_range1;
	struct lockf_range *new_range2;
	int wakeup_needed;
	int double_clip;
	int error = 0;
	int count;
	struct lockf_range_list deadlist;

	new_range1 = NULL;
	new_range2 = NULL;
	count = 0;
restart:
	/*
	 * Preallocate two ranges so we don't have to worry about blocking
	 * in the middle of the lock code.
	 */
	if (new_range1 == NULL)
		new_range1 = lf_alloc_range();
	if (new_range2 == NULL)
		new_range2 = lf_alloc_range();
	first_match = NULL;
	last_match = NULL;
	insert_point = NULL;
	wakeup_needed = 0;

	lf_print_lock(lock);
	/*
	 * Locate the insertion point for the new lock (the first range
	 * with an lf_start >= start).
	 *
	 * Locate the first and last ranges owned by us that overlap
	 * the requested range.
	 */
	TAILQ_FOREACH(range, &lock->lf_range, lf_link) {
		if (insert_point == NULL && range->lf_start >= start)
			insert_point = range;

		/*
		 * Skip non-overlapping locks.  Locks are sorted by lf_start
		 * so we can terminate the search when lf_start exceeds the
		 * requested range (insert_point is still guaranteed to be
		 * set properly).
		 */
		if (range->lf_end < start)
			continue;
		if (range->lf_start > end) {
			range = NULL;
			break;
		}

		/*
		 * Overlapping lock.  Set first_match and last_match if we
		 * are the owner.
		 */
		if (range->lf_owner == owner) {
			if (first_match == NULL)
				first_match = range;
			last_match = range;
			continue;
		}

		/*
		 * If we aren't the owner check for a conflicting lock.  Only
		 * if not unlocking.
		 */
		if (type != F_UNLCK) {
			if (type == F_WRLCK || range->lf_type == F_WRLCK)
				break;
		}
	}
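	/*
	 * Scan example (illustrative): with our own ranges [0,9], [10,19]
	 * and [30,39] on the list and a request for [12,35], insert_point
	 * ends up at [30,39] (the first range starting at or beyond 12),
	 * while first_match/last_match become [10,19] and [30,39].
	 * Falling out of the loop with range != NULL means a conflicting
	 * lock owned by someone else was found.
	 */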
	/*
	 * If a conflicting lock was observed, block or fail as appropriate.
	 * (this code is skipped when unlocking)
	 */
	if (range != NULL) {
		if ((flags & F_WAIT) == 0) {
			error = EAGAIN;
			goto do_cleanup;
		}

		/*
		 * We are blocked.  For POSIX locks we have to check
		 * for deadlocks and return with EDEADLK.  This is done
		 * by checking whether range->lf_owner is already
		 * blocked.
		 *
		 * Since flock-style locks cover the whole file, a
		 * deadlock between those is nearly impossible.
		 * This can only occur if a process tries to lock the
		 * same inode exclusively while holding a shared lock
		 * with another descriptor.
		 * XXX How can we cleanly detect this?
		 * XXX The current mixing of flock & fcntl/lockf is evil.
		 *
		 * Handle existing locks of flock-style like POSIX locks.
		 */
		if (flags & F_POSIX) {
			TAILQ_FOREACH(brange, &lock->lf_blocked, lf_link)
				if (brange->lf_owner == range->lf_owner) {
					error = EDEADLK;
					goto do_cleanup;
				}
		}

		/*
		 * For flock-style locks, we must first remove
		 * any shared locks that we hold before we sleep
		 * waiting for an exclusive lock.
		 */
		if ((flags & F_POSIX) == 0 && type == F_WRLCK)
			lf_setlock(lock, owner, F_UNLCK, 0, start, end);

		brange = new_range1;
		new_range1 = NULL;
		lf_create_range(brange, owner, type, 0, start, end);
		TAILQ_INSERT_TAIL(&lock->lf_blocked, brange, lf_link);
		error = tsleep(brange, PCATCH, "lockf", 0);

		/*
		 * We may have been awakened by a signal and/or by a
		 * debugger continuing us (in which case we must remove
		 * ourselves from the blocked list) and/or by another
		 * process releasing/downgrading a lock (in which case
		 * we have already been removed from the blocked list
		 * and our lf_flags field is 1).
		 *
		 * Sleep if it looks like we might be livelocking.
		 */
		if (brange->lf_flags == 0)
			TAILQ_REMOVE(&lock->lf_blocked, brange, lf_link);
		if (count == 2)
			tsleep(brange, 0, "lockfz", 2);
		else
			++count;
		lf_destroy_range(brange);

		if (error)
			goto do_cleanup;
		goto restart;
	}
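	/*
	 * Deadlock example for the EDEADLK check above: process A holds
	 * [0,9] and is sleeping on the blocked list waiting for B's
	 * [10,19]; if B now requests [0,9] it would be blocking on a range
	 * whose owner (A) is itself already blocked on this file, so B gets
	 * EDEADLK instead of sleeping forever.  Only one level of waiting
	 * is examined here.
	 */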
	/*
	 * If there are no overlapping locks owned by us then creating
	 * the new lock is easy.  This is the most common case.
	 */
	if (first_match == NULL) {
		if (type == F_UNLCK)
			goto do_wakeup;
		if (flags & F_POSIX) {
			if (lf_count_change(owner, 1)) {
				error = ENOLCK;
				goto do_cleanup;
			}
		}
		range = new_range1;
		new_range1 = NULL;
		lf_create_range(range, owner, type, flags, start, end);
		lf_insert(&lock->lf_range, range, insert_point);
		goto do_wakeup;
	}
	/*
	 * This is a special case that we need to check for in a couple
	 * of places.
	 */
	if (first_match == last_match && first_match->lf_start < start &&
	    last_match->lf_end > end) {
		double_clip = 1;
	} else {
		double_clip = 0;
	}
	/*
	 * Figure out the worst case net increase in POSIX locks and account
	 * for it now before we start modifying things.  If neither the
	 * first nor last locks match we have an issue.  If there is only
	 * one overlapping range which needs to be clipped on both ends
	 * we wind up having to create up to two new locks, else only one.
	 *
	 * When unlocking the worst case is always 1 new lock if our
	 * unlock request cuts the middle out of an existing lock range.
	 *
	 * count represents the 'cleanup' adjustment needed.  It starts
	 * negative, is incremented whenever we create a new POSIX lock,
	 * and decremented whenever we delete an existing one.  At the
	 * end of the day it had better be <= 0 or we didn't calculate the
	 * worst case properly here.
	 */
	count = 0;
	if (flags & F_POSIX) {
		if (!lf_match(first_match, type, flags) &&
		    !lf_match(last_match, type, flags)
		) {
			if (double_clip && type != F_UNLCK)
				count = -2;
			else
				count = -1;
		}
		if (count && lf_count_change(owner, -count)) {
			error = ENOLCK;
			goto do_cleanup;
		}
	}
	/* else flock style lock which encompasses entire range */
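	/*
	 * Worked example of the accounting: unlocking [40,60] out of our
	 * own POSIX lock [0,100] gives double_clip = 1 with type == F_UNLCK,
	 * so count starts at -1 and one extra lock is reserved via
	 * lf_count_change().  The right-hand piece [61,100] created below
	 * brings count back to 0, matching the net gain of one lock record.
	 */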
	/*
	 * Create and insert the lock represented by the requested range.
	 * Adjust the net POSIX lock count.  We have to move our insertion
	 * point since brange now represents the first record >= start.
	 *
	 * When unlocking, no new lock is inserted but we still clip.
	 */
	if (type != F_UNLCK) {
		brange = new_range1;
		new_range1 = NULL;
		lf_create_range(brange, owner, type, flags, start, end);
		lf_insert(&lock->lf_range, brange, insert_point);
		insert_point = brange;
		if (flags & F_POSIX)
			++count;
	} else {
		brange = NULL;
	}
	/*
	 * Handle the double_clip case.  This is the only case where
	 * we wind up having to add TWO locks.
	 */
	if (double_clip) {
		KKASSERT(first_match == last_match);
		last_match = new_range2;
		new_range2 = NULL;
		lf_create_range(last_match, first_match->lf_owner,
				first_match->lf_type, first_match->lf_flags,
				end + 1, first_match->lf_end);
		first_match->lf_end = start - 1;
		first_match->lf_flags &= ~F_NOEND;

		/*
		 * Figure out where to insert the right side clip.
		 */
		lf_insert(&lock->lf_range, last_match, first_match);
		if (last_match->lf_flags & F_POSIX)
			++count;
	}
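	/*
	 * Double-clip illustration: write-locking [40,60] inside our own
	 * read lock [0,100] leaves three records on the list afterwards:
	 * the original record truncated to [0,39], the new write lock
	 * [40,60] (brange), and a fresh read lock [61,100] carrying the
	 * old type and flags.
	 */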
	/*
	 * Clip or destroy the locks between first_match and last_match,
	 * inclusive.  Ignore the primary lock we created (brange).  Note
	 * that if double-clipped, first_match and last_match will be
	 * outside our clipping range.  Otherwise first_match and last_match
	 * will be deleted.
	 *
	 * We have already taken care of any double clipping.
	 *
	 * The insert_point may become invalid as we delete records, do not
	 * use that pointer any more.  Also, when removing something other
	 * than 'range' we have to check to see if the item we are removing
	 * is 'next' and adjust 'next' properly.
	 *
	 * NOTE: brange will be NULL if F_UNLCKing.
	 */
	TAILQ_INIT(&deadlist);
	next = first_match;

	while ((range = next) != NULL) {
		next = TAILQ_NEXT(range, lf_link);

		/*
		 * Ignore elements that we do not own and ignore the
		 * primary request range which we just created.
		 */
		if (range->lf_owner != owner || range == brange)
			continue;
		/*
		 * We may have to wakeup a waiter when downgrading a lock.
		 */
		if (type == F_UNLCK)
			wakeup_needed = 1;
		if (type == F_RDLCK && range->lf_type == F_WRLCK)
			wakeup_needed = 1;

		/*
		 * Clip left.  This can only occur on first_match.
		 *
		 * Merge the left clip with brange if possible.  This must
		 * be done specifically, not in the optimized merge heuristic
		 * below, since we may have counted on it in our 'count'
		 * calculation above.
		 */
		if (range->lf_start < start) {
			KKASSERT(range == first_match);
			if (brange &&
			    range->lf_end >= start - 1 &&
			    lf_match(range, type, flags)) {
				range->lf_end = brange->lf_end;
				range->lf_flags |= brange->lf_flags & F_NOEND;

				/*
				 * Removing something other than 'range',
				 * adjust 'next' if necessary.
				 */
				if (next == brange)
					next = TAILQ_NEXT(next, lf_link);
				TAILQ_REMOVE(&lock->lf_range, brange, lf_link);
				if (brange->lf_flags & F_POSIX)
					--count;
				TAILQ_INSERT_TAIL(&deadlist, brange, lf_link);
				brange = range;
			} else if (range->lf_end >= start) {
				range->lf_end = start - 1;
				if (type != F_UNLCK)
					range->lf_flags &= ~F_NOEND;
			}
			if (range == last_match)
				break;
			continue;
		}
		/*
		 * Clip right.  This can only occur on last_match.
		 *
		 * Merge the right clip if possible.  This must be done
		 * specifically, not in the optimized merge heuristic
		 * below, since we may have counted on it in our 'count'
		 * calculation.
		 *
		 * Since we are adjusting lf_start, we have to move the
		 * record to maintain the sorted list.  Since lf_start is
		 * only getting larger we can use the next element as the
		 * insert point (we don't have to backtrack).
		 */
		if (range->lf_end > end) {
			KKASSERT(range == last_match);
			if (brange &&
			    range->lf_start <= end + 1 &&
			    lf_match(range, type, flags)) {
				brange->lf_end = range->lf_end;
				brange->lf_flags |= range->lf_flags & F_NOEND;
				TAILQ_REMOVE(&lock->lf_range, range, lf_link);
				if (range->lf_flags & F_POSIX)
					--count;
				TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
			} else if (range->lf_start <= end) {
				range->lf_start = end + 1;
				TAILQ_REMOVE(&lock->lf_range, range, lf_link);
				lf_insert(&lock->lf_range, range, next);
			}
			/* range == last_match, we are done */
			break;
		}

		/*
		 * The record must be entirely enclosed.  Note that the
		 * record could be first_match or last_match, and will be
		 * deleted.
		 */
		KKASSERT(range->lf_start >= start && range->lf_end <= end);
		TAILQ_REMOVE(&lock->lf_range, range, lf_link);
		if (range->lf_flags & F_POSIX)
			--count;
		TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
		if (range == last_match)
			break;
	}
	/*
	 * Attempt to merge locks adjacent to brange.  For example, we may
	 * have had to clip first_match and/or last_match, and they might
	 * be adjacent.  Or there might simply have been an adjacent lock
	 * already there.
	 *
	 * Don't get fancy, just check adjacent elements in the list if they
	 * happen to be owned by us.
	 *
	 * This case only gets hit if we have a situation where a shared
	 * and exclusive lock are adjacent, and the exclusive lock is
	 * downgraded to shared or the shared lock is upgraded to exclusive.
	 */
	if (brange) {
		range = TAILQ_PREV(brange, lockf_range_list, lf_link);
		if (range &&
		    range->lf_owner == owner &&
		    range->lf_end == brange->lf_start - 1 &&
		    lf_match(range, type, flags)
		) {
			/*
			 * Extend range to cover brange and scrap brange.
			 */
			range->lf_end = brange->lf_end;
			range->lf_flags |= brange->lf_flags & F_NOEND;
			TAILQ_REMOVE(&lock->lf_range, brange, lf_link);
			if (brange->lf_flags & F_POSIX)
				--count;
			TAILQ_INSERT_TAIL(&deadlist, brange, lf_link);
			brange = range;
		}
		range = TAILQ_NEXT(brange, lf_link);
		if (range &&
		    range->lf_owner == owner &&
		    range->lf_start == brange->lf_end + 1 &&
		    lf_match(range, type, flags)
		) {
			/*
			 * Extend brange to cover range and scrap range.
			 */
			brange->lf_end = range->lf_end;
			brange->lf_flags |= range->lf_flags & F_NOEND;
			TAILQ_REMOVE(&lock->lf_range, range, lf_link);
			if (range->lf_flags & F_POSIX)
				--count;
			TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
		}
	}
	/*
	 * Destroy deleted elements.  We didn't want to do it in the loop
	 * because the free() might have blocked.
	 *
	 * Adjust the count for any posix locks we thought we might create
	 * but didn't.
	 */
	while ((range = TAILQ_FIRST(&deadlist)) != NULL) {
		TAILQ_REMOVE(&deadlist, range, lf_link);
		lf_destroy_range(range);
	}

	KKASSERT(count <= 0);
	if (count < 0)
		lf_count_change(owner, count);
do_wakeup:
	lf_print_lock(lock);
	if (wakeup_needed)
		lf_wakeup(lock, start, end);
	error = 0;
do_cleanup:
	if (new_range1 != NULL)
		lf_destroy_range(new_range1);
	if (new_range2 != NULL)
		lf_destroy_range(new_range2);
	return(error);
}
/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct flock *fl, struct lockf *lock, struct proc *owner,
	   int type, int flags, off_t start, off_t end)
{
	struct lockf_range *range;

	TAILQ_FOREACH(range, &lock->lf_range, lf_link)
		if (range->lf_owner != owner &&
		    lf_overlap(range, start, end) &&
		    (type == F_WRLCK || range->lf_type == F_WRLCK))
			break;
	if (range == NULL) {
		fl->l_type = F_UNLCK;
		return(0);
	}
	fl->l_type = range->lf_type;
	fl->l_whence = SEEK_SET;
	fl->l_start = range->lf_start;
	if (range->lf_flags & F_NOEND)
		fl->l_len = 0;
	else
		fl->l_len = range->lf_end - range->lf_start + 1;
	if (range->lf_owner != NULL && (range->lf_flags & F_POSIX))
		fl->l_pid = range->lf_owner->p_pid;
	else
		fl->l_pid = -1;
	return(0);
}
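/*
 * Note on the scan above: the caller's own locks are skipped
 * (range->lf_owner != owner), matching F_GETLK semantics where only
 * locks held by other owners can block the hypothetical request, and
 * read locks only conflict when a write lock is involved on either side.
 */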
/*
 * Wakeup pending lock attempts.  Theoretically we can stop as soon as
 * we encounter an exclusive request that covers the whole range (at least
 * insofar as the sleep code above calls lf_wakeup() if it would otherwise
 * exit instead of loop), but for now just wakeup all overlapping
 * requests.  XXX
 */
static void
lf_wakeup(struct lockf *lock, off_t start, off_t end)
{
	struct lockf_range *range, *nrange;

	TAILQ_FOREACH_MUTABLE(range, &lock->lf_blocked, lf_link, nrange) {
		if (lf_overlap(range, start, end) == 0)
			continue;
		TAILQ_REMOVE(&lock->lf_blocked, range, lf_link);
		range->lf_flags = 1;
		wakeup(range);
	}
}
/*
 * Allocate a range structure and initialize it sufficiently such that
 * lf_destroy_range() does not barf.
 */
static struct lockf_range *
lf_alloc_range(void)
{
	struct lockf_range *range;

#ifdef INVARIANTS
	lf_global_counter++;
#endif
	range = kmalloc(sizeof(struct lockf_range), M_LOCKF, M_WAITOK);
	range->lf_owner = NULL;
	return(range);
}
static void
lf_insert(struct lockf_range_list *list, struct lockf_range *elm,
	  struct lockf_range *insert_point)
{
	while (insert_point && insert_point->lf_start < elm->lf_start)
		insert_point = TAILQ_NEXT(insert_point, lf_link);
	if (insert_point != NULL)
		TAILQ_INSERT_BEFORE(insert_point, elm, lf_link);
	else
		TAILQ_INSERT_TAIL(list, elm, lf_link);
}
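/*
 * lf_insert() keeps the range list sorted by lf_start.  insert_point is
 * only a hint and must not lie past elm's final position; the loop walks
 * forward from it until it finds the first range starting at or beyond
 * elm->lf_start and inserts elm in front of it (or at the tail).
 */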
static void
lf_create_range(struct lockf_range *range, struct proc *owner, int type,
		int flags, off_t start, off_t end)
{
	KKASSERT(start <= end);
	range->lf_type = type;
	range->lf_flags = flags;
	range->lf_start = start;
	range->lf_end = end;
	range->lf_owner = owner;

	lf_printf("lf_create_range: %lld..%lld\n",
		  range->lf_start, range->lf_end);
}
static void
lf_destroy_range(struct lockf_range *range)
{
	lf_printf("lf_destroy_range: %lld..%lld\n",
		  range->lf_start, range->lf_end);
	kfree(range, M_LOCKF);
#ifdef INVARIANTS
	lf_global_counter--;
	KKASSERT(lf_global_counter >= 0);
#endif
}
#ifdef LOCKF_DEBUG

static void
_lf_printf(const char *ctl, ...)
{
	struct proc *p;
	__va_list va;

	if (lf_print_ranges) {
		if ((p = curproc) != NULL)
			kprintf("pid %d (%s): ", p->p_pid, p->p_comm);
	}
	__va_start(va, ctl);
	kvprintf(ctl, va);
	__va_end(va);
}
static void
_lf_print_lock(const struct lockf *lock)
{
	struct lockf_range *range;

	if (lf_print_ranges == 0)
		return;

	if (TAILQ_EMPTY(&lock->lf_range)) {
		lf_printf("lockf %p: no ranges locked\n", lock);
	} else {
		lf_printf("lockf %p:\n", lock);
	}
	TAILQ_FOREACH(range, &lock->lf_range, lf_link)
		kprintf("\t%lld..%lld type %s owned by %d\n",
			range->lf_start, range->lf_end,
			range->lf_type == F_RDLCK ? "shared" : "exclusive",
			range->lf_flags & F_POSIX ? range->lf_owner->p_pid : -1);
	if (TAILQ_EMPTY(&lock->lf_blocked))
		kprintf("no process waiting for range\n");
	else
		kprintf("blocked locks:");
	TAILQ_FOREACH(range, &lock->lf_blocked, lf_link)
		kprintf("\t%lld..%lld type %s waiting on %p\n",
			range->lf_start, range->lf_end,
			range->lf_type == F_RDLCK ? "shared" : "exclusive",
			range);
}
#endif	/* LOCKF_DEBUG */