/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 * Copyright (c) 2006 Matthew Dillon <dillon@backplane.com>.  All rights reserved.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Scooter Morris at Genentech Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)ufs_lockf.c 8.3 (Berkeley) 1/6/94
 * $FreeBSD: src/sys/kern/kern_lockf.c,v 1.25 1999/11/16 16:28:56 phk Exp $
 * $DragonFly: src/sys/kern/kern_lockf.c,v 1.37 2007/11/01 22:48:16 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/resourcevar.h>

#include <sys/lockf.h>
#include <machine/limits.h>     /* for LLONG_MAX */
#include <machine/stdarg.h>

#ifdef INVARIANTS
int lf_global_counter = 0;
#endif

#ifdef LOCKF_DEBUG
int lf_print_ranges = 0;

static void _lf_print_lock(const struct lockf *);
static void _lf_printf(const char *, ...);

#define lf_print_lock(lock)     if (lf_print_ranges) _lf_print_lock(lock)
#define lf_printf(ctl, args...) if (lf_print_ranges) _lf_printf(ctl, args)
#else
#define lf_print_lock(lock)
#define lf_printf(ctl, args...)
#endif

static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");

static void     lf_wakeup(struct lockf *, off_t, off_t);
static struct lockf_range *lf_alloc_range(void);
static void     lf_create_range(struct lockf_range *, struct proc *, int, int,
                                off_t, off_t);
static void     lf_insert(struct lockf_range_list *list,
                          struct lockf_range *elm,
                          struct lockf_range *insert_point);
static void     lf_destroy_range(struct lockf_range *);

static int      lf_setlock(struct lockf *, struct proc *, int, int,
                           off_t, off_t);
static int      lf_getlock(struct flock *, struct lockf *, struct proc *,
                           int, int, off_t, off_t);

static int      lf_count_change(struct proc *, int);

/*
 * Return TRUE (non-zero) if the type and posix flags match.
 */
static __inline
int
lf_match(struct lockf_range *range, int type, int flags)
{
        if (range->lf_type != type)
                return(0);
        if ((range->lf_flags ^ flags) & F_POSIX)
                return(0);
        return(1);
}

/*
 * Check whether range and [start, end] overlap.
 */
static __inline
int
lf_overlap(const struct lockf_range *range, off_t start, off_t end)
{
        if (range->lf_start >= start && range->lf_start <= end)
                return(1);
        else if (start >= range->lf_start && start <= range->lf_end)
                return(1);
        else
                return(0);
}
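
/*
 * Example: range offsets are inclusive at both ends, so a lock covering
 * [3,7] overlaps a request for [5,10], while [3,7] and [8,10] are merely
 * adjacent and do not overlap.
 */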

/*
 * Change the POSIX lock accounting for the given process.
 */
void
lf_count_adjust(struct proc *p, int increase)
{
        struct uidinfo *uip;

        KKASSERT(p != NULL);

        uip = p->p_ucred->cr_uidinfo;

        if (increase)
                uip->ui_posixlocks += p->p_numposixlocks;
        else
                uip->ui_posixlocks -= p->p_numposixlocks;

        KASSERT(uip->ui_posixlocks >= 0,
                ("Negative number of POSIX locks held by %s user: %d.",
                 increase ? "new" : "old", uip->ui_posixlocks));
}
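
/*
 * Charge or release POSIX lock accounting for 'owner'.  The effective
 * per-uid limit is the smaller of the process's RLIMIT_POSIXLOCKS resource
 * limit and the maxposixlocksperuid sysctl; root (cr_uid == 0) and a limit
 * of -1 bypass the check.  Returns non-zero if charging the requested
 * increase would exceed the limit.
 */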

static int
lf_count_change(struct proc *owner, int diff)
{
        struct uidinfo *uip;
        int max;

        /* we might actually not have a process context */
        if (owner == NULL)
                return(0);

        uip = owner->p_ucred->cr_uidinfo;

        max = MIN(owner->p_rlimit[RLIMIT_POSIXLOCKS].rlim_cur,
                  maxposixlocksperuid);
        if (diff > 0 && owner->p_ucred->cr_uid != 0 && max != -1 &&
            uip->ui_posixlocks >= max) {
                return(1);
        }

        uip->ui_posixlocks += diff;
        owner->p_numposixlocks += diff;

        KASSERT(uip->ui_posixlocks >= 0,
                ("Negative number of POSIX locks held by user: %d.",
                 uip->ui_posixlocks));
        KASSERT(owner->p_numposixlocks >= 0,
                ("Negative number of POSIX locks held by proc: %d.",
                 owner->p_numposixlocks));

        return(0);
}

/*
 * Advisory record locking support
 */
int
lf_advlock(struct vop_advlock_args *ap, struct lockf *lock, u_quad_t size)
{
        struct flock *fl = ap->a_fl;
        struct proc *owner;
        off_t start, end;
        int type, flags, error;
        lwkt_tokref ilock;

        /*
         * Convert the flock structure into a start and end.
         */
        switch (fl->l_whence) {
        case SEEK_SET:
        case SEEK_CUR:
                /*
                 * Caller is responsible for adding any necessary offset
                 * when SEEK_CUR is used.
                 */
                start = fl->l_start;
                break;

        case SEEK_END:
                start = size + fl->l_start;
                break;

        default:
                return(EINVAL);
        }

        flags = ap->a_flags;
        if (start < 0)
                return(EINVAL);
        if (fl->l_len == 0) {
                flags |= F_NOEND;
                end = LLONG_MAX;
        } else {
                end = start + fl->l_len - 1;
                if (end < start)
                        return(EINVAL);
        }
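
        /*
         * For illustration: a request with l_whence == SEEK_SET,
         * l_start == 100 and l_len == 10 becomes the inclusive range
         * [100, 109].  l_len == 0 means "to end of file" and becomes
         * [100, LLONG_MAX] with F_NOEND marking the range as open-ended.
         */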

        type = fl->l_type;
        /*
         * This isn't really correct for flock-style locks,
         * but the current handling is somewhat broken anyway.
         */
        owner = (struct proc *)ap->a_id;

        /*
         * Do the requested operation.
         */
        lwkt_gettoken(&ilock, lwkt_token_pool_get(lock));

        if (lock->init_done == 0) {
                TAILQ_INIT(&lock->lf_range);
                TAILQ_INIT(&lock->lf_blocked);
                lock->init_done = 1;
        }

        switch(ap->a_op) {
        case F_SETLK:
                /*
                 * NOTE: It is possible for both lf_range and lf_blocked to
                 * be empty if we block and get woken up, but another process
                 * then gets in and issues an unlock.  So VMAYHAVELOCKS must
                 * be set after the lf_setlock() operation completes rather
                 * than before.
                 */
                error = lf_setlock(lock, owner, type, flags, start, end);
                ap->a_vp->v_flag |= VMAYHAVELOCKS;
                break;

        case F_UNLCK:
                error = lf_setlock(lock, owner, type, flags, start, end);
                if (TAILQ_EMPTY(&lock->lf_range) &&
                    TAILQ_EMPTY(&lock->lf_blocked)) {
                        ap->a_vp->v_flag &= ~VMAYHAVELOCKS;
                }
                break;

        case F_GETLK:
                error = lf_getlock(fl, lock, owner, type, flags, start, end);
                break;

        default:
                error = EINVAL;
                break;
        }
        lwkt_reltoken(&ilock);
        return(error);
}
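
/*
 * A filesystem typically implements its VOP_ADVLOCK operation by handing
 * the request to lf_advlock() together with the per-vnode lockf state and
 * the current file size.  A UFS-style hookup might look roughly like the
 * sketch below (illustration only; the inode field names belong to the
 * caller, not to this file):
 *
 *      static int
 *      ufs_advlock(struct vop_advlock_args *ap)
 *      {
 *              struct inode *ip = VTOI(ap->a_vp);
 *
 *              return (lf_advlock(ap, &ip->i_lockf, ip->i_size));
 *      }
 */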

static int
lf_setlock(struct lockf *lock, struct proc *owner, int type, int flags,
           off_t start, off_t end)
{
        struct lockf_range *range;
        struct lockf_range *brange;
        struct lockf_range *next;
        struct lockf_range *first_match;
        struct lockf_range *last_match;
        struct lockf_range *insert_point;
        struct lockf_range *new_range1;
        struct lockf_range *new_range2;
        int wakeup_needed;
        int double_clip;
        int error = 0;
        int count;
        struct lockf_range_list deadlist;

        new_range1 = NULL;
        new_range2 = NULL;
        count = 0;

restart:
        /*
         * Preallocate two ranges so we don't have to worry about blocking
         * in the middle of the lock code.
         */
        if (new_range1 == NULL)
                new_range1 = lf_alloc_range();
        if (new_range2 == NULL)
                new_range2 = lf_alloc_range();
        first_match = NULL;
        last_match = NULL;
        insert_point = NULL;
        wakeup_needed = 0;

        lf_print_lock(lock);

        /*
         * Locate the insertion point for the new lock (the first range
         * with an lf_start >= start).
         *
         * Locate the first and last ranges owned by us that overlap
         * the requested range.
         */
        TAILQ_FOREACH(range, &lock->lf_range, lf_link) {
                if (insert_point == NULL && range->lf_start >= start)
                        insert_point = range;

                /*
                 * Skip non-overlapping locks.  Locks are sorted by lf_start
                 * so we can terminate the search when lf_start exceeds the
                 * requested range (insert_point is still guaranteed to be
                 * set properly).
                 */
                if (range->lf_end < start)
                        continue;
                if (range->lf_start > end) {
                        range = NULL;
                        break;
                }

                /*
                 * Overlapping lock.  Set first_match and last_match if we
                 * are the owner.
                 */
                if (range->lf_owner == owner) {
                        if (first_match == NULL)
                                first_match = range;
                        last_match = range;
                        continue;
                }

                /*
                 * If we aren't the owner check for a conflicting lock.
                 * Only do this if we are not unlocking.
                 */
                if (type != F_UNLCK) {
                        if (type == F_WRLCK || range->lf_type == F_WRLCK)
                                break;
                }
        }

        /*
         * If a conflicting lock was observed, block or fail as appropriate.
         * (this code is skipped when unlocking)
         */
        if (range != NULL) {
                if ((flags & F_WAIT) == 0) {
                        error = EAGAIN;
                        goto do_cleanup;
                }

                /*
                 * We are blocked.  For POSIX locks we have to check
                 * for deadlocks and return with EDEADLK.  This is done
                 * by checking whether range->lf_owner is already
                 * blocked.
                 *
                 * Since flock-style locks cover the whole file, a
                 * deadlock between those is nearly impossible.
                 * This can only occur if a process tries to lock the
                 * same inode exclusively while holding a shared lock
                 * with another descriptor.
                 * XXX How can we cleanly detect this?
                 * XXX The current mixing of flock & fcntl/lockf is evil.
                 *
                 * Handle existing locks of flock-style like POSIX locks.
                 */
                if (flags & F_POSIX) {
                        TAILQ_FOREACH(brange, &lock->lf_blocked, lf_link)
                                if (brange->lf_owner == range->lf_owner) {
                                        error = EDEADLK;
                                        goto do_cleanup;
                                }
                }

                /*
                 * For flock-style locks, we must first remove
                 * any shared locks that we hold before we sleep
                 * waiting for an exclusive lock.
                 */
                if ((flags & F_POSIX) == 0 && type == F_WRLCK)
                        lf_setlock(lock, owner, F_UNLCK, 0, start, end);

                brange = new_range1;
                new_range1 = NULL;
                lf_create_range(brange, owner, type, 0, start, end);
                TAILQ_INSERT_TAIL(&lock->lf_blocked, brange, lf_link);
                error = tsleep(brange, PCATCH, "lockf", 0);

                /*
                 * We may have been awakened by a signal and/or by a
                 * debugger continuing us (in which case we must remove
                 * ourselves from the blocked list) and/or by another
                 * process releasing/downgrading a lock (in which case
                 * we have already been removed from the blocked list
                 * and our lf_flags field is 1).
                 *
                 * Sleep if it looks like we might be livelocking.
                 */
                if (brange->lf_flags == 0)
                        TAILQ_REMOVE(&lock->lf_blocked, brange, lf_link);
                if (count == 2)
                        tsleep(brange, 0, "lockfz", 2);
                else
                        ++count;
                lf_destroy_range(brange);

                if (error)
                        goto do_cleanup;
                goto restart;
        }

        /*
         * If there are no overlapping locks owned by us then creating
         * the new lock is easy.  This is the most common case.
         */
        if (first_match == NULL) {
                if (type == F_UNLCK)
                        goto do_wakeup;
                if (flags & F_POSIX) {
                        if (lf_count_change(owner, 1)) {
                                error = ENOLCK;
                                goto do_cleanup;
                        }
                }
                range = new_range1;
                new_range1 = NULL;
                lf_create_range(range, owner, type, flags, start, end);
                lf_insert(&lock->lf_range, range, insert_point);
                goto do_wakeup;
        }

        /*
         * This is a special case that we need to check for in a couple
         * of places.
         */
        if (first_match == last_match && first_match->lf_start < start &&
            last_match->lf_end > end) {
                double_clip = 1;
        } else {
                double_clip = 0;
        }

        /*
         * Figure out the worst case net increase in POSIX locks and account
         * for it now before we start modifying things.  If neither the
         * first nor the last lock matches we have an issue.  If there is
         * only one overlapping range which needs to be clipped on both ends
         * we wind up having to create up to two new locks, else only one.
         *
         * When unlocking the worst case is always 1 new lock if our
         * unlock request cuts the middle out of an existing lock range.
         *
         * count represents the 'cleanup' adjustment needed.  It starts
         * negative, is incremented whenever we create a new POSIX lock,
         * and decremented whenever we delete an existing one.  At the
         * end of the day it had better be <= 0 or we didn't calculate the
         * worst case properly here.
         */
        count = 0;
        if (flags & F_POSIX) {
                if (!lf_match(first_match, type, flags) &&
                    !lf_match(last_match, type, flags)
                ) {
                        if (double_clip && type != F_UNLCK)
                                count = -2;
                        else
                                count = -1;
                }
                if (count && lf_count_change(owner, -count)) {
                        error = ENOLCK;
                        goto do_cleanup;
                }
        }
        /* else flock style lock which encompasses entire range */
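
        /*
         * Worked example: suppose we hold a single POSIX write lock over
         * [0,99] and now request a POSIX read lock over [40,59].  Neither
         * end of the existing range matches the new type and the request
         * is double-clipped, so two additional locks are reserved here.
         * The result is [0,39] write, [40,59] read, [60,99] write, and the
         * net increase of two locks exactly consumes the reservation.
         */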

        /*
         * Create and insert the lock representing the requested range.
         * Adjust the net POSIX lock count.  We have to move our insertion
         * point since brange now represents the first record >= start.
         *
         * When unlocking, no new lock is inserted but we still clip.
         */
        if (type != F_UNLCK) {
                brange = new_range1;
                new_range1 = NULL;
                lf_create_range(brange, owner, type, flags, start, end);
                lf_insert(&lock->lf_range, brange, insert_point);
                insert_point = brange;
                if (flags & F_POSIX)
                        ++count;
        } else {
                brange = NULL;
        }

        /*
         * Handle the double_clip case.  This is the only case where
         * we wind up having to add TWO locks.
         */
        if (double_clip) {
                KKASSERT(first_match == last_match);
                last_match = new_range2;
                new_range2 = NULL;
                lf_create_range(last_match, first_match->lf_owner,
                                first_match->lf_type, first_match->lf_flags,
                                end + 1, first_match->lf_end);
                first_match->lf_end = start - 1;
                first_match->lf_flags &= ~F_NOEND;

                /*
                 * Figure out where to insert the right side clip.
                 */
                lf_insert(&lock->lf_range, last_match, first_match);
                if (last_match->lf_flags & F_POSIX)
                        ++count;
        }

        /*
         * Clip or destroy the locks between first_match and last_match,
         * inclusive.  Ignore the primary lock we created (brange).  Note
         * that if double-clipped, first_match and last_match will be
         * outside our clipping range.  Otherwise first_match and last_match
         * will be deleted.
         *
         * We have already taken care of any double clipping.
         *
         * The insert_point may become invalid as we delete records, do not
         * use that pointer any more.  Also, when removing something other
         * than 'range' we have to check to see if the item we are removing
         * is 'next' and adjust 'next' properly.
         *
         * NOTE: brange will be NULL if F_UNLCKing.
         */
        TAILQ_INIT(&deadlist);
        next = first_match;

        while ((range = next) != NULL) {
                next = TAILQ_NEXT(range, lf_link);

                /*
                 * Ignore elements that we do not own and ignore the
                 * primary request range which we just created.
                 */
                if (range->lf_owner != owner || range == brange)
                        continue;

                /*
                 * We may have to wakeup a waiter when downgrading a lock.
                 */
                if (type == F_UNLCK)
                        wakeup_needed = 1;
                if (type == F_RDLCK && range->lf_type == F_WRLCK)
                        wakeup_needed = 1;

                /*
                 * Clip left.  This can only occur on first_match.
                 *
                 * Merge the left clip with brange if possible.  This must
                 * be done specifically, not in the optimized merge heuristic
                 * below, since we may have counted on it in our 'count'
                 * calculation above.
                 */
                if (range->lf_start < start) {
                        KKASSERT(range == first_match);
                        if (brange &&
                            range->lf_end >= start - 1 &&
                            lf_match(range, type, flags)) {
                                range->lf_end = brange->lf_end;
                                range->lf_flags |= brange->lf_flags & F_NOEND;

                                /*
                                 * Removing something other than 'range',
                                 * adjust 'next' if necessary.
                                 */
                                if (next == brange)
                                        next = TAILQ_NEXT(next, lf_link);
                                TAILQ_REMOVE(&lock->lf_range, brange, lf_link);
                                if (brange->lf_flags & F_POSIX)
                                        --count;
                                TAILQ_INSERT_TAIL(&deadlist, brange, lf_link);
                                brange = range;
                        } else if (range->lf_end >= start) {
                                range->lf_end = start - 1;
                                if (type != F_UNLCK)
                                        range->lf_flags &= ~F_NOEND;
                        }
                        if (range == last_match)
                                break;
                        continue;
                }

                /*
                 * Clip right.  This can only occur on last_match.
                 *
                 * Merge the right clip if possible.  This must be done
                 * specifically, not in the optimized merge heuristic
                 * below, since we may have counted on it in our 'count'
                 * calculation.
                 *
                 * Since we are adjusting lf_start, we have to move the
                 * record to maintain the sorted list.  Since lf_start is
                 * only getting larger we can use the next element as the
                 * insert point (we don't have to backtrack).
                 */
                if (range->lf_end > end) {
                        KKASSERT(range == last_match);
                        if (brange &&
                            range->lf_start <= end + 1 &&
                            lf_match(range, type, flags)) {
                                brange->lf_end = range->lf_end;
                                brange->lf_flags |= range->lf_flags & F_NOEND;
                                TAILQ_REMOVE(&lock->lf_range, range, lf_link);
                                if (range->lf_flags & F_POSIX)
                                        --count;
                                TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
                        } else if (range->lf_start <= end) {
                                range->lf_start = end + 1;
                                TAILQ_REMOVE(&lock->lf_range, range, lf_link);
                                lf_insert(&lock->lf_range, range, next);
                        }
                        /* range == last_match, we are done */
                        break;
                }

                /*
                 * The record must be entirely enclosed.  Note that the
                 * record could be first_match or last_match, and will be
                 * deleted.
                 */
                KKASSERT(range->lf_start >= start && range->lf_end <= end);
                TAILQ_REMOVE(&lock->lf_range, range, lf_link);
                if (range->lf_flags & F_POSIX)
                        --count;
                TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
                if (range == last_match)
                        break;
        }

        /*
         * Attempt to merge locks adjacent to brange.  For example, we may
         * have had to clip first_match and/or last_match, and they might
         * be adjacent.  Or there might simply have been an adjacent lock
         * already there.
         *
         * Don't get fancy, just check adjacent elements in the list if they
         * happen to be owned by us.
         *
         * This case only gets hit if we have a situation where a shared
         * and exclusive lock are adjacent, and the exclusive lock is
         * downgraded to shared or the shared lock is upgraded to exclusive.
         */
        if (brange) {
                range = TAILQ_PREV(brange, lockf_range_list, lf_link);
                if (range &&
                    range->lf_owner == owner &&
                    range->lf_end == brange->lf_start - 1 &&
                    lf_match(range, type, flags)
                ) {
                        /*
                         * Extend range to cover brange and scrap brange.
                         */
                        range->lf_end = brange->lf_end;
                        range->lf_flags |= brange->lf_flags & F_NOEND;
                        TAILQ_REMOVE(&lock->lf_range, brange, lf_link);
                        if (brange->lf_flags & F_POSIX)
                                --count;
                        TAILQ_INSERT_TAIL(&deadlist, brange, lf_link);
                        brange = range;
                }
                range = TAILQ_NEXT(brange, lf_link);
                if (range &&
                    range->lf_owner == owner &&
                    range->lf_start == brange->lf_end + 1 &&
                    lf_match(range, type, flags)
                ) {
                        /*
                         * Extend brange to cover range and scrap range.
                         */
                        brange->lf_end = range->lf_end;
                        brange->lf_flags |= range->lf_flags & F_NOEND;
                        TAILQ_REMOVE(&lock->lf_range, range, lf_link);
                        if (range->lf_flags & F_POSIX)
                                --count;
                        TAILQ_INSERT_TAIL(&deadlist, range, lf_link);
                }
        }

        /*
         * Destroy deleted elements.  We didn't want to do it in the loop
         * because the free() might have blocked.
         *
         * Adjust the count for any posix locks we thought we might create
         * but didn't.
         */
        while ((range = TAILQ_FIRST(&deadlist)) != NULL) {
                TAILQ_REMOVE(&deadlist, range, lf_link);
                lf_destroy_range(range);
        }

        KKASSERT(count <= 0);
        if (count < 0)
                lf_count_change(owner, count);
do_wakeup:
        lf_print_lock(lock);
        if (wakeup_needed)
                lf_wakeup(lock, start, end);
        error = 0;
do_cleanup:
        if (new_range1 != NULL)
                lf_destroy_range(new_range1);
        if (new_range2 != NULL)
                lf_destroy_range(new_range2);
        return(error);
}

/*
 * Check whether there is a blocking lock,
 * and if so return its process identifier.
 */
static int
lf_getlock(struct flock *fl, struct lockf *lock, struct proc *owner,
           int type, int flags, off_t start, off_t end)
{
        struct lockf_range *range;

        TAILQ_FOREACH(range, &lock->lf_range, lf_link)
                if (range->lf_owner != owner &&
                    lf_overlap(range, start, end) &&
                    (type == F_WRLCK || range->lf_type == F_WRLCK))
                        break;
        if (range == NULL) {
                fl->l_type = F_UNLCK;
                return(0);
        }
        fl->l_type = range->lf_type;
        fl->l_whence = SEEK_SET;
        fl->l_start = range->lf_start;
        if (range->lf_flags & F_NOEND)
                fl->l_len = 0;
        else
                fl->l_len = range->lf_end - range->lf_start + 1;
        if (range->lf_owner != NULL && (range->lf_flags & F_POSIX))
                fl->l_pid = range->lf_owner->p_pid;
        else
                fl->l_pid = -1;
        return(0);
}

/*
 * Wakeup pending lock attempts.  Theoretically we can stop as soon as
 * we encounter an exclusive request that covers the whole range (at least
 * insofar as the sleep code above calls lf_wakeup() if it would otherwise
 * exit instead of loop), but for now just wakeup all overlapping
 * requests.  XXX
 */
static void
lf_wakeup(struct lockf *lock, off_t start, off_t end)
{
        struct lockf_range *range, *nrange;

        TAILQ_FOREACH_MUTABLE(range, &lock->lf_blocked, lf_link, nrange) {
                if (lf_overlap(range, start, end) == 0)
                        continue;
                TAILQ_REMOVE(&lock->lf_blocked, range, lf_link);
                range->lf_flags = 1;
                wakeup(range);
        }
}

/*
 * Allocate a range structure and initialize it sufficiently such that
 * lf_destroy_range() does not barf.
 */
static struct lockf_range *
lf_alloc_range(void)
{
        struct lockf_range *range;

#ifdef INVARIANTS
        lf_global_counter++;
#endif
        range = kmalloc(sizeof(struct lockf_range), M_LOCKF, M_WAITOK);
        range->lf_owner = NULL;
        return(range);
}
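
/*
 * Insert 'elm' into the sorted range list.  'insert_point' is a hint
 * pointing at or before the correct position: the list is walked forward
 * from it until an element with lf_start >= elm->lf_start is found and
 * 'elm' is inserted in front of it, or at the tail if no such element
 * exists (or if the hint is NULL).
 */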

static void
lf_insert(struct lockf_range_list *list, struct lockf_range *elm,
          struct lockf_range *insert_point)
{
        while (insert_point && insert_point->lf_start < elm->lf_start)
                insert_point = TAILQ_NEXT(insert_point, lf_link);
        if (insert_point != NULL)
                TAILQ_INSERT_BEFORE(insert_point, elm, lf_link);
        else
                TAILQ_INSERT_TAIL(list, elm, lf_link);
}

static void
lf_create_range(struct lockf_range *range, struct proc *owner, int type,
                int flags, off_t start, off_t end)
{
        KKASSERT(start <= end);
        range->lf_type = type;
        range->lf_flags = flags;
        range->lf_start = start;
        range->lf_end = end;
        range->lf_owner = owner;

        lf_printf("lf_create_range: %lld..%lld\n",
                  range->lf_start, range->lf_end);
}

static void
lf_destroy_range(struct lockf_range *range)
{
        lf_printf("lf_destroy_range: %lld..%lld\n",
                  range->lf_start, range->lf_end);
        kfree(range, M_LOCKF);
#ifdef INVARIANTS
        lf_global_counter--;
        KKASSERT(lf_global_counter >= 0);
#endif
}

#ifdef LOCKF_DEBUG

static void
_lf_printf(const char *ctl, ...)
{
        struct proc *p;
        __va_list va;

        if (lf_print_ranges) {
                if ((p = curproc) != NULL)
                        kprintf("pid %d (%s): ", p->p_pid, p->p_comm);
        }
        __va_start(va, ctl);
        kvprintf(ctl, va);
        __va_end(va);
}

static void
_lf_print_lock(const struct lockf *lock)
{
        struct lockf_range *range;

        if (lf_print_ranges == 0)
                return;

        if (TAILQ_EMPTY(&lock->lf_range)) {
                lf_printf("lockf %p: no ranges locked\n", lock);
        } else {
                lf_printf("lockf %p:\n", lock);
        }
        TAILQ_FOREACH(range, &lock->lf_range, lf_link)
                kprintf("\t%lld..%lld type %s owned by %d\n",
                        range->lf_start, range->lf_end,
                        range->lf_type == F_RDLCK ? "shared" : "exclusive",
                        range->lf_flags & F_POSIX ? range->lf_owner->p_pid : -1);
        if (TAILQ_EMPTY(&lock->lf_blocked))
                kprintf("no process waiting for range\n");
        else
                kprintf("blocked locks:");
        TAILQ_FOREACH(range, &lock->lf_blocked, lf_link)
                kprintf("\t%lld..%lld type %s waiting on %p\n",
                        range->lf_start, range->lf_end,
                        range->lf_type == F_RDLCK ? "shared" : "exclusive",
                        range);
}
#endif /* LOCKF_DEBUG */