 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 *  Deadlock detection added.
 *  FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 *  Converted file_lock_table to a linked list from an array, which eliminates
 *  the limits on how many active file locks are open.
 *  Chad Page (pageone@netcom.com), November 27, 1994
 *
 *  Removed dependency on file descriptors. dup()'ed file descriptors now
 *  get the same locks as the original file descriptors, and a close() on
 *  any file descriptor removes ALL the locks on the file for the current
 *  process. Since locks still depend on the process id, locks are inherited
 *  after an exec() but not after a fork(). This agrees with POSIX, and with
 *  both BSD and SVR4 practice.
 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 *  Scrapped free list which is redundant now that we allocate locks
 *  dynamically with kmalloc()/kfree().
 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *
 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
 *  fcntl() system call. They have the semantics described above.
 *
 *  FL_FLOCK locks are created with calls to flock(), through the flock()
 *  system call, which is new. Old C libraries implement flock() via fcntl()
 *  and will continue to use the old, broken implementation.
 *
 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 *  with a file pointer (filp). As a result they can be shared by a parent
 *  process and its children after a fork(). They are removed when the last
 *  file descriptor referring to the file pointer is closed (unless explicitly
 *  unlocked).
 *
 *  FL_FLOCK locks never deadlock; an existing lock is always removed before
 *  upgrading from shared to exclusive (or vice versa). When this happens
 *  any processes blocked by the current lock are woken up and allowed to
 *  run before the new lock is applied.
 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 *  Removed some race conditions in flock_lock_file(), marked other possible
 *  races. Just grep for FIXME to see them.
 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 *  once we've checked for blocking and deadlocking.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 *  Initial implementation of mandatory locks. SunOS turned out to be
 *  a rotten model, so I implemented the "obvious" semantics.
 *  See 'linux/Documentation/mandatory.txt' for details.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
 *  see if the system call should be rejected. Ref. HP-UX/SunOS/Solaris
 *  Reference Manual pages.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 *  Tidied up block list handling. Added '/proc/locks' interface.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 *  Fixed deadlock condition for pathological code that mixes calls to
 *  flock() and fcntl().
 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 *  guarantee sensible behaviour in the case where file system modules might
 *  be compiled with different options than the kernel itself.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 *  locks. Changed process synchronisation to avoid dereferencing locks that
 *  have already been freed.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 *  Made the block list a circular list to minimise searching in the list.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 *  Made mandatory locking a mount option. Default is not to allow mandatory
 *  locking.
 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 *  Some adaptations for NFS support.
 *  Olaf Kirch (okir@monad.swb.de), Dec 1996,
 *
 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 *
 *  Use slab allocator instead of kmalloc/kfree.
 *  Use generic list implementation from <linux/list.h>.
 *  Sped up posix_locks_deadlock by only considering blocked locks.
 *  Matthew Wilcox <willy@debian.org>, March, 2000.
 *
 *  Leases and LOCK_MAND
 *  Matthew Wilcox <willy@debian.org>, June, 2000.
 *  Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
 */
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/time.h>

#include <asm/semaphore.h>
#include <asm/uaccess.h>
#define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
#define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)

int leases_enable = 1;
int lease_break_time = 45;

#define for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)

LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);

static kmem_cache_t *filelock_cache;
/* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(void)
{
	return kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
}
/* Free a lock which is not in use. */
static inline void locks_free_lock(struct file_lock *fl)
{
	if (waitqueue_active(&fl->fl_wait))
		panic("Attempting to free lock with active wait queue");

	if (!list_empty(&fl->fl_block))
		panic("Attempting to free lock with active block list");

	if (!list_empty(&fl->fl_link))
		panic("Attempting to free lock on active lock list");

	kmem_cache_free(filelock_cache, fl);
}
void locks_init_lock(struct file_lock *fl)
{
	INIT_LIST_HEAD(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_block);
	init_waitqueue_head(&fl->fl_wait);
	fl->fl_fasync = NULL;
	fl->fl_start = fl->fl_end = 0;
	fl->fl_notify = NULL;
	fl->fl_insert = NULL;
	fl->fl_remove = NULL;
}
/*
 * Initialises the fields of the file lock which are invariant for
 * all file locks.
 */
static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags)
{
	struct file_lock *lock = (struct file_lock *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) !=
					SLAB_CTOR_CONSTRUCTOR)
		return;

	locks_init_lock(lock);
}
/*
 * Initialize a new lock from an existing file_lock structure.
 */
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = fl->fl_file;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_notify = fl->fl_notify;
	new->fl_insert = fl->fl_insert;
	new->fl_remove = fl->fl_remove;
	new->fl_u = fl->fl_u;
}
static inline int flock_translate_cmd(int cmd) {
	if (cmd & LOCK_MAND)
		return cmd & (LOCK_MAND | LOCK_RW);
	switch (cmd &~ LOCK_NB) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}
/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static int flock_make_lock(struct file *filp,
		struct file_lock **lock, unsigned int cmd)
{
	struct file_lock *fl;
	int type = flock_translate_cmd(cmd);
	if (type < 0)
		return type;

	fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_file = filp;
	fl->fl_pid = current->tgid;
	fl->fl_flags = (cmd & LOCK_NB) ? FL_FLOCK : FL_FLOCK | FL_SLEEP;
	fl->fl_type = type;
	fl->fl_end = OFFSET_MAX;

	*lock = fl;
	return 0;
}
static int assign_type(struct file_lock *fl, int type)
{
	switch (type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
			       struct flock *l)
{
	off_t start, end;

	switch (l->l_whence) {
	case 0: /*SEEK_SET*/
		start = 0;
		break;
	case 1: /*SEEK_CUR*/
		start = filp->f_pos;
		break;
	case 2: /*SEEK_END*/
		start = i_size_read(filp->f_dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
	   POSIX-2001 defines it. */
	start += l->l_start;
	end = start + l->l_len - 1;
	if (l->l_len > 0 && end < 0)
		return -EOVERFLOW;
	fl->fl_start = start;	/* we record the absolute position */
	fl->fl_end = end;
	if (!l->l_len)
		fl->fl_end = OFFSET_MAX;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_notify = NULL;
	fl->fl_insert = NULL;
	fl->fl_remove = NULL;

	return assign_type(fl, l->l_type);
}
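
/*
 * Illustrative sketch (assumptions for documentation only, not kernel
 * code): how a userspace "struct flock" maps onto the [fl_start, fl_end]
 * range recorded above. With SEEK_SET, l_start = 100 and l_len = 10
 * describe bytes 100..109, giving fl_start = 100 and fl_end = 109;
 * l_len = 0 means "to end of file" and is recorded as fl_end = OFFSET_MAX:
 *
 *	struct flock l = {
 *		.l_type   = F_RDLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 100,
 *		.l_len    = 10,
 *	};
 *	fcntl(fd, F_SETLK, &l);
 */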
#if BITS_PER_LONG == 32
static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
				 struct flock64 *l)
{
	loff_t start;

	switch (l->l_whence) {
	case 0: /*SEEK_SET*/
		start = 0;
		break;
	case 1: /*SEEK_CUR*/
		start = filp->f_pos;
		break;
	case 2: /*SEEK_END*/
		start = i_size_read(filp->f_dentry->d_inode);
		break;
	default:
		return -EINVAL;
	}

	if (((start += l->l_start) < 0) || (l->l_len < 0))
		return -EINVAL;
	fl->fl_end = start + l->l_len - 1;
	if (l->l_len > 0 && fl->fl_end < 0)
		return -EOVERFLOW;
	fl->fl_start = start;	/* we record the absolute position */
	if (!l->l_len)
		fl->fl_end = OFFSET_MAX;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_notify = NULL;
	fl->fl_insert = NULL;
	fl->fl_remove = NULL;

	switch (l->l_type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = l->l_type;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
#endif
/* Allocate a file_lock initialised to this type of lease */
static int lease_alloc(struct file *filp, int type, struct file_lock **flp)
{
	struct file_lock *fl = locks_alloc_lock();
	if (fl == NULL)
		return -ENOMEM;

	fl->fl_owner = current->files;
	fl->fl_pid = current->tgid;

	fl->fl_file = filp;
	fl->fl_flags = FL_LEASE;
	if (assign_type(fl, type) != 0) {
		locks_free_lock(fl);
		return -EINVAL;
	}
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_notify = NULL;
	fl->fl_insert = NULL;
	fl->fl_remove = NULL;

	*flp = fl;
	return 0;
}
/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}
/*
 * Check whether two locks have the same owner. The apparently superfluous
 * check for fl_pid enables us to distinguish between locks set by lockd.
 */
static inline int
posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	return (fl1->fl_owner == fl2->fl_owner) &&
	       (fl1->fl_pid == fl2->fl_pid);
}
/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 */
static inline void __locks_delete_block(struct file_lock *waiter)
{
	list_del_init(&waiter->fl_block);
	list_del_init(&waiter->fl_link);
	waiter->fl_next = NULL;
}
static void locks_delete_block(struct file_lock *waiter)
{
	lock_kernel();
	__locks_delete_block(waiter);
	unlock_kernel();
}
/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	if (!list_empty(&waiter->fl_block)) {
		printk(KERN_ERR "locks_insert_block: removing duplicated lock "
			"(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid,
			waiter->fl_start, waiter->fl_end, waiter->fl_type);
		__locks_delete_block(waiter);
	}
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
	waiter->fl_next = blocker;
	list_add(&waiter->fl_link, &blocked_list);
}
/* Wake up processes blocked waiting for blocker.
 * If told to wait then schedule the processes until the block list
 * is empty, otherwise empty the block list ourselves.
 */
static void locks_wake_up_blocks(struct file_lock *blocker)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter = list_entry(blocker->fl_block.next,
				struct file_lock, fl_block);
		__locks_delete_block(waiter);
		if (waiter->fl_notify)
			waiter->fl_notify(waiter);
		else
			wake_up(&waiter->fl_wait);
	}
}
/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;

	if (fl->fl_insert)
		fl->fl_insert(fl);
}
/*
 * Delete a lock and then free it.
 * Wake up processes that are blocked waiting for this lock,
 * notify the FS that the lock has been cleared and
 * finally free the lock.
 */
static void locks_delete_lock(struct file_lock **thisfl_p)
{
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;
	list_del_init(&fl->fl_link);

	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
	if (fl->fl_fasync != NULL) {
		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
		fl->fl_fasync = NULL;
	}

	if (fl->fl_remove)
		fl->fl_remove(fl);

	locks_wake_up_blocks(fl);
	locks_free_lock(fl);
}
/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for shared/exclusive status of overlapping locks.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (sys_fl->fl_type == F_WRLCK)
		return 1;
	if (caller_fl->fl_type == F_WRLCK)
		return 1;
	return 0;
}
/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
		return 0;

	/* Check whether they overlap */
	if (!locks_overlap(caller_fl, sys_fl))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}
/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
		return 0;
	if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
		return 0;

	return (locks_conflict(caller_fl, sys_fl));
}
static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout)
{
	int result = 0;
	DECLARE_WAITQUEUE(wait, current);

	__set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(fl_wait, &wait);
	if (timeout == 0)
		schedule();
	else
		result = schedule_timeout(timeout);
	if (signal_pending(current))
		result = -ERESTARTSYS;
	remove_wait_queue(fl_wait, &wait);
	__set_current_state(TASK_RUNNING);
	return result;
}
/* Block on a lock until it is granted or the timeout expires. */
static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time)
{
	int result;
	locks_insert_block(blocker, waiter);
	result = interruptible_sleep_on_locked(&waiter->fl_wait, time);
	__locks_delete_block(waiter);
	return result;
}
struct file_lock *
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;

	lock_kernel();
	for (cfl = filp->f_dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!IS_POSIX(cfl))
			continue;
		if (posix_locks_conflict(cfl, fl))
			break;
	}
	unlock_kernel();

	return (cfl);
}
/* This function tests for deadlock condition before putting a process to
 * sleep. The detection scheme is no longer recursive. Recursive was neat,
 * but dangerous - we risked stack corruption if the lock data was bad, or
 * if the recursion was too deep for any other reason.
 *
 * We rely on the fact that a task can only be on one lock's wait queue
 * at a time. When we find blocked_task on a wait queue we can re-search
 * with blocked_task equal to that queue's owner, until either blocked_task
 * isn't found, or blocked_task is found on a queue owned by my_task.
 *
 * Note: the above assumption may not be true when handling lock requests
 * from a broken NFS client. But broken NFS clients have a lot more to
 * worry about than proper deadlock detection anyway... --okir
 */
int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	struct list_head *tmp;
	fl_owner_t caller_owner, blocked_owner;
	unsigned int caller_pid, blocked_pid;

	caller_owner = caller_fl->fl_owner;
	caller_pid = caller_fl->fl_pid;
	blocked_owner = block_fl->fl_owner;
	blocked_pid = block_fl->fl_pid;

next_task:
	if (caller_owner == blocked_owner && caller_pid == blocked_pid)
		return 1;
	list_for_each(tmp, &blocked_list) {
		struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
		if ((fl->fl_owner == blocked_owner)
		    && (fl->fl_pid == blocked_pid)) {
			fl = fl->fl_next;
			blocked_owner = fl->fl_owner;
			blocked_pid = fl->fl_pid;
			goto next_task;
		}
	}
	return 0;
}
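
/*
 * Worked example of the search above (illustrative only): suppose task A
 * write-locks byte 0 and task B write-locks byte 1, then A requests byte 1
 * with F_SETLKW and sleeps. If B now requests byte 0, we start with
 * blocked_owner = A, find A on blocked_list waiting on a lock owned by B,
 * re-search with blocked_owner = B, and hit the caller_owner == blocked_owner
 * test: the request would deadlock, so 1 is returned and B's fcntl() fails
 * with EDEADLK instead of sleeping forever.
 */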
/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
 * at the head of the list, but that's secret knowledge known only to
 * flock_lock_file and posix_lock_file.
 */
static int flock_lock_file(struct file *filp, struct file_lock *new_fl)
{
	struct file_lock **before;
	struct inode * inode = filp->f_dentry->d_inode;
	int error = 0;
	int found = 0;

	lock_kernel();
	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (filp != fl->fl_file)
			continue;
		if (new_fl->fl_type == fl->fl_type)
			goto out;
		found = 1;
		locks_delete_lock(before);
		break;
	}

	if (new_fl->fl_type == F_UNLCK)
		goto out;

	/*
	 * If a higher-priority process was blocked on the old file lock,
	 * give it the opportunity to lock the file.
	 */
	if (found)
		yield();

	for_each_lock(inode, before) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl))
			break;
		if (IS_LEASE(fl))
			continue;
		if (!flock_locks_conflict(new_fl, fl))
			continue;
		error = -EAGAIN;
		if (new_fl->fl_flags & FL_SLEEP) {
			locks_insert_block(fl, new_fl);
		}
		goto out;
	}
	locks_insert_lock(&inode->i_flock, new_fl);
	error = 0;

out:
	unlock_kernel();
	return error;
}
static int __posix_lock_file(struct inode *inode, struct file_lock *request)
{
	struct file_lock *fl;
	struct file_lock *new_fl, *new_fl2;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	int error, added = 0;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 */
	new_fl = locks_alloc_lock();
	new_fl2 = locks_alloc_lock();

	lock_kernel();
	if (request->fl_type != F_UNLCK) {
		for_each_lock(inode, before) {
			struct file_lock *fl = *before;
			if (!IS_POSIX(fl))
				continue;
			if (!posix_locks_conflict(request, fl))
				continue;
			error = -EAGAIN;
			if (!(request->fl_flags & FL_SLEEP))
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(request, fl))
				goto out;
			error = -EAGAIN;
			locks_insert_block(fl, request);
			goto out;
		}
	}

	/* If we're just looking for a conflict, we're done. */
	error = 0;
	if (request->fl_flags & FL_ACCESS)
		goto out;

	error = -ENOLCK; /* "no luck" */
	if (!(new_fl && new_fl2))
		goto out;

	/*
	 * We've allocated the new locks in advance, so there are no
	 * errors possible (and no blocking operations) from here on.
	 *
	 * Find the first old lock with the same owner as the new lock.
	 */

	before = &inode->i_flock;

	/* First skip locks owned by other processes. */
	while ((fl = *before) && (!IS_POSIX(fl) ||
				  !posix_same_owner(request, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner. */
	while ((fl = *before) && posix_same_owner(request, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (request->fl_type == fl->fl_type) {
			if (fl->fl_end < request->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start > request->fl_end + 1)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > request->fl_start)
				fl->fl_start = request->fl_start;
			else
				request->fl_start = fl->fl_start;
			if (fl->fl_end < request->fl_end)
				fl->fl_end = request->fl_end;
			else
				request->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before);
				continue;
			}
			request = fl;
			added = 1;
		} else {
			/* Processing for different lock types is a bit
			 * more complex.
			 */
			if (fl->fl_end < request->fl_start)
				goto next_lock;
			if (fl->fl_start > request->fl_end)
				break;
			if (request->fl_type == F_UNLCK)
				added = 1;
			if (fl->fl_start < request->fl_start)
				left = fl;
			/* If the next lock in the list has a higher end
			 * address than the new one, insert the new one here.
			 */
			if (fl->fl_end > request->fl_end) {
				right = fl;
				break;
			}
			if (fl->fl_start >= request->fl_start) {
				/* The new lock completely replaces an old
				 * one (This may happen several times).
				 */
				if (added) {
					locks_delete_lock(before);
					continue;
				}
				/* Replace the old lock with the new one.
				 * Wake up anybody waiting for the old one,
				 * as the change in lock type might satisfy
				 * their needs.
				 */
				locks_wake_up_blocks(fl);
				fl->fl_start = request->fl_start;
				fl->fl_end = request->fl_end;
				fl->fl_type = request->fl_type;
				fl->fl_u = request->fl_u;
				request = fl;
				added = 1;
			}
		}
		/* Go on to next lock.
		 */
	next_lock:
		before = &fl->fl_next;
	}

	error = 0;
	if (!added) {
		if (request->fl_type == F_UNLCK)
			goto out;
		locks_copy_lock(new_fl, request);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock.
			 */
			left = new_fl2;
			new_fl2 = NULL;
			locks_copy_lock(left, right);
			locks_insert_lock(before, left);
		}
		right->fl_start = request->fl_end + 1;
		locks_wake_up_blocks(right);
	}
	if (left) {
		left->fl_end = request->fl_start - 1;
		locks_wake_up_blocks(left);
	}
out:
	unlock_kernel();
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}
/**
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
 */
int posix_lock_file(struct file *filp, struct file_lock *fl)
{
	return __posix_lock_file(filp->f_dentry->d_inode, fl);
}
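
/*
 * Illustrative sketch of the merging behaviour (assuming one owner and one
 * lock type throughout): two fcntl() calls over adjacent ranges leave a
 * single lock behind. Write-locking bytes 0..9 and then 10..19 yields one
 * lock with fl_start = 0, fl_end = 19; unlocking bytes 5..14 afterwards
 * splits it back into 0..4 and 15..19, which is where the second
 * pre-allocated file_lock (new_fl2) in __posix_lock_file() gets used.
 */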
/**
 * locks_mandatory_locked - Check for an active lock
 * @inode: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!IS_POSIX(fl))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	unlock_kernel();
	return fl ? -EAGAIN : 0;
}
/**
 * locks_mandatory_area - Check for a conflicting lock
 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
 *		for shared
 * @inode: the file to check
 * @filp: how the file was opened (if it was)
 * @offset: start of area to check
 * @count: length of area to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_area() and
 * locks_verify_truncate().
 */
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock fl;
	int error;

	locks_init_lock(&fl);
	fl.fl_owner = current->files;
	fl.fl_pid = current->tgid;
	fl.fl_file = filp;
	fl.fl_flags = FL_POSIX | FL_ACCESS;
	if (filp && !(filp->f_flags & O_NONBLOCK))
		fl.fl_flags |= FL_SLEEP;
	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	fl.fl_start = offset;
	fl.fl_end = offset + count - 1;

	for (;;) {
		error = __posix_lock_file(inode, &fl);
		if (error != -EAGAIN)
			break;
		if (!(fl.fl_flags & FL_SLEEP))
			break;
		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
		if (!error) {
			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
				continue;
		}

		locks_delete_block(&fl);
		break;
	}

	return error;
}
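
/*
 * Usage note (illustrative; paths are hypothetical): mandatory locking only
 * engages when the filesystem is mounted with the "mand" option and the
 * file has the setgid bit set with group execute cleared, e.g. from a shell:
 *
 *	mount -o remount,mand /dev/hda2 /mnt
 *	chmod g+s,g-x /mnt/testfile
 *
 * After that, read() and write() calls covering a region locked by another
 * process's POSIX lock are checked here and either sleep or fail with
 * -EAGAIN, depending on O_NONBLOCK.
 */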
/* We already had a lease on this file; just change its type */
static int lease_modify(struct file_lock **before, int arg)
{
	struct file_lock *fl = *before;
	int error = assign_type(fl, arg);

	if (error)
		return error;
	locks_wake_up_blocks(fl);
	if (arg == F_UNLCK) {
		struct file *filp = fl->fl_file;

		filp->f_owner.signum = 0;
		locks_delete_lock(before);
	}
	return 0;
}
static void time_out_leases(struct inode *inode)
{
	struct file_lock **before;
	struct file_lock *fl;

	before = &inode->i_flock;
	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
		if ((fl->fl_break_time == 0)
				|| time_before(jiffies, fl->fl_break_time)) {
			before = &fl->fl_next;
			continue;
		}
		printk(KERN_INFO "lease broken - owner pid = %d\n", fl->fl_pid);
		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
		if (fl == *before)	/* lease_modify may have freed fl */
			before = &fl->fl_next;
	}
}
/**
 * __break_lease - revoke all outstanding leases on file
 * @inode: the inode of the file to return
 * @mode: the open mode (read or write)
 *
 * break_lease (inlined for speed) has checked there already
 * is a lease on this file. Leases are broken on a call to open()
 * or truncate(). This function can sleep unless you
 * specified %O_NONBLOCK to your open().
 */
int __break_lease(struct inode *inode, unsigned int mode)
{
	int error = 0, future;
	struct file_lock *new_fl, *flock;
	struct file_lock *fl;
	int alloc_err;
	unsigned long break_time;
	int i_have_this_lease = 0;

	alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK,
			&new_fl);

	lock_kernel();

	time_out_leases(inode);

	flock = inode->i_flock;
	if ((flock == NULL) || !IS_LEASE(flock))
		goto out;

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
		if (fl->fl_owner == current->files)
			i_have_this_lease = 1;

	if (mode & FMODE_WRITE) {
		/* If we want write access, we have to revoke any lease. */
		future = F_UNLCK | F_INPROGRESS;
	} else if (flock->fl_type & F_INPROGRESS) {
		/* If the lease is already being broken, we just leave it */
		future = flock->fl_type;
	} else if (flock->fl_type & F_WRLCK) {
		/* Downgrade the exclusive lease to a read-only lease. */
		future = F_RDLCK | F_INPROGRESS;
	} else {
		/* the existing lease was read-only, so we can read too. */
		goto out;
	}

	if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) {
		error = alloc_err;
		goto out;
	}

	break_time = 0;
	if (lease_break_time > 0) {
		break_time = jiffies + lease_break_time * HZ;
		if (break_time == 0)
			break_time++;	/* so that 0 means no break time */
	}

	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
		if (fl->fl_type != future) {
			fl->fl_type = future;
			fl->fl_break_time = break_time;
			kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
		}
	}

	if (i_have_this_lease || (mode & O_NONBLOCK)) {
		error = -EWOULDBLOCK;
		goto out;
	}

restart:
	break_time = flock->fl_break_time;
	if (break_time != 0) {
		break_time -= jiffies;
		if (break_time == 0)
			break_time++;
	}
	error = locks_block_on_timeout(flock, new_fl, break_time);
	if (error >= 0) {
		if (error == 0)
			time_out_leases(inode);
		/* Wait for the next lease that has not been broken yet */
		for (flock = inode->i_flock; flock && IS_LEASE(flock);
				flock = flock->fl_next) {
			if (flock->fl_type & F_INPROGRESS)
				goto restart;
		}
		error = 0;
	}

out:
	unlock_kernel();
	if (!alloc_err)
		locks_free_lock(new_fl);
	return error;
}
/*
 * This is to force NFS clients to flush their caches for files with
 * exclusive leases. The justification is that if someone has an
 * exclusive lease, then they could be modifying it.
 */
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
	struct file_lock *flock = inode->i_flock;
	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
		*time = CURRENT_TIME;
	else
		*time = inode->i_mtime;
}
/**
 * fcntl_getlease - Enquire what lease is currently active
 * @filp: the file
 *
 * The value returned by this function will be one of
 * (if no lease break is pending):
 *
 * %F_RDLCK to indicate a shared lease is held.
 *
 * %F_WRLCK to indicate an exclusive lease is held.
 *
 * %F_UNLCK to indicate no lease is held.
 *
 * (if a lease break is pending):
 *
 * %F_RDLCK to indicate an exclusive lease needs to be
 *	changed to a shared lease (or removed).
 *
 * %F_UNLCK to indicate the lease needs to be removed.
 *
 * XXX: sfr & willy disagree over whether F_INPROGRESS
 * should be returned to userspace.
 */
int fcntl_getlease(struct file *filp)
{
	struct file_lock *fl;
	int type = F_UNLCK;

	lock_kernel();
	time_out_leases(filp->f_dentry->d_inode);
	for (fl = filp->f_dentry->d_inode->i_flock; fl && IS_LEASE(fl);
			fl = fl->fl_next) {
		if (fl->fl_file == filp) {
			type = fl->fl_type & ~F_INPROGRESS;
			break;
		}
	}
	unlock_kernel();
	return type;
}
/**
 * fcntl_setlease - sets a lease on an open file
 * @fd: open file descriptor
 * @filp: file pointer
 * @arg: type of lease to obtain
 *
 * Call this fcntl to establish a lease on the file.
 * Note that you also need to call %F_SETSIG to
 * receive a signal when the lease is broken.
 */
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
	struct file_lock *fl, **before, **my_before = NULL;
	struct dentry *dentry;
	struct inode *inode;
	int error, rdlease_count = 0, wrlease_count = 0;

	dentry = filp->f_dentry;
	inode = dentry->d_inode;

	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
		return -EACCES;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	error = security_file_lock(filp, arg);
	if (error)
		return error;

	lock_kernel();

	time_out_leases(inode);

	/*
	 * FIXME: What about F_RDLCK and files open for writing?
	 */
	error = -EAGAIN;
	if ((arg == F_WRLCK)
	    && ((atomic_read(&dentry->d_count) > 1)
		|| (atomic_read(&inode->i_count) > 1)))
		goto out_unlock;

	/*
	 * At this point, we know that if there is an exclusive
	 * lease on this file, then we hold it on this filp
	 * (otherwise our open of this file would have blocked).
	 * And if we are trying to acquire an exclusive lease,
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
	for (before = &inode->i_flock;
			((fl = *before) != NULL) && IS_LEASE(fl);
			before = &fl->fl_next) {
		if (fl->fl_file == filp)
			my_before = before;
		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
			/*
			 * Someone is in the process of opening this
			 * file for writing so we may not take an
			 * exclusive lease on it.
			 */
			wrlease_count++;
		else
			rdlease_count++;
	}

	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
		goto out_unlock;

	if (my_before != NULL) {
		error = lease_modify(my_before, arg);
		goto out_unlock;
	}

	error = 0;
	if (arg == F_UNLCK)
		goto out_unlock;

	error = -EINVAL;
	if (!leases_enable)
		goto out_unlock;

	error = lease_alloc(filp, arg, &fl);
	if (error)
		goto out_unlock;

	error = fasync_helper(fd, filp, 1, &fl->fl_fasync);
	if (error < 0) {
		locks_free_lock(fl);
		goto out_unlock;
	}
	fl->fl_next = *before;
	*before = fl;
	list_add(&fl->fl_link, &file_lock_list);

	error = f_setown(filp, current->tgid, 1);
out_unlock:
	unlock_kernel();
	return error;
}
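
/*
 * Illustrative userspace sketch (not kernel code; the signal choice is an
 * assumption): taking a read lease and arranging to hear about lease breaks:
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);
 *	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1)
 *		perror("F_SETLEASE");
 *
 * When another process open()s the file for writing, the lease holder
 * receives the signal and has lease_break_time seconds to flush its state
 * and call fcntl(fd, F_SETLEASE, F_UNLCK) before the kernel revokes the
 * lease via time_out_leases().
 */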
/**
 * sys_flock: - flock() system call.
 * @fd: the file descriptor to lock.
 * @cmd: the type of lock to apply.
 *
 * Apply a %FL_FLOCK style lock to an open file descriptor.
 * The @cmd can be one of
 *
 * %LOCK_SH -- a shared lock.
 *
 * %LOCK_EX -- an exclusive lock.
 *
 * %LOCK_UN -- remove an existing lock.
 *
 * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes.
 *
 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 * processes read and write access respectively.
 */
asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
{
	struct file *filp;
	struct file_lock *lock;
	int error;

	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	if ((cmd != LOCK_UN) && !(cmd & LOCK_MAND) && !(filp->f_mode & 3))
		goto out_putf;

	error = flock_make_lock(filp, &lock, cmd);
	if (error)
		goto out_putf;

	error = security_file_lock(filp, cmd);
	if (error)
		goto out_free;

	for (;;) {
		error = flock_lock_file(filp, lock);
		if ((error != -EAGAIN) || (cmd & LOCK_NB))
			break;
		error = wait_event_interruptible(lock->fl_wait, !lock->fl_next);
		if (!error)
			continue;

		locks_delete_block(lock);
		break;
	}

out_free:
	if (list_empty(&lock->fl_link))
		locks_free_lock(lock);

out_putf:
	fput(filp);
out:
	return error;
}
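
/*
 * Illustrative userspace sketch (not kernel code): the simplest use of the
 * call above, an exclusive lock with a non-blocking fallback:
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK)
 *		fprintf(stderr, "lock is busy\n");
 *	...
 *	flock(fd, LOCK_UN);
 *
 * Without LOCK_NB the caller sleeps on lock->fl_wait above until the
 * conflicting FL_FLOCK lock goes away.
 */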
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(struct file *filp, struct flock __user *l)
{
	struct file_lock *fl, file_lock;
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	if (filp->f_op && filp->f_op->lock) {
		error = filp->f_op->lock(filp, F_GETLK, &file_lock);
		if (error < 0)
			goto out;
		else if (error == LOCK_USE_CLNT)
			/* Bypass for NFS with no locking - 2.0.36 compat */
			fl = posix_test_lock(filp, &file_lock);
		else
			fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock);
	} else {
		fl = posix_test_lock(filp, &file_lock);
	}

	flock.l_type = F_UNLCK;
	if (fl != NULL) {
		flock.l_pid = fl->fl_pid;
#if BITS_PER_LONG == 32
		/*
		 * Make sure we can represent the posix lock via
		 * legacy 32bit flock.
		 */
		error = -EOVERFLOW;
		if (fl->fl_start > OFFT_OFFSET_MAX)
			goto out;
		if ((fl->fl_end != OFFSET_MAX)
		    && (fl->fl_end > OFFT_OFFSET_MAX))
			goto out;
#endif
		flock.l_start = fl->fl_start;
		flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
			fl->fl_end - fl->fl_start + 1;
		flock.l_type = fl->fl_type;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}
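
/*
 * Illustrative userspace sketch (not kernel code): querying who would block
 * a write lock on the whole file. On return, l_type is either F_UNLCK (no
 * conflict) or describes the first conflicting lock, including its holder:
 *
 *	struct flock l = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	fcntl(fd, F_GETLK, &l);
 *	if (l.l_type != F_UNLCK)
 *		printf("blocked by pid %d\n", (int) l.l_pid);
 */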
/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(struct file *filp, unsigned int cmd, struct flock __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock flock;
	struct inode *inode;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (IS_MANDLOCK(inode) &&
	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
		struct address_space *mapping = inode->i_mapping;

		if (!list_empty(&mapping->i_mmap_shared)) {
			error = -EAGAIN;
			goto out;
		}
	}

	error = flock_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = security_file_lock(filp, file_lock->fl_type);
	if (error)
		goto out;

	if (filp->f_op && filp->f_op->lock != NULL) {
		error = filp->f_op->lock(filp, cmd, file_lock);
		goto out;
	}

	for (;;) {
		error = __posix_lock_file(inode, file_lock);
		if ((error != -EAGAIN) || (cmd == F_SETLK))
			break;
		error = wait_event_interruptible(file_lock->fl_wait,
				!file_lock->fl_next);
		if (!error)
			continue;

		locks_delete_block(file_lock);
		break;
	}

out:
	locks_free_lock(file_lock);
	return error;
}
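
/*
 * Illustrative userspace sketch (not kernel code): F_SETLK fails
 * immediately with EAGAIN/EACCES on conflict, while F_SETLKW takes the
 * wait_event_interruptible() path above and sleeps until the region is
 * free:
 *
 *	struct flock l = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,		(0 means the whole file)
 *	};
 *	if (fcntl(fd, F_SETLKW, &l) == -1)
 *		perror("F_SETLKW");
 */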
#if BITS_PER_LONG == 32
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
{
	struct file_lock *fl, file_lock;
	struct flock64 flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = flock64_to_posix_lock(filp, &file_lock, &flock);
	if (error)
		goto out;

	if (filp->f_op && filp->f_op->lock) {
		error = filp->f_op->lock(filp, F_GETLK, &file_lock);
		if (error < 0)
			goto out;
		else if (error == LOCK_USE_CLNT)
			/* Bypass for NFS with no locking - 2.0.36 compat */
			fl = posix_test_lock(filp, &file_lock);
		else
			fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock);
	} else {
		fl = posix_test_lock(filp, &file_lock);
	}

	flock.l_type = F_UNLCK;
	if (fl != NULL) {
		flock.l_pid = fl->fl_pid;
		flock.l_start = fl->fl_start;
		flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
			fl->fl_end - fl->fl_start + 1;
		flock.l_type = fl->fl_type;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;
out:
	return error;
}
/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
{
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock64 flock;
	struct inode *inode;
	int error;

	if (file_lock == NULL)
		return -ENOLCK;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	inode = filp->f_dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (IS_MANDLOCK(inode) &&
	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
		struct address_space *mapping = inode->i_mapping;

		if (!list_empty(&mapping->i_mmap_shared)) {
			error = -EAGAIN;
			goto out;
		}
	}

	error = flock64_to_posix_lock(filp, file_lock, &flock);
	if (error)
		goto out;
	if (cmd == F_SETLKW64) {
		file_lock->fl_flags |= FL_SLEEP;
	}

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out;
		break;
	case F_UNLCK:
		break;
	default:
		error = -EINVAL;
		goto out;
	}

	error = security_file_lock(filp, file_lock->fl_type);
	if (error)
		goto out;

	if (filp->f_op && filp->f_op->lock != NULL) {
		error = filp->f_op->lock(filp, cmd, file_lock);
		goto out;
	}

	for (;;) {
		error = __posix_lock_file(inode, file_lock);
		if ((error != -EAGAIN) || (cmd == F_SETLK64))
			break;
		error = wait_event_interruptible(file_lock->fl_wait,
				!file_lock->fl_next);
		if (!error)
			continue;

		locks_delete_block(file_lock);
		break;
	}

out:
	locks_free_lock(file_lock);
	return error;
}
#endif /* BITS_PER_LONG == 32 */
/*
 * This function is called when the file is being removed
 * from the task's fd array. POSIX locks belonging to this task
 * are deleted at this time.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	struct file_lock lock, **before;

	/*
	 * If there are no locks held on this file, we don't need to call
	 * posix_lock_file(). Another process could be setting a lock on this
	 * file at the same time, but we wouldn't remove that lock anyway.
	 */
	before = &filp->f_dentry->d_inode->i_flock;
	if (*before == NULL)
		return;

	lock.fl_type = F_UNLCK;
	lock.fl_flags = FL_POSIX;
	lock.fl_start = 0;
	lock.fl_end = OFFSET_MAX;
	lock.fl_owner = owner;
	lock.fl_pid = current->tgid;
	lock.fl_file = filp;

	if (filp->f_op && filp->f_op->lock != NULL) {
		filp->f_op->lock(filp, F_SETLK, &lock);
		/* Ignore any error -- we must remove the locks anyway */
	}

	/* Can't use posix_lock_file here; we need to remove it no matter
	 * which pid we have.
	 */
	lock_kernel();
	while (*before != NULL) {
		struct file_lock *fl = *before;
		if (IS_POSIX(fl) && (fl->fl_owner == owner)) {
			locks_delete_lock(before);
			continue;
		}
		before = &fl->fl_next;
	}
	unlock_kernel();
}
/*
 * This function is called on the last close of an open file.
 */
void locks_remove_flock(struct file *filp)
{
	struct inode * inode = filp->f_dentry->d_inode;
	struct file_lock *fl;
	struct file_lock **before;

	if (!inode->i_flock)
		return;

	lock_kernel();
	before = &inode->i_flock;

	while ((fl = *before) != NULL) {
		if (fl->fl_file == filp) {
			if (IS_FLOCK(fl)) {
				locks_delete_lock(before);
				continue;
			}
			if (IS_LEASE(fl)) {
				lease_modify(before, F_UNLCK);
				continue;
			}
		}
		before = &fl->fl_next;
	}
	unlock_kernel();
}
/**
 * posix_block_lock - blocks waiting for a file lock
 * @blocker: the lock which is blocking
 * @waiter: the lock which conflicts and has to wait
 *
 * lockd needs to block waiting for locks.
 */
void
posix_block_lock(struct file_lock *blocker, struct file_lock *waiter)
{
	locks_insert_block(blocker, waiter);
}
/**
 * posix_unblock_lock - stop waiting for a file lock
 * @filp: how the file was opened
 * @waiter: the lock which was waiting
 *
 * lockd needs to block waiting for locks.
 */
void
posix_unblock_lock(struct file *filp, struct file_lock *waiter)
{
	/*
	 * A remote machine may cancel the lock request after it's been
	 * granted locally. If that happens, we need to delete the lock.
	 */
	if (waiter->fl_next) {
		__locks_delete_block(waiter);
	} else {
		waiter->fl_type = F_UNLCK;
		posix_lock_file(filp, waiter);
	}
}
static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx)
{
	struct inode *inode = NULL;

	if (fl->fl_file != NULL)
		inode = fl->fl_file->f_dentry->d_inode;

	out += sprintf(out, "%d:%s ", id, pfx);
	if (IS_POSIX(fl)) {
		out += sprintf(out, "%6s %s ",
			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
			     (inode == NULL) ? "*NOINODE*" :
			     (IS_MANDLOCK(inode) &&
			      (inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID) ?
			     "MANDATORY" : "ADVISORY ");
	} else if (IS_FLOCK(fl)) {
		if (fl->fl_type & LOCK_MAND) {
			out += sprintf(out, "FLOCK  MSNFS     ");
		} else {
			out += sprintf(out, "FLOCK  ADVISORY  ");
		}
	} else if (IS_LEASE(fl)) {
		out += sprintf(out, "LEASE  ");
		if (fl->fl_type & F_INPROGRESS)
			out += sprintf(out, "BREAKING  ");
		else if (fl->fl_file)
			out += sprintf(out, "ACTIVE    ");
		else
			out += sprintf(out, "BREAKER   ");
	} else {
		out += sprintf(out, "UNKNOWN UNKNOWN  ");
	}
	if (fl->fl_type & LOCK_MAND) {
		out += sprintf(out, "%s ",
			       (fl->fl_type & LOCK_READ)
			       ? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
			       : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
	} else {
		out += sprintf(out, "%s ",
			       (fl->fl_type & F_INPROGRESS)
			       ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
			       : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
	}
	if (inode) {
#if WE_CAN_BREAK_LSLK_NOW
		out += sprintf(out, "%d %s:%ld ", fl->fl_pid,
				inode->i_sb->s_id, inode->i_ino);
#else
		/* userspace relies on this representation of dev_t ;-( */
		out += sprintf(out, "%d %02x:%02x:%ld ", fl->fl_pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
#endif
	} else {
		out += sprintf(out, "%d <none>:0 ", fl->fl_pid);
	}
	if (IS_POSIX(fl)) {
		if (fl->fl_end == OFFSET_MAX)
			out += sprintf(out, "%Ld EOF\n", fl->fl_start);
		else
			out += sprintf(out, "%Ld %Ld\n", fl->fl_start,
					fl->fl_end);
	} else {
		out += sprintf(out, "0 EOF\n");
	}
}
static void move_lock_status(char **p, off_t* pos, off_t offset)
{
	int len;
	len = strlen(*p);
	if(*pos >= offset) {
		/* the complete line is valid */
		*p += len;
		*pos += len;
		return;
	}
	if(*pos+len > offset) {
		/* use the second part of the line */
		int i = offset-*pos;
		memmove(*p,*p+i,len-i);
		*p += len-i;
		*pos += len;
		return;
	}
	/* discard the complete line */
	*pos += len;
}
/**
 * get_locks_status - reports lock usage in /proc/locks
 * @buffer: address in userspace to write into
 * @start: ?
 * @offset: how far we are through the buffer
 * @length: how much to read
 */
int get_locks_status(char *buffer, char **start, off_t offset, int length)
{
	struct list_head *tmp;
	char *q = buffer;
	off_t pos = 0;
	int i = 0;

	lock_kernel();
	list_for_each(tmp, &file_lock_list) {
		struct list_head *btmp;
		struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
		lock_get_status(q, fl, ++i, "");
		move_lock_status(&q, &pos, offset);

		if(pos >= offset+length)
			goto done;

		list_for_each(btmp, &fl->fl_block) {
			struct file_lock *bfl = list_entry(btmp,
					struct file_lock, fl_block);
			lock_get_status(q, bfl, i, " ->");
			move_lock_status(&q, &pos, offset);

			if(pos >= offset+length)
				goto done;
		}
	}
done:
	unlock_kernel();
	*start = buffer;
	if(q-buffer < length)
		return (q-buffer);
	return length;
}
/**
 * lock_may_read - checks that the region is free of locks
 * @inode: the inode that is being read
 * @start: the first byte to read
 * @len: the number of bytes to read
 *
 * Emulates Windows locking requirements. Whole-file
 * mandatory locks (share modes) can prohibit a read and
 * byte-range POSIX locks can prohibit a read if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			if (fl->fl_type == F_RDLCK)
				continue;
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_READ)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	unlock_kernel();
	return result;
}
/**
 * lock_may_write - checks that the region is free of locks
 * @inode: the inode that is being written
 * @start: the first byte to write
 * @len: the number of bytes to write
 *
 * Emulates Windows locking requirements. Whole-file
 * mandatory locks (share modes) can prohibit a write and
 * byte-range POSIX locks can prohibit a write if they overlap.
 *
 * N.B. this function is only ever called
 * from knfsd and ownership of locks is never checked.
 */
int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
{
	struct file_lock *fl;
	int result = 1;
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (IS_POSIX(fl)) {
			if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
				continue;
		} else if (IS_FLOCK(fl)) {
			if (!(fl->fl_type & LOCK_MAND))
				continue;
			if (fl->fl_type & LOCK_WRITE)
				continue;
		} else
			continue;
		result = 0;
		break;
	}
	unlock_kernel();
	return result;
}
static int __init filelock_init(void)
{
	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, 0, init_once, NULL);
	if (!filelock_cache)
		panic("cannot create file lock slab cache");
	return 0;
}

module_init(filelock_init)
EXPORT_SYMBOL(file_lock_list);
EXPORT_SYMBOL(locks_init_lock);
EXPORT_SYMBOL(locks_copy_lock);
EXPORT_SYMBOL(posix_lock_file);
EXPORT_SYMBOL(posix_test_lock);
EXPORT_SYMBOL(posix_block_lock);
EXPORT_SYMBOL(posix_unblock_lock);
EXPORT_SYMBOL(posix_locks_deadlock);
EXPORT_SYMBOL(locks_mandatory_area);