 *  Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
 *  Doug Evans (dje@spiff.uucp), August 07, 1992
 *
 *  Deadlock detection added.
 *  FIXME: one thing isn't handled yet:
 *	- mandatory locks (requires lots of changes elsewhere)
 *  Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
 *
 *  Miscellaneous edits, and a total rewrite of posix_lock_file() code.
 *  Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
 *
 *  Converted file_lock_table to a linked list from an array, which eliminates
 *  the limits on how many active file locks are open.
 *  Chad Page (pageone@netcom.com), November 27, 1994
 *
 *  Removed dependency on file descriptors. dup()'ed file descriptors now
 *  get the same locks as the original file descriptors, and a close() on
 *  any file descriptor removes ALL the locks on the file for the current
 *  process. Since locks still depend on the process id, locks are inherited
 *  after an exec() but not after a fork(). This agrees with POSIX, and both
 *  BSD and SVR4 practice.
 *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 *  Scrapped free list which is redundant now that we allocate locks
 *  dynamically with kmalloc()/kfree().
 *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 *  Implemented two lock personalities - FL_FLOCK and FL_POSIX.
 *  (An illustrative user-space example of both appears after this changelog.)
 *
 *  FL_POSIX locks are created with calls to fcntl() and lockf() through the
 *  fcntl() system call. They have the semantics described above.
 *
 *  FL_FLOCK locks are created with calls to flock(), through the flock()
 *  system call, which is new. Old C libraries implement flock() via fcntl()
 *  and will continue to use the old, broken implementation.
 *
 *  FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
 *  with a file pointer (filp). As a result they can be shared by a parent
 *  process and its children after a fork(). They are removed when the last
 *  file descriptor referring to the file pointer is closed (unless explicitly
 *  unlocked).
 *
 *  FL_FLOCK locks never deadlock; an existing lock is always removed before
 *  upgrading from shared to exclusive (or vice versa). When this happens
 *  any processes blocked by the current lock are woken up and allowed to
 *  run before the new lock is applied.
 *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
 *  Removed some race conditions in flock_lock_file(), marked other possible
 *  races. Just grep for FIXME to see them.
 *  Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
 *
 *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
 *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
 *  once we've checked for blocking and deadlocking.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
 *
 *  Initial implementation of mandatory locks. SunOS turned out to be
 *  a rotten model, so I implemented the "obvious" semantics.
 *  See 'linux/Documentation/mandatory.txt' for details.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
 *
 *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
 *  check if a file has mandatory locks, used by mmap(), open() and creat() to
 *  see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
 *  Manual pages.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
 *
 *  Tidied up block list handling. Added '/proc/locks' interface.
 *  Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
 *
 *  Fixed deadlock condition for pathological code that mixes calls to
 *  flock() and fcntl().
 *  Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
 *
 *  Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
 *  for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
 *  guarantee sensible behaviour in the case where file system modules might
 *  be compiled with different options than the kernel itself.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
 *  (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
 *
 *  Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
 *  locks. Changed process synchronisation to avoid dereferencing locks that
 *  have already been freed.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
 *
 *  Made the block list a circular list to minimise searching in the list.
 *  Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
 *
 *  Made mandatory locking a mount option. Default is not to allow mandatory
 *  locking.
 *  Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
 *
 *  Some adaptations for NFS support.
 *  Olaf Kirch (okir@monad.swb.de), Dec 1996.
 *
 *  Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
 *  Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
 */
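/*
 * Illustrative user-space sketch of the two personalities described in the
 * changelog above (an editorial example, not part of the kernel build; the
 * file name and byte range are arbitrary). FL_POSIX locks are byte-range
 * locks requested through fcntl(), FL_FLOCK locks are whole-file locks
 * requested through flock():
 *
 *	#include <fcntl.h>
 *	#include <sys/file.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/tmp/example", O_RDWR);
 *
 *	struct flock fl;
 *	fl.l_type = F_WRLCK;		// exclusive lock ...
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 100;			// ... on bytes 0-99 only
 *	fcntl(fd, F_SETLKW, &fl);	// FL_POSIX: wait until granted
 *
 *	flock(fd, LOCK_EX);		// FL_FLOCK: exclusive whole-file lock
 *
 * Closing any descriptor for the file drops the process' POSIX locks on it,
 * while the flock() lock survives until the last descriptor sharing the
 * file pointer is closed (or the lock is explicitly released).
 */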
#include <linux/malloc.h>
#include <linux/file.h>
#include <linux/smp_lock.h>
#include <linux/init.h>

#include <asm/uaccess.h>
LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);

static kmem_cache_t *filelock_cache;

/* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(void)
{
	struct file_lock *fl;
	fl = kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
	return fl;
}
/* Free a lock which is not in use. */
static inline void locks_free_lock(struct file_lock *fl)
{
	if (waitqueue_active(&fl->fl_wait))
		panic("Attempting to free lock with active wait queue");

	if (!list_empty(&fl->fl_block))
		panic("Attempting to free lock with active block list");

	if (!list_empty(&fl->fl_link))
		panic("Attempting to free lock on active lock list");

	kmem_cache_free(filelock_cache, fl);
}
/*
 * Initialises the fields of the file lock which are invariant for
 * all file locks.
 */
static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags)
{
	struct file_lock *lock = (struct file_lock *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) !=
					SLAB_CTOR_CONSTRUCTOR)
		return;

	lock->fl_next = NULL;
	INIT_LIST_HEAD(&lock->fl_link);
	INIT_LIST_HEAD(&lock->fl_block);
	init_waitqueue_head(&lock->fl_wait);
}
/*
 * Initialize a new lock from an existing file_lock structure.
 */
static void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	new->fl_owner = fl->fl_owner;
	new->fl_pid = fl->fl_pid;
	new->fl_file = fl->fl_file;
	new->fl_flags = fl->fl_flags;
	new->fl_type = fl->fl_type;
	new->fl_start = fl->fl_start;
	new->fl_end = fl->fl_end;
	new->fl_notify = fl->fl_notify;
	new->fl_insert = fl->fl_insert;
	new->fl_remove = fl->fl_remove;
	new->fl_u = fl->fl_u;
}
/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static struct file_lock *flock_make_lock(struct file *filp, unsigned int type)
{
	struct file_lock *fl = locks_alloc_lock();
	if (fl == NULL)
		return NULL;

	fl->fl_owner = NULL;
	fl->fl_file = filp;
	fl->fl_pid = current->pid;
	fl->fl_flags = FL_FLOCK;
	fl->fl_type = type;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;
	fl->fl_notify = NULL;
	fl->fl_insert = NULL;
	fl->fl_remove = NULL;

	return fl;
}
/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
 * style lock.
 */
static int posix_make_lock(struct file *filp, struct file_lock *fl,
			   struct flock *l)
{
	loff_t start;

	switch (l->l_whence) {
	case 0: /* SEEK_SET */
		start = 0;
		break;
	case 1: /* SEEK_CUR */
		start = filp->f_pos;
		break;
	case 2: /* SEEK_END */
		start = filp->f_dentry->d_inode->i_size;
		break;
	default:
		return (0);
	}

	if (((start += l->l_start) < 0) || (l->l_len < 0))
		return (0);
	fl->fl_end = start + l->l_len - 1;
	if (l->l_len > 0 && fl->fl_end < 0)
		return (0);
	fl->fl_start = start;	/* we record the absolute position */
	if (!l->l_len)
		fl->fl_end = OFFSET_MAX;

	fl->fl_owner = current->files;
	fl->fl_pid = current->pid;
	fl->fl_file = filp;
	fl->fl_flags = FL_POSIX;
	fl->fl_notify = NULL;
	fl->fl_insert = NULL;
	fl->fl_remove = NULL;

	switch (l->l_type) {
	case F_RDLCK:
	case F_WRLCK:
	case F_UNLCK:
		fl->fl_type = l->l_type;
		break;
	default:
		return (0);
	}

	return (1);
}
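/* Worked example for posix_make_lock() (values chosen arbitrarily): with
 * l_whence = SEEK_SET and l_start = 100, a length of l_len = 50 yields the
 * inclusive range fl_start = 100, fl_end = 100 + 50 - 1 = 149, while
 * l_len = 0 means "to end of file" and yields fl_end = OFFSET_MAX.
 */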
/* Check if two locks overlap each other.
 */
static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
{
	return ((fl1->fl_end >= fl2->fl_start) &&
		(fl2->fl_end >= fl1->fl_start));
}
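/* Example: since both ends are inclusive, [5,10] and [10,20] overlap
 * (10 >= 10 and 20 >= 5), whereas [5,9] and [10,20] do not (9 < 10).
 */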
/*
 * Check whether two locks have the same owner.
 * N.B. Do we need the test on PID as well as owner?
 * (Clone tasks should be considered as one "owner".)
 */
static inline int
locks_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
	return (fl1->fl_owner == fl2->fl_owner) &&
	       (fl1->fl_pid == fl2->fl_pid);
}
/* Remove waiter from blocker's block list.
 * When blocker ends up pointing to itself then the list is empty.
 */
static void locks_delete_block(struct file_lock *waiter)
{
	list_del(&waiter->fl_block);
	INIT_LIST_HEAD(&waiter->fl_block);
	list_del(&waiter->fl_link);
	INIT_LIST_HEAD(&waiter->fl_link);
}
/* Insert waiter into blocker's block list.
 * We use a circular list so that processes can be easily woken up in
 * the order they blocked. The documentation doesn't require this but
 * it seems like the reasonable thing to do.
 */
static void locks_insert_block(struct file_lock *blocker,
			       struct file_lock *waiter)
{
	if (!list_empty(&waiter->fl_block)) {
		printk(KERN_ERR "locks_insert_block: removing duplicated lock "
			"(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid,
			waiter->fl_start, waiter->fl_end, waiter->fl_type);
		locks_delete_block(waiter);
	}
	list_add_tail(&waiter->fl_block, &blocker->fl_block);
//	list_add(&waiter->fl_link, &blocked_list);
//	waiter->fl_next = blocker;
}
/* Wake up processes blocked waiting for blocker.
 * If told to wait then schedule the processes until the block list
 * is empty, otherwise empty the block list ourselves.
 */
static void locks_wake_up_blocks(struct file_lock *blocker, unsigned int wait)
{
	while (!list_empty(&blocker->fl_block)) {
		struct file_lock *waiter = list_entry(blocker->fl_block.next, struct file_lock, fl_block);
		/* N.B. Is it possible for the notify function to block?? */
		if (waiter->fl_notify)
			waiter->fl_notify(waiter);
		wake_up(&waiter->fl_wait);
		if (wait) {
			/* Let the blocked process remove waiter from the
			 * block list when it gets scheduled.
			 */
			current->policy |= SCHED_YIELD;
			schedule();
		} else {
			/* Remove waiter from the block list, because by the
			 * time it wakes up blocker won't exist any more.
			 */
			locks_delete_block(waiter);
		}
	}
}
/* Insert file lock fl into an inode's lock list at the position indicated
 * by pos. At the same time add the lock to the global file lock list.
 */
static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
{
	list_add(&fl->fl_link, &file_lock_list);

	/* insert into file's list */
	fl->fl_next = *pos;
	*pos = fl;

	if (fl->fl_insert)
		fl->fl_insert(fl);
}
/* Delete a lock and free it.
 * First remove our lock from the active lock lists. Then call
 * locks_wake_up_blocks() to wake up processes that are blocked
 * waiting for this lock. Finally free the lock structure.
 */
static void locks_delete_lock(struct file_lock **thisfl_p, unsigned int wait)
{
	int (*lock)(struct file *, int, struct file_lock *);
	struct file_lock *fl = *thisfl_p;

	*thisfl_p = fl->fl_next;
	fl->fl_next = NULL;

	list_del(&fl->fl_link);
	INIT_LIST_HEAD(&fl->fl_link);

	if (fl->fl_remove)
		fl->fl_remove(fl);

	locks_wake_up_blocks(fl, wait);
	lock = fl->fl_file->f_op->lock;
	if (lock) {
		fl->fl_type = F_UNLCK;
		lock(fl->fl_file, F_SETLK, fl);
	}
	locks_free_lock(fl);
}
/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
 * checks for overlapping locks and shared/exclusive status.
 */
static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	if (!locks_overlap(caller_fl, sys_fl))
		return (0);

	switch (caller_fl->fl_type) {
	case F_RDLCK:
		return (sys_fl->fl_type == F_WRLCK);
	case F_WRLCK:
		return (1);
	default:
		printk("locks_conflict(): impossible lock type - %d\n",
		       caller_fl->fl_type);
		break;
	}
	return (0);	/* This should never happen */
}
/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
 * checking before calling the locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* POSIX locks owned by the same process do not conflict with
	 * each other.
	 */
	if (!(sys_fl->fl_flags & FL_POSIX) ||
	    locks_same_owner(caller_fl, sys_fl))
		return (0);

	return (locks_conflict(caller_fl, sys_fl));
}
/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
 * checking before calling the locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
	/* FLOCK locks referring to the same filp do not conflict with
	 * each other.
	 */
	if (!(sys_fl->fl_flags & FL_FLOCK) ||
	    (caller_fl->fl_file == sys_fl->fl_file))
		return (0);

	return (locks_conflict(caller_fl, sys_fl));
}
struct file_lock *
posix_test_lock(struct file *filp, struct file_lock *fl)
{
	struct file_lock *cfl;

	for (cfl = filp->f_dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
		if (!(cfl->fl_flags & FL_POSIX))
			continue;
		if (posix_locks_conflict(cfl, fl))
			break;
	}

	return (cfl);
}
/* This function tests for deadlock condition before putting a process to
 * sleep. The detection scheme is no longer recursive. Recursive was neat,
 * but dangerous - we risked stack corruption if the lock data was bad, or
 * if the recursion was too deep for any other reason.
 *
 * We rely on the fact that a task can only be on one lock's wait queue
 * at a time. When we find blocked_task on a wait queue we can re-search
 * with blocked_task equal to that queue's owner, until either blocked_task
 * isn't found, or blocked_task is found on a queue owned by my_task.
 *
 * Note: the above assumption may not be true when handling lock requests
 * from a broken NFS client. But broken NFS clients have a lot more to
 * worry about than proper deadlock detection anyway... --okir
 */
static int posix_locks_deadlock(struct file_lock *caller_fl,
				struct file_lock *block_fl)
{
	struct list_head *tmp;
	void *caller_owner, *blocked_owner;
	unsigned int caller_pid, blocked_pid;

	caller_owner = caller_fl->fl_owner;
	caller_pid = caller_fl->fl_pid;
	blocked_owner = block_fl->fl_owner;
	blocked_pid = block_fl->fl_pid;

next_task:
	if (caller_owner == blocked_owner && caller_pid == blocked_pid)
		return 1;
	list_for_each(tmp, &file_lock_list) {
		struct list_head *btmp;
		struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
		if (fl->fl_owner == NULL || list_empty(&fl->fl_block))
			continue;
		list_for_each(btmp, &fl->fl_block) {
			struct file_lock *bfl = list_entry(btmp, struct file_lock, fl_block);
			if (bfl->fl_owner == blocked_owner &&
			    bfl->fl_pid == blocked_pid) {
				if (fl->fl_owner == caller_owner &&
				    fl->fl_pid == caller_pid) {
					return 1;
				}
				blocked_owner = fl->fl_owner;
				blocked_pid = fl->fl_pid;
				goto next_task;
			}
		}
	}
	return 0;
}
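/* Example of the chain walk above, with hypothetical owners A, B and C:
 * if A requests a lock held by B, we scan for a lock that B is itself
 * blocked on. If B waits on a lock of C's, we re-search with C as the
 * blocked owner, and so on; as soon as the chain leads back to a lock
 * owned by A the request is refused as a deadlock.
 */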
int locks_mandatory_locked(struct inode *inode)
{
	fl_owner_t owner = current->files;
	struct file_lock *fl;

	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
	lock_kernel();
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & FL_POSIX))
			continue;
		if (fl->fl_owner != owner)
			break;
	}
	unlock_kernel();
	return fl ? -EAGAIN : 0;
}
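/* Mandatory locking only applies when the filesystem is mounted with the
 * mandatory locking option and the file's setgid bit is set while its
 * group-execute bit is clear - the (i_mode & (S_ISGID | S_IXGRP)) == S_ISGID
 * test used throughout this file. From user space that mode is set with,
 * for example:
 *
 *	chmod g+s,g-x somefile
 */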
int locks_mandatory_area(int read_write, struct inode *inode,
			 struct file *filp, loff_t offset,
			 size_t count)
{
	struct file_lock *fl;
	struct file_lock *new_fl = locks_alloc_lock();
	int error;

	if (new_fl == NULL)
		return -ENOMEM;

	new_fl->fl_owner = current->files;
	new_fl->fl_pid = current->pid;
	new_fl->fl_file = filp;
	new_fl->fl_flags = FL_POSIX | FL_ACCESS;
	new_fl->fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
	new_fl->fl_start = offset;
	new_fl->fl_end = offset + count - 1;

	error = 0;
	lock_kernel();

repeat:
	/* Search the lock list for this inode for locks that conflict with
	 * the proposed read/write.
	 */
	for (fl = inode->i_flock; ; fl = fl->fl_next) {
		error = 0;
		if (fl == NULL)
			break;
		if (!(fl->fl_flags & FL_POSIX))
			continue;
		/* Block for writes against a "read" lock,
		 * and both reads and writes against a "write" lock.
		 */
		if (posix_locks_conflict(new_fl, fl)) {
			error = -EAGAIN;
			if (filp && (filp->f_flags & O_NONBLOCK))
				break;
			error = -ERESTARTSYS;
			if (signal_pending(current))
				break;
			error = -EDEADLK;
			if (posix_locks_deadlock(new_fl, fl))
				break;

			locks_insert_block(fl, new_fl);
			interruptible_sleep_on(&new_fl->fl_wait);
			locks_delete_block(new_fl);

			/*
			 * If we've been sleeping someone might have
			 * changed the permissions behind our back.
			 */
			if ((inode->i_mode & (S_ISGID | S_IXGRP)) != S_ISGID)
				break;
			goto repeat;
		}
	}
	locks_free_lock(new_fl);
	unlock_kernel();
	return error;
}
/* Try to create a FLOCK lock on filp. We always insert new FLOCK locks at
 * the head of the list, but that's secret knowledge known only to the next
 * two functions.
 */
static int flock_lock_file(struct file *filp, unsigned int lock_type,
			   unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *new_fl = NULL;
	struct file_lock **before;
	struct inode * inode = filp->f_dentry->d_inode;
	int error, change;
	int unlock = (lock_type == F_UNLCK);

	/*
	 * If we need a new lock, get it in advance to avoid races.
	 */
	if (!unlock) {
		error = -ENOLCK;
		new_fl = flock_make_lock(filp, lock_type);
		if (!new_fl)
			return error;
	}

	error = 0;
search:
	change = 0;
	before = &inode->i_flock;
	while (((fl = *before) != NULL) && (fl->fl_flags & FL_FLOCK)) {
		if (filp == fl->fl_file) {
			if (lock_type == fl->fl_type)
				goto out;
			change = 1;
			break;
		}
		before = &fl->fl_next;
	}
	/* change means that we are changing the type of an existing lock,
	 * or else unlocking it.
	 */
	if (change) {
		/* N.B. What if the wait argument is false? */
		locks_delete_lock(before, !unlock);
		/*
		 * If we waited, another lock may have been added ...
		 */
		if (!unlock)
			goto search;
	}
	if (unlock)
		goto out;

repeat:
	/* Check signals each time we start */
	error = -ERESTARTSYS;
	if (signal_pending(current))
		goto out;
	for (fl = inode->i_flock; (fl != NULL) && (fl->fl_flags & FL_FLOCK);
	     fl = fl->fl_next) {
		if (!flock_locks_conflict(new_fl, fl))
			continue;
		error = -EAGAIN;
		if (!wait)
			goto out;
		locks_insert_block(fl, new_fl);
		interruptible_sleep_on(&new_fl->fl_wait);
		locks_delete_block(new_fl);
		goto repeat;
	}
	locks_insert_lock(&inode->i_flock, new_fl);
	new_fl = NULL;
	error = 0;

out:
	if (new_fl)
		locks_free_lock(new_fl);
	return error;
}
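/* Note on flock() upgrades, matching the changelog above: a holder of
 * LOCK_SH that requests LOCK_EX has its shared lock deleted first and then
 * re-queues for the exclusive one, so the upgrade is not atomic - another
 * process may slip in and take the lock while the requester sleeps.
 */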
/* Add a POSIX style lock to a file.
 * We merge adjacent locks whenever possible. POSIX locks are sorted by owner
 * task, then by starting address.
 *
 * Kai Petzke writes:
 * To make freeing a lock much faster, we keep a pointer to the lock before the
 * actual one. But the real gain of the new coding was, that lock_it() and
 * unlock_it() became one function.
 *
 * To all purists: Yes, I use a few goto's. Just pass on to the next function.
 */
int posix_lock_file(struct file *filp, struct file_lock *caller,
		    unsigned int wait)
{
	struct file_lock *fl;
	struct file_lock *new_fl, *new_fl2;
	struct file_lock *left = NULL;
	struct file_lock *right = NULL;
	struct file_lock **before;
	struct inode * inode = filp->f_dentry->d_inode;
	int error, added = 0;

	/*
	 * We may need two file_lock structures for this operation,
	 * so we get them in advance to avoid races.
	 */
	new_fl = locks_alloc_lock();
	new_fl2 = locks_alloc_lock();
	error = -ENOLCK; /* "no luck" */
	if (!(new_fl && new_fl2))
		goto out;

	if (caller->fl_type != F_UNLCK) {
repeat:
		for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
			if (!(fl->fl_flags & FL_POSIX))
				continue;
			if (!posix_locks_conflict(caller, fl))
				continue;
			error = -EAGAIN;
			if (!wait)
				goto out;
			error = -EDEADLK;
			if (posix_locks_deadlock(caller, fl))
				goto out;
			error = -ERESTARTSYS;
			if (signal_pending(current))
				goto out;
			locks_insert_block(fl, caller);
			interruptible_sleep_on(&caller->fl_wait);
			locks_delete_block(caller);
			goto repeat;
		}
	}

	/*
	 * We've allocated the new locks in advance, so there are no
	 * errors possible (and no blocking operations) from here on.
	 *
	 * Find the first old lock with the same owner as the new lock.
	 */
	before = &inode->i_flock;

	/* First skip locks owned by other processes.
	 */
	while ((fl = *before) && (!(fl->fl_flags & FL_POSIX) ||
				  !locks_same_owner(caller, fl))) {
		before = &fl->fl_next;
	}

	/* Process locks with this owner.
	 */
	while ((fl = *before) && locks_same_owner(caller, fl)) {
		/* Detect adjacent or overlapping regions (if same lock type)
		 */
		if (caller->fl_type == fl->fl_type) {
			if (fl->fl_end < caller->fl_start - 1)
				goto next_lock;
			/* If the next lock in the list has entirely bigger
			 * addresses than the new one, insert the lock here.
			 */
			if (fl->fl_start > caller->fl_end + 1)
				break;

			/* If we come here, the new and old lock are of the
			 * same type and adjacent or overlapping. Make one
			 * lock yielding from the lower start address of both
			 * locks to the higher end address.
			 */
			if (fl->fl_start > caller->fl_start)
				fl->fl_start = caller->fl_start;
			else
				caller->fl_start = fl->fl_start;
			if (fl->fl_end < caller->fl_end)
				fl->fl_end = caller->fl_end;
			else
				caller->fl_end = fl->fl_end;
			if (added) {
				locks_delete_lock(before, 0);
				continue;
			}
			caller = fl;
			added = 1;
			goto next_lock;
		}
		/* Processing for different lock types is a bit
		 * more complex.
		 */
		if (fl->fl_end < caller->fl_start)
			goto next_lock;
		if (fl->fl_start > caller->fl_end)
			break;
		if (caller->fl_type == F_UNLCK)
			added = 1;
		if (fl->fl_start < caller->fl_start)
			left = fl;
		/* If the next lock in the list has a higher end
		 * address than the new one, insert the new one here.
		 */
		if (fl->fl_end > caller->fl_end) {
			right = fl;
			break;
		}
		if (fl->fl_start >= caller->fl_start) {
			/* The new lock completely replaces an old
			 * one (This may happen several times).
			 */
			if (added) {
				locks_delete_lock(before, 0);
				continue;
			}
			/* Replace the old lock with the new one.
			 * Wake up anybody waiting for the old one,
			 * as the change in lock type might satisfy
			 * their needs.
			 */
			locks_wake_up_blocks(fl, 0);
			fl->fl_start = caller->fl_start;
			fl->fl_end = caller->fl_end;
			fl->fl_type = caller->fl_type;
			fl->fl_u = caller->fl_u;
			caller = fl;
			added = 1;
		}
		/* Go on to next lock.
		 */
	next_lock:
		before = &fl->fl_next;
	}

	error = 0;
	if (!added) {
		if (caller->fl_type == F_UNLCK)
			goto out;
		locks_copy_lock(new_fl, caller);
		locks_insert_lock(before, new_fl);
		new_fl = NULL;
	}
	if (right) {
		if (left == right) {
			/* The new lock breaks the old one in two pieces,
			 * so we have to use the second new lock (in this
			 * case, even F_UNLCK may fail!).
			 */
			left = new_fl2;
			locks_copy_lock(new_fl2, right);
			locks_insert_lock(before, left);
			new_fl2 = NULL;
		}
		right->fl_start = caller->fl_end + 1;
		locks_wake_up_blocks(right, 0);
	}
	if (left) {
		left->fl_end = caller->fl_start - 1;
		locks_wake_up_blocks(left, 0);
	}
out:
	/*
	 * Free any unused locks.
	 */
	if (new_fl)
		locks_free_lock(new_fl);
	if (new_fl2)
		locks_free_lock(new_fl2);
	return error;
}
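/* Worked example of the split case (arbitrary ranges): if this owner holds
 * a lock on bytes 0-99 and unlocks 40-59, the old lock becomes the "left"
 * piece 0-39 (fl_end = 40 - 1) while the second new lock takes the "right"
 * piece 60-99 (fl_start = 59 + 1). This is why even an F_UNLCK request
 * needs the second pre-allocated lock and can fail with ENOLCK.
 */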
static inline int flock_translate_cmd(int cmd) {
	switch (cmd &~ LOCK_NB) {
	case LOCK_SH:
		return F_RDLCK;
	case LOCK_EX:
		return F_WRLCK;
	case LOCK_UN:
		return F_UNLCK;
	}
	return -EINVAL;
}
/* flock() system call entry point. Apply a FL_FLOCK style lock to
 * an open file descriptor.
 */
asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
{
	struct file *filp;
	int error, type;

	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	error = flock_translate_cmd(cmd);
	if (error < 0)
		goto out_putf;
	type = error;

	error = -EBADF;
	if ((type != F_UNLCK) && !(filp->f_mode & 3))
		goto out_putf;

	error = flock_lock_file(filp, type,
				(cmd & (LOCK_UN | LOCK_NB)) ? 0 : 1);

out_putf:
	fput(filp);
out:
	return error;
}
/* Report the first existing lock that would conflict with l.
 * This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(unsigned int fd, struct flock *l)
{
	struct file *filp;
	struct file_lock *fl, *file_lock = locks_alloc_lock();
	struct flock flock;
	int error;

	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;
	error = -EINVAL;
	if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
		goto out;

	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	error = -EINVAL;
	if (!posix_make_lock(filp, file_lock, &flock))
		goto out_putf;

	if (filp->f_op->lock) {
		error = filp->f_op->lock(filp, F_GETLK, file_lock);
		if (error < 0)
			goto out_putf;
		else if (error == LOCK_USE_CLNT)
			/* Bypass for NFS with no locking - 2.0.36 compat */
			fl = posix_test_lock(filp, file_lock);
		else
			fl = (file_lock->fl_type == F_UNLCK ? NULL : file_lock);
	} else {
		fl = posix_test_lock(filp, file_lock);
	}

	flock.l_type = F_UNLCK;
	if (fl != NULL) {
		flock.l_pid = fl->fl_pid;
		flock.l_start = fl->fl_start;
		flock.l_len = fl->fl_end == OFFSET_MAX ? 0 :
			fl->fl_end - fl->fl_start + 1;
		flock.l_whence = 0;
		flock.l_type = fl->fl_type;
	}
	error = -EFAULT;
	if (!copy_to_user(l, &flock, sizeof(flock)))
		error = 0;

out_putf:
	fput(filp);
out:
	locks_free_lock(file_lock);
	return error;
}
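/* Illustrative F_GETLK usage from user space (editorial sketch): the
 * caller's struct flock describes a lock it would like to take; on return
 * it either describes the first conflicting lock, or l_type == F_UNLCK:
 *
 *	struct flock fl;
 *	fl.l_type = F_WRLCK;
 *	fl.l_whence = SEEK_SET;
 *	fl.l_start = 0;
 *	fl.l_len = 0;			// whole file
 *	fcntl(fd, F_GETLK, &fl);
 *	if (fl.l_type == F_UNLCK)
 *		;			// no conflict: F_SETLK would succeed
 *	else
 *		;			// fl.l_pid holds a conflicting owner
 */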
/* Apply the lock described by l to an open file descriptor.
 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
 */
int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
	struct file *filp;
	struct file_lock *file_lock = locks_alloc_lock();
	struct flock flock;
	struct inode *inode;
	int error;

	/*
	 * This might block, so we do it before checking the inode.
	 */
	error = -EFAULT;
	if (copy_from_user(&flock, l, sizeof(flock)))
		goto out;

	/* Get arguments and validate them ...
	 */
	error = -EBADF;
	filp = fget(fd);
	if (!filp)
		goto out;

	inode = filp->f_dentry->d_inode;

	/* Don't allow mandatory locks on files that may be memory mapped
	 * and shared.
	 */
	if (IS_MANDLOCK(inode) &&
	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
		struct vm_area_struct *vma;
		struct address_space *mapping = inode->i_mapping;
		spin_lock(&mapping->i_shared_lock);
		for(vma = mapping->i_mmap;vma;vma = vma->vm_next_share) {
			if (!(vma->vm_flags & VM_MAYSHARE))
				continue;
			spin_unlock(&mapping->i_shared_lock);
			error = -EAGAIN;
			goto out_putf;
		}
		spin_unlock(&mapping->i_shared_lock);
	}

	error = -EINVAL;
	if (!posix_make_lock(filp, file_lock, &flock))
		goto out_putf;

	error = -EBADF;
	switch (flock.l_type) {
	case F_RDLCK:
		if (!(filp->f_mode & FMODE_READ))
			goto out_putf;
		break;
	case F_WRLCK:
		if (!(filp->f_mode & FMODE_WRITE))
			goto out_putf;
		break;
	case F_UNLCK:
		break;
	case F_SHLCK:
	case F_EXLCK:
#ifdef __sparc__
		/* warn a bit for now, but don't overdo it */
		{
			static int count = 0;
			if (!count) {
				count = 1;
				printk(KERN_WARNING
				       "fcntl_setlk() called by process %d (%s) with broken flock() emulation\n",
				       current->pid, current->comm);
			}
		}
		if (!(filp->f_mode & 3))
			goto out_putf;
		break;
#endif
	default:
		error = -EINVAL;
		goto out_putf;
	}

	if (filp->f_op->lock != NULL) {
		error = filp->f_op->lock(filp, cmd, file_lock);
		if (error < 0)
			goto out_putf;
	}
	error = posix_lock_file(filp, file_lock, cmd == F_SETLKW);

out_putf:
	fput(filp);
out:
	locks_free_lock(file_lock);
	return error;
}
/*
 * This function is called when the file is being removed
 * from the task's fd array.
 */
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
	struct inode * inode = filp->f_dentry->d_inode;
	struct file_lock *fl;
	struct file_lock **before;

	/*
	 * For POSIX locks we free all locks on this file for the given task.
	 */
repeat:
	before = &inode->i_flock;
	while ((fl = *before) != NULL) {
		if ((fl->fl_flags & FL_POSIX) && fl->fl_owner == owner) {
			locks_delete_lock(before, 0);
			goto repeat;
		}
		before = &fl->fl_next;
	}
}
/*
 * This function is called on the last close of an open file.
 */
void locks_remove_flock(struct file *filp)
{
	struct inode * inode = filp->f_dentry->d_inode;
	struct file_lock file_lock, *fl;
	struct file_lock **before;

repeat:
	before = &inode->i_flock;
	while ((fl = *before) != NULL) {
		if ((fl->fl_flags & FL_FLOCK) && fl->fl_file == filp) {
			int (*lock)(struct file *, int, struct file_lock *);
			lock = NULL;
			if (filp->f_op)
				lock = filp->f_op->lock;
			if (lock) {
				file_lock = *fl;
				file_lock.fl_type = F_UNLCK;
			}
			locks_delete_lock(before, 0);
			if (lock) {
				lock(filp, F_SETLK, &file_lock);
				/* List may have changed: */
				goto repeat;
			}
			continue;
		}
		before = &fl->fl_next;
	}
}
/* The following two are for the benefit of lockd.
 */
void
posix_block_lock(struct file_lock *blocker, struct file_lock *waiter)
{
	locks_insert_block(blocker, waiter);
}

void
posix_unblock_lock(struct file_lock *waiter)
{
	locks_delete_block(waiter);
}
static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx)
{
	struct inode *inode;

	inode = fl->fl_file->f_dentry->d_inode;

	out += sprintf(out, "%d:%s ", id, pfx);
	if (fl->fl_flags & FL_POSIX) {
		out += sprintf(out, "%6s %s ",
			     (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
			     (IS_MANDLOCK(inode) &&
			      (inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID) ?
			     "MANDATORY" : "ADVISORY ");
	} else {
		out += sprintf(out, "FLOCK  ADVISORY  ");
	}
	out += sprintf(out, "%s ", (fl->fl_type == F_RDLCK) ? "READ " : "WRITE");
	out += sprintf(out, "%d %s:%ld %Ld %Ld ",
		     fl->fl_pid,
		     kdevname(inode->i_dev), inode->i_ino,
		     (long long)fl->fl_start, (long long)fl->fl_end);
	sprintf(out, "%08lx %08lx %08lx %08lx %08lx\n",
		(long)fl, (long)fl->fl_link.prev, (long)fl->fl_link.next,
		(long)fl->fl_next, (long)fl->fl_block.next);
}
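/* An illustrative /proc/locks line as produced above (values invented):
 *
 *	1: POSIX  ADVISORY  WRITE 532 03:01:7008 0 2147483647 ...
 *
 * i.e. id:prefix, personality, mandatory/advisory, read/write, pid,
 * device:inode, inclusive start and end, then the five %08lx list pointers.
 */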
static void move_lock_status(char **p, off_t* pos, off_t offset)
{
	int len;
	len = strlen(*p);
	if(*pos >= offset) {
		/* the complete line is valid */
		*p += len;
		*pos += len;
		return;
	}
	if(*pos+len > offset) {
		/* use the second part of the line */
		int i = offset-*pos;
		memmove(*p,*p+i,len-i);
		*p += len-i;
		*pos += len;
		return;
	}
	/* discard the complete line */
	*pos += len;
}
int get_locks_status(char *buffer, char **start, off_t offset, int length)
{
	struct list_head *tmp;
	char *q = buffer;
	off_t pos = 0;
	int i = 0;

	list_for_each(tmp, &file_lock_list) {
		struct list_head *btmp;
		struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
		lock_get_status(q, fl, ++i, "");
		move_lock_status(&q, &pos, offset);

		if(pos >= offset+length)
			goto done;

		list_for_each(btmp, &fl->fl_block) {
			struct file_lock *bfl = list_entry(btmp,
					struct file_lock, fl_block);
			lock_get_status(q, bfl, i, " ->");
			move_lock_status(&q, &pos, offset);

			if(pos >= offset+length)
				goto done;
		}
	}
done:
	*start = buffer;
	if(q-buffer < length)
		return (q-buffer);
	return length;
}
void __init filelock_init(void)
{
	filelock_cache = kmem_cache_create("file lock cache",
			sizeof(struct file_lock), 0, 0, init_once, NULL);
	if (!filelock_cache)
		panic("cannot create file lock slab cache");
}