fasync: re-organize fasync entry insertion to allow it under a spinlock
1 /*
2 * linux/fs/locks.c
4 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
5 * Doug Evans (dje@spiff.uucp), August 07, 1992
7 * Deadlock detection added.
8 * FIXME: one thing isn't handled yet:
9 * - mandatory locks (requires lots of changes elsewhere)
10 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
12 * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
13 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
15 * Converted file_lock_table to a linked list from an array, which eliminates
16 * the limits on how many active file locks are open.
17 * Chad Page (pageone@netcom.com), November 27, 1994
19 * Removed dependency on file descriptors. dup()'ed file descriptors now
20 * get the same locks as the original file descriptors, and a close() on
21 * any file descriptor removes ALL the locks on the file for the current
22 * process. Since locks still depend on the process id, locks are inherited
23 * after an exec() but not after a fork(). This agrees with POSIX, and both
24 * BSD and SVR4 practice.
25 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
27 * Scrapped free list which is redundant now that we allocate locks
28 * dynamically with kmalloc()/kfree().
29 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
31 * Implemented two lock personalities - FL_FLOCK and FL_POSIX.
33 * FL_POSIX locks are created with calls to fcntl() and lockf() through the
34 * fcntl() system call. They have the semantics described above.
36 * FL_FLOCK locks are created with calls to flock(), through the flock()
37 * system call, which is new. Old C libraries implement flock() via fcntl()
38 * and will continue to use the old, broken implementation.
40 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
41 * with a file pointer (filp). As a result they can be shared by a parent
42 * process and its children after a fork(). They are removed when the last
43 * file descriptor referring to the file pointer is closed (unless explicitly
44 * unlocked).
46 * FL_FLOCK locks never deadlock, an existing lock is always removed before
47 * upgrading from shared to exclusive (or vice versa). When this happens
48 * any processes blocked by the current lock are woken up and allowed to
49 * run before the new lock is applied.
50 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
52 * Removed some race conditions in flock_lock_file(), marked other possible
53 * races. Just grep for FIXME to see them.
54 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
56 * Addressed Dmitry's concerns. Deadlock checking no longer recursive.
57 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
58 * once we've checked for blocking and deadlocking.
59 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
61 * Initial implementation of mandatory locks. SunOS turned out to be
62 * a rotten model, so I implemented the "obvious" semantics.
63 * See 'Documentation/mandatory.txt' for details.
64 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
66 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
67 * check if a file has mandatory locks, used by mmap(), open() and creat() to
68 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
69 * Manual, Section 2.
70 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
72 * Tidied up block list handling. Added '/proc/locks' interface.
73 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
75 * Fixed deadlock condition for pathological code that mixes calls to
76 * flock() and fcntl().
77 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
79 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
80 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
81 * guarantee sensible behaviour in the case where file system modules might
82 * be compiled with different options than the kernel itself.
83 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
85 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
86 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
87 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
89 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
90 * locks. Changed process synchronisation to avoid dereferencing locks that
91 * have already been freed.
92 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
94 * Made the block list a circular list to minimise searching in the list.
95 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
97 * Made mandatory locking a mount option. Default is not to allow mandatory
98 * locking.
99 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
101 * Some adaptations for NFS support.
102 * Olaf Kirch (okir@monad.swb.de), Dec 1996,
104 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
105 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
107 * Use slab allocator instead of kmalloc/kfree.
108 * Use generic list implementation from <linux/list.h>.
109 * Sped up posix_locks_deadlock by only considering blocked locks.
110 * Matthew Wilcox <willy@debian.org>, March, 2000.
112 * Leases and LOCK_MAND
113 * Matthew Wilcox <willy@debian.org>, June, 2000.
114 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
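 *
 * For illustration, a minimal userspace sketch of the two lock families
 * described above (the fd and byte range are example values):
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 0 };
 *	fcntl(fd, F_SETLKW, &fl);	FL_POSIX: record lock, per lock owner;
 *					an l_len of 0 means "to end of file"
 *	flock(fd, LOCK_EX);		FL_FLOCK: whole-file lock, tied to the
 *					open file and shared across fork()
 */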
117 #include <linux/capability.h>
118 #include <linux/file.h>
119 #include <linux/fdtable.h>
120 #include <linux/fs.h>
121 #include <linux/init.h>
122 #include <linux/module.h>
123 #include <linux/security.h>
124 #include <linux/slab.h>
125 #include <linux/smp_lock.h>
126 #include <linux/syscalls.h>
127 #include <linux/time.h>
128 #include <linux/rcupdate.h>
129 #include <linux/pid_namespace.h>
131 #include <asm/uaccess.h>
133 #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
134 #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
135 #define IS_LEASE(fl) (fl->fl_flags & FL_LEASE)
137 int leases_enable = 1;
138 int lease_break_time = 45;
140 #define for_each_lock(inode, lockp) \
141 for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
143 static LIST_HEAD(file_lock_list);
144 static LIST_HEAD(blocked_list);
147 * Protects the two list heads above, plus the inode->i_flock list
148 * FIXME: should use a spinlock, once lockd and ceph are ready.
150 void lock_flocks(void)
152 lock_kernel();
154 EXPORT_SYMBOL_GPL(lock_flocks);
156 void unlock_flocks(void)
158 unlock_kernel();
160 EXPORT_SYMBOL_GPL(unlock_flocks);
162 static struct kmem_cache *filelock_cache __read_mostly;
164 /* Allocate an empty lock structure. */
165 struct file_lock *locks_alloc_lock(void)
167 return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
169 EXPORT_SYMBOL_GPL(locks_alloc_lock);
171 void locks_release_private(struct file_lock *fl)
173 if (fl->fl_ops) {
174 if (fl->fl_ops->fl_release_private)
175 fl->fl_ops->fl_release_private(fl);
176 fl->fl_ops = NULL;
178 if (fl->fl_lmops) {
179 if (fl->fl_lmops->fl_release_private)
180 fl->fl_lmops->fl_release_private(fl);
181 fl->fl_lmops = NULL;
185 EXPORT_SYMBOL_GPL(locks_release_private);
187 /* Free a lock which is not in use. */
188 static void locks_free_lock(struct file_lock *fl)
190 BUG_ON(waitqueue_active(&fl->fl_wait));
191 BUG_ON(!list_empty(&fl->fl_block));
192 BUG_ON(!list_empty(&fl->fl_link));
194 locks_release_private(fl);
195 kmem_cache_free(filelock_cache, fl);
198 void locks_init_lock(struct file_lock *fl)
200 INIT_LIST_HEAD(&fl->fl_link);
201 INIT_LIST_HEAD(&fl->fl_block);
202 init_waitqueue_head(&fl->fl_wait);
203 fl->fl_next = NULL;
204 fl->fl_fasync = NULL;
205 fl->fl_owner = NULL;
206 fl->fl_pid = 0;
207 fl->fl_nspid = NULL;
208 fl->fl_file = NULL;
209 fl->fl_flags = 0;
210 fl->fl_type = 0;
211 fl->fl_start = fl->fl_end = 0;
212 fl->fl_ops = NULL;
213 fl->fl_lmops = NULL;
216 EXPORT_SYMBOL(locks_init_lock);
219 * Initialises the fields of the file lock which are invariant for
220 * free file_locks.
222 static void init_once(void *foo)
224 struct file_lock *lock = (struct file_lock *) foo;
226 locks_init_lock(lock);
229 static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
231 if (fl->fl_ops) {
232 if (fl->fl_ops->fl_copy_lock)
233 fl->fl_ops->fl_copy_lock(new, fl);
234 new->fl_ops = fl->fl_ops;
236 if (fl->fl_lmops) {
237 if (fl->fl_lmops->fl_copy_lock)
238 fl->fl_lmops->fl_copy_lock(new, fl);
239 new->fl_lmops = fl->fl_lmops;
244 * Initialize a new lock from an existing file_lock structure.
246 void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
248 new->fl_owner = fl->fl_owner;
249 new->fl_pid = fl->fl_pid;
250 new->fl_file = NULL;
251 new->fl_flags = fl->fl_flags;
252 new->fl_type = fl->fl_type;
253 new->fl_start = fl->fl_start;
254 new->fl_end = fl->fl_end;
255 new->fl_ops = NULL;
256 new->fl_lmops = NULL;
258 EXPORT_SYMBOL(__locks_copy_lock);
260 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
262 locks_release_private(new);
264 __locks_copy_lock(new, fl);
265 new->fl_file = fl->fl_file;
266 new->fl_ops = fl->fl_ops;
267 new->fl_lmops = fl->fl_lmops;
269 locks_copy_private(new, fl);
272 EXPORT_SYMBOL(locks_copy_lock);
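/* Translate a flock() command into the internal lock type: LOCK_MAND requests
 * keep their LOCK_MAND | LOCK_RW bits, plain LOCK_SH/LOCK_EX/LOCK_UN map to
 * F_RDLCK/F_WRLCK/F_UNLCK, and anything else is rejected with -EINVAL.
 */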
274 static inline int flock_translate_cmd(int cmd) {
275 if (cmd & LOCK_MAND)
276 return cmd & (LOCK_MAND | LOCK_RW);
277 switch (cmd) {
278 case LOCK_SH:
279 return F_RDLCK;
280 case LOCK_EX:
281 return F_WRLCK;
282 case LOCK_UN:
283 return F_UNLCK;
285 return -EINVAL;
288 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
289 static int flock_make_lock(struct file *filp, struct file_lock **lock,
290 unsigned int cmd)
292 struct file_lock *fl;
293 int type = flock_translate_cmd(cmd);
294 if (type < 0)
295 return type;
297 fl = locks_alloc_lock();
298 if (fl == NULL)
299 return -ENOMEM;
301 fl->fl_file = filp;
302 fl->fl_pid = current->tgid;
303 fl->fl_flags = FL_FLOCK;
304 fl->fl_type = type;
305 fl->fl_end = OFFSET_MAX;
307 *lock = fl;
308 return 0;
311 static int assign_type(struct file_lock *fl, int type)
313 switch (type) {
314 case F_RDLCK:
315 case F_WRLCK:
316 case F_UNLCK:
317 fl->fl_type = type;
318 break;
319 default:
320 return -EINVAL;
322 return 0;
325 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
326 * style lock.
328 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
329 struct flock *l)
331 off_t start, end;
333 switch (l->l_whence) {
334 case SEEK_SET:
335 start = 0;
336 break;
337 case SEEK_CUR:
338 start = filp->f_pos;
339 break;
340 case SEEK_END:
341 start = i_size_read(filp->f_path.dentry->d_inode);
342 break;
343 default:
344 return -EINVAL;
347 /* POSIX-1996 leaves the case l->l_len < 0 undefined;
348 POSIX-2001 defines it. */
349 start += l->l_start;
350 if (start < 0)
351 return -EINVAL;
352 fl->fl_end = OFFSET_MAX;
353 if (l->l_len > 0) {
354 end = start + l->l_len - 1;
355 fl->fl_end = end;
356 } else if (l->l_len < 0) {
357 end = start - 1;
358 fl->fl_end = end;
359 start += l->l_len;
360 if (start < 0)
361 return -EINVAL;
363 fl->fl_start = start; /* we record the absolute position */
364 if (fl->fl_end < fl->fl_start)
365 return -EOVERFLOW;
367 fl->fl_owner = current->files;
368 fl->fl_pid = current->tgid;
369 fl->fl_file = filp;
370 fl->fl_flags = FL_POSIX;
371 fl->fl_ops = NULL;
372 fl->fl_lmops = NULL;
374 return assign_type(fl, l->l_type);
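/*
 * Worked example for the conversion above (illustrative values): with
 * l_whence = SEEK_SET, l_start = 100 and l_len = -10, the negative-length
 * branch yields fl_end = 99 and fl_start = 90, i.e. the ten bytes just
 * before l_start, matching the POSIX-2001 reading of a negative l_len.
 */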
377 #if BITS_PER_LONG == 32
378 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
379 struct flock64 *l)
381 loff_t start;
383 switch (l->l_whence) {
384 case SEEK_SET:
385 start = 0;
386 break;
387 case SEEK_CUR:
388 start = filp->f_pos;
389 break;
390 case SEEK_END:
391 start = i_size_read(filp->f_path.dentry->d_inode);
392 break;
393 default:
394 return -EINVAL;
397 start += l->l_start;
398 if (start < 0)
399 return -EINVAL;
400 fl->fl_end = OFFSET_MAX;
401 if (l->l_len > 0) {
402 fl->fl_end = start + l->l_len - 1;
403 } else if (l->l_len < 0) {
404 fl->fl_end = start - 1;
405 start += l->l_len;
406 if (start < 0)
407 return -EINVAL;
409 fl->fl_start = start; /* we record the absolute position */
410 if (fl->fl_end < fl->fl_start)
411 return -EOVERFLOW;
413 fl->fl_owner = current->files;
414 fl->fl_pid = current->tgid;
415 fl->fl_file = filp;
416 fl->fl_flags = FL_POSIX;
417 fl->fl_ops = NULL;
418 fl->fl_lmops = NULL;
420 switch (l->l_type) {
421 case F_RDLCK:
422 case F_WRLCK:
423 case F_UNLCK:
424 fl->fl_type = l->l_type;
425 break;
426 default:
427 return -EINVAL;
430 return (0);
432 #endif
434 /* default lease lock manager operations */
435 static void lease_break_callback(struct file_lock *fl)
437 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
440 static void lease_release_private_callback(struct file_lock *fl)
442 if (!fl->fl_file)
443 return;
445 f_delown(fl->fl_file);
446 fl->fl_file->f_owner.signum = 0;
449 static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
451 return fl->fl_file == try->fl_file;
454 static const struct lock_manager_operations lease_manager_ops = {
455 .fl_break = lease_break_callback,
456 .fl_release_private = lease_release_private_callback,
457 .fl_mylease = lease_mylease_callback,
458 .fl_change = lease_modify,
462 * Initialize a lease, use the default lock manager operations
464 static int lease_init(struct file *filp, int type, struct file_lock *fl)
466 if (assign_type(fl, type) != 0)
467 return -EINVAL;
469 fl->fl_owner = current->files;
470 fl->fl_pid = current->tgid;
472 fl->fl_file = filp;
473 fl->fl_flags = FL_LEASE;
474 fl->fl_start = 0;
475 fl->fl_end = OFFSET_MAX;
476 fl->fl_ops = NULL;
477 fl->fl_lmops = &lease_manager_ops;
478 return 0;
481 /* Allocate a file_lock initialised to this type of lease */
482 static struct file_lock *lease_alloc(struct file *filp, int type)
484 struct file_lock *fl = locks_alloc_lock();
485 int error = -ENOMEM;
487 if (fl == NULL)
488 return ERR_PTR(error);
490 error = lease_init(filp, type, fl);
491 if (error) {
492 locks_free_lock(fl);
493 return ERR_PTR(error);
495 return fl;
498 /* Check if two locks overlap each other.
500 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
502 return ((fl1->fl_end >= fl2->fl_start) &&
503 (fl2->fl_end >= fl1->fl_start));
507 * Check whether two locks have the same owner.
509 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
511 if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
512 return fl2->fl_lmops == fl1->fl_lmops &&
513 fl1->fl_lmops->fl_compare_owner(fl1, fl2);
514 return fl1->fl_owner == fl2->fl_owner;
517 /* Remove waiter from blocker's block list.
518 * When blocker ends up pointing to itself then the list is empty.
520 static void __locks_delete_block(struct file_lock *waiter)
522 list_del_init(&waiter->fl_block);
523 list_del_init(&waiter->fl_link);
524 waiter->fl_next = NULL;
529 static void locks_delete_block(struct file_lock *waiter)
531 lock_flocks();
532 __locks_delete_block(waiter);
533 unlock_flocks();
536 /* Insert waiter into blocker's block list.
537 * We use a circular list so that processes can be easily woken up in
538 * the order they blocked. The documentation doesn't require this but
539 * it seems like the reasonable thing to do.
541 static void locks_insert_block(struct file_lock *blocker,
542 struct file_lock *waiter)
544 BUG_ON(!list_empty(&waiter->fl_block));
545 list_add_tail(&waiter->fl_block, &blocker->fl_block);
546 waiter->fl_next = blocker;
547 if (IS_POSIX(blocker))
548 list_add(&waiter->fl_link, &blocked_list);
551 /* Wake up processes blocked waiting for blocker.
552 * If told to wait then schedule the processes until the block list
553 * is empty, otherwise empty the block list ourselves.
555 static void locks_wake_up_blocks(struct file_lock *blocker)
557 while (!list_empty(&blocker->fl_block)) {
558 struct file_lock *waiter;
560 waiter = list_first_entry(&blocker->fl_block,
561 struct file_lock, fl_block);
562 __locks_delete_block(waiter);
563 if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
564 waiter->fl_lmops->fl_notify(waiter);
565 else
566 wake_up(&waiter->fl_wait);
570 /* Insert file lock fl into an inode's lock list at the position indicated
571 * by pos. At the same time add the lock to the global file lock list.
573 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
575 list_add(&fl->fl_link, &file_lock_list);
577 fl->fl_nspid = get_pid(task_tgid(current));
579 /* insert into file's list */
580 fl->fl_next = *pos;
581 *pos = fl;
585 * Delete a lock and then free it.
586 * Wake up processes that are blocked waiting for this lock,
587 * notify the FS that the lock has been cleared and
588 * finally free the lock.
590 static void locks_delete_lock(struct file_lock **thisfl_p)
592 struct file_lock *fl = *thisfl_p;
594 *thisfl_p = fl->fl_next;
595 fl->fl_next = NULL;
596 list_del_init(&fl->fl_link);
598 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
599 if (fl->fl_fasync != NULL) {
600 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
601 fl->fl_fasync = NULL;
604 if (fl->fl_nspid) {
605 put_pid(fl->fl_nspid);
606 fl->fl_nspid = NULL;
609 locks_wake_up_blocks(fl);
610 locks_free_lock(fl);
613 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
614 * checks for shared/exclusive status of overlapping locks.
616 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
618 if (sys_fl->fl_type == F_WRLCK)
619 return 1;
620 if (caller_fl->fl_type == F_WRLCK)
621 return 1;
622 return 0;
625 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
626 * checking before calling the locks_conflict().
628 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
630 /* POSIX locks owned by the same process do not conflict with
631 * each other.
633 if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
634 return (0);
636 /* Check whether they overlap */
637 if (!locks_overlap(caller_fl, sys_fl))
638 return 0;
640 return (locks_conflict(caller_fl, sys_fl));
643 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
644 * checking before calling the locks_conflict().
646 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
648 /* FLOCK locks referring to the same filp do not conflict with
649 * each other.
651 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
652 return (0);
653 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
654 return 0;
656 return (locks_conflict(caller_fl, sys_fl));
659 void
660 posix_test_lock(struct file *filp, struct file_lock *fl)
662 struct file_lock *cfl;
664 lock_flocks();
665 for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
666 if (!IS_POSIX(cfl))
667 continue;
668 if (posix_locks_conflict(fl, cfl))
669 break;
671 if (cfl) {
672 __locks_copy_lock(fl, cfl);
673 if (cfl->fl_nspid)
674 fl->fl_pid = pid_vnr(cfl->fl_nspid);
675 } else
676 fl->fl_type = F_UNLCK;
677 unlock_flocks();
678 return;
680 EXPORT_SYMBOL(posix_test_lock);
683 * Deadlock detection:
685 * We attempt to detect deadlocks that are due purely to posix file
686 * locks.
688 * We assume that a task can be waiting for at most one lock at a time.
689 * So for any acquired lock, the process holding that lock may be
690 * waiting on at most one other lock. That lock in turns may be held by
691 * someone waiting for at most one other lock. Given a requested lock
692 * caller_fl which is about to wait for a conflicting lock block_fl, we
693 * follow this chain of waiters to ensure we are not about to create a
694 * cycle.
696 * Since we do this before we ever put a process to sleep on a lock, we
697 * are ensured that there is never a cycle; that is what guarantees that
698 * the while() loop in posix_locks_deadlock() eventually completes.
700 * Note: the above assumption may not be true when handling lock
701 * requests from a broken NFS client. It may also fail in the presence
702 * of tasks (such as posix threads) sharing the same open file table.
704 * To handle those cases, we just bail out after a few iterations.
707 #define MAX_DEADLK_ITERATIONS 10
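/*
 * Worked example of the chain walk below (owners A and B are illustrative):
 * A holds a lock on [0,9] and is blocked waiting for B's lock on [10,19].
 * If B now requests part of [0,9], the conflicting lock is owned by A; the
 * first iteration finds A's queued request on blocked_list and follows
 * fl_next to the lock A is waiting for, which B itself owns, so
 * posix_same_owner() matches the caller and -EDEADLK is returned.
 */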
709 /* Find a lock that the owner of the given block_fl is blocking on. */
710 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
712 struct file_lock *fl;
714 list_for_each_entry(fl, &blocked_list, fl_link) {
715 if (posix_same_owner(fl, block_fl))
716 return fl->fl_next;
718 return NULL;
721 static int posix_locks_deadlock(struct file_lock *caller_fl,
722 struct file_lock *block_fl)
724 int i = 0;
726 while ((block_fl = what_owner_is_waiting_for(block_fl))) {
727 if (i++ > MAX_DEADLK_ITERATIONS)
728 return 0;
729 if (posix_same_owner(caller_fl, block_fl))
730 return 1;
732 return 0;
735 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
736 * after any leases, but before any posix locks.
738 * Note that if called with an FL_EXISTS argument, the caller may determine
739 * whether or not a lock was successfully freed by testing the return
740 * value for -ENOENT.
742 static int flock_lock_file(struct file *filp, struct file_lock *request)
744 struct file_lock *new_fl = NULL;
745 struct file_lock **before;
746 struct inode * inode = filp->f_path.dentry->d_inode;
747 int error = 0;
748 int found = 0;
750 if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
751 new_fl = locks_alloc_lock();
752 if (!new_fl)
753 return -ENOMEM;
756 lock_flocks();
757 if (request->fl_flags & FL_ACCESS)
758 goto find_conflict;
760 for_each_lock(inode, before) {
761 struct file_lock *fl = *before;
762 if (IS_POSIX(fl))
763 break;
764 if (IS_LEASE(fl))
765 continue;
766 if (filp != fl->fl_file)
767 continue;
768 if (request->fl_type == fl->fl_type)
769 goto out;
770 found = 1;
771 locks_delete_lock(before);
772 break;
775 if (request->fl_type == F_UNLCK) {
776 if ((request->fl_flags & FL_EXISTS) && !found)
777 error = -ENOENT;
778 goto out;
782 * If a higher-priority process was blocked on the old file lock,
783 * give it the opportunity to lock the file.
785 if (found) {
786 unlock_flocks();
787 cond_resched();
788 lock_flocks();
791 find_conflict:
792 for_each_lock(inode, before) {
793 struct file_lock *fl = *before;
794 if (IS_POSIX(fl))
795 break;
796 if (IS_LEASE(fl))
797 continue;
798 if (!flock_locks_conflict(request, fl))
799 continue;
800 error = -EAGAIN;
801 if (!(request->fl_flags & FL_SLEEP))
802 goto out;
803 error = FILE_LOCK_DEFERRED;
804 locks_insert_block(fl, request);
805 goto out;
807 if (request->fl_flags & FL_ACCESS)
808 goto out;
809 locks_copy_lock(new_fl, request);
810 locks_insert_lock(before, new_fl);
811 new_fl = NULL;
812 error = 0;
814 out:
815 unlock_flocks();
816 if (new_fl)
817 locks_free_lock(new_fl);
818 return error;
821 static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
823 struct file_lock *fl;
824 struct file_lock *new_fl = NULL;
825 struct file_lock *new_fl2 = NULL;
826 struct file_lock *left = NULL;
827 struct file_lock *right = NULL;
828 struct file_lock **before;
829 int error, added = 0;
832 * We may need two file_lock structures for this operation,
833 * so we get them in advance to avoid races.
835 * In some cases we can be sure that no new locks will be needed
837 if (!(request->fl_flags & FL_ACCESS) &&
838 (request->fl_type != F_UNLCK ||
839 request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
840 new_fl = locks_alloc_lock();
841 new_fl2 = locks_alloc_lock();
844 lock_flocks();
845 if (request->fl_type != F_UNLCK) {
846 for_each_lock(inode, before) {
847 fl = *before;
848 if (!IS_POSIX(fl))
849 continue;
850 if (!posix_locks_conflict(request, fl))
851 continue;
852 if (conflock)
853 __locks_copy_lock(conflock, fl);
854 error = -EAGAIN;
855 if (!(request->fl_flags & FL_SLEEP))
856 goto out;
857 error = -EDEADLK;
858 if (posix_locks_deadlock(request, fl))
859 goto out;
860 error = FILE_LOCK_DEFERRED;
861 locks_insert_block(fl, request);
862 goto out;
866 /* If we're just looking for a conflict, we're done. */
867 error = 0;
868 if (request->fl_flags & FL_ACCESS)
869 goto out;
872 * Find the first old lock with the same owner as the new lock.
875 before = &inode->i_flock;
877 /* First skip locks owned by other processes. */
878 while ((fl = *before) && (!IS_POSIX(fl) ||
879 !posix_same_owner(request, fl))) {
880 before = &fl->fl_next;
883 /* Process locks with this owner. */
884 while ((fl = *before) && posix_same_owner(request, fl)) {
885 /* Detect adjacent or overlapping regions (if same lock type)
887 if (request->fl_type == fl->fl_type) {
888 /* In all comparisons of start vs end, use
889 * "start - 1" rather than "end + 1". If end
890 * is OFFSET_MAX, end + 1 will become negative.
892 if (fl->fl_end < request->fl_start - 1)
893 goto next_lock;
894 /* If the next lock in the list has entirely bigger
895 * addresses than the new one, insert the lock here.
897 if (fl->fl_start - 1 > request->fl_end)
898 break;
900 /* If we come here, the new and old lock are of the
901 * same type and adjacent or overlapping. Make one
902 * lock yielding from the lower start address of both
903 * locks to the higher end address.
905 if (fl->fl_start > request->fl_start)
906 fl->fl_start = request->fl_start;
907 else
908 request->fl_start = fl->fl_start;
909 if (fl->fl_end < request->fl_end)
910 fl->fl_end = request->fl_end;
911 else
912 request->fl_end = fl->fl_end;
913 if (added) {
914 locks_delete_lock(before);
915 continue;
917 request = fl;
918 added = 1;
920 else {
921 /* Processing for different lock types is a bit
922 * more complex.
924 if (fl->fl_end < request->fl_start)
925 goto next_lock;
926 if (fl->fl_start > request->fl_end)
927 break;
928 if (request->fl_type == F_UNLCK)
929 added = 1;
930 if (fl->fl_start < request->fl_start)
931 left = fl;
932 /* If the next lock in the list has a higher end
933 * address than the new one, insert the new one here.
935 if (fl->fl_end > request->fl_end) {
936 right = fl;
937 break;
939 if (fl->fl_start >= request->fl_start) {
940 /* The new lock completely replaces an old
941 * one (This may happen several times).
943 if (added) {
944 locks_delete_lock(before);
945 continue;
947 /* Replace the old lock with the new one.
948 * Wake up anybody waiting for the old one,
949 * as the change in lock type might satisfy
950 * their needs.
952 locks_wake_up_blocks(fl);
953 fl->fl_start = request->fl_start;
954 fl->fl_end = request->fl_end;
955 fl->fl_type = request->fl_type;
956 locks_release_private(fl);
957 locks_copy_private(fl, request);
958 request = fl;
959 added = 1;
962 /* Go on to next lock.
964 next_lock:
965 before = &fl->fl_next;
969 * The above code only modifies existing locks in case of
970 * merging or replacing. If new lock(s) need to be inserted
971 * all modifications are done below this, so it is still safe to
972 * bail out.
974 error = -ENOLCK; /* "no luck" */
975 if (right && left == right && !new_fl2)
976 goto out;
978 error = 0;
979 if (!added) {
980 if (request->fl_type == F_UNLCK) {
981 if (request->fl_flags & FL_EXISTS)
982 error = -ENOENT;
983 goto out;
986 if (!new_fl) {
987 error = -ENOLCK;
988 goto out;
990 locks_copy_lock(new_fl, request);
991 locks_insert_lock(before, new_fl);
992 new_fl = NULL;
994 if (right) {
995 if (left == right) {
996 /* The new lock breaks the old one in two pieces,
997 * so we have to use the second new lock.
999 left = new_fl2;
1000 new_fl2 = NULL;
1001 locks_copy_lock(left, right);
1002 locks_insert_lock(before, left);
1004 right->fl_start = request->fl_end + 1;
1005 locks_wake_up_blocks(right);
1007 if (left) {
1008 left->fl_end = request->fl_start - 1;
1009 locks_wake_up_blocks(left);
1011 out:
1012 unlock_flocks();
1014 * Free any unused locks.
1016 if (new_fl)
1017 locks_free_lock(new_fl);
1018 if (new_fl2)
1019 locks_free_lock(new_fl2);
1020 return error;
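/*
 * Illustration of the split case handled above: if an owner holds a write
 * lock over bytes [0,99] and then unlocks [40,59], "left" and "right" end up
 * pointing at the same lock, so the spare new_fl2 is used to carve it into
 * [0,39] and [60,99].  This is why two file_lock structures are allocated
 * up front.
 */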
1024 * posix_lock_file - Apply a POSIX-style lock to a file
1025 * @filp: The file to apply the lock to
1026 * @fl: The lock to be applied
1027 * @conflock: Place to return a copy of the conflicting lock, if found.
1029 * Add a POSIX style lock to a file.
1030 * We merge adjacent & overlapping locks whenever possible.
1031 * POSIX locks are sorted by owner task, then by starting address
1033 * Note that if called with an FL_EXISTS argument, the caller may determine
1034 * whether or not a lock was successfully freed by testing the return
1035 * value for -ENOENT.
1037 int posix_lock_file(struct file *filp, struct file_lock *fl,
1038 struct file_lock *conflock)
1040 return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
1042 EXPORT_SYMBOL(posix_lock_file);
1045 * posix_lock_file_wait - Apply a POSIX-style lock to a file
1046 * @filp: The file to apply the lock to
1047 * @fl: The lock to be applied
1049 * Add a POSIX style lock to a file.
1050 * We merge adjacent & overlapping locks whenever possible.
1051 * POSIX locks are sorted by owner task, then by starting address
1053 int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1055 int error;
1056 might_sleep ();
1057 for (;;) {
1058 error = posix_lock_file(filp, fl, NULL);
1059 if (error != FILE_LOCK_DEFERRED)
1060 break;
1061 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1062 if (!error)
1063 continue;
1065 locks_delete_block(fl);
1066 break;
1068 return error;
1070 EXPORT_SYMBOL(posix_lock_file_wait);
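/*
 * The loop above is the blocking-request pattern used throughout this file:
 * try the lock, and if FILE_LOCK_DEFERRED comes back, sleep on fl_wait until
 * the request is granted (fl_next becomes NULL) or a signal arrives, in
 * which case the waiter is unlinked again via locks_delete_block().
 */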
1073 * locks_mandatory_locked - Check for an active lock
1074 * @inode: the file to check
1076 * Searches the inode's list of locks to find any POSIX locks which conflict.
1077 * This function is called from locks_verify_locked() only.
1079 int locks_mandatory_locked(struct inode *inode)
1081 fl_owner_t owner = current->files;
1082 struct file_lock *fl;
1085 * Search the lock list for this inode for any POSIX locks.
1087 lock_flocks();
1088 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1089 if (!IS_POSIX(fl))
1090 continue;
1091 if (fl->fl_owner != owner)
1092 break;
1094 unlock_flocks();
1095 return fl ? -EAGAIN : 0;
1099 * locks_mandatory_area - Check for a conflicting lock
1100 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
1101 * for shared
1102 * @inode: the file to check
1103 * @filp: how the file was opened (if it was)
1104 * @offset: start of area to check
1105 * @count: length of area to check
1107 * Searches the inode's list of locks to find any POSIX locks which conflict.
1108 * This function is called from rw_verify_area() and
1109 * locks_verify_truncate().
1111 int locks_mandatory_area(int read_write, struct inode *inode,
1112 struct file *filp, loff_t offset,
1113 size_t count)
1115 struct file_lock fl;
1116 int error;
1118 locks_init_lock(&fl);
1119 fl.fl_owner = current->files;
1120 fl.fl_pid = current->tgid;
1121 fl.fl_file = filp;
1122 fl.fl_flags = FL_POSIX | FL_ACCESS;
1123 if (filp && !(filp->f_flags & O_NONBLOCK))
1124 fl.fl_flags |= FL_SLEEP;
1125 fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
1126 fl.fl_start = offset;
1127 fl.fl_end = offset + count - 1;
1129 for (;;) {
1130 error = __posix_lock_file(inode, &fl, NULL);
1131 if (error != FILE_LOCK_DEFERRED)
1132 break;
1133 error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1134 if (!error) {
1136 * If we've been sleeping someone might have
1137 * changed the permissions behind our back.
1139 if (__mandatory_lock(inode))
1140 continue;
1143 locks_delete_block(&fl);
1144 break;
1147 return error;
1150 EXPORT_SYMBOL(locks_mandatory_area);
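/*
 * Note on when the checks above apply: mandatory locking is in effect for an
 * inode only if its filesystem is mounted with the "mand" option and the file
 * has the set-group-ID bit set with group execute cleared, i.e. roughly
 *
 *	IS_MANDLOCK(inode) && (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID
 *
 * which is what the mandatory_lock()/__mandatory_lock() helpers used here test.
 */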
1152 /* We already had a lease on this file; just change its type */
1153 int lease_modify(struct file_lock **before, int arg)
1155 struct file_lock *fl = *before;
1156 int error = assign_type(fl, arg);
1158 if (error)
1159 return error;
1160 locks_wake_up_blocks(fl);
1161 if (arg == F_UNLCK)
1162 locks_delete_lock(before);
1163 return 0;
1166 EXPORT_SYMBOL(lease_modify);
1168 static void time_out_leases(struct inode *inode)
1170 struct file_lock **before;
1171 struct file_lock *fl;
1173 before = &inode->i_flock;
1174 while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
1175 if ((fl->fl_break_time == 0)
1176 || time_before(jiffies, fl->fl_break_time)) {
1177 before = &fl->fl_next;
1178 continue;
1180 lease_modify(before, fl->fl_type & ~F_INPROGRESS);
1181 if (fl == *before) /* lease_modify may have freed fl */
1182 before = &fl->fl_next;
1187 * __break_lease - revoke all outstanding leases on file
1188 * @inode: the inode of the file to return
1189 * @mode: the open mode (read or write)
1191 * break_lease (inlined for speed) has checked there already is at least
1192 * some kind of lock (maybe a lease) on this file. Leases are broken on
1193 * a call to open() or truncate(). This function can sleep unless you
1194 * specified %O_NONBLOCK to your open().
1196 int __break_lease(struct inode *inode, unsigned int mode)
1198 int error = 0, future;
1199 struct file_lock *new_fl, *flock;
1200 struct file_lock *fl;
1201 unsigned long break_time;
1202 int i_have_this_lease = 0;
1203 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1205 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1207 lock_flocks();
1209 time_out_leases(inode);
1211 flock = inode->i_flock;
1212 if ((flock == NULL) || !IS_LEASE(flock))
1213 goto out;
1215 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
1216 if (fl->fl_owner == current->files)
1217 i_have_this_lease = 1;
1219 if (want_write) {
1220 /* If we want write access, we have to revoke any lease. */
1221 future = F_UNLCK | F_INPROGRESS;
1222 } else if (flock->fl_type & F_INPROGRESS) {
1223 /* If the lease is already being broken, we just leave it */
1224 future = flock->fl_type;
1225 } else if (flock->fl_type & F_WRLCK) {
1226 /* Downgrade the exclusive lease to a read-only lease. */
1227 future = F_RDLCK | F_INPROGRESS;
1228 } else {
1229 /* the existing lease was read-only, so we can read too. */
1230 goto out;
1233 if (IS_ERR(new_fl) && !i_have_this_lease
1234 && ((mode & O_NONBLOCK) == 0)) {
1235 error = PTR_ERR(new_fl);
1236 goto out;
1239 break_time = 0;
1240 if (lease_break_time > 0) {
1241 break_time = jiffies + lease_break_time * HZ;
1242 if (break_time == 0)
1243 break_time++; /* so that 0 means no break time */
1246 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1247 if (fl->fl_type != future) {
1248 fl->fl_type = future;
1249 fl->fl_break_time = break_time;
1250 /* lease must have lmops break callback */
1251 fl->fl_lmops->fl_break(fl);
1255 if (i_have_this_lease || (mode & O_NONBLOCK)) {
1256 error = -EWOULDBLOCK;
1257 goto out;
1260 restart:
1261 break_time = flock->fl_break_time;
1262 if (break_time != 0) {
1263 break_time -= jiffies;
1264 if (break_time == 0)
1265 break_time++;
1267 locks_insert_block(flock, new_fl);
1268 unlock_flocks();
1269 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1270 !new_fl->fl_next, break_time);
1271 lock_flocks();
1272 __locks_delete_block(new_fl);
1273 if (error >= 0) {
1274 if (error == 0)
1275 time_out_leases(inode);
1276 /* Wait for the next lease that has not been broken yet */
1277 for (flock = inode->i_flock; flock && IS_LEASE(flock);
1278 flock = flock->fl_next) {
1279 if (flock->fl_type & F_INPROGRESS)
1280 goto restart;
1282 error = 0;
1285 out:
1286 unlock_flocks();
1287 if (!IS_ERR(new_fl))
1288 locks_free_lock(new_fl);
1289 return error;
1292 EXPORT_SYMBOL(__break_lease);
1295 * lease_get_mtime - get the last modified time of an inode
1296 * @inode: the inode
1297 * @time: pointer to a timespec which will contain the last modified time
1299 * This is to force NFS clients to flush their caches for files with
1300 * exclusive leases. The justification is that if someone has an
1301 * exclusive lease, then they could be modifying it.
1303 void lease_get_mtime(struct inode *inode, struct timespec *time)
1305 struct file_lock *flock = inode->i_flock;
1306 if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
1307 *time = current_fs_time(inode->i_sb);
1308 else
1309 *time = inode->i_mtime;
1312 EXPORT_SYMBOL(lease_get_mtime);
1315 * fcntl_getlease - Enquire what lease is currently active
1316 * @filp: the file
1318 * The value returned by this function will be one of
1319 * (if no lease break is pending):
1321 * %F_RDLCK to indicate a shared lease is held.
1323 * %F_WRLCK to indicate an exclusive lease is held.
1325 * %F_UNLCK to indicate no lease is held.
1327 * (if a lease break is pending):
1329 * %F_RDLCK to indicate an exclusive lease needs to be
1330 * changed to a shared lease (or removed).
1332 * %F_UNLCK to indicate the lease needs to be removed.
1334 * XXX: sfr & willy disagree over whether F_INPROGRESS
1335 * should be returned to userspace.
1337 int fcntl_getlease(struct file *filp)
1339 struct file_lock *fl;
1340 int type = F_UNLCK;
1342 lock_flocks();
1343 time_out_leases(filp->f_path.dentry->d_inode);
1344 for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
1345 fl = fl->fl_next) {
1346 if (fl->fl_file == filp) {
1347 type = fl->fl_type & ~F_INPROGRESS;
1348 break;
1351 unlock_flocks();
1352 return type;
1356 * generic_setlease - sets a lease on an open file
1357 * @filp: file pointer
1358 * @arg: type of lease to obtain
1359 * @flp: input - file_lock to use, output - file_lock inserted
1361 * The (input) flp->fl_lmops->fl_break function is required
1362 * by break_lease().
1364 * Called with file_lock_lock held.
1366 int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
1368 struct file_lock *fl, **before, **my_before = NULL, *lease;
1369 struct dentry *dentry = filp->f_path.dentry;
1370 struct inode *inode = dentry->d_inode;
1371 int error, rdlease_count = 0, wrlease_count = 0;
1373 if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
1374 return -EACCES;
1375 if (!S_ISREG(inode->i_mode))
1376 return -EINVAL;
1377 error = security_file_lock(filp, arg);
1378 if (error)
1379 return error;
1381 time_out_leases(inode);
1383 BUG_ON(!(*flp)->fl_lmops->fl_break);
1385 lease = *flp;
1387 if (arg != F_UNLCK) {
1388 error = -EAGAIN;
1389 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1390 goto out;
1391 if ((arg == F_WRLCK)
1392 && ((atomic_read(&dentry->d_count) > 1)
1393 || (atomic_read(&inode->i_count) > 1)))
1394 goto out;
1398 * At this point, we know that if there is an exclusive
1399 * lease on this file, then we hold it on this filp
1400 * (otherwise our open of this file would have blocked).
1401 * And if we are trying to acquire an exclusive lease,
1402 * then the file is not open by anyone (including us)
1403 * except for this filp.
1405 for (before = &inode->i_flock;
1406 ((fl = *before) != NULL) && IS_LEASE(fl);
1407 before = &fl->fl_next) {
1408 if (lease->fl_lmops->fl_mylease(fl, lease))
1409 my_before = before;
1410 else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
1412 * Someone is in the process of opening this
1413 * file for writing so we may not take an
1414 * exclusive lease on it.
1416 wrlease_count++;
1417 else
1418 rdlease_count++;
1421 error = -EAGAIN;
1422 if ((arg == F_RDLCK && (wrlease_count > 0)) ||
1423 (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
1424 goto out;
1426 if (my_before != NULL) {
1427 *flp = *my_before;
1428 error = lease->fl_lmops->fl_change(my_before, arg);
1429 goto out;
1432 if (arg == F_UNLCK)
1433 goto out;
1435 error = -EINVAL;
1436 if (!leases_enable)
1437 goto out;
1439 locks_insert_lock(before, lease);
1440 return 0;
1442 out:
1443 locks_free_lock(lease);
1444 return error;
1446 EXPORT_SYMBOL(generic_setlease);
1448 static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1450 if (filp->f_op && filp->f_op->setlease)
1451 return filp->f_op->setlease(filp, arg, lease);
1452 else
1453 return generic_setlease(filp, arg, lease);
1457 * vfs_setlease - sets a lease on an open file
1458 * @filp: file pointer
1459 * @arg: type of lease to obtain
1460 * @lease: file_lock to use
1462 * Call this to establish a lease on the file.
1463 * The (*lease)->fl_lmops->fl_break operation must be set; if not,
1464 * break_lease will oops!
1466 * This will call the filesystem's setlease file method, if
1467 * defined. Note that there is no getlease method; instead, the
1468 * filesystem setlease method should call back to setlease() to
1469 * add a lease to the inode's lease list, where fcntl_getlease() can
1470 * find it. Since fcntl_getlease() only reports whether the current
1471 * task holds a lease, a cluster filesystem need only do this for
1472 * leases held by processes on this node.
1474 * There is also no break_lease method; filesystems that
1475 * handle their own leases should break leases themselves from the
1476 * filesystem's open, create, and (on truncate) setattr methods.
1478 * Warning: the only current setlease methods exist only to disable
1479 * leases in certain cases. More vfs changes may be required to
1480 * allow a full filesystem lease implementation.
1483 int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1485 int error;
1487 lock_flocks();
1488 error = __vfs_setlease(filp, arg, lease);
1489 unlock_flocks();
1491 return error;
1493 EXPORT_SYMBOL_GPL(vfs_setlease);
1496 * fcntl_setlease - sets a lease on an open file
1497 * @fd: open file descriptor
1498 * @filp: file pointer
1499 * @arg: type of lease to obtain
1501 * Call this fcntl to establish a lease on the file.
1502 * Note that you also need to call %F_SETSIG to
1503 * receive a signal when the lease is broken.
1505 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1507 struct file_lock *fl;
1508 struct fasync_struct *new;
1509 struct inode *inode = filp->f_path.dentry->d_inode;
1510 int error;
1512 fl = lease_alloc(filp, arg);
1513 if (IS_ERR(fl))
1514 return PTR_ERR(fl);
1516 new = fasync_alloc();
1517 if (!new) {
1518 locks_free_lock(fl);
1519 return -ENOMEM;
1521 lock_flocks();
1522 error = __vfs_setlease(filp, arg, &fl);
1523 if (error || arg == F_UNLCK)
1524 goto out_unlock;
1527 * fasync_insert_entry() returns the old entry if any.
1528 * If there was no old entry, then it used 'new' and
1529 * inserted it into the fasync list. Clear new so that
1530 * we don't release it here.
1532 if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new))
1533 new = NULL;
1535 if (error < 0) {
1536 /* remove lease just inserted by setlease */
1537 fl->fl_type = F_UNLCK | F_INPROGRESS;
1538 fl->fl_break_time = jiffies - 10;
1539 time_out_leases(inode);
1540 goto out_unlock;
1543 error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1544 out_unlock:
1545 unlock_flocks();
1546 if (new)
1547 fasync_free(new);
1548 return error;
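/*
 * For illustration, the userspace side of the call above (a sketch only,
 * error handling omitted):
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);		pick the lease-break signal
 *	fcntl(fd, F_SETLEASE, F_WRLCK);		take an exclusive lease
 *	... on signal delivery, flush cached state, then ...
 *	fcntl(fd, F_SETLEASE, F_UNLCK);		release the lease
 *
 * If the holder never releases, time_out_leases() revokes the lease after
 * lease_break_time seconds.
 */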
1552 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
1553 * @filp: The file to apply the lock to
1554 * @fl: The lock to be applied
1556 * Add a FLOCK style lock to a file.
1558 int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1560 int error;
1561 might_sleep();
1562 for (;;) {
1563 error = flock_lock_file(filp, fl);
1564 if (error != FILE_LOCK_DEFERRED)
1565 break;
1566 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1567 if (!error)
1568 continue;
1570 locks_delete_block(fl);
1571 break;
1573 return error;
1576 EXPORT_SYMBOL(flock_lock_file_wait);
1579 * sys_flock: - flock() system call.
1580 * @fd: the file descriptor to lock.
1581 * @cmd: the type of lock to apply.
1583 * Apply a %FL_FLOCK style lock to an open file descriptor.
1584 * The @cmd can be one of
1586 * %LOCK_SH -- a shared lock.
1588 * %LOCK_EX -- an exclusive lock.
1590 * %LOCK_UN -- remove an existing lock.
1592 * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes.
1594 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1595 * processes read and write access respectively.
1597 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1599 struct file *filp;
1600 struct file_lock *lock;
1601 int can_sleep, unlock;
1602 int error;
1604 error = -EBADF;
1605 filp = fget(fd);
1606 if (!filp)
1607 goto out;
1609 can_sleep = !(cmd & LOCK_NB);
1610 cmd &= ~LOCK_NB;
1611 unlock = (cmd == LOCK_UN);
1613 if (!unlock && !(cmd & LOCK_MAND) &&
1614 !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
1615 goto out_putf;
1617 error = flock_make_lock(filp, &lock, cmd);
1618 if (error)
1619 goto out_putf;
1620 if (can_sleep)
1621 lock->fl_flags |= FL_SLEEP;
1623 error = security_file_lock(filp, lock->fl_type);
1624 if (error)
1625 goto out_free;
1627 if (filp->f_op && filp->f_op->flock)
1628 error = filp->f_op->flock(filp,
1629 (can_sleep) ? F_SETLKW : F_SETLK,
1630 lock);
1631 else
1632 error = flock_lock_file_wait(filp, lock);
1634 out_free:
1635 locks_free_lock(lock);
1637 out_putf:
1638 fput(filp);
1639 out:
1640 return error;
1644 * vfs_test_lock - test file byte range lock
1645 * @filp: The file to test lock for
1646 * @fl: The lock to test; also used to hold result
1648 * Returns -ERRNO on failure. Indicates presence of conflicting lock by
1649 * setting conf->fl_type to something other than F_UNLCK.
1651 int vfs_test_lock(struct file *filp, struct file_lock *fl)
1653 if (filp->f_op && filp->f_op->lock)
1654 return filp->f_op->lock(filp, F_GETLK, fl);
1655 posix_test_lock(filp, fl);
1656 return 0;
1658 EXPORT_SYMBOL_GPL(vfs_test_lock);
1660 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1662 flock->l_pid = fl->fl_pid;
1663 #if BITS_PER_LONG == 32
1665 * Make sure we can represent the posix lock via
1666 * legacy 32bit flock.
1668 if (fl->fl_start > OFFT_OFFSET_MAX)
1669 return -EOVERFLOW;
1670 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1671 return -EOVERFLOW;
1672 #endif
1673 flock->l_start = fl->fl_start;
1674 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1675 fl->fl_end - fl->fl_start + 1;
1676 flock->l_whence = 0;
1677 flock->l_type = fl->fl_type;
1678 return 0;
1681 #if BITS_PER_LONG == 32
1682 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1684 flock->l_pid = fl->fl_pid;
1685 flock->l_start = fl->fl_start;
1686 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1687 fl->fl_end - fl->fl_start + 1;
1688 flock->l_whence = 0;
1689 flock->l_type = fl->fl_type;
1691 #endif
1693 /* Report the first existing lock that would conflict with l.
1694 * This implements the F_GETLK command of fcntl().
1696 int fcntl_getlk(struct file *filp, struct flock __user *l)
1698 struct file_lock file_lock;
1699 struct flock flock;
1700 int error;
1702 error = -EFAULT;
1703 if (copy_from_user(&flock, l, sizeof(flock)))
1704 goto out;
1705 error = -EINVAL;
1706 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1707 goto out;
1709 error = flock_to_posix_lock(filp, &file_lock, &flock);
1710 if (error)
1711 goto out;
1713 error = vfs_test_lock(filp, &file_lock);
1714 if (error)
1715 goto out;
1717 flock.l_type = file_lock.fl_type;
1718 if (file_lock.fl_type != F_UNLCK) {
1719 error = posix_lock_to_flock(&flock, &file_lock);
1720 if (error)
1721 goto out;
1723 error = -EFAULT;
1724 if (!copy_to_user(l, &flock, sizeof(flock)))
1725 error = 0;
1726 out:
1727 return error;
1731 * vfs_lock_file - file byte range lock
1732 * @filp: The file to apply the lock to
1733 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
1734 * @fl: The lock to be applied
1735 * @conf: Place to return a copy of the conflicting lock, if found.
1737 * A caller that doesn't care about the conflicting lock may pass NULL
1738 * as the final argument.
1740 * If the filesystem defines a private ->lock() method, then @conf will
1741 * be left unchanged; so a caller that cares should initialize it to
1742 * some acceptable default.
1744 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
1745 * locks, the ->lock() interface may return asynchronously, before the lock has
1746 * been granted or denied by the underlying filesystem, if (and only if)
1747 * fl_grant is set. Callers expecting ->lock() to return asynchronously
1748 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
1749 * the request is for a blocking lock. When ->lock() does return asynchronously,
1750 * it must return FILE_LOCK_DEFERRED, and call ->fl_grant() when the lock
1751 * request completes.
1752 * If the request is for a non-blocking lock the file system should return
1753 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
1754 * with the result. If the request times out, the callback routine will return a
1755 * nonzero return code and the file system should release the lock. The file
1756 * system is also responsible for keeping a corresponding posix lock when it
1757 * grants a lock so the VFS can find out which locks are locally held and do
1758 * the correct lock cleanup when required.
1759 * The underlying filesystem must not drop the kernel lock or call
1760 * ->fl_grant() before returning to the caller with a FILE_LOCK_DEFERRED
1761 * return code.
1763 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1765 if (filp->f_op && filp->f_op->lock)
1766 return filp->f_op->lock(filp, cmd, fl);
1767 else
1768 return posix_lock_file(filp, fl, conf);
1770 EXPORT_SYMBOL_GPL(vfs_lock_file);
1772 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1773 struct file_lock *fl)
1775 int error;
1777 error = security_file_lock(filp, fl->fl_type);
1778 if (error)
1779 return error;
1781 for (;;) {
1782 error = vfs_lock_file(filp, cmd, fl, NULL);
1783 if (error != FILE_LOCK_DEFERRED)
1784 break;
1785 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1786 if (!error)
1787 continue;
1789 locks_delete_block(fl);
1790 break;
1793 return error;
1796 /* Apply the lock described by l to an open file descriptor.
1797 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1799 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1800 struct flock __user *l)
1802 struct file_lock *file_lock = locks_alloc_lock();
1803 struct flock flock;
1804 struct inode *inode;
1805 struct file *f;
1806 int error;
1808 if (file_lock == NULL)
1809 return -ENOLCK;
1812 * This might block, so we do it before checking the inode.
1814 error = -EFAULT;
1815 if (copy_from_user(&flock, l, sizeof(flock)))
1816 goto out;
1818 inode = filp->f_path.dentry->d_inode;
1820 /* Don't allow mandatory locks on files that may be memory mapped
1821 * and shared.
1823 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1824 error = -EAGAIN;
1825 goto out;
1828 again:
1829 error = flock_to_posix_lock(filp, file_lock, &flock);
1830 if (error)
1831 goto out;
1832 if (cmd == F_SETLKW) {
1833 file_lock->fl_flags |= FL_SLEEP;
1836 error = -EBADF;
1837 switch (flock.l_type) {
1838 case F_RDLCK:
1839 if (!(filp->f_mode & FMODE_READ))
1840 goto out;
1841 break;
1842 case F_WRLCK:
1843 if (!(filp->f_mode & FMODE_WRITE))
1844 goto out;
1845 break;
1846 case F_UNLCK:
1847 break;
1848 default:
1849 error = -EINVAL;
1850 goto out;
1853 error = do_lock_file_wait(filp, cmd, file_lock);
1856 * Attempt to detect a close/fcntl race and recover by
1857 * releasing the lock that was just acquired.
1860 * we need that spin_lock here - it prevents reordering between
1861 * update of inode->i_flock and check for it done in close().
1862 * rcu_read_lock() wouldn't do.
1864 spin_lock(&current->files->file_lock);
1865 f = fcheck(fd);
1866 spin_unlock(&current->files->file_lock);
1867 if (!error && f != filp && flock.l_type != F_UNLCK) {
1868 flock.l_type = F_UNLCK;
1869 goto again;
1872 out:
1873 locks_free_lock(file_lock);
1874 return error;
1877 #if BITS_PER_LONG == 32
1878 /* Report the first existing lock that would conflict with l.
1879 * This implements the F_GETLK command of fcntl().
1881 int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1883 struct file_lock file_lock;
1884 struct flock64 flock;
1885 int error;
1887 error = -EFAULT;
1888 if (copy_from_user(&flock, l, sizeof(flock)))
1889 goto out;
1890 error = -EINVAL;
1891 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1892 goto out;
1894 error = flock64_to_posix_lock(filp, &file_lock, &flock);
1895 if (error)
1896 goto out;
1898 error = vfs_test_lock(filp, &file_lock);
1899 if (error)
1900 goto out;
1902 flock.l_type = file_lock.fl_type;
1903 if (file_lock.fl_type != F_UNLCK)
1904 posix_lock_to_flock64(&flock, &file_lock);
1906 error = -EFAULT;
1907 if (!copy_to_user(l, &flock, sizeof(flock)))
1908 error = 0;
1910 out:
1911 return error;
1914 /* Apply the lock described by l to an open file descriptor.
1915 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1917 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1918 struct flock64 __user *l)
1920 struct file_lock *file_lock = locks_alloc_lock();
1921 struct flock64 flock;
1922 struct inode *inode;
1923 struct file *f;
1924 int error;
1926 if (file_lock == NULL)
1927 return -ENOLCK;
1930 * This might block, so we do it before checking the inode.
1932 error = -EFAULT;
1933 if (copy_from_user(&flock, l, sizeof(flock)))
1934 goto out;
1936 inode = filp->f_path.dentry->d_inode;
1938 /* Don't allow mandatory locks on files that may be memory mapped
1939 * and shared.
1941 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1942 error = -EAGAIN;
1943 goto out;
1946 again:
1947 error = flock64_to_posix_lock(filp, file_lock, &flock);
1948 if (error)
1949 goto out;
1950 if (cmd == F_SETLKW64) {
1951 file_lock->fl_flags |= FL_SLEEP;
1954 error = -EBADF;
1955 switch (flock.l_type) {
1956 case F_RDLCK:
1957 if (!(filp->f_mode & FMODE_READ))
1958 goto out;
1959 break;
1960 case F_WRLCK:
1961 if (!(filp->f_mode & FMODE_WRITE))
1962 goto out;
1963 break;
1964 case F_UNLCK:
1965 break;
1966 default:
1967 error = -EINVAL;
1968 goto out;
1971 error = do_lock_file_wait(filp, cmd, file_lock);
1974 * Attempt to detect a close/fcntl race and recover by
1975 * releasing the lock that was just acquired.
1977 spin_lock(&current->files->file_lock);
1978 f = fcheck(fd);
1979 spin_unlock(&current->files->file_lock);
1980 if (!error && f != filp && flock.l_type != F_UNLCK) {
1981 flock.l_type = F_UNLCK;
1982 goto again;
1985 out:
1986 locks_free_lock(file_lock);
1987 return error;
1989 #endif /* BITS_PER_LONG == 32 */
1992 * This function is called when the file is being removed
1993 * from the task's fd array. POSIX locks belonging to this task
1994 * are deleted at this time.
1996 void locks_remove_posix(struct file *filp, fl_owner_t owner)
1998 struct file_lock lock;
2001 * If there are no locks held on this file, we don't need to call
2002 * posix_lock_file(). Another process could be setting a lock on this
2003 * file at the same time, but we wouldn't remove that lock anyway.
2005 if (!filp->f_path.dentry->d_inode->i_flock)
2006 return;
2008 lock.fl_type = F_UNLCK;
2009 lock.fl_flags = FL_POSIX | FL_CLOSE;
2010 lock.fl_start = 0;
2011 lock.fl_end = OFFSET_MAX;
2012 lock.fl_owner = owner;
2013 lock.fl_pid = current->tgid;
2014 lock.fl_file = filp;
2015 lock.fl_ops = NULL;
2016 lock.fl_lmops = NULL;
2018 vfs_lock_file(filp, F_SETLK, &lock, NULL);
2020 if (lock.fl_ops && lock.fl_ops->fl_release_private)
2021 lock.fl_ops->fl_release_private(&lock);
2024 EXPORT_SYMBOL(locks_remove_posix);
2027 * This function is called on the last close of an open file.
2029 void locks_remove_flock(struct file *filp)
2031 struct inode * inode = filp->f_path.dentry->d_inode;
2032 struct file_lock *fl;
2033 struct file_lock **before;
2035 if (!inode->i_flock)
2036 return;
2038 if (filp->f_op && filp->f_op->flock) {
2039 struct file_lock fl = {
2040 .fl_pid = current->tgid,
2041 .fl_file = filp,
2042 .fl_flags = FL_FLOCK,
2043 .fl_type = F_UNLCK,
2044 .fl_end = OFFSET_MAX,
2046 filp->f_op->flock(filp, F_SETLKW, &fl);
2047 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2048 fl.fl_ops->fl_release_private(&fl);
2051 lock_flocks();
2052 before = &inode->i_flock;
2054 while ((fl = *before) != NULL) {
2055 if (fl->fl_file == filp) {
2056 if (IS_FLOCK(fl)) {
2057 locks_delete_lock(before);
2058 continue;
2060 if (IS_LEASE(fl)) {
2061 lease_modify(before, F_UNLCK);
2062 continue;
2064 /* What? */
2065 BUG();
2067 before = &fl->fl_next;
2069 unlock_flocks();
2073 * posix_unblock_lock - stop waiting for a file lock
2074 * @filp: how the file was opened
2075 * @waiter: the lock which was waiting
2077 * lockd needs to block waiting for locks.
2080 posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2082 int status = 0;
2084 lock_flocks();
2085 if (waiter->fl_next)
2086 __locks_delete_block(waiter);
2087 else
2088 status = -ENOENT;
2089 unlock_flocks();
2090 return status;
2093 EXPORT_SYMBOL(posix_unblock_lock);
2096 * vfs_cancel_lock - file byte range unblock lock
2097 * @filp: The file to apply the unblock to
2098 * @fl: The lock to be unblocked
2100 * Used by lock managers to cancel blocked requests
2102 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2104 if (filp->f_op && filp->f_op->lock)
2105 return filp->f_op->lock(filp, F_CANCELLK, fl);
2106 return 0;
2109 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2111 #ifdef CONFIG_PROC_FS
2112 #include <linux/proc_fs.h>
2113 #include <linux/seq_file.h>
2115 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2116 int id, char *pfx)
2118 struct inode *inode = NULL;
2119 unsigned int fl_pid;
2121 if (fl->fl_nspid)
2122 fl_pid = pid_vnr(fl->fl_nspid);
2123 else
2124 fl_pid = fl->fl_pid;
2126 if (fl->fl_file != NULL)
2127 inode = fl->fl_file->f_path.dentry->d_inode;
2129 seq_printf(f, "%d:%s ", id, pfx);
2130 if (IS_POSIX(fl)) {
2131 seq_printf(f, "%6s %s ",
2132 (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2133 (inode == NULL) ? "*NOINODE*" :
2134 mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2135 } else if (IS_FLOCK(fl)) {
2136 if (fl->fl_type & LOCK_MAND) {
2137 seq_printf(f, "FLOCK MSNFS ");
2138 } else {
2139 seq_printf(f, "FLOCK ADVISORY ");
2141 } else if (IS_LEASE(fl)) {
2142 seq_printf(f, "LEASE ");
2143 if (fl->fl_type & F_INPROGRESS)
2144 seq_printf(f, "BREAKING ");
2145 else if (fl->fl_file)
2146 seq_printf(f, "ACTIVE ");
2147 else
2148 seq_printf(f, "BREAKER ");
2149 } else {
2150 seq_printf(f, "UNKNOWN UNKNOWN ");
2152 if (fl->fl_type & LOCK_MAND) {
2153 seq_printf(f, "%s ",
2154 (fl->fl_type & LOCK_READ)
2155 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
2156 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2157 } else {
2158 seq_printf(f, "%s ",
2159 (fl->fl_type & F_INPROGRESS)
2160 ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
2161 : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
2163 if (inode) {
2164 #ifdef WE_CAN_BREAK_LSLK_NOW
2165 seq_printf(f, "%d %s:%ld ", fl_pid,
2166 inode->i_sb->s_id, inode->i_ino);
2167 #else
2168 /* userspace relies on this representation of dev_t ;-( */
2169 seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2170 MAJOR(inode->i_sb->s_dev),
2171 MINOR(inode->i_sb->s_dev), inode->i_ino);
2172 #endif
2173 } else {
2174 seq_printf(f, "%d <none>:0 ", fl_pid);
2176 if (IS_POSIX(fl)) {
2177 if (fl->fl_end == OFFSET_MAX)
2178 seq_printf(f, "%Ld EOF\n", fl->fl_start);
2179 else
2180 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2181 } else {
2182 seq_printf(f, "0 EOF\n");
2186 static int locks_show(struct seq_file *f, void *v)
2188 struct file_lock *fl, *bfl;
2190 fl = list_entry(v, struct file_lock, fl_link);
2192 lock_get_status(f, fl, (long)f->private, "");
2194 list_for_each_entry(bfl, &fl->fl_block, fl_block)
2195 lock_get_status(f, bfl, (long)f->private, " ->");
2197 f->private++;
2198 return 0;
2201 static void *locks_start(struct seq_file *f, loff_t *pos)
2203 lock_flocks();
2204 f->private = (void *)1;
2205 return seq_list_start(&file_lock_list, *pos);
2208 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2210 return seq_list_next(v, &file_lock_list, pos);
2213 static void locks_stop(struct seq_file *f, void *v)
2215 unlock_flocks();
2218 static const struct seq_operations locks_seq_operations = {
2219 .start = locks_start,
2220 .next = locks_next,
2221 .stop = locks_stop,
2222 .show = locks_show,
2225 static int locks_open(struct inode *inode, struct file *filp)
2227 return seq_open(filp, &locks_seq_operations);
2230 static const struct file_operations proc_locks_operations = {
2231 .open = locks_open,
2232 .read = seq_read,
2233 .llseek = seq_lseek,
2234 .release = seq_release,
2237 static int __init proc_locks_init(void)
2239 proc_create("locks", 0, NULL, &proc_locks_operations);
2240 return 0;
2242 module_init(proc_locks_init);
2243 #endif
2246 * lock_may_read - checks that the region is free of locks
2247 * @inode: the inode that is being read
2248 * @start: the first byte to read
2249 * @len: the number of bytes to read
2251 * Emulates Windows locking requirements. Whole-file
2252 * mandatory locks (share modes) can prohibit a read and
2253 * byte-range POSIX locks can prohibit a read if they overlap.
2255 * N.B. this function is only ever called
2256 * from knfsd and ownership of locks is never checked.
2258 int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2260 struct file_lock *fl;
2261 int result = 1;
2262 lock_flocks();
2263 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2264 if (IS_POSIX(fl)) {
2265 if (fl->fl_type == F_RDLCK)
2266 continue;
2267 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2268 continue;
2269 } else if (IS_FLOCK(fl)) {
2270 if (!(fl->fl_type & LOCK_MAND))
2271 continue;
2272 if (fl->fl_type & LOCK_READ)
2273 continue;
2274 } else
2275 continue;
2276 result = 0;
2277 break;
2279 unlock_flocks();
2280 return result;
2283 EXPORT_SYMBOL(lock_may_read);
2286 * lock_may_write - checks that the region is free of locks
2287 * @inode: the inode that is being written
2288 * @start: the first byte to write
2289 * @len: the number of bytes to write
2291 * Emulates Windows locking requirements. Whole-file
2292 * mandatory locks (share modes) can prohibit a write and
2293 * byte-range POSIX locks can prohibit a write if they overlap.
2295 * N.B. this function is only ever called
2296 * from knfsd and ownership of locks is never checked.
2298 int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2300 struct file_lock *fl;
2301 int result = 1;
2302 lock_flocks();
2303 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2304 if (IS_POSIX(fl)) {
2305 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2306 continue;
2307 } else if (IS_FLOCK(fl)) {
2308 if (!(fl->fl_type & LOCK_MAND))
2309 continue;
2310 if (fl->fl_type & LOCK_WRITE)
2311 continue;
2312 } else
2313 continue;
2314 result = 0;
2315 break;
2317 unlock_flocks();
2318 return result;
2321 EXPORT_SYMBOL(lock_may_write);
2323 static int __init filelock_init(void)
2325 filelock_cache = kmem_cache_create("file_lock_cache",
2326 sizeof(struct file_lock), 0, SLAB_PANIC,
2327 init_once);
2328 return 0;
2331 core_initcall(filelock_init);