linux-2.6/kvm.git: fs/locks.c
1 /*
2 * linux/fs/locks.c
4 * Provide support for fcntl()'s F_GETLK, F_SETLK, and F_SETLKW calls.
5 * Doug Evans (dje@spiff.uucp), August 07, 1992
7 * Deadlock detection added.
8 * FIXME: one thing isn't handled yet:
9 * - mandatory locks (requires lots of changes elsewhere)
10 * Kelly Carmichael (kelly@[142.24.8.65]), September 17, 1994.
12 * Miscellaneous edits, and a total rewrite of posix_lock_file() code.
13 * Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
15 * Converted file_lock_table to a linked list from an array, which eliminates
16 * the limits on how many active file locks are open.
17 * Chad Page (pageone@netcom.com), November 27, 1994
19 * Removed dependency on file descriptors. dup()'ed file descriptors now
20 * get the same locks as the original file descriptors, and a close() on
21 * any file descriptor removes ALL the locks on the file for the current
22 * process. Since locks still depend on the process id, locks are inherited
23 * after an exec() but not after a fork(). This agrees with POSIX, and both
24 * BSD and SVR4 practice.
25 * Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
27 * Scrapped free list which is redundant now that we allocate locks
28 * dynamically with kmalloc()/kfree().
29 * Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
31 * Implemented two lock personalities - FL_FLOCK and FL_POSIX.
33 * FL_POSIX locks are created with calls to fcntl() and lockf() through the
34 * fcntl() system call. They have the semantics described above.
36 * FL_FLOCK locks are created with calls to flock(), through the flock()
37 * system call, which is new. Old C libraries implement flock() via fcntl()
38 * and will continue to use the old, broken implementation.
40 * FL_FLOCK locks follow the 4.4 BSD flock() semantics. They are associated
41 * with a file pointer (filp). As a result they can be shared by a parent
42 * process and its children after a fork(). They are removed when the last
43 * file descriptor referring to the file pointer is closed (unless explicitly
44 * unlocked).
46 * FL_FLOCK locks never deadlock, an existing lock is always removed before
47 * upgrading from shared to exclusive (or vice versa). When this happens
48 * any processes blocked by the current lock are woken up and allowed to
49 * run before the new lock is applied.
50 * Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
52 * Removed some race conditions in flock_lock_file(), marked other possible
53 * races. Just grep for FIXME to see them.
54 * Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
56 * Addressed Dmitry's concerns. Deadlock checking no longer recursive.
57 * Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
58 * once we've checked for blocking and deadlocking.
59 * Andy Walker (andy@lysaker.kvaerner.no), April 03, 1996.
61 * Initial implementation of mandatory locks. SunOS turned out to be
62 * a rotten model, so I implemented the "obvious" semantics.
63 * See 'Documentation/mandatory.txt' for details.
64 * Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
66 * Don't allow mandatory locks on mmap()'ed files. Added simple functions to
67 * check if a file has mandatory locks, used by mmap(), open() and creat() to
68 * see if system call should be rejected. Ref. HP-UX/SunOS/Solaris Reference
69 * Manual, Section 2.
70 * Andy Walker (andy@lysaker.kvaerner.no), April 09, 1996.
72 * Tidied up block list handling. Added '/proc/locks' interface.
73 * Andy Walker (andy@lysaker.kvaerner.no), April 24, 1996.
75 * Fixed deadlock condition for pathological code that mixes calls to
76 * flock() and fcntl().
77 * Andy Walker (andy@lysaker.kvaerner.no), April 29, 1996.
79 * Allow only one type of locking scheme (FL_POSIX or FL_FLOCK) to be in use
80 * for a given file at a time. Changed the CONFIG_LOCK_MANDATORY scheme to
81 * guarantee sensible behaviour in the case where file system modules might
82 * be compiled with different options than the kernel itself.
83 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
85 * Added a couple of missing wake_up() calls. Thanks to Thomas Meckel
86 * (Thomas.Meckel@mni.fh-giessen.de) for spotting this.
87 * Andy Walker (andy@lysaker.kvaerner.no), May 15, 1996.
89 * Changed FL_POSIX locks to use the block list in the same way as FL_FLOCK
90 * locks. Changed process synchronisation to avoid dereferencing locks that
91 * have already been freed.
92 * Andy Walker (andy@lysaker.kvaerner.no), Sep 21, 1996.
94 * Made the block list a circular list to minimise searching in the list.
95 * Andy Walker (andy@lysaker.kvaerner.no), Sep 25, 1996.
97 * Made mandatory locking a mount option. Default is not to allow mandatory
98 * locking.
99 * Andy Walker (andy@lysaker.kvaerner.no), Oct 04, 1996.
101 * Some adaptations for NFS support.
102 * Olaf Kirch (okir@monad.swb.de), Dec 1996,
104 * Fixed /proc/locks interface so that we can't overrun the buffer we are handed.
105 * Andy Walker (andy@lysaker.kvaerner.no), May 12, 1997.
107 * Use slab allocator instead of kmalloc/kfree.
108 * Use generic list implementation from <linux/list.h>.
109 * Sped up posix_locks_deadlock by only considering blocked locks.
110 * Matthew Wilcox <willy@debian.org>, March, 2000.
112 * Leases and LOCK_MAND
113 * Matthew Wilcox <willy@debian.org>, June, 2000.
114 * Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
117 #include <linux/capability.h>
118 #include <linux/file.h>
119 #include <linux/fdtable.h>
120 #include <linux/fs.h>
121 #include <linux/init.h>
122 #include <linux/module.h>
123 #include <linux/security.h>
124 #include <linux/slab.h>
125 #include <linux/syscalls.h>
126 #include <linux/time.h>
127 #include <linux/rcupdate.h>
128 #include <linux/pid_namespace.h>
130 #include <asm/uaccess.h>
132 #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX)
133 #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK)
134 #define IS_LEASE(fl) (fl->fl_flags & FL_LEASE)
136 int leases_enable = 1;
137 int lease_break_time = 45;
139 #define for_each_lock(inode, lockp) \
140 for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
142 static LIST_HEAD(file_lock_list);
143 static LIST_HEAD(blocked_list);
144 static DEFINE_SPINLOCK(file_lock_lock);
147 * Protects the two list heads above, plus the inode->i_flock list
149 void lock_flocks(void)
151 spin_lock(&file_lock_lock);
153 EXPORT_SYMBOL_GPL(lock_flocks);
155 void unlock_flocks(void)
157 spin_unlock(&file_lock_lock);
159 EXPORT_SYMBOL_GPL(unlock_flocks);
161 static struct kmem_cache *filelock_cache __read_mostly;
163 /* Allocate an empty lock structure. */
164 struct file_lock *locks_alloc_lock(void)
166 return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
168 EXPORT_SYMBOL_GPL(locks_alloc_lock);
170 void locks_release_private(struct file_lock *fl)
172 if (fl->fl_ops) {
173 if (fl->fl_ops->fl_release_private)
174 fl->fl_ops->fl_release_private(fl);
175 fl->fl_ops = NULL;
177 if (fl->fl_lmops) {
178 if (fl->fl_lmops->fl_release_private)
179 fl->fl_lmops->fl_release_private(fl);
180 fl->fl_lmops = NULL;
184 EXPORT_SYMBOL_GPL(locks_release_private);
186 /* Free a lock which is not in use. */
187 void locks_free_lock(struct file_lock *fl)
189 BUG_ON(waitqueue_active(&fl->fl_wait));
190 BUG_ON(!list_empty(&fl->fl_block));
191 BUG_ON(!list_empty(&fl->fl_link));
193 locks_release_private(fl);
194 kmem_cache_free(filelock_cache, fl);
196 EXPORT_SYMBOL(locks_free_lock);
198 void locks_init_lock(struct file_lock *fl)
200 INIT_LIST_HEAD(&fl->fl_link);
201 INIT_LIST_HEAD(&fl->fl_block);
202 init_waitqueue_head(&fl->fl_wait);
203 fl->fl_next = NULL;
204 fl->fl_fasync = NULL;
205 fl->fl_owner = NULL;
206 fl->fl_pid = 0;
207 fl->fl_nspid = NULL;
208 fl->fl_file = NULL;
209 fl->fl_flags = 0;
210 fl->fl_type = 0;
211 fl->fl_start = fl->fl_end = 0;
212 fl->fl_ops = NULL;
213 fl->fl_lmops = NULL;
216 EXPORT_SYMBOL(locks_init_lock);
219 * Initialises the fields of the file lock which are invariant for
220 * free file_locks.
222 static void init_once(void *foo)
224 struct file_lock *lock = (struct file_lock *) foo;
226 locks_init_lock(lock);
229 static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
231 if (fl->fl_ops) {
232 if (fl->fl_ops->fl_copy_lock)
233 fl->fl_ops->fl_copy_lock(new, fl);
234 new->fl_ops = fl->fl_ops;
236 if (fl->fl_lmops)
237 new->fl_lmops = fl->fl_lmops;
241 * Initialize a new lock from an existing file_lock structure.
243 void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
245 new->fl_owner = fl->fl_owner;
246 new->fl_pid = fl->fl_pid;
247 new->fl_file = NULL;
248 new->fl_flags = fl->fl_flags;
249 new->fl_type = fl->fl_type;
250 new->fl_start = fl->fl_start;
251 new->fl_end = fl->fl_end;
252 new->fl_ops = NULL;
253 new->fl_lmops = NULL;
255 EXPORT_SYMBOL(__locks_copy_lock);
257 void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
259 locks_release_private(new);
261 __locks_copy_lock(new, fl);
262 new->fl_file = fl->fl_file;
263 new->fl_ops = fl->fl_ops;
264 new->fl_lmops = fl->fl_lmops;
266 locks_copy_private(new, fl);
269 EXPORT_SYMBOL(locks_copy_lock);
271 static inline int flock_translate_cmd(int cmd) {
272 if (cmd & LOCK_MAND)
273 return cmd & (LOCK_MAND | LOCK_RW);
274 switch (cmd) {
275 case LOCK_SH:
276 return F_RDLCK;
277 case LOCK_EX:
278 return F_WRLCK;
279 case LOCK_UN:
280 return F_UNLCK;
282 return -EINVAL;
285 /* Fill in a file_lock structure with an appropriate FLOCK lock. */
286 static int flock_make_lock(struct file *filp, struct file_lock **lock,
287 unsigned int cmd)
289 struct file_lock *fl;
290 int type = flock_translate_cmd(cmd);
291 if (type < 0)
292 return type;
294 fl = locks_alloc_lock();
295 if (fl == NULL)
296 return -ENOMEM;
298 fl->fl_file = filp;
299 fl->fl_pid = current->tgid;
300 fl->fl_flags = FL_FLOCK;
301 fl->fl_type = type;
302 fl->fl_end = OFFSET_MAX;
304 *lock = fl;
305 return 0;
308 static int assign_type(struct file_lock *fl, int type)
310 switch (type) {
311 case F_RDLCK:
312 case F_WRLCK:
313 case F_UNLCK:
314 fl->fl_type = type;
315 break;
316 default:
317 return -EINVAL;
319 return 0;
322 /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
323 * style lock.
325 static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
326 struct flock *l)
328 off_t start, end;
330 switch (l->l_whence) {
331 case SEEK_SET:
332 start = 0;
333 break;
334 case SEEK_CUR:
335 start = filp->f_pos;
336 break;
337 case SEEK_END:
338 start = i_size_read(filp->f_path.dentry->d_inode);
339 break;
340 default:
341 return -EINVAL;
344 /* POSIX-1996 leaves the case l->l_len < 0 undefined;
345 POSIX-2001 defines it. */
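	/*
	 * For example (illustration only): with l_whence == SEEK_SET,
	 * l_start == 100 and l_len == -10, the code below computes
	 * fl_end = 99 and fl_start = 90, i.e. the ten bytes 90..99 are
	 * locked, as POSIX-2001 specifies for a negative l_len.
	 */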
346 start += l->l_start;
347 if (start < 0)
348 return -EINVAL;
349 fl->fl_end = OFFSET_MAX;
350 if (l->l_len > 0) {
351 end = start + l->l_len - 1;
352 fl->fl_end = end;
353 } else if (l->l_len < 0) {
354 end = start - 1;
355 fl->fl_end = end;
356 start += l->l_len;
357 if (start < 0)
358 return -EINVAL;
360 fl->fl_start = start; /* we record the absolute position */
361 if (fl->fl_end < fl->fl_start)
362 return -EOVERFLOW;
364 fl->fl_owner = current->files;
365 fl->fl_pid = current->tgid;
366 fl->fl_file = filp;
367 fl->fl_flags = FL_POSIX;
368 fl->fl_ops = NULL;
369 fl->fl_lmops = NULL;
371 return assign_type(fl, l->l_type);
374 #if BITS_PER_LONG == 32
375 static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
376 struct flock64 *l)
378 loff_t start;
380 switch (l->l_whence) {
381 case SEEK_SET:
382 start = 0;
383 break;
384 case SEEK_CUR:
385 start = filp->f_pos;
386 break;
387 case SEEK_END:
388 start = i_size_read(filp->f_path.dentry->d_inode);
389 break;
390 default:
391 return -EINVAL;
394 start += l->l_start;
395 if (start < 0)
396 return -EINVAL;
397 fl->fl_end = OFFSET_MAX;
398 if (l->l_len > 0) {
399 fl->fl_end = start + l->l_len - 1;
400 } else if (l->l_len < 0) {
401 fl->fl_end = start - 1;
402 start += l->l_len;
403 if (start < 0)
404 return -EINVAL;
406 fl->fl_start = start; /* we record the absolute position */
407 if (fl->fl_end < fl->fl_start)
408 return -EOVERFLOW;
410 fl->fl_owner = current->files;
411 fl->fl_pid = current->tgid;
412 fl->fl_file = filp;
413 fl->fl_flags = FL_POSIX;
414 fl->fl_ops = NULL;
415 fl->fl_lmops = NULL;
417 switch (l->l_type) {
418 case F_RDLCK:
419 case F_WRLCK:
420 case F_UNLCK:
421 fl->fl_type = l->l_type;
422 break;
423 default:
424 return -EINVAL;
427 return (0);
429 #endif
431 /* default lease lock manager operations */
432 static void lease_break_callback(struct file_lock *fl)
434 kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
437 static void lease_release_private_callback(struct file_lock *fl)
439 if (!fl->fl_file)
440 return;
442 f_delown(fl->fl_file);
443 fl->fl_file->f_owner.signum = 0;
446 static const struct lock_manager_operations lease_manager_ops = {
447 .fl_break = lease_break_callback,
448 .fl_release_private = lease_release_private_callback,
449 .fl_change = lease_modify,
453 * Initialize a lease, use the default lock manager operations
455 static int lease_init(struct file *filp, int type, struct file_lock *fl)
457 if (assign_type(fl, type) != 0)
458 return -EINVAL;
460 fl->fl_owner = current->files;
461 fl->fl_pid = current->tgid;
463 fl->fl_file = filp;
464 fl->fl_flags = FL_LEASE;
465 fl->fl_start = 0;
466 fl->fl_end = OFFSET_MAX;
467 fl->fl_ops = NULL;
468 fl->fl_lmops = &lease_manager_ops;
469 return 0;
472 /* Allocate a file_lock initialised to this type of lease */
473 static struct file_lock *lease_alloc(struct file *filp, int type)
475 struct file_lock *fl = locks_alloc_lock();
476 int error = -ENOMEM;
478 if (fl == NULL)
479 return ERR_PTR(error);
481 error = lease_init(filp, type, fl);
482 if (error) {
483 locks_free_lock(fl);
484 return ERR_PTR(error);
486 return fl;
489 /* Check if two locks overlap each other.
491 static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
493 return ((fl1->fl_end >= fl2->fl_start) &&
494 (fl2->fl_end >= fl1->fl_start));
498 * Check whether two locks have the same owner.
500 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
502 if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
503 return fl2->fl_lmops == fl1->fl_lmops &&
504 fl1->fl_lmops->fl_compare_owner(fl1, fl2);
505 return fl1->fl_owner == fl2->fl_owner;
508 /* Remove waiter from blocker's block list.
509 * When blocker ends up pointing to itself then the list is empty.
511 static void __locks_delete_block(struct file_lock *waiter)
513 list_del_init(&waiter->fl_block);
514 list_del_init(&waiter->fl_link);
515 waiter->fl_next = NULL;
520 static void locks_delete_block(struct file_lock *waiter)
522 lock_flocks();
523 __locks_delete_block(waiter);
524 unlock_flocks();
527 /* Insert waiter into blocker's block list.
528 * We use a circular list so that processes can be easily woken up in
529 * the order they blocked. The documentation doesn't require this but
530 * it seems like the reasonable thing to do.
532 static void locks_insert_block(struct file_lock *blocker,
533 struct file_lock *waiter)
535 BUG_ON(!list_empty(&waiter->fl_block));
536 list_add_tail(&waiter->fl_block, &blocker->fl_block);
537 waiter->fl_next = blocker;
538 if (IS_POSIX(blocker))
539 list_add(&waiter->fl_link, &blocked_list);
542 /* Wake up processes blocked waiting for blocker.
543 * If told to wait then schedule the processes until the block list
544 * is empty, otherwise empty the block list ourselves.
546 static void locks_wake_up_blocks(struct file_lock *blocker)
548 while (!list_empty(&blocker->fl_block)) {
549 struct file_lock *waiter;
551 waiter = list_first_entry(&blocker->fl_block,
552 struct file_lock, fl_block);
553 __locks_delete_block(waiter);
554 if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
555 waiter->fl_lmops->fl_notify(waiter);
556 else
557 wake_up(&waiter->fl_wait);
561 /* Insert file lock fl into an inode's lock list at the position indicated
562 * by pos. At the same time add the lock to the global file lock list.
564 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
566 list_add(&fl->fl_link, &file_lock_list);
568 fl->fl_nspid = get_pid(task_tgid(current));
570 /* insert into file's list */
571 fl->fl_next = *pos;
572 *pos = fl;
576 * Delete a lock and then free it.
577 * Wake up processes that are blocked waiting for this lock,
578 * notify the FS that the lock has been cleared and
579 * finally free the lock.
581 static void locks_delete_lock(struct file_lock **thisfl_p)
583 struct file_lock *fl = *thisfl_p;
585 *thisfl_p = fl->fl_next;
586 fl->fl_next = NULL;
587 list_del_init(&fl->fl_link);
589 fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
590 if (fl->fl_fasync != NULL) {
591 printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
592 fl->fl_fasync = NULL;
595 if (fl->fl_nspid) {
596 put_pid(fl->fl_nspid);
597 fl->fl_nspid = NULL;
600 locks_wake_up_blocks(fl);
601 locks_free_lock(fl);
604 /* Determine if lock sys_fl blocks lock caller_fl. Common functionality
605 * checks for shared/exclusive status of overlapping locks.
607 static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
609 if (sys_fl->fl_type == F_WRLCK)
610 return 1;
611 if (caller_fl->fl_type == F_WRLCK)
612 return 1;
613 return 0;
616 /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
617 * checking before calling the locks_conflict().
619 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
621 /* POSIX locks owned by the same process do not conflict with
622 * each other.
624 if (!IS_POSIX(sys_fl) || posix_same_owner(caller_fl, sys_fl))
625 return (0);
627 /* Check whether they overlap */
628 if (!locks_overlap(caller_fl, sys_fl))
629 return 0;
631 return (locks_conflict(caller_fl, sys_fl));
634 /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
635 * checking before calling the locks_conflict().
637 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
639 /* FLOCK locks referring to the same filp do not conflict with
640 * each other.
642 if (!IS_FLOCK(sys_fl) || (caller_fl->fl_file == sys_fl->fl_file))
643 return (0);
644 if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
645 return 0;
647 return (locks_conflict(caller_fl, sys_fl));
650 void
651 posix_test_lock(struct file *filp, struct file_lock *fl)
653 struct file_lock *cfl;
655 lock_flocks();
656 for (cfl = filp->f_path.dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
657 if (!IS_POSIX(cfl))
658 continue;
659 if (posix_locks_conflict(fl, cfl))
660 break;
662 if (cfl) {
663 __locks_copy_lock(fl, cfl);
664 if (cfl->fl_nspid)
665 fl->fl_pid = pid_vnr(cfl->fl_nspid);
666 } else
667 fl->fl_type = F_UNLCK;
668 unlock_flocks();
669 return;
671 EXPORT_SYMBOL(posix_test_lock);
674 * Deadlock detection:
676 * We attempt to detect deadlocks that are due purely to posix file
677 * locks.
679 * We assume that a task can be waiting for at most one lock at a time.
680 * So for any acquired lock, the process holding that lock may be
681 * waiting on at most one other lock. That lock in turn may be held by
682 * someone waiting for at most one other lock. Given a requested lock
683 * caller_fl which is about to wait for a conflicting lock block_fl, we
684 * follow this chain of waiters to ensure we are not about to create a
685 * cycle.
687 * Since we do this before we ever put a process to sleep on a lock, we
688 * are ensured that there is never a cycle; that is what guarantees that
689 * the while() loop in posix_locks_deadlock() eventually completes.
691 * Note: the above assumption may not be true when handling lock
692 * requests from a broken NFS client. It may also fail in the presence
693 * of tasks (such as posix threads) sharing the same open file table.
695 * To handle those cases, we just bail out after a few iterations.
698 #define MAX_DEADLK_ITERATIONS 10
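/*
 * Example (illustration only): task A holds a lock that task B is
 * currently blocked on, and B holds the lock that conflicts with A's
 * new request. Walking the chain from block_fl via blocked_list leads
 * back to a lock whose owner matches caller_fl, so posix_locks_deadlock()
 * returns 1 and the caller fails the request with -EDEADLK instead of
 * sleeping forever.
 */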
700 /* Find a lock that the owner of the given block_fl is blocking on. */
701 static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
703 struct file_lock *fl;
705 list_for_each_entry(fl, &blocked_list, fl_link) {
706 if (posix_same_owner(fl, block_fl))
707 return fl->fl_next;
709 return NULL;
712 static int posix_locks_deadlock(struct file_lock *caller_fl,
713 struct file_lock *block_fl)
715 int i = 0;
717 while ((block_fl = what_owner_is_waiting_for(block_fl))) {
718 if (i++ > MAX_DEADLK_ITERATIONS)
719 return 0;
720 if (posix_same_owner(caller_fl, block_fl))
721 return 1;
723 return 0;
726 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
727 * after any leases, but before any posix locks.
729 * Note that if called with an FL_EXISTS argument, the caller may determine
730 * whether or not a lock was successfully freed by testing the return
731 * value for -ENOENT.
733 static int flock_lock_file(struct file *filp, struct file_lock *request)
735 struct file_lock *new_fl = NULL;
736 struct file_lock **before;
737 struct inode * inode = filp->f_path.dentry->d_inode;
738 int error = 0;
739 int found = 0;
741 if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
742 new_fl = locks_alloc_lock();
743 if (!new_fl)
744 return -ENOMEM;
747 lock_flocks();
748 if (request->fl_flags & FL_ACCESS)
749 goto find_conflict;
751 for_each_lock(inode, before) {
752 struct file_lock *fl = *before;
753 if (IS_POSIX(fl))
754 break;
755 if (IS_LEASE(fl))
756 continue;
757 if (filp != fl->fl_file)
758 continue;
759 if (request->fl_type == fl->fl_type)
760 goto out;
761 found = 1;
762 locks_delete_lock(before);
763 break;
766 if (request->fl_type == F_UNLCK) {
767 if ((request->fl_flags & FL_EXISTS) && !found)
768 error = -ENOENT;
769 goto out;
773 * If a higher-priority process was blocked on the old file lock,
774 * give it the opportunity to lock the file.
776 if (found) {
777 unlock_flocks();
778 cond_resched();
779 lock_flocks();
782 find_conflict:
783 for_each_lock(inode, before) {
784 struct file_lock *fl = *before;
785 if (IS_POSIX(fl))
786 break;
787 if (IS_LEASE(fl))
788 continue;
789 if (!flock_locks_conflict(request, fl))
790 continue;
791 error = -EAGAIN;
792 if (!(request->fl_flags & FL_SLEEP))
793 goto out;
794 error = FILE_LOCK_DEFERRED;
795 locks_insert_block(fl, request);
796 goto out;
798 if (request->fl_flags & FL_ACCESS)
799 goto out;
800 locks_copy_lock(new_fl, request);
801 locks_insert_lock(before, new_fl);
802 new_fl = NULL;
803 error = 0;
805 out:
806 unlock_flocks();
807 if (new_fl)
808 locks_free_lock(new_fl);
809 return error;
812 static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
814 struct file_lock *fl;
815 struct file_lock *new_fl = NULL;
816 struct file_lock *new_fl2 = NULL;
817 struct file_lock *left = NULL;
818 struct file_lock *right = NULL;
819 struct file_lock **before;
820 int error, added = 0;
823 * We may need two file_lock structures for this operation,
824 * so we get them in advance to avoid races.
826 * In some cases we can be sure, that no new locks will be needed
828 if (!(request->fl_flags & FL_ACCESS) &&
829 (request->fl_type != F_UNLCK ||
830 request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
831 new_fl = locks_alloc_lock();
832 new_fl2 = locks_alloc_lock();
835 lock_flocks();
836 if (request->fl_type != F_UNLCK) {
837 for_each_lock(inode, before) {
838 fl = *before;
839 if (!IS_POSIX(fl))
840 continue;
841 if (!posix_locks_conflict(request, fl))
842 continue;
843 if (conflock)
844 __locks_copy_lock(conflock, fl);
845 error = -EAGAIN;
846 if (!(request->fl_flags & FL_SLEEP))
847 goto out;
848 error = -EDEADLK;
849 if (posix_locks_deadlock(request, fl))
850 goto out;
851 error = FILE_LOCK_DEFERRED;
852 locks_insert_block(fl, request);
853 goto out;
857 /* If we're just looking for a conflict, we're done. */
858 error = 0;
859 if (request->fl_flags & FL_ACCESS)
860 goto out;
863 * Find the first old lock with the same owner as the new lock.
866 before = &inode->i_flock;
868 /* First skip locks owned by other processes. */
869 while ((fl = *before) && (!IS_POSIX(fl) ||
870 !posix_same_owner(request, fl))) {
871 before = &fl->fl_next;
874 /* Process locks with this owner. */
875 while ((fl = *before) && posix_same_owner(request, fl)) {
876 /* Detect adjacent or overlapping regions (if same lock type)
878 if (request->fl_type == fl->fl_type) {
879 /* In all comparisons of start vs end, use
880 * "start - 1" rather than "end + 1". If end
881 * is OFFSET_MAX, end + 1 will become negative.
883 if (fl->fl_end < request->fl_start - 1)
884 goto next_lock;
885 /* If the next lock in the list has entirely bigger
886 * addresses than the new one, insert the lock here.
888 if (fl->fl_start - 1 > request->fl_end)
889 break;
891 /* If we come here, the new and old lock are of the
892 * same type and adjacent or overlapping. Make one
893 * lock yielding from the lower start address of both
894 * locks to the higher end address.
896 if (fl->fl_start > request->fl_start)
897 fl->fl_start = request->fl_start;
898 else
899 request->fl_start = fl->fl_start;
900 if (fl->fl_end < request->fl_end)
901 fl->fl_end = request->fl_end;
902 else
903 request->fl_end = fl->fl_end;
904 if (added) {
905 locks_delete_lock(before);
906 continue;
908 request = fl;
909 added = 1;
911 else {
912 /* Processing for different lock types is a bit
913 * more complex.
915 if (fl->fl_end < request->fl_start)
916 goto next_lock;
917 if (fl->fl_start > request->fl_end)
918 break;
919 if (request->fl_type == F_UNLCK)
920 added = 1;
921 if (fl->fl_start < request->fl_start)
922 left = fl;
923 /* If the next lock in the list has a higher end
924 * address than the new one, insert the new one here.
926 if (fl->fl_end > request->fl_end) {
927 right = fl;
928 break;
930 if (fl->fl_start >= request->fl_start) {
931 /* The new lock completely replaces an old
932 * one (This may happen several times).
934 if (added) {
935 locks_delete_lock(before);
936 continue;
938 /* Replace the old lock with the new one.
939 * Wake up anybody waiting for the old one,
940 * as the change in lock type might satisfy
941 * their needs.
943 locks_wake_up_blocks(fl);
944 fl->fl_start = request->fl_start;
945 fl->fl_end = request->fl_end;
946 fl->fl_type = request->fl_type;
947 locks_release_private(fl);
948 locks_copy_private(fl, request);
949 request = fl;
950 added = 1;
953 /* Go on to next lock.
955 next_lock:
956 before = &fl->fl_next;
960 * The above code only modifies existing locks in case of
961 * merging or replacing. If new lock(s) need to be inserted
962 * all modifications are done below this, so it's still safe to
963 * bail out.
965 error = -ENOLCK; /* "no luck" */
966 if (right && left == right && !new_fl2)
967 goto out;
969 error = 0;
970 if (!added) {
971 if (request->fl_type == F_UNLCK) {
972 if (request->fl_flags & FL_EXISTS)
973 error = -ENOENT;
974 goto out;
977 if (!new_fl) {
978 error = -ENOLCK;
979 goto out;
981 locks_copy_lock(new_fl, request);
982 locks_insert_lock(before, new_fl);
983 new_fl = NULL;
985 if (right) {
986 if (left == right) {
987 /* The new lock breaks the old one in two pieces,
988 * so we have to use the second new lock.
990 left = new_fl2;
991 new_fl2 = NULL;
992 locks_copy_lock(left, right);
993 locks_insert_lock(before, left);
995 right->fl_start = request->fl_end + 1;
996 locks_wake_up_blocks(right);
998 if (left) {
999 left->fl_end = request->fl_start - 1;
1000 locks_wake_up_blocks(left);
1002 out:
1003 unlock_flocks();
1005 * Free any unused locks.
1007 if (new_fl)
1008 locks_free_lock(new_fl);
1009 if (new_fl2)
1010 locks_free_lock(new_fl2);
1011 return error;
1015 * posix_lock_file - Apply a POSIX-style lock to a file
1016 * @filp: The file to apply the lock to
1017 * @fl: The lock to be applied
1018 * @conflock: Place to return a copy of the conflicting lock, if found.
1020 * Add a POSIX style lock to a file.
1021 * We merge adjacent & overlapping locks whenever possible.
1022 * POSIX locks are sorted by owner task, then by starting address
1024 * Note that if called with an FL_EXISTS argument, the caller may determine
1025 * whether or not a lock was successfully freed by testing the return
1026 * value for -ENOENT.
1028 int posix_lock_file(struct file *filp, struct file_lock *fl,
1029 struct file_lock *conflock)
1031 return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
1033 EXPORT_SYMBOL(posix_lock_file);
1036 * posix_lock_file_wait - Apply a POSIX-style lock to a file
1037 * @filp: The file to apply the lock to
1038 * @fl: The lock to be applied
1040 * Add a POSIX style lock to a file.
1041 * We merge adjacent & overlapping locks whenever possible.
1042 * POSIX locks are sorted by owner task, then by starting address
1044 int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
1046 int error;
1047 might_sleep ();
1048 for (;;) {
1049 error = posix_lock_file(filp, fl, NULL);
1050 if (error != FILE_LOCK_DEFERRED)
1051 break;
1052 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1053 if (!error)
1054 continue;
1056 locks_delete_block(fl);
1057 break;
1059 return error;
1061 EXPORT_SYMBOL(posix_lock_file_wait);
1064 * locks_mandatory_locked - Check for an active lock
1065 * @inode: the file to check
1067 * Searches the inode's list of locks to find any POSIX locks which conflict.
1068 * This function is called from locks_verify_locked() only.
1070 int locks_mandatory_locked(struct inode *inode)
1072 fl_owner_t owner = current->files;
1073 struct file_lock *fl;
1076 * Search the lock list for this inode for any POSIX locks.
1078 lock_flocks();
1079 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
1080 if (!IS_POSIX(fl))
1081 continue;
1082 if (fl->fl_owner != owner)
1083 break;
1085 unlock_flocks();
1086 return fl ? -EAGAIN : 0;
1090 * locks_mandatory_area - Check for a conflicting lock
1091 * @read_write: %FLOCK_VERIFY_WRITE for exclusive access, %FLOCK_VERIFY_READ
1092 * for shared
1093 * @inode: the file to check
1094 * @filp: how the file was opened (if it was)
1095 * @offset: start of area to check
1096 * @count: length of area to check
1098 * Searches the inode's list of locks to find any POSIX locks which conflict.
1099 * This function is called from rw_verify_area() and
1100 * locks_verify_truncate().
1102 int locks_mandatory_area(int read_write, struct inode *inode,
1103 struct file *filp, loff_t offset,
1104 size_t count)
1106 struct file_lock fl;
1107 int error;
1109 locks_init_lock(&fl);
1110 fl.fl_owner = current->files;
1111 fl.fl_pid = current->tgid;
1112 fl.fl_file = filp;
1113 fl.fl_flags = FL_POSIX | FL_ACCESS;
1114 if (filp && !(filp->f_flags & O_NONBLOCK))
1115 fl.fl_flags |= FL_SLEEP;
1116 fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
1117 fl.fl_start = offset;
1118 fl.fl_end = offset + count - 1;
1120 for (;;) {
1121 error = __posix_lock_file(inode, &fl, NULL);
1122 if (error != FILE_LOCK_DEFERRED)
1123 break;
1124 error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
1125 if (!error) {
1127 * If we've been sleeping someone might have
1128 * changed the permissions behind our back.
1130 if (__mandatory_lock(inode))
1131 continue;
1134 locks_delete_block(&fl);
1135 break;
1138 return error;
1141 EXPORT_SYMBOL(locks_mandatory_area);
1143 /* We already had a lease on this file; just change its type */
1144 int lease_modify(struct file_lock **before, int arg)
1146 struct file_lock *fl = *before;
1147 int error = assign_type(fl, arg);
1149 if (error)
1150 return error;
1151 locks_wake_up_blocks(fl);
1152 if (arg == F_UNLCK)
1153 locks_delete_lock(before);
1154 return 0;
1157 EXPORT_SYMBOL(lease_modify);
1159 static void time_out_leases(struct inode *inode)
1161 struct file_lock **before;
1162 struct file_lock *fl;
1164 before = &inode->i_flock;
1165 while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
1166 if ((fl->fl_break_time == 0)
1167 || time_before(jiffies, fl->fl_break_time)) {
1168 before = &fl->fl_next;
1169 continue;
1171 lease_modify(before, fl->fl_type & ~F_INPROGRESS);
1172 if (fl == *before) /* lease_modify may have freed fl */
1173 before = &fl->fl_next;
1178 * __break_lease - revoke all outstanding leases on file
1179 * @inode: the inode of the file to return
1180 * @mode: the open mode (read or write)
1182 * break_lease (inlined for speed) has checked there already is at least
1183 * some kind of lock (maybe a lease) on this file. Leases are broken on
1184 * a call to open() or truncate(). This function can sleep unless you
1185 * specified %O_NONBLOCK to your open().
1187 int __break_lease(struct inode *inode, unsigned int mode)
1189 int error = 0, future;
1190 struct file_lock *new_fl, *flock;
1191 struct file_lock *fl;
1192 unsigned long break_time;
1193 int i_have_this_lease = 0;
1194 int want_write = (mode & O_ACCMODE) != O_RDONLY;
1196 new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
1198 lock_flocks();
1200 time_out_leases(inode);
1202 flock = inode->i_flock;
1203 if ((flock == NULL) || !IS_LEASE(flock))
1204 goto out;
1206 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
1207 if (fl->fl_owner == current->files)
1208 i_have_this_lease = 1;
1210 if (want_write) {
1211 /* If we want write access, we have to revoke any lease. */
1212 future = F_UNLCK | F_INPROGRESS;
1213 } else if (flock->fl_type & F_INPROGRESS) {
1214 /* If the lease is already being broken, we just leave it */
1215 future = flock->fl_type;
1216 } else if (flock->fl_type & F_WRLCK) {
1217 /* Downgrade the exclusive lease to a read-only lease. */
1218 future = F_RDLCK | F_INPROGRESS;
1219 } else {
1220 /* the existing lease was read-only, so we can read too. */
1221 goto out;
1224 if (IS_ERR(new_fl) && !i_have_this_lease
1225 && ((mode & O_NONBLOCK) == 0)) {
1226 error = PTR_ERR(new_fl);
1227 goto out;
1230 break_time = 0;
1231 if (lease_break_time > 0) {
1232 break_time = jiffies + lease_break_time * HZ;
1233 if (break_time == 0)
1234 break_time++; /* so that 0 means no break time */
1237 for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
1238 if (fl->fl_type != future) {
1239 fl->fl_type = future;
1240 fl->fl_break_time = break_time;
1241 /* lease must have lmops break callback */
1242 fl->fl_lmops->fl_break(fl);
1246 if (i_have_this_lease || (mode & O_NONBLOCK)) {
1247 error = -EWOULDBLOCK;
1248 goto out;
1251 restart:
1252 break_time = flock->fl_break_time;
1253 if (break_time != 0) {
1254 break_time -= jiffies;
1255 if (break_time == 0)
1256 break_time++;
1258 locks_insert_block(flock, new_fl);
1259 unlock_flocks();
1260 error = wait_event_interruptible_timeout(new_fl->fl_wait,
1261 !new_fl->fl_next, break_time);
1262 lock_flocks();
1263 __locks_delete_block(new_fl);
1264 if (error >= 0) {
1265 if (error == 0)
1266 time_out_leases(inode);
1267 /* Wait for the next lease that has not been broken yet */
1268 for (flock = inode->i_flock; flock && IS_LEASE(flock);
1269 flock = flock->fl_next) {
1270 if (flock->fl_type & F_INPROGRESS)
1271 goto restart;
1273 error = 0;
1276 out:
1277 unlock_flocks();
1278 if (!IS_ERR(new_fl))
1279 locks_free_lock(new_fl);
1280 return error;
1283 EXPORT_SYMBOL(__break_lease);
1286 * lease_get_mtime - get the last modified time of an inode
1287 * @inode: the inode
1288 * @time: pointer to a timespec which will contain the last modified time
1290 * This is to force NFS clients to flush their caches for files with
1291 * exclusive leases. The justification is that if someone has an
1292 * exclusive lease, then they could be modifying it.
1294 void lease_get_mtime(struct inode *inode, struct timespec *time)
1296 struct file_lock *flock = inode->i_flock;
1297 if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
1298 *time = current_fs_time(inode->i_sb);
1299 else
1300 *time = inode->i_mtime;
1303 EXPORT_SYMBOL(lease_get_mtime);
1306 * fcntl_getlease - Enquire what lease is currently active
1307 * @filp: the file
1309 * The value returned by this function will be one of
1310 * (if no lease break is pending):
1312 * %F_RDLCK to indicate a shared lease is held.
1314 * %F_WRLCK to indicate an exclusive lease is held.
1316 * %F_UNLCK to indicate no lease is held.
1318 * (if a lease break is pending):
1320 * %F_RDLCK to indicate an exclusive lease needs to be
1321 * changed to a shared lease (or removed).
1323 * %F_UNLCK to indicate the lease needs to be removed.
1325 * XXX: sfr & willy disagree over whether F_INPROGRESS
1326 * should be returned to userspace.
1328 int fcntl_getlease(struct file *filp)
1330 struct file_lock *fl;
1331 int type = F_UNLCK;
1333 lock_flocks();
1334 time_out_leases(filp->f_path.dentry->d_inode);
1335 for (fl = filp->f_path.dentry->d_inode->i_flock; fl && IS_LEASE(fl);
1336 fl = fl->fl_next) {
1337 if (fl->fl_file == filp) {
1338 type = fl->fl_type & ~F_INPROGRESS;
1339 break;
1342 unlock_flocks();
1343 return type;
1347 * generic_setlease - sets a lease on an open file
1348 * @filp: file pointer
1349 * @arg: type of lease to obtain
1350 * @flp: input - file_lock to use, output - file_lock inserted
1352 * The (input) flp->fl_lmops->fl_break function is required
1353 * by break_lease().
1355 * Called with file_lock_lock held.
1357 int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
1359 struct file_lock *fl, **before, **my_before = NULL, *lease;
1360 struct dentry *dentry = filp->f_path.dentry;
1361 struct inode *inode = dentry->d_inode;
1362 int error, rdlease_count = 0, wrlease_count = 0;
1364 lease = *flp;
1366 error = -EACCES;
1367 if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE))
1368 goto out;
1369 error = -EINVAL;
1370 if (!S_ISREG(inode->i_mode))
1371 goto out;
1372 error = security_file_lock(filp, arg);
1373 if (error)
1374 goto out;
1376 time_out_leases(inode);
1378 BUG_ON(!(*flp)->fl_lmops->fl_break);
1380 if (arg != F_UNLCK) {
1381 error = -EAGAIN;
1382 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
1383 goto out;
1384 if ((arg == F_WRLCK)
1385 && ((dentry->d_count > 1)
1386 || (atomic_read(&inode->i_count) > 1)))
1387 goto out;
1391 * At this point, we know that if there is an exclusive
1392 * lease on this file, then we hold it on this filp
1393 * (otherwise our open of this file would have blocked).
1394 * And if we are trying to acquire an exclusive lease,
1395 * then the file is not open by anyone (including us)
1396 * except for this filp.
1398 for (before = &inode->i_flock;
1399 ((fl = *before) != NULL) && IS_LEASE(fl);
1400 before = &fl->fl_next) {
1401 if (fl->fl_file == filp)
1402 my_before = before;
1403 else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
1405 * Someone is in the process of opening this
1406 * file for writing so we may not take an
1407 * exclusive lease on it.
1409 wrlease_count++;
1410 else
1411 rdlease_count++;
1414 error = -EAGAIN;
1415 if ((arg == F_RDLCK && (wrlease_count > 0)) ||
1416 (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
1417 goto out;
1419 if (my_before != NULL) {
1420 error = lease->fl_lmops->fl_change(my_before, arg);
1421 if (!error)
1422 *flp = *my_before;
1423 goto out;
1426 if (arg == F_UNLCK)
1427 goto out;
1429 error = -EINVAL;
1430 if (!leases_enable)
1431 goto out;
1433 locks_insert_lock(before, lease);
1434 return 0;
1436 out:
1437 return error;
1439 EXPORT_SYMBOL(generic_setlease);
1441 static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1443 if (filp->f_op && filp->f_op->setlease)
1444 return filp->f_op->setlease(filp, arg, lease);
1445 else
1446 return generic_setlease(filp, arg, lease);
1450 * vfs_setlease - sets a lease on an open file
1451 * @filp: file pointer
1452 * @arg: type of lease to obtain
1453 * @lease: file_lock to use
1455 * Call this to establish a lease on the file.
1456 * The (*lease)->fl_lmops->fl_break operation must be set; if not,
1457 * break_lease will oops!
1459 * This will call the filesystem's setlease file method, if
1460 * defined. Note that there is no getlease method; instead, the
1461 * filesystem setlease method should call back to setlease() to
1462 * add a lease to the inode's lease list, where fcntl_getlease() can
1463 * find it. Since fcntl_getlease() only reports whether the current
1464 * task holds a lease, a cluster filesystem need only do this for
1465 * leases held by processes on this node.
1467 * There is also no break_lease method; filesystems that
1468 * handle their own leases should break leases themselves from the
1469 * filesystem's open, create, and (on truncate) setattr methods.
1471 * Warning: the current setlease methods exist only to disable
1472 * leases in certain cases. More vfs changes may be required to
1473 * allow a full filesystem lease implementation.
1476 int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
1478 int error;
1480 lock_flocks();
1481 error = __vfs_setlease(filp, arg, lease);
1482 unlock_flocks();
1484 return error;
1486 EXPORT_SYMBOL_GPL(vfs_setlease);
1488 static int do_fcntl_delete_lease(struct file *filp)
1490 struct file_lock fl, *flp = &fl;
1492 lease_init(filp, F_UNLCK, flp);
1494 return vfs_setlease(filp, F_UNLCK, &flp);
1497 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
1499 struct file_lock *fl, *ret;
1500 struct fasync_struct *new;
1501 int error;
1503 fl = lease_alloc(filp, arg);
1504 if (IS_ERR(fl))
1505 return PTR_ERR(fl);
1507 new = fasync_alloc();
1508 if (!new) {
1509 locks_free_lock(fl);
1510 return -ENOMEM;
1512 ret = fl;
1513 lock_flocks();
1514 error = __vfs_setlease(filp, arg, &ret);
1515 if (error) {
1516 unlock_flocks();
1517 locks_free_lock(fl);
1518 goto out_free_fasync;
1520 if (ret != fl)
1521 locks_free_lock(fl);
1524 * fasync_insert_entry() returns the old entry if any.
1525 * If there was no old entry, then it used 'new' and
1526 * inserted it into the fasync list. Clear new so that
1527 * we don't release it here.
1529 if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
1530 new = NULL;
1532 error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
1533 unlock_flocks();
1535 out_free_fasync:
1536 if (new)
1537 fasync_free(new);
1538 return error;
1542 * fcntl_setlease - sets a lease on an open file
1543 * @fd: open file descriptor
1544 * @filp: file pointer
1545 * @arg: type of lease to obtain
1547 * Call this fcntl to establish a lease on the file.
1548 * Note that you also need to call %F_SETSIG to
1549 * receive a signal when the lease is broken.
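/*
 * For illustration only -- typical userspace usage of the lease fcntls
 * (this snippet is not part of the kernel; fd names a regular file the
 * caller owns, and _GNU_SOURCE is assumed for F_SETLEASE/F_SETSIG):
 *
 *	#include <fcntl.h>
 *	#include <signal.h>
 *
 *	fcntl(fd, F_SETSIG, SIGRTMIN);		// signal to deliver on lease break
 *	fcntl(fd, F_SETLEASE, F_RDLCK);		// take a read lease
 *	// ... handle SIGRTMIN, then give the lease back:
 *	fcntl(fd, F_SETLEASE, F_UNLCK);
 */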
1551 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1553 if (arg == F_UNLCK)
1554 return do_fcntl_delete_lease(filp);
1555 return do_fcntl_add_lease(fd, filp, arg);
1559 * flock_lock_file_wait - Apply a FLOCK-style lock to a file
1560 * @filp: The file to apply the lock to
1561 * @fl: The lock to be applied
1563 * Add a FLOCK style lock to a file.
1565 int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
1567 int error;
1568 might_sleep();
1569 for (;;) {
1570 error = flock_lock_file(filp, fl);
1571 if (error != FILE_LOCK_DEFERRED)
1572 break;
1573 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1574 if (!error)
1575 continue;
1577 locks_delete_block(fl);
1578 break;
1580 return error;
1583 EXPORT_SYMBOL(flock_lock_file_wait);
1586 * sys_flock: - flock() system call.
1587 * @fd: the file descriptor to lock.
1588 * @cmd: the type of lock to apply.
1590 * Apply a %FL_FLOCK style lock to an open file descriptor.
1591 * The @cmd can be one of
1593 * %LOCK_SH -- a shared lock.
1595 * %LOCK_EX -- an exclusive lock.
1597 * %LOCK_UN -- remove an existing lock.
1599 * %LOCK_MAND -- a `mandatory' flock. This exists to emulate Windows Share Modes.
1601 * %LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
1602 * processes read and write access respectively.
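/*
 * For illustration only -- typical userspace usage (not part of this
 * file; fd is an already-open descriptor):
 *
 *	#include <sys/file.h>
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1)	// try an exclusive lock
 *		perror("flock");		// EWOULDBLOCK if someone else holds it
 *	// ... critical section ...
 *	flock(fd, LOCK_UN);			// release the lock
 */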
1604 SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1606 struct file *filp;
1607 struct file_lock *lock;
1608 int can_sleep, unlock;
1609 int error;
1611 error = -EBADF;
1612 filp = fget(fd);
1613 if (!filp)
1614 goto out;
1616 can_sleep = !(cmd & LOCK_NB);
1617 cmd &= ~LOCK_NB;
1618 unlock = (cmd == LOCK_UN);
1620 if (!unlock && !(cmd & LOCK_MAND) &&
1621 !(filp->f_mode & (FMODE_READ|FMODE_WRITE)))
1622 goto out_putf;
1624 error = flock_make_lock(filp, &lock, cmd);
1625 if (error)
1626 goto out_putf;
1627 if (can_sleep)
1628 lock->fl_flags |= FL_SLEEP;
1630 error = security_file_lock(filp, lock->fl_type);
1631 if (error)
1632 goto out_free;
1634 if (filp->f_op && filp->f_op->flock)
1635 error = filp->f_op->flock(filp,
1636 (can_sleep) ? F_SETLKW : F_SETLK,
1637 lock);
1638 else
1639 error = flock_lock_file_wait(filp, lock);
1641 out_free:
1642 locks_free_lock(lock);
1644 out_putf:
1645 fput(filp);
1646 out:
1647 return error;
1651 * vfs_test_lock - test file byte range lock
1652 * @filp: The file to test lock for
1653 * @fl: The lock to test; also used to hold result
1655 * Returns -ERRNO on failure. Indicates presence of conflicting lock by
1656 * setting conf->fl_type to something other than F_UNLCK.
1658 int vfs_test_lock(struct file *filp, struct file_lock *fl)
1660 if (filp->f_op && filp->f_op->lock)
1661 return filp->f_op->lock(filp, F_GETLK, fl);
1662 posix_test_lock(filp, fl);
1663 return 0;
1665 EXPORT_SYMBOL_GPL(vfs_test_lock);
1667 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
1669 flock->l_pid = fl->fl_pid;
1670 #if BITS_PER_LONG == 32
1672 * Make sure we can represent the posix lock via
1673 * legacy 32bit flock.
1675 if (fl->fl_start > OFFT_OFFSET_MAX)
1676 return -EOVERFLOW;
1677 if (fl->fl_end != OFFSET_MAX && fl->fl_end > OFFT_OFFSET_MAX)
1678 return -EOVERFLOW;
1679 #endif
1680 flock->l_start = fl->fl_start;
1681 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1682 fl->fl_end - fl->fl_start + 1;
1683 flock->l_whence = 0;
1684 flock->l_type = fl->fl_type;
1685 return 0;
1688 #if BITS_PER_LONG == 32
1689 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
1691 flock->l_pid = fl->fl_pid;
1692 flock->l_start = fl->fl_start;
1693 flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
1694 fl->fl_end - fl->fl_start + 1;
1695 flock->l_whence = 0;
1696 flock->l_type = fl->fl_type;
1698 #endif
1700 /* Report the first existing lock that would conflict with l.
1701 * This implements the F_GETLK command of fcntl().
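/*
 * For illustration only -- typical userspace usage (not part of this
 * file): ask whether a whole-file write lock could be placed on fd.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,			// 0 means "to end of file"
 *	};
 *	fcntl(fd, F_GETLK, &fl);
 *	// fl.l_type is now F_UNLCK if nothing conflicts; otherwise fl
 *	// describes the first conflicting lock, including its l_pid.
 */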
1703 int fcntl_getlk(struct file *filp, struct flock __user *l)
1705 struct file_lock file_lock;
1706 struct flock flock;
1707 int error;
1709 error = -EFAULT;
1710 if (copy_from_user(&flock, l, sizeof(flock)))
1711 goto out;
1712 error = -EINVAL;
1713 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1714 goto out;
1716 error = flock_to_posix_lock(filp, &file_lock, &flock);
1717 if (error)
1718 goto out;
1720 error = vfs_test_lock(filp, &file_lock);
1721 if (error)
1722 goto out;
1724 flock.l_type = file_lock.fl_type;
1725 if (file_lock.fl_type != F_UNLCK) {
1726 error = posix_lock_to_flock(&flock, &file_lock);
1727 if (error)
1728 goto out;
1730 error = -EFAULT;
1731 if (!copy_to_user(l, &flock, sizeof(flock)))
1732 error = 0;
1733 out:
1734 return error;
1738 * vfs_lock_file - file byte range lock
1739 * @filp: The file to apply the lock to
1740 * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.)
1741 * @fl: The lock to be applied
1742 * @conf: Place to return a copy of the conflicting lock, if found.
1744 * A caller that doesn't care about the conflicting lock may pass NULL
1745 * as the final argument.
1747 * If the filesystem defines a private ->lock() method, then @conf will
1748 * be left unchanged; so a caller that cares should initialize it to
1749 * some acceptable default.
1751 * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
1752 * locks, the ->lock() interface may return asynchronously, before the lock has
1753 * been granted or denied by the underlying filesystem, if (and only if)
1754 * fl_grant is set. Callers expecting ->lock() to return asynchronously
1755 * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
1756 * the request is for a blocking lock. When ->lock() does return asynchronously,
1757 * it must return FILE_LOCK_DEFERRED, and call ->fl_grant() when the lock
1758 * request completes.
1759 * If the request is for a non-blocking lock, the file system should return
1760 * FILE_LOCK_DEFERRED, then try to get the lock and call the callback routine
1761 * with the result. If the request timed out, the callback routine will return a
1762 * nonzero return code and the file system should release the lock. The file
1763 * system is also responsible for keeping a corresponding posix lock when it
1764 * grants a lock, so the VFS can find out which locks are locally held and do
1765 * the correct lock cleanup when required.
1766 * The underlying filesystem must not drop the kernel lock or call
1767 * ->fl_grant() before returning to the caller with a FILE_LOCK_DEFERRED
1768 * return code.
1770 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
1772 if (filp->f_op && filp->f_op->lock)
1773 return filp->f_op->lock(filp, cmd, fl);
1774 else
1775 return posix_lock_file(filp, fl, conf);
1777 EXPORT_SYMBOL_GPL(vfs_lock_file);
1779 static int do_lock_file_wait(struct file *filp, unsigned int cmd,
1780 struct file_lock *fl)
1782 int error;
1784 error = security_file_lock(filp, fl->fl_type);
1785 if (error)
1786 return error;
1788 for (;;) {
1789 error = vfs_lock_file(filp, cmd, fl, NULL);
1790 if (error != FILE_LOCK_DEFERRED)
1791 break;
1792 error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
1793 if (!error)
1794 continue;
1796 locks_delete_block(fl);
1797 break;
1800 return error;
1803 /* Apply the lock described by l to an open file descriptor.
1804 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
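/*
 * For illustration only -- typical userspace usage (not part of this
 * file): take and release a whole-file write lock on fd.
 *
 *	#include <fcntl.h>
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,		// exclusive record lock
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,			// 0 means "to end of file"
 *	};
 *	fcntl(fd, F_SETLKW, &fl);		// block until granted
 *	fl.l_type = F_UNLCK;
 *	fcntl(fd, F_SETLK, &fl);		// drop the lock again
 */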
1806 int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
1807 struct flock __user *l)
1809 struct file_lock *file_lock = locks_alloc_lock();
1810 struct flock flock;
1811 struct inode *inode;
1812 struct file *f;
1813 int error;
1815 if (file_lock == NULL)
1816 return -ENOLCK;
1819 * This might block, so we do it before checking the inode.
1821 error = -EFAULT;
1822 if (copy_from_user(&flock, l, sizeof(flock)))
1823 goto out;
1825 inode = filp->f_path.dentry->d_inode;
1827 /* Don't allow mandatory locks on files that may be memory mapped
1828 * and shared.
1830 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1831 error = -EAGAIN;
1832 goto out;
1835 again:
1836 error = flock_to_posix_lock(filp, file_lock, &flock);
1837 if (error)
1838 goto out;
1839 if (cmd == F_SETLKW) {
1840 file_lock->fl_flags |= FL_SLEEP;
1843 error = -EBADF;
1844 switch (flock.l_type) {
1845 case F_RDLCK:
1846 if (!(filp->f_mode & FMODE_READ))
1847 goto out;
1848 break;
1849 case F_WRLCK:
1850 if (!(filp->f_mode & FMODE_WRITE))
1851 goto out;
1852 break;
1853 case F_UNLCK:
1854 break;
1855 default:
1856 error = -EINVAL;
1857 goto out;
1860 error = do_lock_file_wait(filp, cmd, file_lock);
1863 * Attempt to detect a close/fcntl race and recover by
1864 * releasing the lock that was just acquired.
1867 * we need that spin_lock here - it prevents reordering between
1868 * update of inode->i_flock and check for it done in close().
1869 * rcu_read_lock() wouldn't do.
1871 spin_lock(&current->files->file_lock);
1872 f = fcheck(fd);
1873 spin_unlock(&current->files->file_lock);
1874 if (!error && f != filp && flock.l_type != F_UNLCK) {
1875 flock.l_type = F_UNLCK;
1876 goto again;
1879 out:
1880 locks_free_lock(file_lock);
1881 return error;
1884 #if BITS_PER_LONG == 32
1885 /* Report the first existing lock that would conflict with l.
1886 * This implements the F_GETLK command of fcntl().
1888 int fcntl_getlk64(struct file *filp, struct flock64 __user *l)
1890 struct file_lock file_lock;
1891 struct flock64 flock;
1892 int error;
1894 error = -EFAULT;
1895 if (copy_from_user(&flock, l, sizeof(flock)))
1896 goto out;
1897 error = -EINVAL;
1898 if ((flock.l_type != F_RDLCK) && (flock.l_type != F_WRLCK))
1899 goto out;
1901 error = flock64_to_posix_lock(filp, &file_lock, &flock);
1902 if (error)
1903 goto out;
1905 error = vfs_test_lock(filp, &file_lock);
1906 if (error)
1907 goto out;
1909 flock.l_type = file_lock.fl_type;
1910 if (file_lock.fl_type != F_UNLCK)
1911 posix_lock_to_flock64(&flock, &file_lock);
1913 error = -EFAULT;
1914 if (!copy_to_user(l, &flock, sizeof(flock)))
1915 error = 0;
1917 out:
1918 return error;
1921 /* Apply the lock described by l to an open file descriptor.
1922 * This implements both the F_SETLK and F_SETLKW commands of fcntl().
1924 int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
1925 struct flock64 __user *l)
1927 struct file_lock *file_lock = locks_alloc_lock();
1928 struct flock64 flock;
1929 struct inode *inode;
1930 struct file *f;
1931 int error;
1933 if (file_lock == NULL)
1934 return -ENOLCK;
1937 * This might block, so we do it before checking the inode.
1939 error = -EFAULT;
1940 if (copy_from_user(&flock, l, sizeof(flock)))
1941 goto out;
1943 inode = filp->f_path.dentry->d_inode;
1945 /* Don't allow mandatory locks on files that may be memory mapped
1946 * and shared.
1948 if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) {
1949 error = -EAGAIN;
1950 goto out;
1953 again:
1954 error = flock64_to_posix_lock(filp, file_lock, &flock);
1955 if (error)
1956 goto out;
1957 if (cmd == F_SETLKW64) {
1958 file_lock->fl_flags |= FL_SLEEP;
1961 error = -EBADF;
1962 switch (flock.l_type) {
1963 case F_RDLCK:
1964 if (!(filp->f_mode & FMODE_READ))
1965 goto out;
1966 break;
1967 case F_WRLCK:
1968 if (!(filp->f_mode & FMODE_WRITE))
1969 goto out;
1970 break;
1971 case F_UNLCK:
1972 break;
1973 default:
1974 error = -EINVAL;
1975 goto out;
1978 error = do_lock_file_wait(filp, cmd, file_lock);
1981 * Attempt to detect a close/fcntl race and recover by
1982 * releasing the lock that was just acquired.
1984 spin_lock(&current->files->file_lock);
1985 f = fcheck(fd);
1986 spin_unlock(&current->files->file_lock);
1987 if (!error && f != filp && flock.l_type != F_UNLCK) {
1988 flock.l_type = F_UNLCK;
1989 goto again;
1992 out:
1993 locks_free_lock(file_lock);
1994 return error;
1996 #endif /* BITS_PER_LONG == 32 */
1999 * This function is called when the file is being removed
2000 * from the task's fd array. POSIX locks belonging to this task
2001 * are deleted at this time.
2003 void locks_remove_posix(struct file *filp, fl_owner_t owner)
2005 struct file_lock lock;
2008 * If there are no locks held on this file, we don't need to call
2009 * posix_lock_file(). Another process could be setting a lock on this
2010 * file at the same time, but we wouldn't remove that lock anyway.
2012 if (!filp->f_path.dentry->d_inode->i_flock)
2013 return;
2015 lock.fl_type = F_UNLCK;
2016 lock.fl_flags = FL_POSIX | FL_CLOSE;
2017 lock.fl_start = 0;
2018 lock.fl_end = OFFSET_MAX;
2019 lock.fl_owner = owner;
2020 lock.fl_pid = current->tgid;
2021 lock.fl_file = filp;
2022 lock.fl_ops = NULL;
2023 lock.fl_lmops = NULL;
2025 vfs_lock_file(filp, F_SETLK, &lock, NULL);
2027 if (lock.fl_ops && lock.fl_ops->fl_release_private)
2028 lock.fl_ops->fl_release_private(&lock);
2031 EXPORT_SYMBOL(locks_remove_posix);
2034 * This function is called on the last close of an open file.
2036 void locks_remove_flock(struct file *filp)
2038 struct inode * inode = filp->f_path.dentry->d_inode;
2039 struct file_lock *fl;
2040 struct file_lock **before;
2042 if (!inode->i_flock)
2043 return;
2045 if (filp->f_op && filp->f_op->flock) {
2046 struct file_lock fl = {
2047 .fl_pid = current->tgid,
2048 .fl_file = filp,
2049 .fl_flags = FL_FLOCK,
2050 .fl_type = F_UNLCK,
2051 .fl_end = OFFSET_MAX,
2053 filp->f_op->flock(filp, F_SETLKW, &fl);
2054 if (fl.fl_ops && fl.fl_ops->fl_release_private)
2055 fl.fl_ops->fl_release_private(&fl);
2058 lock_flocks();
2059 before = &inode->i_flock;
2061 while ((fl = *before) != NULL) {
2062 if (fl->fl_file == filp) {
2063 if (IS_FLOCK(fl)) {
2064 locks_delete_lock(before);
2065 continue;
2067 if (IS_LEASE(fl)) {
2068 lease_modify(before, F_UNLCK);
2069 continue;
2071 /* What? */
2072 BUG();
2074 before = &fl->fl_next;
2076 unlock_flocks();
2080 * posix_unblock_lock - stop waiting for a file lock
2081 * @filp: how the file was opened
2082 * @waiter: the lock which was waiting
2084 * lockd needs to block waiting for locks.
2087 posix_unblock_lock(struct file *filp, struct file_lock *waiter)
2089 int status = 0;
2091 lock_flocks();
2092 if (waiter->fl_next)
2093 __locks_delete_block(waiter);
2094 else
2095 status = -ENOENT;
2096 unlock_flocks();
2097 return status;
2100 EXPORT_SYMBOL(posix_unblock_lock);
2103 * vfs_cancel_lock - file byte range unblock lock
2104 * @filp: The file to apply the unblock to
2105 * @fl: The lock to be unblocked
2107 * Used by lock managers to cancel blocked requests
2109 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
2111 if (filp->f_op && filp->f_op->lock)
2112 return filp->f_op->lock(filp, F_CANCELLK, fl);
2113 return 0;
2116 EXPORT_SYMBOL_GPL(vfs_cancel_lock);
2118 #ifdef CONFIG_PROC_FS
2119 #include <linux/proc_fs.h>
2120 #include <linux/seq_file.h>
2122 static void lock_get_status(struct seq_file *f, struct file_lock *fl,
2123 loff_t id, char *pfx)
2125 struct inode *inode = NULL;
2126 unsigned int fl_pid;
2128 if (fl->fl_nspid)
2129 fl_pid = pid_vnr(fl->fl_nspid);
2130 else
2131 fl_pid = fl->fl_pid;
2133 if (fl->fl_file != NULL)
2134 inode = fl->fl_file->f_path.dentry->d_inode;
2136 seq_printf(f, "%lld:%s ", id, pfx);
2137 if (IS_POSIX(fl)) {
2138 seq_printf(f, "%6s %s ",
2139 (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ",
2140 (inode == NULL) ? "*NOINODE*" :
2141 mandatory_lock(inode) ? "MANDATORY" : "ADVISORY ");
2142 } else if (IS_FLOCK(fl)) {
2143 if (fl->fl_type & LOCK_MAND) {
2144 seq_printf(f, "FLOCK MSNFS ");
2145 } else {
2146 seq_printf(f, "FLOCK ADVISORY ");
2148 } else if (IS_LEASE(fl)) {
2149 seq_printf(f, "LEASE ");
2150 if (fl->fl_type & F_INPROGRESS)
2151 seq_printf(f, "BREAKING ");
2152 else if (fl->fl_file)
2153 seq_printf(f, "ACTIVE ");
2154 else
2155 seq_printf(f, "BREAKER ");
2156 } else {
2157 seq_printf(f, "UNKNOWN UNKNOWN ");
2159 if (fl->fl_type & LOCK_MAND) {
2160 seq_printf(f, "%s ",
2161 (fl->fl_type & LOCK_READ)
2162 ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
2163 : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
2164 } else {
2165 seq_printf(f, "%s ",
2166 (fl->fl_type & F_INPROGRESS)
2167 ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ "
2168 : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ ");
2170 if (inode) {
2171 #ifdef WE_CAN_BREAK_LSLK_NOW
2172 seq_printf(f, "%d %s:%ld ", fl_pid,
2173 inode->i_sb->s_id, inode->i_ino);
2174 #else
2175 /* userspace relies on this representation of dev_t ;-( */
2176 seq_printf(f, "%d %02x:%02x:%ld ", fl_pid,
2177 MAJOR(inode->i_sb->s_dev),
2178 MINOR(inode->i_sb->s_dev), inode->i_ino);
2179 #endif
2180 } else {
2181 seq_printf(f, "%d <none>:0 ", fl_pid);
2183 if (IS_POSIX(fl)) {
2184 if (fl->fl_end == OFFSET_MAX)
2185 seq_printf(f, "%Ld EOF\n", fl->fl_start);
2186 else
2187 seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end);
2188 } else {
2189 seq_printf(f, "0 EOF\n");
2193 static int locks_show(struct seq_file *f, void *v)
2195 struct file_lock *fl, *bfl;
2197 fl = list_entry(v, struct file_lock, fl_link);
2199 lock_get_status(f, fl, *((loff_t *)f->private), "");
2201 list_for_each_entry(bfl, &fl->fl_block, fl_block)
2202 lock_get_status(f, bfl, *((loff_t *)f->private), " ->");
2204 return 0;
2207 static void *locks_start(struct seq_file *f, loff_t *pos)
2209 loff_t *p = f->private;
2211 lock_flocks();
2212 *p = (*pos + 1);
2213 return seq_list_start(&file_lock_list, *pos);
2216 static void *locks_next(struct seq_file *f, void *v, loff_t *pos)
2218 loff_t *p = f->private;
2219 ++*p;
2220 return seq_list_next(v, &file_lock_list, pos);
2223 static void locks_stop(struct seq_file *f, void *v)
2225 unlock_flocks();
2228 static const struct seq_operations locks_seq_operations = {
2229 .start = locks_start,
2230 .next = locks_next,
2231 .stop = locks_stop,
2232 .show = locks_show,
2235 static int locks_open(struct inode *inode, struct file *filp)
2237 return seq_open_private(filp, &locks_seq_operations, sizeof(loff_t));
2240 static const struct file_operations proc_locks_operations = {
2241 .open = locks_open,
2242 .read = seq_read,
2243 .llseek = seq_lseek,
2244 .release = seq_release_private,
2247 static int __init proc_locks_init(void)
2249 proc_create("locks", 0, NULL, &proc_locks_operations);
2250 return 0;
2252 module_init(proc_locks_init);
2253 #endif
2256 * lock_may_read - checks that the region is free of locks
2257 * @inode: the inode that is being read
2258 * @start: the first byte to read
2259 * @len: the number of bytes to read
2261 * Emulates Windows locking requirements. Whole-file
2262 * mandatory locks (share modes) can prohibit a read and
2263 * byte-range POSIX locks can prohibit a read if they overlap.
2265 * N.B. this function is only ever called
2266 * from knfsd and ownership of locks is never checked.
2268 int lock_may_read(struct inode *inode, loff_t start, unsigned long len)
2270 struct file_lock *fl;
2271 int result = 1;
2272 lock_flocks();
2273 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2274 if (IS_POSIX(fl)) {
2275 if (fl->fl_type == F_RDLCK)
2276 continue;
2277 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2278 continue;
2279 } else if (IS_FLOCK(fl)) {
2280 if (!(fl->fl_type & LOCK_MAND))
2281 continue;
2282 if (fl->fl_type & LOCK_READ)
2283 continue;
2284 } else
2285 continue;
2286 result = 0;
2287 break;
2289 unlock_flocks();
2290 return result;
2293 EXPORT_SYMBOL(lock_may_read);
2296 * lock_may_write - checks that the region is free of locks
2297 * @inode: the inode that is being written
2298 * @start: the first byte to write
2299 * @len: the number of bytes to write
2301 * Emulates Windows locking requirements. Whole-file
2302 * mandatory locks (share modes) can prohibit a write and
2303 * byte-range POSIX locks can prohibit a write if they overlap.
2305 * N.B. this function is only ever called
2306 * from knfsd and ownership of locks is never checked.
2308 int lock_may_write(struct inode *inode, loff_t start, unsigned long len)
2310 struct file_lock *fl;
2311 int result = 1;
2312 lock_flocks();
2313 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
2314 if (IS_POSIX(fl)) {
2315 if ((fl->fl_end < start) || (fl->fl_start > (start + len)))
2316 continue;
2317 } else if (IS_FLOCK(fl)) {
2318 if (!(fl->fl_type & LOCK_MAND))
2319 continue;
2320 if (fl->fl_type & LOCK_WRITE)
2321 continue;
2322 } else
2323 continue;
2324 result = 0;
2325 break;
2327 unlock_flocks();
2328 return result;
2331 EXPORT_SYMBOL(lock_may_write);
2333 static int __init filelock_init(void)
2335 filelock_cache = kmem_cache_create("file_lock_cache",
2336 sizeof(struct file_lock), 0, SLAB_PANIC,
2337 init_once);
2338 return 0;
2341 core_initcall(filelock_init);