/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include "internal.h"


LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);
/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
{
	struct super_block *sb;
	int	fs_objects = 0;
	int	total_objects;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * Deadlock avoidance.  We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends..
	 */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;

	if (!grab_super_passive(sb))
		return !sc->nr_to_scan ? 0 : -1;

	if (sb->s_op && sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb);

	total_objects = sb->s_nr_dentry_unused +
			sb->s_nr_inodes_unused + fs_objects + 1;

	if (sc->nr_to_scan) {
		int	dentries;
		int	inodes;

		/* proportion the scan between the caches */
		dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) /
							total_objects;
		inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) /
							total_objects;
		if (fs_objects)
			fs_objects = (sc->nr_to_scan * fs_objects) /
							total_objects;
		/*
		 * prune the dcache first as the icache is pinned by it, then
		 * prune the icache, followed by the filesystem specific caches
		 */
		prune_dcache_sb(sb, dentries);
		prune_icache_sb(sb, inodes);

		if (fs_objects && sb->s_op->free_cached_objects) {
			sb->s_op->free_cached_objects(sb, fs_objects);
			fs_objects = sb->s_op->nr_cached_objects(sb);
		}
		total_objects = sb->s_nr_dentry_unused +
				sb->s_nr_inodes_unused + fs_objects;
	}

	total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure;
	drop_super(sb);
	return total_objects;
}
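/*
 * Worked example of the proportional split above (illustrative numbers,
 * not from any real workload): with sc->nr_to_scan = 128,
 * s_nr_dentry_unused = 600, s_nr_inodes_unused = 300 and fs_objects = 100,
 * total_objects = 600 + 300 + 100 + 1 = 1001, so the scan is split as
 *
 *	dentries   = (128 * 600) / 1001 = 76
 *	inodes     = (128 * 300) / 1001 = 38
 *	fs_objects = (128 * 100) / 1001 = 12
 *
 * i.e. each cache is pruned roughly in proportion to its share of the
 * reclaimable objects; the "+ 1" keeps the divisor non-zero when all
 * three counts are zero.
 */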
/**
 *	alloc_super	-	create new superblock
 *	@type:	filesystem type superblock should belong to
 *
 *	Allocates and initializes a new &struct super_block.  alloc_super()
 *	returns a pointer to a new superblock or %NULL if allocation had failed.
 */
static struct super_block *alloc_super(struct file_system_type *type)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static const struct super_operations default_op;

	if (s) {
		if (security_sb_alloc(s)) {
			kfree(s);
			s = NULL;
			goto out;
		}
#ifdef CONFIG_SMP
		s->s_files = alloc_percpu(struct list_head);
		if (!s->s_files) {
			security_sb_free(s);
			kfree(s);
			s = NULL;
			goto out;
		} else {
			int i;

			for_each_possible_cpu(i)
				INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
		}
#else
		INIT_LIST_HEAD(&s->s_files);
#endif
		s->s_bdi = &default_backing_dev_info;
		INIT_LIST_HEAD(&s->s_instances);
		INIT_HLIST_BL_HEAD(&s->s_anon);
		INIT_LIST_HEAD(&s->s_inodes);
		INIT_LIST_HEAD(&s->s_dentry_lru);
		INIT_LIST_HEAD(&s->s_inode_lru);
		spin_lock_init(&s->s_inode_lru_lock);
		init_rwsem(&s->s_umount);
		mutex_init(&s->s_lock);
		lockdep_set_class(&s->s_umount, &type->s_umount_key);
		/*
		 * The locking rules for s_lock are up to the
		 * filesystem. For example ext3fs has different
		 * lock ordering than usbfs:
		 */
		lockdep_set_class(&s->s_lock, &type->s_lock_key);
		/*
		 * sget() can have s_umount recursion.
		 *
		 * When it cannot find a suitable sb, it allocates a new
		 * one (this one), and tries again to find a suitable old
		 * one.
		 *
		 * In case that succeeds, it will acquire the s_umount
		 * lock of the old one. Since these are clearly distinct
		 * locks, and this object isn't exposed yet, there's no
		 * risk of deadlocks.
		 *
		 * Annotate this by putting this lock in a different
		 * subclass.
		 */
		down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
		s->s_count = 1;
		atomic_set(&s->s_active, 1);
		mutex_init(&s->s_vfs_rename_mutex);
		lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
		mutex_init(&s->s_dquot.dqio_mutex);
		mutex_init(&s->s_dquot.dqonoff_mutex);
		init_rwsem(&s->s_dquot.dqptr_sem);
		init_waitqueue_head(&s->s_wait_unfrozen);
		s->s_maxbytes = MAX_NON_LFS;
		s->s_op = &default_op;
		s->s_time_gran = 1000000000;
		s->cleancache_poolid = -1;

		s->s_shrink.seeks = DEFAULT_SEEKS;
		s->s_shrink.shrink = prune_super;
		s->s_shrink.batch = 1024;
	}
out:
	return s;
}
/**
 *	destroy_super	-	frees a superblock
 *	@s: superblock to free
 *
 *	Frees a superblock.
 */
static inline void destroy_super(struct super_block *s)
{
#ifdef CONFIG_SMP
	free_percpu(s->s_files);
#endif
	security_sb_free(s);
	kfree(s->s_subtype);
	kfree(s->s_options);
	kfree(s);
}

/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
void __put_super(struct super_block *sb)
{
	if (!--sb->s_count) {
		list_del_init(&sb->s_list);
		destroy_super(sb);
	}
}

/**
 *	put_super	-	drop a temporary reference to superblock
 *	@sb: superblock in question
 *
 *	Drops a temporary reference, frees superblock if there are no
 *	references left.
 */
void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}
/**
 *	deactivate_locked_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to superblock, converting it into a temporary
 *	one if there are no other active references left.  In that case we
 *	tell fs driver to shut it down and drop the temporary reference we
 *	had just acquired.
 *
 *	Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		cleancache_flush_fs(s);
		fs->kill_sb(s);

		/* caches are now gone, we can safely kill the shrinker now */
		unregister_shrinker(&s->s_shrink);

		/*
		 * We need to call rcu_barrier so all the delayed rcu free
		 * inodes are flushed before we release the fs module.
		 */
		rcu_barrier();
		put_filesystem(fs);
		put_super(s);
	} else {
		up_write(&s->s_umount);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);
/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Variant of deactivate_locked_super(), except that superblock is *not*
 *	locked by caller.  If we are going to drop the final active reference,
 *	lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		down_write(&s->s_umount);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);
/*
 *	grab_super - acquire an active reference
 *	@s: reference we are trying to make active
 *
 *	Tries to acquire an active reference.  grab_super() is used when we
 *	had just found a superblock in super_blocks or fs_type->fs_supers
 *	and want to turn it into a full-blown active reference.  grab_super()
 *	is called with sb_lock held and drops it.  Returns 1 in case of
 *	success, 0 on failure (the superblock was already dead or dying
 *	when grab_super() was called).
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	if (atomic_inc_not_zero(&s->s_active)) {
		spin_unlock(&sb_lock);
		return 1;
	}
	/* it's going away */
	s->s_count++;
	spin_unlock(&sb_lock);
	/* wait for it to die */
	down_write(&s->s_umount);
	up_write(&s->s_umount);
	put_super(s);
	return 0;
}
/*
 *	grab_super_passive - acquire a passive reference
 *	@s: reference we are trying to grab
 *
 *	Tries to acquire a passive reference. This is used in places where we
 *	cannot take an active reference but we need to ensure that the
 *	superblock does not go away while we are working on it. It returns
 *	false if a reference was not gained, and returns true with the s_umount
 *	lock held in read mode if a reference is gained. On successful return,
 *	the caller must drop the s_umount lock and the passive reference when
 *	done.
 */
bool grab_super_passive(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}
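/*
 * Usage sketch (a minimal illustration, mirroring prune_super() above):
 * a caller that must look at a superblock without taking an active
 * reference pairs grab_super_passive() with drop_super():
 *
 *	if (!grab_super_passive(sb))
 *		return 0;		// sb is dying, nothing to do
 *	// ... inspect sb with s_umount held shared ...
 *	drop_super(sb);			// releases s_umount and s_count
 */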
/*
 * Superblock locking.  We really ought to get rid of these two.
 */
void lock_super(struct super_block * sb)
{
	mutex_lock(&sb->s_lock);
}

void unlock_super(struct super_block * sb)
{
	mutex_unlock(&sb->s_lock);
}

EXPORT_SYMBOL(lock_super);
EXPORT_SYMBOL(unlock_super);
/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of superblock, call generic_shutdown_super()
 *	and release aforementioned objects.  Note: dentries and inodes _are_
 *	taken care of and do not need specific handling.
 *
 *	Upon calling this function, the filesystem may no longer alter or
 *	rearrange the set of dentries belonging to this super_block, nor may it
 *	change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~MS_ACTIVE;

		fsnotify_unmount_inodes(&sb->s_inodes);

		evict_inodes(sb);

		if (sop->put_super)
			sop->put_super(sb);

		if (!list_empty(&sb->s_inodes)) {
			printk("VFS: Busy inodes after unmount of %s. "
			   "Self-destruct in 5 seconds.  Have a nice day...\n",
			   sb->s_id);
		}
	}
	spin_lock(&sb_lock);
	/* should be initialized for __put_super_and_need_restart() */
	list_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
}

EXPORT_SYMBOL(generic_shutdown_super);
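/*
 * Sketch of a typical ->kill_sb() built on this helper (hypothetical
 * filesystem "foofs"; the private-info pointer and its type are
 * assumptions for illustration only):
 *
 *	static void foofs_kill_sb(struct super_block *sb)
 *	{
 *		struct foofs_fs_info *fsi = sb->s_fs_info;
 *
 *		generic_shutdown_super(sb);
 *		kfree(fsi);	// dentries/inodes were already handled above
 *	}
 */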
/**
 *	sget	-	find or create a superblock
 *	@type:	filesystem type superblock should belong to
 *	@test:	comparison callback
 *	@set:	setup callback
 *	@data:	argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			void *data)
{
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		list_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (!grab_super(old))
				goto retry;
			if (s) {
				up_write(&s->s_umount);
				destroy_super(s);
				s = NULL;
			}
			down_write(&old->s_umount);
			if (unlikely(!(old->s_flags & MS_BORN))) {
				deactivate_locked_super(old);
				goto retry;
			}
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		up_write(&s->s_umount);
		destroy_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	list_add(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	register_shrinker(&s->s_shrink);
	return s;
}

EXPORT_SYMBOL(sget);
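/*
 * Usage sketch: sget() callers pair a comparison callback with a setup
 * callback, as mount_bdev() below does with test_bdev_super() and
 * set_bdev_super().  A single-instance filesystem can pass a test that
 * always matches (see mount_single() below):
 *
 *	s = sget(fs_type, compare_single, set_anon_super, NULL);
 *	if (IS_ERR(s))
 *		return ERR_CAST(s);
 *	// s is returned with s_umount held for writing
 */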
void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

/**
 * sync_supers - helper for periodic superblock writeback
 *
 * Call the write_super method if present on all dirty superblocks in
 * the system.  This is for the periodic writeback used by most older
 * filesystems.  For data integrity superblock writeback use
 * sync_filesystems() instead.
 *
 * Note: check the dirty flag before waiting, so we don't
 * hold up the sync while mounting a device. (The newly
 * mounted device won't need syncing.)
 */
void sync_supers(void)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_op->write_super && sb->s_dirt) {
			sb->s_count++;
			spin_unlock(&sb_lock);

			down_read(&sb->s_umount);
			if (sb->s_root && sb->s_dirt)
				sb->s_op->write_super(sb);
			up_read(&sb->s_umount);

			spin_lock(&sb_lock);
			if (p)
				__put_super(p);
			p = sb;
		}
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

/**
 *	iterate_supers - call function for all active superblocks
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root)
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
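/*
 * Usage sketch (callback shape only; fs/sync.c uses this pattern to
 * implement sys_sync):
 *
 *	static void sync_one_sb(struct super_block *sb, void *arg)
 *	{
 *		// called with s_umount held shared and sb->s_root valid
 *	}
 *
 *	iterate_supers(sync_one_sb, NULL);
 */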
/**
 *	iterate_supers_type - call function for superblocks of given type
 *	@type: fs type
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &type->fs_supers, s_instances) {
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root)
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);
/**
 *	get_super - get the superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device given. %NULL is returned if no match is found.
 */
struct super_block *get_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root)
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

EXPORT_SYMBOL(get_super);

/**
 *	get_active_super - get an active reference to the superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device given.  Returns the superblock with an active
 *	reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

restart:
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			if (grab_super(sb)) /* drops sb_lock */
				return sb;
			else
				goto restart;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

struct super_block *user_get_super(dev_t dev)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_dev == dev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root)
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}
/**
 *	do_remount_sb - asks filesystem to change mount options.
 *	@sb:	superblock in question
 *	@flags:	numeric part of options
 *	@data:	the rest of options
 *	@force:	whether or not to force the change
 *
 *	Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
	int retval;
	int remount_ro;

	if (sb->s_frozen != SB_UNFROZEN)
		return -EBUSY;

#ifdef CONFIG_BLOCK
	if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
		return -EACCES;
#endif

	if (flags & MS_RDONLY)
		acct_auto_close(sb);
	shrink_dcache_sb(sb);
	sync_filesystem(sb);

	remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);

	/* If we are remounting RDONLY and current sb is read/write,
	   make sure there are no rw files opened */
	if (remount_ro) {
		if (force)
			mark_files_ro(sb);
		else if (!fs_may_remount_ro(sb))
			return -EBUSY;
	}

	if (sb->s_op->remount_fs) {
		retval = sb->s_op->remount_fs(sb, &flags, data);
		if (retval) {
			if (!force)
				return retval;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}
	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;
}
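/*
 * Example: do_emergency_remount() below forces every writable bdev-backed
 * filesystem read-only with
 *
 *	do_remount_sb(sb, MS_RDONLY, NULL, 1);
 *
 * only the bits in MS_RMT_MASK are taken from @flags; everything else in
 * sb->s_flags is preserved.
 */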
static void do_emergency_remount(struct work_struct *work)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_write(&sb->s_umount);
		if (sb->s_root && sb->s_bdev && !(sb->s_flags & MS_RDONLY)) {
			/*
			 * What lock protects sb->s_flags??
			 */
			do_remount_sb(sb, MS_RDONLY, NULL, 1);
		}
		up_write(&sb->s_umount);
		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
static int unnamed_dev_start = 0; /* don't bother trying below it */
int get_anon_bdev(dev_t *p)
{
	int dev;
	int error;

 retry:
	if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
		return -ENOMEM;
	spin_lock(&unnamed_dev_lock);
	error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
	if (!error)
		unnamed_dev_start = dev + 1;
	spin_unlock(&unnamed_dev_lock);
	if (error == -EAGAIN)
		/* We raced and lost with another CPU. */
		goto retry;
	else if (error)
		return -EAGAIN;

	if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
		spin_lock(&unnamed_dev_lock);
		ida_remove(&unnamed_dev_ida, dev);
		if (unnamed_dev_start > dev)
			unnamed_dev_start = dev;
		spin_unlock(&unnamed_dev_lock);
		return -EMFILE;
	}
	*p = MKDEV(0, dev & MINORMASK);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);
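/*
 * Illustration (assumed values): anonymous devices use major 0, so the
 * first allocation typically yields MKDEV(0, 0), the next MKDEV(0, 1),
 * and so on; these show up as "0:0", "0:1", ... in mount tables.
 */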
void free_anon_bdev(dev_t dev)
{
	int slot = MINOR(dev);

	spin_lock(&unnamed_dev_lock);
	ida_remove(&unnamed_dev_ida, slot);
	if (slot < unnamed_dev_start)
		unnamed_dev_start = slot;
	spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	int error = get_anon_bdev(&s->s_dev);
	if (!error)
		s->s_bdi = &noop_backing_dev_info;
	return error;
}

EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	free_anon_bdev(dev);
}

EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}

EXPORT_SYMBOL(kill_litter_super);

static int ns_test_super(struct super_block *sb, void *data)
{
	return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
	sb->s_fs_info = data;
	return set_anon_super(sb, NULL);
}
struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
	void *data, int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *sb;

	sb = sget(fs_type, ns_test_super, ns_set_super, data);
	if (IS_ERR(sb))
		return ERR_CAST(sb);

	if (!sb->s_root) {
		int err;
		sb->s_flags = flags;
		err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
		if (err) {
			deactivate_locked_super(sb);
			return ERR_PTR(err);
		}

		sb->s_flags |= MS_ACTIVE;
	}

	return dget(sb->s_root);
}

EXPORT_SYMBOL(mount_ns);
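/*
 * Sketch of a namespace-keyed ->mount() (hypothetical "foofs"; the
 * namespace pointer used as the sget() key, and the helper fetching it,
 * are assumptions for illustration):
 *
 *	static struct dentry *foofs_mount(struct file_system_type *fs_type,
 *		int flags, const char *dev_name, void *data)
 *	{
 *		void *ns = foofs_current_ns();	// hypothetical helper
 *
 *		return mount_ns(fs_type, flags, ns, foofs_fill_super);
 *	}
 */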
#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;

	/*
	 * We set the bdi here to the queue backing, file systems can
	 * overwrite this in ->fill_super()
	 */
	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
	return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}
struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		error = -EBUSY;
		goto error_bdev;
	}
	s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
		if ((flags ^ s->s_flags) & MS_RDONLY) {
			deactivate_locked_super(s);
			error = -EBUSY;
			goto error_bdev;
		}

		/*
		 * s_umount nests inside bd_mutex during
		 * __invalidate_device().  blkdev_put() acquires
		 * bd_mutex and can't be called under s_umount.  Drop
		 * s_umount temporarily.  This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		char b[BDEVNAME_SIZE];

		s->s_flags = flags | MS_NOSEC;
		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			goto error;
		}

		s->s_flags |= MS_ACTIVE;
		bdev->bd_super = s;
	}

	return dget(s->s_root);

error_s:
	error = PTR_ERR(s);
error_bdev:
	blkdev_put(bdev, mode);
error:
	return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);
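/*
 * Sketch of a block-device filesystem's ->mount() (hypothetical "foofs";
 * foofs_fill_super has the fill_super prototype used throughout this
 * file):
 *
 *	static struct dentry *foofs_mount(struct file_system_type *fs_type,
 *		int flags, const char *dev_name, void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  foofs_fill_super);
 *	}
 */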
void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	fmode_t mode = sb->s_mode;

	bdev->bd_super = NULL;
	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	WARN_ON_ONCE(!(mode & FMODE_EXCL));
	blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
#endif
struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	s->s_flags = flags;

	error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= MS_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);

static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		s->s_flags = flags;
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return ERR_PTR(error);
		}
		s->s_flags |= MS_ACTIVE;
	} else {
		do_remount_sb(s, flags, data, 0);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);

struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct dentry *root;
	struct super_block *sb;
	char *secdata = NULL;
	int error = -ENOMEM;

	if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
		secdata = alloc_secdata();
		if (!secdata)
			goto out;

		error = security_sb_copy_data(data, secdata);
		if (error)
			goto out_free_secdata;
	}

	root = type->mount(type, flags, name, data);
	if (IS_ERR(root)) {
		error = PTR_ERR(root);
		goto out_free_secdata;
	}
	sb = root->d_sb;
	BUG_ON(!sb);
	WARN_ON(!sb->s_bdi);
	WARN_ON(sb->s_bdi == &default_backing_dev_info);
	sb->s_flags |= MS_BORN;

	error = security_sb_kern_mount(sb, flags, secdata);
	if (error)
		goto out_sb;

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", type->name, sb->s_maxbytes);

	up_write(&sb->s_umount);
	free_secdata(secdata);
	return root;
out_sb:
	dput(root);
	deactivate_locked_super(sb);
out_free_secdata:
	free_secdata(secdata);
out:
	return ERR_PTR(error);
}
/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will
 * return -EBUSY.
 */
int freeze_super(struct super_block *sb)
{
	int ret;

	atomic_inc(&sb->s_active);
	down_write(&sb->s_umount);
	if (sb->s_frozen) {
		deactivate_locked_super(sb);
		return -EBUSY;
	}

	if (sb->s_flags & MS_RDONLY) {
		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();
		up_write(&sb->s_umount);
		return 0;
	}

	sb->s_frozen = SB_FREEZE_WRITE;
	smp_wmb();

	sync_filesystem(sb);

	sb->s_frozen = SB_FREEZE_TRANS;
	smp_wmb();

	sync_blockdev(sb->s_bdev);
	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS: Filesystem freeze failed\n");
			sb->s_frozen = SB_UNFROZEN;
			deactivate_locked_super(sb);
			return ret;
		}
	}
	up_write(&sb->s_umount);
	return 0;
}
EXPORT_SYMBOL(freeze_super);
/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
	int error;

	down_write(&sb->s_umount);
	if (sb->s_frozen == SB_UNFROZEN) {
		up_write(&sb->s_umount);
		return -EINVAL;
	}

	if (sb->s_flags & MS_RDONLY)
		goto out;

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			printk(KERN_ERR
				"VFS: Filesystem thaw failed\n");
			sb->s_frozen = SB_FREEZE_TRANS;
			up_write(&sb->s_umount);
			return error;
		}
	}

out:
	sb->s_frozen = SB_UNFROZEN;
	smp_wmb();
	wake_up(&sb->s_wait_unfrozen);
	deactivate_locked_super(sb);

	return 0;
}
EXPORT_SYMBOL(thaw_super);
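/*
 * Usage sketch: freeze_super()/thaw_super() are paired by callers such
 * as the FIFREEZE/FITHAW ioctls:
 *
 *	error = freeze_super(sb);	// a second freeze without a thaw: -EBUSY
 *	// ... take a snapshot, run a backup, etc ...
 *	error = thaw_super(sb);		// -EINVAL if the fs was not frozen
 */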