/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - filesystem drivers list
 *                                   - umount system call
 *
 *  GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);
/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
{
	struct super_block *sb;
	int	fs_objects = 0;
	int	total_objects;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * Deadlock avoidance.  We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends..
	 */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;

	if (!grab_super_passive(sb))
		return -1;

	if (sb->s_op && sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb);

	total_objects = sb->s_nr_dentry_unused +
			sb->s_nr_inodes_unused + fs_objects + 1;

	if (sc->nr_to_scan) {
		int	dentries;
		int	inodes;

		/* proportion the scan between the caches */
		dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) /
							total_objects;
		inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) /
							total_objects;
		if (fs_objects)
			fs_objects = (sc->nr_to_scan * fs_objects) /
							total_objects;
		/*
		 * prune the dcache first as the icache is pinned by it, then
		 * prune the icache, followed by the filesystem specific caches
		 */
		prune_dcache_sb(sb, dentries);
		prune_icache_sb(sb, inodes);

		if (fs_objects && sb->s_op->free_cached_objects) {
			sb->s_op->free_cached_objects(sb, fs_objects);
			fs_objects = sb->s_op->nr_cached_objects(sb);
		}
		total_objects = sb->s_nr_dentry_unused +
				sb->s_nr_inodes_unused + fs_objects;
	}

	total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure;
	drop_super(sb);
	return total_objects;
}
/**
 *	alloc_super	-	create new superblock
 *	@type:	filesystem type superblock should belong to
 *
 *	Allocates and initializes a new &struct super_block.  alloc_super()
 *	returns a pointer to a new superblock or %NULL if allocation had failed.
 */
static struct super_block *alloc_super(struct file_system_type *type)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static const struct super_operations default_op;

	if (s) {
		if (security_sb_alloc(s)) {
			kfree(s);
			s = NULL;
			goto out;
		}
#ifdef CONFIG_SMP
		s->s_files = alloc_percpu(struct list_head);
		if (!s->s_files) {
			security_sb_free(s);
			kfree(s);
			s = NULL;
			goto out;
		} else {
			int i;

			for_each_possible_cpu(i)
				INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
		}
#else
		INIT_LIST_HEAD(&s->s_files);
#endif
		s->s_bdi = &default_backing_dev_info;
		INIT_LIST_HEAD(&s->s_instances);
		INIT_HLIST_BL_HEAD(&s->s_anon);
		INIT_LIST_HEAD(&s->s_inodes);
		INIT_LIST_HEAD(&s->s_dentry_lru);
		INIT_LIST_HEAD(&s->s_inode_lru);
		spin_lock_init(&s->s_inode_lru_lock);
		init_rwsem(&s->s_umount);
		mutex_init(&s->s_lock);
		lockdep_set_class(&s->s_umount, &type->s_umount_key);
		/*
		 * The locking rules for s_lock are up to the
		 * filesystem. For example ext3fs has different
		 * lock ordering than usbfs:
		 */
		lockdep_set_class(&s->s_lock, &type->s_lock_key);
		/*
		 * sget() can have s_umount recursion.
		 *
		 * When it cannot find a suitable sb, it allocates a new
		 * one (this one), and tries again to find a suitable old
		 * one.
		 *
		 * In case that succeeds, it will acquire the s_umount
		 * lock of the old one. Since these are clearly distinct
		 * locks, and this object isn't exposed yet, there's no
		 * risk of deadlocks.
		 *
		 * Annotate this by putting this lock in a different
		 * subclass.
		 */
		down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
		s->s_count = 1;
		atomic_set(&s->s_active, 1);
		mutex_init(&s->s_vfs_rename_mutex);
		lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
		mutex_init(&s->s_dquot.dqio_mutex);
		mutex_init(&s->s_dquot.dqonoff_mutex);
		init_rwsem(&s->s_dquot.dqptr_sem);
		init_waitqueue_head(&s->s_wait_unfrozen);
		s->s_maxbytes = MAX_NON_LFS;
		s->s_op = &default_op;
		s->s_time_gran = 1000000000;
		s->cleancache_poolid = -1;

		s->s_shrink.seeks = DEFAULT_SEEKS;
		s->s_shrink.shrink = prune_super;
	}
out:
	return s;
}
/**
 *	destroy_super	-	frees a superblock
 *	@s: superblock to free
 *
 *	Frees a superblock.
 */
static inline void destroy_super(struct super_block *s)
{
#ifdef CONFIG_SMP
	free_percpu(s->s_files);
#endif
	security_sb_free(s);
	kfree(s);
}
/* Superblock refcounting */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
void __put_super(struct super_block *sb)
{
	if (!--sb->s_count) {
		list_del_init(&sb->s_list);
		destroy_super(sb);
	}
}

/**
 *	put_super	-	drop a temporary reference to superblock
 *	@sb: superblock in question
 *
 *	Drops a temporary reference, frees superblock if there's no
 *	references left.
 */
void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}
/**
 *	deactivate_locked_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to superblock, converting it into a temporary
 *	one if there are no other active references left.  In that case we
 *	tell fs driver to shut it down and drop the temporary reference we
 *	had just acquired.
 *
 *	Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		cleancache_flush_fs(s);
		fs->kill_sb(s);

		/* caches are now gone, we can safely kill the shrinker now */
		unregister_shrinker(&s->s_shrink);

		/*
		 * We need to call rcu_barrier so all the delayed rcu free
		 * inodes are flushed before we release the fs module.
		 */
		rcu_barrier();
		put_filesystem(fs);
		put_super(s);
	} else {
		up_write(&s->s_umount);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);
/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Variant of deactivate_locked_super(), except that superblock is *not*
 *	locked by caller.  If we are going to drop the final active reference,
 *	lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		down_write(&s->s_umount);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);
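
/*
 * Illustrative sketch (not part of the original file): the usual pattern for
 * the active-reference API above.  A hypothetical caller that pinned a mounted
 * filesystem by bumping s_active releases it with deactivate_super(), which
 * only takes s_umount if this turns out to be the last active reference;
 * freeze_super() further down follows the locked variant of the same pattern:
 *
 *	atomic_inc(&sb->s_active);
 *	... work that must keep the filesystem mounted ...
 *	deactivate_super(sb);
 */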
/*
 *	grab_super - acquire an active reference
 *	@s: reference we are trying to make active
 *
 *	Tries to acquire an active reference.  grab_super() is used when we
 *	had just found a superblock in super_blocks or fs_type->fs_supers
 *	and want to turn it into a full-blown active reference.  grab_super()
 *	is called with sb_lock held and drops it.  Returns 1 in case of
 *	success, 0 if we had failed (superblock contents were already dead or
 *	dying when grab_super() had been called).
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	if (atomic_inc_not_zero(&s->s_active)) {
		spin_unlock(&sb_lock);
		return 1;
	}
	/* it's going away */
	s->s_count++;
	spin_unlock(&sb_lock);
	/* wait for it to die */
	down_write(&s->s_umount);
	up_write(&s->s_umount);
	put_super(s);
	return 0;
}
/*
 *	grab_super_passive - acquire a passive reference
 *	@s: reference we are trying to grab
 *
 *	Tries to acquire a passive reference.  This is used in places where we
 *	cannot take an active reference but we need to ensure that the
 *	superblock does not go away while we are working on it.  It returns
 *	false if a reference was not gained, and returns true with the s_umount
 *	lock held in read mode if a reference is gained.  On successful return,
 *	the caller must drop the s_umount lock and the passive reference when
 *	done.
 */
bool grab_super_passive(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (list_empty(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root)
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}
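
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a successful grab_super_passive() is paired with drop_super(), which
 * releases both the s_umount read lock and the passive reference; this is
 * exactly what prune_super() above does:
 *
 *	if (grab_super_passive(sb)) {
 *		... inspect sb while it cannot go away ...
 *		drop_super(sb);
 *	}
 */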
/*
 * Superblock locking.  We really ought to get rid of these two.
 */
void lock_super(struct super_block * sb)
{
	mutex_lock(&sb->s_lock);
}

void unlock_super(struct super_block * sb)
{
	mutex_unlock(&sb->s_lock);
}

EXPORT_SYMBOL(lock_super);
EXPORT_SYMBOL(unlock_super);
/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of superblock, call generic_shutdown_super()
 *	and release aforementioned objects.  Note: dentries and inodes _are_
 *	taken care of and do not need specific handling.
 *
 *	Upon calling this function, the filesystem may no longer alter or
 *	rearrange the set of dentries belonging to this super_block, nor may it
 *	change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~MS_ACTIVE;

		fsnotify_unmount_inodes(&sb->s_inodes);

		evict_inodes(sb);

		if (sop->put_super)
			sop->put_super(sb);

		if (!list_empty(&sb->s_inodes)) {
			printk("VFS: Busy inodes after unmount of %s. "
			   "Self-destruct in 5 seconds.  Have a nice day...\n",
			   sb->s_id);
		}
	}
	spin_lock(&sb_lock);
	/* should be initialized for __put_super_and_need_restart() */
	list_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
}

EXPORT_SYMBOL(generic_shutdown_super);
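
/*
 * Illustrative sketch (hypothetical filesystem, not part of this file): a
 * typical ->kill_sb() pulls its private data out of the superblock, lets
 * generic_shutdown_super() tear down dentries and inodes, and only then
 * frees the private object; example_fs_info and example_kill_sb() are
 * made-up names:
 *
 *	static void example_kill_sb(struct super_block *sb)
 *	{
 *		struct example_fs_info *fsi = sb->s_fs_info;
 *
 *		generic_shutdown_super(sb);
 *		kfree(fsi);
 *	}
 */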
/**
 *	sget	-	find or create a superblock
 *	@type:	filesystem type superblock should belong to
 *	@test:	comparison callback
 *	@set:	setup callback
 *	@data:	argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			void *data)
{
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		list_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (!grab_super(old))
				goto retry;
			if (s) {
				up_write(&s->s_umount);
				destroy_super(s);
				s = NULL;
			}
			down_write(&old->s_umount);
			if (unlikely(!(old->s_flags & MS_BORN))) {
				deactivate_locked_super(old);
				goto retry;
			}
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		up_write(&s->s_umount);
		destroy_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	list_add(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	register_shrinker(&s->s_shrink);
	return s;
}
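
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * filesystem that shares one superblock per backing object typically passes
 * its own comparison and setup callbacks to sget(), then fills the superblock
 * only when a brand new one came back (no root dentry yet); the example_*
 * names are made up, and mount_ns()/mount_bdev() below are the real in-tree
 * users of this pattern:
 *
 *	s = sget(fs_type, example_test_super, example_set_super, ctx);
 *	if (IS_ERR(s))
 *		return ERR_CAST(s);
 *	if (!s->s_root) {
 *		error = example_fill_super(s, data, 0);
 *		...
 *	}
 */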
void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);
/**
 *	sync_supers - helper for periodic superblock writeback
 *
 *	Call the write_super method if present on all dirty superblocks in
 *	the system.  This is for the periodic writeback used by most older
 *	filesystems.  For data integrity superblock writeback use
 *	sync_filesystems() instead.
 *
 *	Note: check the dirty flag before waiting, so we don't
 *	hold up the sync while mounting a device. (The newly
 *	mounted device won't need syncing.)
 */
void sync_supers(void)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_op->write_super && sb->s_dirt) {
			sb->s_count++;
			spin_unlock(&sb_lock);

			down_read(&sb->s_umount);
			if (sb->s_root && sb->s_dirt)
				sb->s_op->write_super(sb);
			up_read(&sb->s_umount);

			spin_lock(&sb_lock);
			if (p)
				__put_super(p);
			p = sb;
		}
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
/**
 *	iterate_supers - call function for all active superblocks
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root)
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
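
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * callback passed to iterate_supers() runs with the superblock's s_umount
 * held for reading, and only for superblocks that still have a root dentry,
 * so a sync-style user can look roughly like this:
 *
 *	static void example_sync_one_sb(struct super_block *sb, void *arg)
 *	{
 *		if (!(sb->s_flags & MS_RDONLY))
 *			sync_filesystem(sb);
 *	}
 *
 *	iterate_supers(example_sync_one_sb, NULL);
 */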
/**
 *	iterate_supers_type - call function for superblocks of given type
 *	@type: fs type
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &type->fs_supers, s_instances) {
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root)
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);
/**
 *	get_super - get the superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device given. %NULL is returned if no match is found.
 */
struct super_block *get_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root)
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

EXPORT_SYMBOL(get_super);
/**
 *	get_active_super - get an active reference to the superblock of a device
 *	@bdev: device to get the superblock for
 *
 *	Scans the superblock list and finds the superblock of the file system
 *	mounted on the device given.  Returns the superblock with an active
 *	reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

restart:
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			if (grab_super(sb)) /* drops sb_lock */
				return sb;
			else
				goto restart;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}
struct super_block *user_get_super(dev_t dev)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_dev == dev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root)
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}
/**
 *	do_remount_sb - asks filesystem to change mount options.
 *	@sb:	superblock in question
 *	@flags:	numeric part of options
 *	@data:	the rest of options
 *	@force: whether or not to force the change
 *
 *	Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
	int retval;
	int remount_ro;

	if (sb->s_frozen != SB_UNFROZEN)
		return -EBUSY;

#ifdef CONFIG_BLOCK
	if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
		return -EACCES;
#endif

	if (flags & MS_RDONLY)
		acct_auto_close(sb);
	shrink_dcache_sb(sb);
	sync_filesystem(sb);

	remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);

	/* If we are remounting RDONLY and current sb is read/write,
	   make sure there are no rw files opened */
	if (remount_ro) {
		if (force)
			mark_files_ro(sb);
		else if (!fs_may_remount_ro(sb))
			return -EBUSY;
	}

	if (sb->s_op->remount_fs) {
		retval = sb->s_op->remount_fs(sb, &flags, data);
		if (retval)
			return retval;
	}
	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;
}
static void do_emergency_remount(struct work_struct *work)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_write(&sb->s_umount);
		if (sb->s_root && sb->s_bdev && !(sb->s_flags & MS_RDONLY)) {
			/*
			 * What lock protects sb->s_flags??
			 */
			do_remount_sb(sb, MS_RDONLY, NULL, 1);
		}
		up_write(&sb->s_umount);
		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}
/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
static int unnamed_dev_start = 0; /* don't bother trying below it */

int get_anon_bdev(dev_t *p)
{
	int dev;
	int error;

 retry:
	if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
		return -ENOMEM;
	spin_lock(&unnamed_dev_lock);
	error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
	if (!error)
		unnamed_dev_start = dev + 1;
	spin_unlock(&unnamed_dev_lock);
	if (error == -EAGAIN)
		/* We raced and lost with another CPU. */
		goto retry;
	else if (error)
		return -EAGAIN;

	if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
		spin_lock(&unnamed_dev_lock);
		ida_remove(&unnamed_dev_ida, dev);
		if (unnamed_dev_start > dev)
			unnamed_dev_start = dev;
		spin_unlock(&unnamed_dev_lock);
		return -EMFILE;
	}
	*p = MKDEV(0, dev & MINORMASK);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	int slot = MINOR(dev);
	spin_lock(&unnamed_dev_lock);
	ida_remove(&unnamed_dev_ida, slot);
	if (slot < unnamed_dev_start)
		unnamed_dev_start = slot;
	spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);
int set_anon_super(struct super_block *s, void *data)
{
	int error = get_anon_bdev(&s->s_dev);
	if (!error)
		s->s_bdi = &noop_backing_dev_info;
	return error;
}

EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	free_anon_bdev(dev);
}

EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}

EXPORT_SYMBOL(kill_litter_super);
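
/*
 * Illustrative sketch (hypothetical filesystem, not part of this file):
 * in-memory filesystems that keep dentries pinned (ramfs-style "litter")
 * point ->kill_sb at kill_litter_super so the pinned tree is torn down
 * before the anonymous superblock is shut down; the example_* names are
 * made up:
 *
 *	static struct file_system_type example_ram_fs_type = {
 *		.name		= "exampleramfs",
 *		.mount		= example_ram_mount,
 *		.kill_sb	= kill_litter_super,
 *	};
 */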
static int ns_test_super(struct super_block *sb, void *data)
{
	return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
	sb->s_fs_info = data;
	return set_anon_super(sb, NULL);
}

struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
	void *data, int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *sb;

	sb = sget(fs_type, ns_test_super, ns_set_super, data);
	if (IS_ERR(sb))
		return ERR_CAST(sb);

	if (!sb->s_root) {
		int err;
		sb->s_flags = flags;
		err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
		if (err) {
			deactivate_locked_super(sb);
			return ERR_PTR(err);
		}

		sb->s_flags |= MS_ACTIVE;
	}

	return dget(sb->s_root);
}

EXPORT_SYMBOL(mount_ns);
static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;

	/*
	 * We set the bdi here to the queue backing, file systems can
	 * overwrite this in ->fill_super()
	 */
	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
	return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		error = -EBUSY;
		goto error_bdev;
	}
	s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
		if ((flags ^ s->s_flags) & MS_RDONLY) {
			deactivate_locked_super(s);
			error = -EBUSY;
			goto error_bdev;
		}

		/*
		 * s_umount nests inside bd_mutex during
		 * __invalidate_device().  blkdev_put() acquires
		 * bd_mutex and can't be called under s_umount.  Drop
		 * s_umount temporarily.  This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		char b[BDEVNAME_SIZE];

		s->s_flags = flags | MS_NOSEC;
		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			goto error;
		}

		s->s_flags |= MS_ACTIVE;
		bdev->bd_super = s;
	}

	return dget(s->s_root);

error_s:
	error = PTR_ERR(s);
error_bdev:
	blkdev_put(bdev, mode);
error:
	return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);
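
/*
 * Illustrative sketch (hypothetical filesystem, not part of this file): a
 * simple block-based filesystem wires mount_bdev() into its file_system_type
 * like this, pairing it with kill_block_super() below; "examplefs",
 * example_mount() and example_fill_super() are made-up names:
 *
 *	static struct dentry *example_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  example_fill_super);
 *	}
 *
 *	static struct file_system_type example_fs_type = {
 *		.owner		= THIS_MODULE,
 *		.name		= "examplefs",
 *		.mount		= example_mount,
 *		.kill_sb	= kill_block_super,
 *		.fs_flags	= FS_REQUIRES_DEV,
 *	};
 */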
void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	fmode_t mode = sb->s_mode;

	bdev->bd_super = NULL;
	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	WARN_ON_ONCE(!(mode & FMODE_EXCL));
	blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	s->s_flags = flags;

	error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= MS_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);
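
/*
 * Illustrative sketch (hypothetical filesystem, not part of this file):
 * purely virtual filesystems with no backing device typically pair
 * mount_nodev() with kill_anon_super(); the example_* names are made up:
 *
 *	static struct dentry *example_virt_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_nodev(fs_type, flags, data,
 *				   example_virt_fill_super);
 *	}
 *
 *	static struct file_system_type example_virt_fs_type = {
 *		.name		= "examplevirtfs",
 *		.mount		= example_virt_mount,
 *		.kill_sb	= kill_anon_super,
 *	};
 */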
static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		s->s_flags = flags;
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return ERR_PTR(error);
		}
		s->s_flags |= MS_ACTIVE;
	} else {
		do_remount_sb(s, flags, data, 0);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);
struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct dentry *root;
	struct super_block *sb;
	char *secdata = NULL;
	int error = -ENOMEM;

	if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
		secdata = alloc_secdata();
		if (!secdata)
			goto out;

		error = security_sb_copy_data(data, secdata);
		if (error)
			goto out_free_secdata;
	}

	root = type->mount(type, flags, name, data);
	if (IS_ERR(root)) {
		error = PTR_ERR(root);
		goto out_free_secdata;
	}
	sb = root->d_sb;
	BUG_ON(!sb);
	WARN_ON(!sb->s_bdi);
	WARN_ON(sb->s_bdi == &default_backing_dev_info);
	sb->s_flags |= MS_BORN;

	error = security_sb_kern_mount(sb, flags, secdata);
	if (error)
		goto out_sb;

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", type->name, sb->s_maxbytes);

	up_write(&sb->s_umount);
	free_secdata(secdata);
	return root;
out_sb:
	dput(root);
	deactivate_locked_super(sb);
out_free_secdata:
	free_secdata(secdata);
out:
	return ERR_PTR(error);
}
/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 */
int freeze_super(struct super_block *sb)
{
	int ret;

	atomic_inc(&sb->s_active);
	down_write(&sb->s_umount);
	if (sb->s_frozen) {
		deactivate_locked_super(sb);
		return -EBUSY;
	}

	if (sb->s_flags & MS_RDONLY) {
		sb->s_frozen = SB_FREEZE_TRANS;
		smp_wmb();
		up_write(&sb->s_umount);
		return 0;
	}

	sb->s_frozen = SB_FREEZE_WRITE;
	smp_wmb();

	sync_filesystem(sb);

	sb->s_frozen = SB_FREEZE_TRANS;
	smp_wmb();

	sync_blockdev(sb->s_bdev);
	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS:Filesystem freeze failed\n");
			sb->s_frozen = SB_UNFROZEN;
			deactivate_locked_super(sb);
			return ret;
		}
	}
	up_write(&sb->s_umount);
	return 0;
}
EXPORT_SYMBOL(freeze_super);
/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
	int error;

	down_write(&sb->s_umount);
	if (sb->s_frozen == SB_UNFROZEN) {
		up_write(&sb->s_umount);
		return -EINVAL;
	}

	if (sb->s_flags & MS_RDONLY)
		goto out;

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			printk(KERN_ERR
				"VFS:Filesystem thaw failed\n");
			sb->s_frozen = SB_FREEZE_TRANS;
			up_write(&sb->s_umount);
			return error;
		}
	}

out:
	sb->s_frozen = SB_UNFROZEN;
	smp_wmb();
	wake_up(&sb->s_wait_unfrozen);
	deactivate_locked_super(sb);

	return 0;
}
EXPORT_SYMBOL(thaw_super);
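
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * freeze_super() and thaw_super() bracket an operation that needs an
 * on-disk consistent image, such as the FIFREEZE/FITHAW ioctls or a
 * block-device snapshot:
 *
 *	error = freeze_super(sb);
 *	if (!error) {
 *		... take a snapshot of the frozen block device ...
 *		thaw_super(sb);
 *	}
 */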