/*
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/seq_file.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/log2.h>
#include <linux/idr.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
#include "internal.h"

#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)
/* spinlock for vfsmount related operations, in place of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);

static int event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
static struct list_head *mount_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

struct kobject *fs_kobj;
EXPORT_SYMBOL_GPL(fs_kobj);
static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> HASH_SHIFT);
	return tmp & (HASH_SIZE - 1);
}
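/*
 * Illustrative note (not part of the original source): the hash is keyed on
 * the (parent vfsmount, mountpoint dentry) pair, so crossing a mountpoint
 * during a path walk amounts to
 *
 *	struct vfsmount *child = lookup_mnt(path->mnt, path->dentry);
 *
 * which probes mount_hashtable[hash(path->mnt, path->dentry)]; see
 * __lookup_mnt() below.
 */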
#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16)

/* allocation is serialized by namespace_sem */
static int mnt_alloc_id(struct vfsmount *mnt)
{
	int res;

retry:
	ida_pre_get(&mnt_id_ida, GFP_KERNEL);
	spin_lock(&vfsmount_lock);
	res = ida_get_new(&mnt_id_ida, &mnt->mnt_id);
	spin_unlock(&vfsmount_lock);
	if (res == -EAGAIN)
		goto retry;

	return res;
}
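/*
 * Note (added commentary): ida_pre_get() preallocates memory with GFP_KERNEL
 * while no lock is held; ida_get_new() then hands out the lowest free ID
 * under vfsmount_lock and returns -EAGAIN if a racing allocator consumed the
 * preallocation, in which case the allocation above is simply retried.
 */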
static void mnt_free_id(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	ida_remove(&mnt_id_ida, mnt->mnt_id);
	spin_unlock(&vfsmount_lock);
}
/*
 * Allocate a new peer group ID
 *
 * mnt_group_ida is protected by namespace_sem
 */
static int mnt_alloc_group_id(struct vfsmount *mnt)
{
	if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL))
		return -ENOMEM;

	return ida_get_new_above(&mnt_group_ida, 1, &mnt->mnt_group_id);
}
/*
 * Release a peer group ID
 */
void mnt_release_group_id(struct vfsmount *mnt)
{
	ida_remove(&mnt_group_ida, mnt->mnt_group_id);
	mnt->mnt_group_id = 0;
}
struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		int err;

		err = mnt_alloc_id(mnt);
		if (err) {
			kmem_cache_free(mnt_cache, mnt);
			return NULL;
		}

		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		atomic_set(&mnt->__mnt_writers, 0);
		if (name) {
			int size = strlen(name) + 1;
			char *newname = kmalloc(size, GFP_KERNEL);
			if (newname) {
				memcpy(newname, name, size);
				mnt->mnt_devname = newname;
			}
		}
	}
	return mnt;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/**
 * __mnt_is_readonly: check whether a mount is read-only
 * @mnt: the mount to check for its write status
 *
 * This shouldn't be used directly outside of the VFS.
 * It does not guarantee that the filesystem will stay
 * r/w, just that it is right *now*.  This can not and
 * should not be used in place of IS_RDONLY(inode).
 * mnt_want/drop_write() will _keep_ the filesystem
 * r/w.
 */
int __mnt_is_readonly(struct vfsmount *mnt)
{
	if (mnt->mnt_flags & MNT_READONLY)
		return 1;
	if (mnt->mnt_sb->s_flags & MS_RDONLY)
		return 1;
	return 0;
}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
struct mnt_writer {
	/*
	 * If holding multiple instances of this lock, they
	 * must be ordered by cpu number.
	 */
	spinlock_t lock;
	struct lock_class_key lock_class; /* compiles out with !lockdep */
	unsigned long count;
	struct vfsmount *mnt;
} ____cacheline_aligned_in_smp;
static DEFINE_PER_CPU(struct mnt_writer, mnt_writers);
static int __init init_mnt_writers(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mnt_writer *writer = &per_cpu(mnt_writers, cpu);
		spin_lock_init(&writer->lock);
		lockdep_set_class(&writer->lock, &writer->lock_class);
		writer->count = 0;
	}
	return 0;
}
fs_initcall(init_mnt_writers);
static void unlock_mnt_writers(void)
{
	int cpu;
	struct mnt_writer *cpu_writer;

	for_each_possible_cpu(cpu) {
		cpu_writer = &per_cpu(mnt_writers, cpu);
		spin_unlock(&cpu_writer->lock);
	}
}
static inline void __clear_mnt_count(struct mnt_writer *cpu_writer)
{
	if (!cpu_writer->mnt)
		return;
	/*
	 * This is in case anyone ever leaves an invalid,
	 * old ->mnt and a count of 0.
	 */
	if (!cpu_writer->count)
		return;
	atomic_add(cpu_writer->count, &cpu_writer->mnt->__mnt_writers);
	cpu_writer->count = 0;
}
/*
 * must hold cpu_writer->lock
 */
static inline void use_cpu_writer_for_mount(struct mnt_writer *cpu_writer,
					  struct vfsmount *mnt)
{
	if (cpu_writer->mnt == mnt)
		return;
	__clear_mnt_count(cpu_writer);
	cpu_writer->mnt = mnt;
}
/*
 * Most r/o checks on a fs are for operations that take
 * discrete amounts of time, like a write() or unlink().
 * We must keep track of when those operations start
 * (for permission checks) and when they end, so that
 * we can determine when writes are able to occur to
 * a filesystem.
 */
/**
 * mnt_want_write - get write access to a mount
 * @mnt: the mount on which to take a write
 *
 * This tells the low-level filesystem that a write is
 * about to be performed to it, and makes sure that
 * writes are allowed before returning success.  When
 * the write operation is finished, mnt_drop_write()
 * must be called.  This is effectively a refcount.
 */
int mnt_want_write(struct vfsmount *mnt)
{
	int ret = 0;
	struct mnt_writer *cpu_writer;

	cpu_writer = &get_cpu_var(mnt_writers);
	spin_lock(&cpu_writer->lock);
	if (__mnt_is_readonly(mnt)) {
		ret = -EROFS;
		goto out;
	}
	use_cpu_writer_for_mount(cpu_writer, mnt);
	cpu_writer->count++;
out:
	spin_unlock(&cpu_writer->lock);
	put_cpu_var(mnt_writers);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
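/*
 * Example pairing (illustrative, not part of the original file): callers
 * in the VFS bracket each write operation with a want/drop pair, e.g.
 *
 *	err = mnt_want_write(nd.path.mnt);
 *	if (err)
 *		return err;
 *	err = vfs_unlink(dir->d_inode, dentry);
 *	mnt_drop_write(nd.path.mnt);
 *	return err;
 *
 * A successful mnt_want_write() must always be balanced by mnt_drop_write().
 */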
static void lock_mnt_writers(void)
{
	int cpu;
	struct mnt_writer *cpu_writer;

	for_each_possible_cpu(cpu) {
		cpu_writer = &per_cpu(mnt_writers, cpu);
		spin_lock(&cpu_writer->lock);
		__clear_mnt_count(cpu_writer);
		cpu_writer->mnt = NULL;
	}
}
/*
 * These per-cpu write counts are not guaranteed to have
 * matched increments and decrements on any given cpu.
 * A file open()ed for write on one cpu and close()d on
 * another cpu will imbalance this count.  Make sure it
 * does not get too far out of whack.
 */
static void handle_write_count_underflow(struct vfsmount *mnt)
{
	if (atomic_read(&mnt->__mnt_writers) >=
	    MNT_WRITER_UNDERFLOW_LIMIT)
		return;
	/*
	 * It isn't necessary to hold all of the locks
	 * at the same time, but doing it this way makes
	 * us share a lot more code.
	 */
	lock_mnt_writers();
	/*
	 * vfsmount_lock is for mnt_flags.
	 */
	spin_lock(&vfsmount_lock);
	/*
	 * If coalescing the per-cpu writer counts did not
	 * get us back to a positive writer count, we have
	 * a bug.
	 */
	if ((atomic_read(&mnt->__mnt_writers) < 0) &&
	    !(mnt->mnt_flags & MNT_IMBALANCED_WRITE_COUNT)) {
		WARN(1, KERN_DEBUG "leak detected on mount(%p) writers "
				"count: %d\n",
			mnt, atomic_read(&mnt->__mnt_writers));
		/* use the flag to keep the dmesg spam down */
		mnt->mnt_flags |= MNT_IMBALANCED_WRITE_COUNT;
	}
	spin_unlock(&vfsmount_lock);
	unlock_mnt_writers();
}
/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	int must_check_underflow = 0;
	struct mnt_writer *cpu_writer;

	cpu_writer = &get_cpu_var(mnt_writers);
	spin_lock(&cpu_writer->lock);

	use_cpu_writer_for_mount(cpu_writer, mnt);
	if (cpu_writer->count > 0) {
		cpu_writer->count--;
	} else {
		must_check_underflow = 1;
		atomic_dec(&mnt->__mnt_writers);
	}

	spin_unlock(&cpu_writer->lock);
	/*
	 * Logically, we could call this each time,
	 * but the __mnt_writers cacheline tends to
	 * be cold, and makes this expensive.
	 */
	if (must_check_underflow)
		handle_write_count_underflow(mnt);
	/*
	 * This could be done right after the spinlock
	 * is taken because the spinlock keeps us on
	 * the cpu, and disables preemption.  However,
	 * putting it here bounds the amount that
	 * __mnt_writers can underflow.  Without it,
	 * we could theoretically wrap __mnt_writers.
	 */
	put_cpu_var(mnt_writers);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
static int mnt_make_readonly(struct vfsmount *mnt)
{
	int ret = 0;

	lock_mnt_writers();
	/*
	 * With all the locks held, this value is stable
	 */
	if (atomic_read(&mnt->__mnt_writers) > 0) {
		ret = -EBUSY;
		goto out;
	}
	/*
	 * nobody can do a successful mnt_want_write() with all
	 * of the counts in MNT_DENIED_WRITE and the locks held.
	 */
	spin_lock(&vfsmount_lock);
	if (!ret)
		mnt->mnt_flags |= MNT_READONLY;
	spin_unlock(&vfsmount_lock);
out:
	unlock_mnt_writers();
	return ret;
}
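/*
 * Added cross-reference (not in the original): this is the write-side half
 * of the r/w -> r/o transition.  A remount such as
 *
 *	mount -o remount,ro /mnt
 *
 * reaches this function via do_remount() -> change_mount_flags(), and fails
 * with -EBUSY while any mnt_want_write() reference is still outstanding.
 */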
static void __mnt_unmake_readonly(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_flags &= ~MNT_READONLY;
	spin_unlock(&vfsmount_lock);
}
int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
{
	mnt->mnt_sb = sb;
	mnt->mnt_root = dget(sb->s_root);
	return 0;
}
EXPORT_SYMBOL(simple_set_mnt);
void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	mnt_free_id(mnt);
	kmem_cache_free(mnt_cache, mnt);
}
/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set return the first mount else return the last mount.
 */
struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			      int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct vfsmount *p, *found = NULL;

	for (;;) {
		tmp = dir ? tmp->next : tmp->prev;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}
/*
 * lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *child_mnt;

	spin_lock(&vfsmount_lock);
	if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
		mntget(child_mnt);
	spin_unlock(&vfsmount_lock);
	return child_mnt;
}
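/*
 * Usage note (added): the caller owns the reference taken here and must
 * balance it with mntput(), e.g. the path-walk pattern
 *
 *	struct vfsmount *mounted = lookup_mnt(path->mnt, path->dentry);
 *	if (mounted) {
 *		... switch the walk to (mounted, mounted->mnt_root) ...
 *		mntput(path->mnt);
 *	}
 */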
static inline int check_mnt(struct vfsmount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}
static void detach_mnt(struct vfsmount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	old_path->dentry->d_mounted--;
}
void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
			struct vfsmount *child_mnt)
{
	child_mnt->mnt_parent = mntget(mnt);
	child_mnt->mnt_mountpoint = dget(dentry);
	dentry->d_mounted++;
}
static void attach_mnt(struct vfsmount *mnt, struct path *path)
{
	mnt_set_mountpoint(path->mnt, path->dentry, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(path->mnt, path->dentry));
	list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts);
}
/*
 * the caller must hold vfsmount_lock
 */
static void commit_tree(struct vfsmount *mnt)
{
	struct vfsmount *parent = mnt->mnt_parent;
	struct vfsmount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;
	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(parent, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_mnt_namespace(n);
}
static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}
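/*
 * Added note: next_mnt() yields a pre-order, depth-first walk of the mount
 * tree rooted at @root, which is why the rest of this file iterates whole
 * trees with the idiom
 *
 *	for (p = mnt; p; p = next_mnt(p, mnt))
 *		...;
 */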
static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct vfsmount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		if (flag & (CL_SLAVE | CL_PRIVATE))
			mnt->mnt_group_id = 0; /* not a peer of original */
		else
			mnt->mnt_group_id = old->mnt_group_id;

		if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
			int err = mnt_alloc_group_id(mnt);
			if (err)
				goto out_free;
		}

		mnt->mnt_flags = old->mnt_flags;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;

		if (flag & CL_SLAVE) {
			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
			mnt->mnt_master = old;
			CLEAR_MNT_SHARED(mnt);
		} else if (!(flag & CL_PRIVATE)) {
			if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
				list_add(&mnt->mnt_share, &old->mnt_share);
			if (IS_MNT_SLAVE(old))
				list_add(&mnt->mnt_slave, &old->mnt_slave);
			mnt->mnt_master = old->mnt_master;
		}
		if (flag & CL_MAKE_SHARED)
			set_mnt_shared(mnt);

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		if (flag & CL_EXPIRE) {
			if (!list_empty(&old->mnt_expire))
				list_add(&mnt->mnt_expire, &old->mnt_expire);
		}
	}
	return mnt;

 out_free:
	free_vfsmnt(mnt);
	return NULL;
}
static inline void __mntput(struct vfsmount *mnt)
{
	int cpu;
	struct super_block *sb = mnt->mnt_sb;
	/*
	 * We don't have to hold all of the locks at the
	 * same time here because we know that we're the
	 * last reference to mnt and that no new writers
	 * can come in.
	 */
	for_each_possible_cpu(cpu) {
		struct mnt_writer *cpu_writer = &per_cpu(mnt_writers, cpu);
		if (cpu_writer->mnt != mnt)
			continue;
		spin_lock(&cpu_writer->lock);
		atomic_add(cpu_writer->count, &mnt->__mnt_writers);
		cpu_writer->count = 0;
		/*
		 * Might as well do this so that no one
		 * ever sees the pointer and expects
		 * it to be valid.
		 */
		cpu_writer->mnt = NULL;
		spin_unlock(&cpu_writer->lock);
	}
	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair.  If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	WARN_ON(atomic_read(&mnt->__mnt_writers));
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}
void mntput_no_expire(struct vfsmount *mnt)
{
repeat:
	if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
		if (likely(!mnt->mnt_pinned)) {
			spin_unlock(&vfsmount_lock);
			__mntput(mnt);
			return;
		}
		atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
		mnt->mnt_pinned = 0;
		spin_unlock(&vfsmount_lock);
		acct_auto_close_mnt(mnt);
		security_sb_umount_close(mnt);
		goto repeat;
	}
}
EXPORT_SYMBOL(mntput_no_expire);
void mnt_pin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_pinned++;
	spin_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_pin);
void mnt_unpin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	if (mnt->mnt_pinned) {
		atomic_inc(&mnt->mnt_count);
		mnt->mnt_pinned--;
	}
	spin_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_unpin);
static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}
/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct vfsmount *mnt)
{
	const char *options = mnt->mnt_sb->s_options;

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}

	return 0;
}
EXPORT_SYMBOL(generic_show_options);
/*
 * If filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure that previous options are not overwritten if the
 * remount fails.
 *
 * Also note that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	kfree(sb->s_options);
	sb->s_options = kstrdup(options, GFP_KERNEL);
}
EXPORT_SYMBOL(save_mount_options);
#ifdef CONFIG_PROC_FS

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	down_read(&namespace_sem);
	return seq_list_start(&p->ns->list, *pos);
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_mounts *p = m->private;

	return seq_list_next(v, &p->ns->list, pos);
}
static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}
struct proc_fs_info {
	int flag;
	const char *str;
};
static int show_sb_opts(struct seq_file *m, struct super_block *sb)
{
	static const struct proc_fs_info fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ 0, NULL }
	};
	const struct proc_fs_info *fs_infop;

	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}

	return security_sb_show_options(m, sb);
}
static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
{
	static const struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ MNT_NOATIME, ",noatime" },
		{ MNT_NODIRATIME, ",nodiratime" },
		{ MNT_RELATIME, ",relatime" },
		{ 0, NULL }
	};
	const struct proc_fs_info *fs_infop;

	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
}
static void show_type(struct seq_file *m, struct super_block *sb)
{
	mangle(m, sb->s_type->name);
	if (sb->s_subtype && sb->s_subtype[0]) {
		seq_putc(m, '.');
		mangle(m, sb->s_subtype);
	}
}
static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	int err = 0;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');
	show_type(m, mnt->mnt_sb);
	seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
	err = show_sb_opts(m, mnt->mnt_sb);
	if (err)
		goto out;
	show_mnt_opts(m, mnt);
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
out:
	return err;
}
const struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt,
};
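/*
 * Example /proc/mounts line emitted by show_vfsmnt() above (illustrative):
 *
 *	/dev/sda1 / ext3 rw,data=ordered 0 0
 *
 * i.e. device, mountpoint, fstype, options, and two literal zeros printed
 * in place of the dump/pass fields of /etc/fstab.
 */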
static int show_mountinfo(struct seq_file *m, void *v)
{
	struct proc_mounts *p = m->private;
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct super_block *sb = mnt->mnt_sb;
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	struct path root = p->root;
	int err = 0;

	seq_printf(m, "%i %i %u:%u ", mnt->mnt_id, mnt->mnt_parent->mnt_id,
		   MAJOR(sb->s_dev), MINOR(sb->s_dev));
	seq_dentry(m, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');
	seq_path_root(m, &mnt_path, &root, " \t\n\\");
	if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
		/*
		 * Mountpoint is outside root, discard that one.  Ugly,
		 * but less so than trying to do that in iterator in a
		 * race-free way (due to renames).
		 */
		return SEQ_SKIP;
	}
	seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
	show_mnt_opts(m, mnt);

	/* Tagged fields ("foo:X" or "bar") */
	if (IS_MNT_SHARED(mnt))
		seq_printf(m, " shared:%i", mnt->mnt_group_id);
	if (IS_MNT_SLAVE(mnt)) {
		int master = mnt->mnt_master->mnt_group_id;
		int dom = get_dominating_id(mnt, &p->root);
		seq_printf(m, " master:%i", master);
		if (dom && dom != master)
			seq_printf(m, " propagate_from:%i", dom);
	}
	if (IS_MNT_UNBINDABLE(mnt))
		seq_puts(m, " unbindable");

	/* Filesystem specific data */
	seq_puts(m, " - ");
	show_type(m, sb);
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_puts(m, sb->s_flags & MS_RDONLY ? " ro" : " rw");
	err = show_sb_opts(m, sb);
	if (err)
		goto out;
	if (sb->s_op->show_options)
		err = sb->s_op->show_options(m, mnt);
	seq_putc(m, '\n');
out:
	return err;
}
const struct seq_operations mountinfo_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_mountinfo,
};
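/*
 * Example /proc/<pid>/mountinfo line emitted by show_mountinfo() above
 * (illustrative):
 *
 *	16 1 8:1 / / rw shared:5 - ext3 /dev/sda1 rw,data=ordered
 *
 * fields: mount id, parent id, major:minor of the sb, root of the mount
 * within the fs, mount point, per-mount options, optional tagged fields,
 * the '-' separator, fstype, source, and per-sb options.
 */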
static int show_vfsstat(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
	int err = 0;

	/* device */
	if (mnt->mnt_devname) {
		seq_puts(m, "device ");
		mangle(m, mnt->mnt_devname);
	} else
		seq_puts(m, "no device");

	/* mount point */
	seq_puts(m, " mounted on ");
	seq_path(m, &mnt_path, " \t\n\\");
	seq_putc(m, ' ');

	/* file system type */
	seq_puts(m, "with fstype ");
	show_type(m, mnt->mnt_sb);

	/* optional statistics */
	if (mnt->mnt_sb->s_op->show_stats) {
		seq_putc(m, ' ');
		err = mnt->mnt_sb->s_op->show_stats(m, mnt);
	}

	seq_putc(m, '\n');
	return err;
}

const struct seq_operations mountstats_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsstat,
};
#endif /* CONFIG_PROC_FS */
/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
	int actual_refs = 0;
	int minimum_refs = 0;
	struct vfsmount *p;

	spin_lock(&vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += atomic_read(&p->mnt_count);
		minimum_refs += 2;
	}
	spin_unlock(&vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);
/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	spin_lock(&vfsmount_lock);
	if (propagate_mount_busy(mnt, 2))
		ret = 0;
	spin_unlock(&vfsmount_lock);
	return ret;
}

EXPORT_SYMBOL(may_umount);
void release_mounts(struct list_head *head)
{
	struct vfsmount *mnt;
	while (!list_empty(head)) {
		mnt = list_first_entry(head, struct vfsmount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt->mnt_parent != mnt) {
			struct dentry *dentry;
			struct vfsmount *m;
			spin_lock(&vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt_root;
			mnt->mnt_parent = mnt;
			m->mnt_ghosts--;
			spin_unlock(&vfsmount_lock);
			dput(dentry);
			mntput(m);
		}
		mntput(mnt);
	}
}
void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
	struct vfsmount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		list_move(&p->mnt_hash, kill);

	if (propagate)
		propagate_umount(kill);

	list_for_each_entry(p, kill, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		list_del_init(&p->mnt_child);
		if (p->mnt_parent != p) {
			p->mnt_parent->mnt_ghosts++;
			p->mnt_mountpoint->d_mounted--;
		}
		change_mnt_propagation(p, MS_PRIVATE);
	}
}
static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts);
static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt_sb;
	int retval;
	LIST_HEAD(umount_list);

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (mnt == current->fs->root.mnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		if (atomic_read(&mnt->mnt_count) != 2)
			return -EBUSY;

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if (flags & MNT_FORCE && sb->s_op->umount_begin) {
		lock_kernel();
		sb->s_op->umount_begin(sb);
		unlock_kernel();
	}

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY)) {
			lock_kernel();
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
			unlock_kernel();
		}
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	event++;

	if (!(flags & MNT_DETACH))
		shrink_submounts(mnt, &umount_list);

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1, &umount_list);
		retval = 0;
	}
	spin_unlock(&vfsmount_lock);
	if (retval)
		security_sb_umount_busy(mnt);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	return retval;
}
/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

asmlinkage long sys_umount(char __user * name, int flags)
{
	struct nameidata nd;
	int retval;

	retval = __user_walk(name, LOOKUP_FOLLOW, &nd);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (nd.path.dentry != nd.path.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(nd.path.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(nd.path.mnt, flags);
dput_and_out:
	/* we mustn't call path_put() as that would clear mnt_expiry_mark */
	dput(nd.path.dentry);
	mntput_no_expire(nd.path.mnt);
out:
	return retval;
}
#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
asmlinkage long sys_oldumount(char __user * name)
{
	return sys_umount(name, 0);
}

#endif
static int mount_is_safe(struct nameidata *nd)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(nd->path.dentry->d_inode->i_mode))
		return -EPERM;
	if (nd->path.dentry->d_inode->i_mode & S_ISVTX) {
		if (current->uid != nd->path.dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (vfs_permission(nd, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}
struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
					int flag)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct path path;

	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
		return NULL;

	res = q = clone_mnt(mnt, dentry, flag);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			path.mnt = q;
			path.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root, flag);
			if (!q)
				goto Enomem;
			spin_lock(&vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &path);
			spin_unlock(&vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(res, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}
	return NULL;
}
struct vfsmount *collect_mounts(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *tree;
	down_write(&namespace_sem);
	tree = copy_tree(mnt, dentry, CL_COPY_ALL | CL_PRIVATE);
	up_write(&namespace_sem);
	return tree;
}
void drop_collected_mounts(struct vfsmount *mnt)
{
	LIST_HEAD(umount_list);

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	umount_tree(mnt, 0, &umount_list);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
}
static void cleanup_group_ids(struct vfsmount *mnt, struct vfsmount *end)
{
	struct vfsmount *p;

	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
		if (p->mnt_group_id && !IS_MNT_SHARED(p))
			mnt_release_group_id(p);
	}
}
static int invent_group_ids(struct vfsmount *mnt, bool recurse)
{
	struct vfsmount *p;

	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
			int err = mnt_alloc_group_id(p);
			if (err) {
				cleanup_group_ids(mnt, p);
				return err;
			}
		}
	}

	return 0;
}
/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 * 	 tree of the destination mount and the cloned mount is added to
 * 	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 * 	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |                    MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 * 	 all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 * 	 all the mounts belonging to the destination mount's propagation tree.
 * 	 the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * If the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct vfsmount *source_mnt,
			struct path *path, struct path *parent_path)
{
	LIST_HEAD(tree_list);
	struct vfsmount *dest_mnt = path->mnt;
	struct dentry *dest_dentry = path->dentry;
	struct vfsmount *child, *p;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
	}
	err = propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list);
	if (err)
		goto out_cleanup_ids;

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}

	spin_lock(&vfsmount_lock);
	if (parent_path) {
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, path);
		touch_mnt_namespace(current->nsproxy->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	spin_unlock(&vfsmount_lock);
	return 0;

 out_cleanup_ids:
	if (IS_MNT_SHARED(dest_mnt))
		cleanup_group_ids(source_mnt, NULL);
 out:
	return err;
}
static int graft_tree(struct vfsmount *mnt, struct path *path)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(path->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	mutex_lock(&path->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(path->dentry->d_inode))
		goto out_unlock;

	err = security_sb_check_sb(mnt, path);
	if (err)
		goto out_unlock;

	err = -ENOENT;
	if (IS_ROOT(path->dentry) || !d_unhashed(path->dentry))
		err = attach_recursive_mnt(mnt, path, NULL);
out_unlock:
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	if (!err)
		security_sb_post_addmount(mnt, path);
	return err;
}
/*
 * recursively change the type of the mountpoint.
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_change_type(struct nameidata *nd, int flag)
{
	struct vfsmount *m, *mnt = nd->path.mnt;
	int recurse = flag & MS_REC;
	int type = flag & ~MS_REC;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (nd->path.dentry != nd->path.mnt->mnt_root)
		return -EINVAL;

	down_write(&namespace_sem);
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	spin_lock(&vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	spin_unlock(&vfsmount_lock);

 out_unlock:
	up_write(&namespace_sem);
	return err;
}
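/*
 * Illustrative mapping from userspace (added note): mount(8) invocations
 * such as "mount --make-shared /mnt" arrive here as
 * do_change_type(&nd, MS_SHARED), and the recursive variants
 * ("--make-rshared" etc.) additionally carry MS_REC.
 */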
/*
 * do loopback mount.
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_loopback(struct nameidata *nd, char *old_name,
				int recurse)
{
	struct nameidata old_nd;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(nd);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old_nd.path.mnt))
		goto out;

	if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
		goto out;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old_nd.path.mnt, old_nd.path.dentry, 0);
	else
		mnt = clone_mnt(old_nd.path.mnt, old_nd.path.dentry, 0);

	if (!mnt)
		goto out;

	err = graft_tree(mnt, &nd->path);
	if (err) {
		/* we'll discard the mount here */
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}

out:
	up_write(&namespace_sem);
	path_put(&old_nd.path);
	return err;
}
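/*
 * Illustrative mapping (added note): "mount --bind /src /dst" reaches
 * do_loopback(&nd, "/src", 0) and clones a single mount, while
 * "mount --rbind /src /dst" passes recurse != 0 and copies the whole
 * tree of mounts below /src via copy_tree().
 */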
static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(mnt);
	else
		__mnt_unmake_readonly(mnt);
	return error;
}
/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_remount(struct nameidata *nd, int flags, int mnt_flags,
			       void *data)
{
	int err;
	struct super_block *sb = nd->path.mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(nd->path.mnt))
		return -EINVAL;

	if (nd->path.dentry != nd->path.mnt->mnt_root)
		return -EINVAL;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(nd->path.mnt, flags);
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err)
		nd->path.mnt->mnt_flags = mnt_flags;
	up_write(&sb->s_umount);
	if (!err)
		security_sb_post_remount(nd->path.mnt, flags, data);
	return err;
}
static inline int tree_contains_unbindable(struct vfsmount *mnt)
{
	struct vfsmount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}
/*
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_move_mount(struct nameidata *nd, char *old_name)
{
	struct nameidata old_nd;
	struct path parent_path;
	struct vfsmount *p;
	int err = 0;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	while (d_mountpoint(nd->path.dentry) &&
	       follow_down(&nd->path.mnt, &nd->path.dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->path.mnt) || !check_mnt(old_nd.path.mnt))
		goto out;

	err = -ENOENT;
	mutex_lock(&nd->path.dentry->d_inode->i_mutex);
	if (IS_DEADDIR(nd->path.dentry->d_inode))
		goto out1;

	if (!IS_ROOT(nd->path.dentry) && d_unhashed(nd->path.dentry))
		goto out1;

	err = -EINVAL;
	if (old_nd.path.dentry != old_nd.path.mnt->mnt_root)
		goto out1;

	if (old_nd.path.mnt == old_nd.path.mnt->mnt_parent)
		goto out1;

	if (S_ISDIR(nd->path.dentry->d_inode->i_mode) !=
	      S_ISDIR(old_nd.path.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (old_nd.path.mnt->mnt_parent &&
	    IS_MNT_SHARED(old_nd.path.mnt->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(nd->path.mnt) &&
	    tree_contains_unbindable(old_nd.path.mnt))
		goto out1;
	err = -ELOOP;
	for (p = nd->path.mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_nd.path.mnt)
			goto out1;

	err = attach_recursive_mnt(old_nd.path.mnt, &nd->path, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old_nd.path.mnt->mnt_expire);
out1:
	mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
out:
	up_write(&namespace_sem);
	if (!err)
		path_put(&parent_path);
	path_put(&old_nd.path);
	return err;
}
/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 * noinline this do_mount helper to save do_mount stack space.
 */
static noinline int do_new_mount(struct nameidata *nd, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;

	if (!type || !memchr(type, 0, PAGE_SIZE))
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	return do_add_mount(mnt, nd, mnt_flags, NULL);
}
/*
 * add a mount into a namespace's mount tree
 * - provide the option of adding the new mount to an expiration list
 */
int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
		 int mnt_flags, struct list_head *fslist)
{
	int err;

	down_write(&namespace_sem);
	/* Something was mounted here while we slept */
	while (d_mountpoint(nd->path.dentry) &&
	       follow_down(&nd->path.mnt, &nd->path.dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->path.mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (nd->path.mnt->mnt_sb == newmnt->mnt_sb &&
	    nd->path.mnt->mnt_root == nd->path.dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt_flags = mnt_flags;
	if ((err = graft_tree(newmnt, &nd->path)))
		goto unlock;

	if (fslist)	/* add to the specified expiration list */
		list_add_tail(&newmnt->mnt_expire, fslist);

	up_write(&namespace_sem);
	return 0;

unlock:
	up_write(&namespace_sem);
	mntput(newmnt);
	return err;
}

EXPORT_SYMBOL_GPL(do_add_mount);
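/*
 * Illustrative caller (added note; see e.g. the NFS client's automount
 * support): a filesystem that creates submounts on its own can pass an
 * expiration list, roughly
 *
 *	err = do_add_mount(newmnt, nd, MNT_SHRINKABLE, &fs_automount_list);
 *
 * and later reap unused submounts with
 * mark_mounts_for_expiry(&fs_automount_list).  The list name here is
 * hypothetical.
 */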
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct vfsmount *mnt, *next;
	LIST_HEAD(graveyard);
	LIST_HEAD(umounts);

	if (list_empty(mounts))
		return;

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct vfsmount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, 1, &umounts);
	}
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);

	release_mounts(&umounts);
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
{
	struct vfsmount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 */
static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts)
{
	LIST_HEAD(graveyard);
	struct vfsmount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct vfsmount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, 1, umounts);
		}
	}
}
/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}
int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(char *dev_name, char *dir_name, char *type_page,
		  unsigned long flags, void *data_page)
{
	struct nameidata nd;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */

	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;
	if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_RELATIME)
		mnt_flags |= MNT_RELATIME;
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_KERNMOUNT);

	/* ... and get the mountpoint */
	retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &nd.path,
				   type_page, flags, data_page);
	if (retval)
		goto dput_out;

	if (flags & MS_REMOUNT)
		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&nd, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&nd, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&nd, dev_name);
	else
		retval = do_new_mount(&nd, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&nd.path);
	return retval;
}
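/*
 * Added note on the dispatch above (illustrative): MS_REMOUNT, MS_BIND,
 * the propagation-type flags, and MS_MOVE are mutually exclusive commands,
 * e.g. "mount -o remount,ro /mnt" arrives with MS_REMOUNT | MS_RDONLY and
 * is routed to do_remount(), while a plain "mount -t ext3 /dev/sda1 /mnt"
 * matches none of them and falls through to do_new_mount().
 */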
/*
 * Allocate a new namespace structure and populate it with contents
 * copied from the namespace of the passed in task structure.
 */
static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
		struct fs_struct *fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
	struct vfsmount *p, *q;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);

	atomic_set(&new_ns->count, 1);
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;

	down_write(&namespace_sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(mnt_ns->root, mnt_ns->root->mnt_root,
					CL_COPY_ALL | CL_EXPIRE);
	if (!new_ns->root) {
		up_write(&namespace_sem);
		kfree(new_ns);
		return ERR_PTR(-ENOMEM);
	}
	spin_lock(&vfsmount_lock);
	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
	spin_unlock(&vfsmount_lock);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = mnt_ns->root;
	q = new_ns->root;
	while (p) {
		q->mnt_ns = new_ns;
		if (fs) {
			if (p == fs->root.mnt) {
				rootmnt = p;
				fs->root.mnt = mntget(q);
			}
			if (p == fs->pwd.mnt) {
				pwdmnt = p;
				fs->pwd.mnt = mntget(q);
			}
			if (p == fs->altroot.mnt) {
				altrootmnt = p;
				fs->altroot.mnt = mntget(q);
			}
		}
		p = next_mnt(p, mnt_ns->root);
		q = next_mnt(q, new_ns->root);
	}
	up_write(&namespace_sem);

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);
	if (altrootmnt)
		mntput(altrootmnt);

	return new_ns;
}

struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;

	BUG_ON(!ns);
	get_mnt_ns(ns);

	if (!(flags & CLONE_NEWNS))
		return ns;

	new_ns = dup_mnt_ns(ns, new_fs);

	put_mnt_ns(ns);
	return new_ns;
}
asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
			  char __user * type, unsigned long flags,
			  void __user * data)
{
	int retval;
	unsigned long data_page;
	unsigned long type_page;
	unsigned long dev_page;
	char *dir_page;

	retval = copy_mount_options(type, &type_page);
	if (retval < 0)
		return retval;

	dir_page = getname(dir_name);
	retval = PTR_ERR(dir_page);
	if (IS_ERR(dir_page))
		goto out1;

	retval = copy_mount_options(dev_name, &dev_page);
	if (retval < 0)
		goto out2;

	retval = copy_mount_options(data, &data_page);
	if (retval < 0)
		goto out3;

	lock_kernel();
	retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
			  flags, (void *)data_page);
	unlock_kernel();
	free_page(data_page);

out3:
	free_page(dev_page);
out2:
	putname(dir_page);
out1:
	free_page(type_page);
	return retval;
}
/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_root(struct fs_struct *fs, struct path *path)
{
	struct path old_root;

	write_lock(&fs->lock);
	old_root = fs->root;
	fs->root = *path;
	path_get(path);
	write_unlock(&fs->lock);
	if (old_root.dentry)
		path_put(&old_root);
}

/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
	struct path old_pwd;

	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	write_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);
}
static void chroot_fs_refs(struct path *old_root, struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			atomic_inc(&fs->count);
			task_unlock(p);
			if (fs->root.dentry == old_root->dentry
			    && fs->root.mnt == old_root->mnt)
				set_fs_root(fs, new_root);
			if (fs->pwd.dentry == old_root->dentry
			    && fs->pwd.mnt == old_root->mnt)
				set_fs_pwd(fs, new_root);
			put_fs_struct(fs);
		} else
			task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
asmlinkage long sys_pivot_root(const char __user * new_root,
			       const char __user * put_old)
{
	struct vfsmount *tmp;
	struct nameidata new_nd, old_nd;
	struct path parent_path, root_parent, root;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
			    &new_nd);
	if (error)
		goto out0;
	error = -EINVAL;
	if (!check_mnt(new_nd.path.mnt))
		goto out1;

	error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old_nd.path, &new_nd.path);
	if (error) {
		path_put(&old_nd.path);
		goto out1;
	}

	read_lock(&current->fs->lock);
	root = current->fs->root;
	path_get(&current->fs->root);
	read_unlock(&current->fs->lock);
	down_write(&namespace_sem);
	mutex_lock(&old_nd.path.dentry->d_inode->i_mutex);
	error = -EINVAL;
	if (IS_MNT_SHARED(old_nd.path.mnt) ||
		IS_MNT_SHARED(new_nd.path.mnt->mnt_parent) ||
		IS_MNT_SHARED(root.mnt->mnt_parent))
		goto out2;
	if (!check_mnt(root.mnt))
		goto out2;
	error = -ENOENT;
	if (IS_DEADDIR(new_nd.path.dentry->d_inode))
		goto out2;
	if (d_unhashed(new_nd.path.dentry) && !IS_ROOT(new_nd.path.dentry))
		goto out2;
	if (d_unhashed(old_nd.path.dentry) && !IS_ROOT(old_nd.path.dentry))
		goto out2;
	error = -EBUSY;
	if (new_nd.path.mnt == root.mnt ||
	    old_nd.path.mnt == root.mnt)
		goto out2; /* loop, on the same file system  */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out2; /* not a mountpoint */
	if (root.mnt->mnt_parent == root.mnt)
		goto out2; /* not attached */
	if (new_nd.path.mnt->mnt_root != new_nd.path.dentry)
		goto out2; /* not a mountpoint */
	if (new_nd.path.mnt->mnt_parent == new_nd.path.mnt)
		goto out2; /* not attached */
	/* make sure we can reach put_old from new_root */
	tmp = old_nd.path.mnt;
	spin_lock(&vfsmount_lock);
	if (tmp != new_nd.path.mnt) {
		for (;;) {
			if (tmp->mnt_parent == tmp)
				goto out3; /* already mounted on put_old */
			if (tmp->mnt_parent == new_nd.path.mnt)
				break;
			tmp = tmp->mnt_parent;
		}
		if (!is_subdir(tmp->mnt_mountpoint, new_nd.path.dentry))
			goto out3;
	} else if (!is_subdir(old_nd.path.dentry, new_nd.path.dentry))
		goto out3;
	detach_mnt(new_nd.path.mnt, &parent_path);
	detach_mnt(root.mnt, &root_parent);
	/* mount old root on put_old */
	attach_mnt(root.mnt, &old_nd.path);
	/* mount new_root on / */
	attach_mnt(new_nd.path.mnt, &root_parent);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	spin_unlock(&vfsmount_lock);
	chroot_fs_refs(&root, &new_nd.path);
	security_sb_post_pivotroot(&root, &new_nd.path);
	error = 0;
	path_put(&root_parent);
	path_put(&parent_path);
out2:
	mutex_unlock(&old_nd.path.dentry->d_inode->i_mutex);
	up_write(&namespace_sem);
	path_put(&root);
	path_put(&old_nd.path);
out1:
	path_put(&new_nd.path);
out0:
	return error;
out3:
	spin_unlock(&vfsmount_lock);
	goto out2;
}
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");
	ns = kmalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		panic("Can't allocate initial namespace");
	atomic_set(&ns->count, 1);
	INIT_LIST_HEAD(&ns->list);
	init_waitqueue_head(&ns->poll);
	ns->event = 0;
	list_add(&mnt->mnt_list, &ns->list);
	ns->root = mnt;
	mnt->mnt_ns = ns;

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = ns->root;
	root.dentry = ns->root->mnt_root;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}
void __init mnt_init(void)
{
	unsigned u;
	int err;

	init_rwsem(&namespace_sem);

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);

	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);

	for (u = 0; u < HASH_SIZE; u++)
		INIT_LIST_HEAD(&mount_hashtable[u]);

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}
void __put_mnt_ns(struct mnt_namespace *ns)
{
	struct vfsmount *root = ns->root;
	LIST_HEAD(umount_list);

	ns->root = NULL;
	spin_unlock(&vfsmount_lock);
	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	umount_tree(root, 0, &umount_list);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	kfree(ns);
}