/*
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */
#include <linux/config.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
extern int __init init_rootfs(void);

#ifdef CONFIG_SYSFS
extern int __init sysfs_init(void);
#else
static inline int sysfs_init(void)
{
	return 0;
}
#endif
/* spinlock for vfsmount related operations, in place of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
static struct list_head *mount_hashtable __read_mostly;
static int hash_mask __read_mostly, hash_bits __read_mostly;
static kmem_cache_t *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

decl_subsys(fs, NULL, NULL);
EXPORT_SYMBOL_GPL(fs_subsys);
static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> hash_bits);
	return tmp & hash_mask;
}
struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		memset(mnt, 0, sizeof(struct vfsmount));
		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		if (name) {
			int size = strlen(name) + 1;
			char *newname = kmalloc(size, GFP_KERNEL);
			if (newname) {
				memcpy(newname, name, size);
				mnt->mnt_devname = newname;
			}
		}
	}
	return mnt;
}
void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	kmem_cache_free(mnt_cache, mnt);
}
/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set return the first mount else return the last mount.
 */
struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			      int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct vfsmount *p, *found = NULL;

	for (;;) {
		tmp = dir ? tmp->next : tmp->prev;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}
/*
 * lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *child_mnt;

	spin_lock(&vfsmount_lock);
	if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
		mntget(child_mnt);
	spin_unlock(&vfsmount_lock);
	return child_mnt;
}
static inline int check_mnt(struct vfsmount *mnt)
{
	return mnt->mnt_namespace == current->namespace;
}
static void touch_namespace(struct namespace *ns)
{
	ns->event = ++event;
	wake_up_interruptible(&ns->poll);
}

static void __touch_namespace(struct namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}
static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
{
	old_nd->dentry = mnt->mnt_mountpoint;
	old_nd->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	old_nd->dentry->d_mounted--;
}
void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
			struct vfsmount *child_mnt)
{
	child_mnt->mnt_parent = mntget(mnt);
	child_mnt->mnt_mountpoint = dget(dentry);
	dentry->d_mounted++;
}
static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
{
	mnt_set_mountpoint(nd->mnt, nd->dentry, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(nd->mnt, nd->dentry));
	list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
}
/*
 * the caller must hold vfsmount_lock
 */
static void commit_tree(struct vfsmount *mnt)
{
	struct vfsmount *parent = mnt->mnt_parent;
	struct vfsmount *m;
	LIST_HEAD(head);
	struct namespace *n = parent->mnt_namespace;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_namespace = n;
	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(parent, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_namespace(n);
}
static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}
static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct vfsmount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		mnt->mnt_flags = old->mnt_flags;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;

		if (flag & CL_SLAVE) {
			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
			mnt->mnt_master = old;
			CLEAR_MNT_SHARED(mnt);
		} else {
			if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
				list_add(&mnt->mnt_share, &old->mnt_share);
			if (IS_MNT_SLAVE(old))
				list_add(&mnt->mnt_slave, &old->mnt_slave);
			mnt->mnt_master = old->mnt_master;
		}
		if (flag & CL_MAKE_SHARED)
			set_mnt_shared(mnt);

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		if (flag & CL_EXPIRE) {
			spin_lock(&vfsmount_lock);
			if (!list_empty(&old->mnt_expire))
				list_add(&mnt->mnt_expire, &old->mnt_expire);
			spin_unlock(&vfsmount_lock);
		}
	}
	return mnt;
}
static inline void __mntput(struct vfsmount *mnt)
{
	struct super_block *sb = mnt->mnt_sb;
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}

void mntput_no_expire(struct vfsmount *mnt)
{
repeat:
	if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
		if (likely(!mnt->mnt_pinned)) {
			spin_unlock(&vfsmount_lock);
			__mntput(mnt);
			return;
		}
		atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
		mnt->mnt_pinned = 0;
		spin_unlock(&vfsmount_lock);
		acct_auto_close_mnt(mnt);
		security_sb_umount_close(mnt);
		goto repeat;
	}
}

EXPORT_SYMBOL(mntput_no_expire);
void mnt_pin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_pinned++;
	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	if (mnt->mnt_pinned) {
		atomic_inc(&mnt->mnt_count);
		mnt->mnt_pinned--;
	}
	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL(mnt_unpin);
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct namespace *n = m->private;
	struct list_head *p;
	loff_t l = *pos;

	down_read(&namespace_sem);
	list_for_each(p, &n->list)
		if (!l--)
			return list_entry(p, struct vfsmount, mnt_list);
	return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct namespace *n = m->private;
	struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
	(*pos)++;
	return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}
static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = v;
	int err = 0;
	static struct proc_fs_info {
		int flag;
		char *str;
	} fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ 0, NULL }
	};
	static struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ MNT_NOATIME, ",noatime" },
		{ MNT_NODIRATIME, ",nodiratime" },
		{ 0, NULL }
	};
	struct proc_fs_info *fs_infop;

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_sb->s_type->name);
	seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
	return err;
}
struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt,
};
static int show_vfsstat(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = v;
	int err = 0;

	/* device */
	if (mnt->mnt_devname) {
		seq_puts(m, "device ");
		mangle(m, mnt->mnt_devname);
	} else
		seq_puts(m, "no device");

	/* mount point */
	seq_puts(m, " mounted on ");
	seq_path(m, mnt, mnt->mnt_root, " \t\n\\");

	/* file system type */
	seq_puts(m, "with fstype ");
	mangle(m, mnt->mnt_sb->s_type->name);

	/* optional statistics */
	if (mnt->mnt_sb->s_op->show_stats) {
		seq_putc(m, ' ');
		err = mnt->mnt_sb->s_op->show_stats(m, mnt);
	}

	seq_putc(m, '\n');
	return err;
}
struct seq_operations mountstats_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsstat,
};
/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
	int actual_refs = 0;
	int minimum_refs = 0;
	struct vfsmount *p;

	spin_lock(&vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += atomic_read(&p->mnt_count);
		minimum_refs += 2;
	}
	spin_unlock(&vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);
/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	spin_lock(&vfsmount_lock);
	if (propagate_mount_busy(mnt, 2))
		ret = 0;
	spin_unlock(&vfsmount_lock);
	return ret;
}

EXPORT_SYMBOL(may_umount);
void release_mounts(struct list_head *head)
{
	struct vfsmount *mnt;
	while (!list_empty(head)) {
		mnt = list_entry(head->next, struct vfsmount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt->mnt_parent != mnt) {
			struct dentry *dentry;
			struct vfsmount *m;
			spin_lock(&vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt_root;
			mnt->mnt_parent = mnt;
			spin_unlock(&vfsmount_lock);
			dput(dentry);
			mntput(m);
		}
		mntput(mnt);
	}
}
void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
	struct vfsmount *p;

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		list_del(&p->mnt_hash);
		list_add(&p->mnt_hash, kill);
	}

	if (propagate)
		propagate_umount(kill);

	list_for_each_entry(p, kill, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_namespace(p->mnt_namespace);
		p->mnt_namespace = NULL;
		list_del_init(&p->mnt_child);
		if (p->mnt_parent != p)
			p->mnt_mountpoint->d_mounted--;
		change_mnt_propagation(p, MS_PRIVATE);
	}
}
static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt_sb;
	int retval;
	LIST_HEAD(umount_list);

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (mnt == current->fs->rootmnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		if (atomic_read(&mnt->mnt_count) != 2)
			return -EBUSY;

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */
	if ((flags & MNT_FORCE) && sb->s_op->umount_begin)
		sb->s_op->umount_begin(sb);

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(2). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1, &umount_list);
		retval = 0;
	}
	spin_unlock(&vfsmount_lock);
	if (retval)
		security_sb_umount_busy(mnt);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	return retval;
}
/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

asmlinkage long sys_umount(char __user *name, int flags)
{
	struct nameidata nd;
	int retval;

	retval = __user_walk(name, LOOKUP_FOLLOW, &nd);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (nd.dentry != nd.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(nd.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(nd.mnt, flags);
dput_and_out:
	path_release_on_umount(&nd);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
asmlinkage long sys_oldumount(char __user *name)
{
	return sys_umount(name, 0);
}

#endif
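
/*
 * Illustrative userspace sketch (not part of this file): requesting a
 * lazy detach with umount2(2), which takes the MNT_DETACH path in
 * do_umount() above. The mount point "/mnt/data" is an assumption for
 * the example only.
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		if (umount2("/mnt/data", MNT_DETACH) != 0) {
 *			perror("umount2");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */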
static int mount_is_safe(struct nameidata *nd)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(nd->dentry->d_inode->i_mode))
		return -EPERM;
	if (nd->dentry->d_inode->i_mode & S_ISVTX) {
		if (current->uid != nd->dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (vfs_permission(nd, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}
static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
{
	while (1) {
		if (d == dentry)
			return 1;
		if (d == NULL || d == d->d_parent)
			return 0;
		d = d->d_parent;
	}
}
struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
					int flag)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct nameidata nd;

	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
		return NULL;

	res = q = clone_mnt(mnt, dentry, flag);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			nd.mnt = q;
			nd.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root, flag);
			if (!q)
				goto Enomem;
			spin_lock(&vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &nd);
			spin_unlock(&vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(res, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}
	return NULL;
}
/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared       |       private  |       slave    | unbindable |
 * | dest     |              |                |                |            |
 * |   v      |              |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)  |     shared (+) |     shared(+++)|  invalid   |
 * |non-shared| shared (+)   |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |         MOVE MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared       |       private  |       slave    | unbindable |
 * | dest     |              |                |                |            |
 * |   v      |              |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)   |     shared (+) |    shared(+++) |  invalid   |
 * |non-shared| shared (+*)  |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree.
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
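
/*
 * Illustrative userspace sketch (not part of this file): marking an
 * existing mount shared and then bind-mounting it elsewhere, the kind
 * of operation the tables above describe. MS_SHARED is passed through
 * sys_mount() later in this file; the paths and the fallback #define
 * are assumptions for the example only.
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	#ifndef MS_SHARED
 *	#define MS_SHARED	(1 << 20)	// value from <linux/fs.h>
 *	#endif
 *
 *	int main(void)
 *	{
 *		// make /mnt/a a shared mount (propagation type change)
 *		if (mount(NULL, "/mnt/a", NULL, MS_SHARED, NULL) != 0)
 *			perror("mount MS_SHARED");
 *		// bind it; the clone joins the source's peer group (++)
 *		if (mount("/mnt/a", "/mnt/b", NULL, MS_BIND, NULL) != 0)
 *			perror("mount MS_BIND");
 *		return 0;
 *	}
 */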
static int attach_recursive_mnt(struct vfsmount *source_mnt,
		struct nameidata *nd, struct nameidata *parent_nd)
{
	LIST_HEAD(tree_list);
	struct vfsmount *dest_mnt = nd->mnt;
	struct dentry *dest_dentry = nd->dentry;
	struct vfsmount *child, *p;

	if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list))
		return -EINVAL;

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}

	spin_lock(&vfsmount_lock);
	if (parent_nd) {
		detach_mnt(source_mnt, parent_nd);
		attach_mnt(source_mnt, nd);
		touch_namespace(current->namespace);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	spin_unlock(&vfsmount_lock);
	return 0;
}
static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	mutex_lock(&nd->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out_unlock;

	err = security_sb_check_sb(mnt, nd);
	if (err)
		goto out_unlock;

	err = -ENOENT;
	if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry))
		err = attach_recursive_mnt(mnt, nd, NULL);
out_unlock:
	mutex_unlock(&nd->dentry->d_inode->i_mutex);
	if (!err)
		security_sb_post_addmount(mnt, nd);
	return err;
}
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct nameidata *nd, int flag)
{
	struct vfsmount *m, *mnt = nd->mnt;
	int recurse = flag & MS_REC;
	int type = flag & ~MS_REC;

	if (nd->dentry != nd->mnt->mnt_root)
		return -EINVAL;

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	return 0;
}
static int do_loopback(struct nameidata *nd, char *old_name,
			unsigned long flags, int mnt_flags)
{
	struct nameidata old_nd;
	struct vfsmount *mnt = NULL;
	int recurse = flags & MS_REC;
	int err = mount_is_safe(nd);

	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old_nd.mnt))
		goto out;

	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
		goto out;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old_nd.mnt, old_nd.dentry, 0);
	else
		mnt = clone_mnt(old_nd.mnt, old_nd.dentry, 0);

	if (!mnt)
		goto out;

	err = graft_tree(mnt, nd);
	if (err) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	} else
		mnt->mnt_flags = mnt_flags;

out:
	up_write(&namespace_sem);
	path_release(&old_nd);
	return err;
}
/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct nameidata *nd, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = nd->mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(nd->mnt))
		return -EINVAL;

	if (nd->dentry != nd->mnt->mnt_root)
		return -EINVAL;

	down_write(&sb->s_umount);
	err = do_remount_sb(sb, flags, data, 0);
	if (!err)
		nd->mnt->mnt_flags = mnt_flags;
	up_write(&sb->s_umount);
	if (!err)
		security_sb_post_remount(nd->mnt, flags, data);
	return err;
}
static inline int tree_contains_unbindable(struct vfsmount *mnt)
{
	struct vfsmount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}
static int do_move_mount(struct nameidata *nd, char *old_name)
{
	struct nameidata old_nd, parent_nd;
	struct vfsmount *p;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
		goto out;

	err = -ENOENT;
	mutex_lock(&nd->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out1;

	if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
		goto out1;

	err = -EINVAL;
	if (old_nd.dentry != old_nd.mnt->mnt_root)
		goto out1;

	if (old_nd.mnt == old_nd.mnt->mnt_parent)
		goto out1;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	      S_ISDIR(old_nd.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (old_nd.mnt->mnt_parent && IS_MNT_SHARED(old_nd.mnt->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(nd->mnt) && tree_contains_unbindable(old_nd.mnt))
		goto out1;
	err = -ELOOP;
	for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_nd.mnt)
			goto out1;

	if ((err = attach_recursive_mnt(old_nd.mnt, nd, &parent_nd)))
		goto out1;

	spin_lock(&vfsmount_lock);
	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old_nd.mnt->mnt_expire);
	spin_unlock(&vfsmount_lock);
out1:
	mutex_unlock(&nd->dentry->d_inode->i_mutex);
out:
	up_write(&namespace_sem);
	if (!err)
		path_release(&parent_nd);
	path_release(&old_nd);
	return err;
}
/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct nameidata *nd, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;

	if (!type || !memchr(type, 0, PAGE_SIZE))
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	return do_add_mount(mnt, nd, mnt_flags, NULL);
}
/*
 * add a mount into a namespace's mount tree
 * - provide the option of adding the new mount to an expiration list
 */
int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
		 int mnt_flags, struct list_head *fslist)
{
	int err;

	down_write(&namespace_sem);
	/* Something was mounted here while we slept */
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (nd->mnt->mnt_sb == newmnt->mnt_sb &&
	    nd->mnt->mnt_root == nd->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt_flags = mnt_flags;
	if ((err = graft_tree(newmnt, nd)))
		goto unlock;

	if (fslist) {
		/* add to the specified expiration list */
		spin_lock(&vfsmount_lock);
		list_add_tail(&newmnt->mnt_expire, fslist);
		spin_unlock(&vfsmount_lock);
	}
	up_write(&namespace_sem);
	return 0;

unlock:
	up_write(&namespace_sem);
	mntput(newmnt);
	return err;
}

EXPORT_SYMBOL_GPL(do_add_mount);
static void expire_mount(struct vfsmount *mnt, struct list_head *mounts,
			 struct list_head *umounts)
{
	spin_lock(&vfsmount_lock);

	/*
	 * Check if mount is still attached, if not, let whoever holds it deal
	 * with it
	 */
	if (mnt->mnt_parent == mnt) {
		spin_unlock(&vfsmount_lock);
		return;
	}

	/*
	 * Check that it is still dead: the count should now be 2 - as
	 * contributed by the vfsmount parent and the mntget above
	 */
	if (!propagate_mount_busy(mnt, 2)) {
		/* delete from the namespace */
		touch_namespace(mnt->mnt_namespace);
		list_del_init(&mnt->mnt_list);
		mnt->mnt_namespace = NULL;
		umount_tree(mnt, 1, umounts);
		spin_unlock(&vfsmount_lock);
	} else {
		/*
		 * Someone brought it back to life whilst we didn't have any
		 * locks held so return it to the expiration list
		 */
		list_add_tail(&mnt->mnt_expire, mounts);
		spin_unlock(&vfsmount_lock);
	}
}
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct namespace *namespace;
	struct vfsmount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	spin_lock(&vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
		    atomic_read(&mnt->mnt_count) != 1)
			continue;

		mntget(mnt);
		list_move(&mnt->mnt_expire, &graveyard);
	}

	/*
	 * go through the vfsmounts we've just consigned to the graveyard to
	 * - check that they're still dead
	 * - delete the vfsmount from the appropriate namespace under lock
	 * - dispose of the corpse
	 */
	while (!list_empty(&graveyard)) {
		LIST_HEAD(umounts);
		mnt = list_entry(graveyard.next, struct vfsmount, mnt_expire);
		list_del_init(&mnt->mnt_expire);

		/* don't do anything if the namespace is dead - all the
		 * vfsmounts from it are going away anyway */
		namespace = mnt->mnt_namespace;
		if (!namespace || !namespace->root)
			continue;
		get_namespace(namespace);

		spin_unlock(&vfsmount_lock);
		down_write(&namespace_sem);
		expire_mount(mnt, mounts, &umounts);
		up_write(&namespace_sem);
		release_mounts(&umounts);
		mntput(mnt);
		put_namespace(namespace);
		spin_lock(&vfsmount_lock);
	}

	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}
int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid. Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
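
/*
 * Illustrative userspace sketch (not part of this file): a plain new
 * mount with per-mountpoint flags and an fs-specific data string, the
 * default branch of do_mount() below. The tmpfs parameters are
 * assumptions for the example only.
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		// "size=16m" is passed through as the data page
 *		if (mount("tmpfs", "/mnt/scratch", "tmpfs",
 *			  MS_NOSUID | MS_NODEV, "size=16m")) {
 *			perror("mount");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */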
long do_mount(char *dev_name, char *dir_name, char *type_page,
	      unsigned long flags, void *data_page)
{
	struct nameidata nd;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */

	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;
	if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
		   MS_NOATIME | MS_NODIRATIME);

	/* ... and get the mountpoint */
	retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page);
	if (retval)
		goto dput_out;

	if (flags & MS_REMOUNT)
		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&nd, dev_name, flags, mnt_flags);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&nd, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&nd, dev_name);
	else
		retval = do_new_mount(&nd, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_release(&nd);
	return retval;
}
/*
 * Allocate a new namespace structure and populate it with contents
 * copied from the namespace of the passed in task structure.
 */
struct namespace *dup_namespace(struct task_struct *tsk, struct fs_struct *fs)
{
	struct namespace *namespace = tsk->namespace;
	struct namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
	struct vfsmount *p, *q;

	new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL);
	if (!new_ns)
		return NULL;

	atomic_set(&new_ns->count, 1);
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;

	down_write(&namespace_sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root,
					CL_COPY_ALL | CL_EXPIRE);
	if (!new_ns->root) {
		up_write(&namespace_sem);
		kfree(new_ns);
		return NULL;
	}
	spin_lock(&vfsmount_lock);
	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
	spin_unlock(&vfsmount_lock);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace. We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = namespace->root;
	q = new_ns->root;
	while (p) {
		q->mnt_namespace = new_ns;
		if (fs) {
			if (p == fs->rootmnt) {
				rootmnt = p;
				fs->rootmnt = mntget(q);
			}
			if (p == fs->pwdmnt) {
				pwdmnt = p;
				fs->pwdmnt = mntget(q);
			}
			if (p == fs->altrootmnt) {
				altrootmnt = p;
				fs->altrootmnt = mntget(q);
			}
		}
		p = next_mnt(p, namespace->root);
		q = next_mnt(q, new_ns->root);
	}
	up_write(&namespace_sem);

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);
	if (altrootmnt)
		mntput(altrootmnt);

	return new_ns;
}
int copy_namespace(int flags, struct task_struct *tsk)
{
	struct namespace *namespace = tsk->namespace;
	struct namespace *new_ns;
	int err = 0;

	if (!namespace)
		return 0;

	get_namespace(namespace);

	if (!(flags & CLONE_NEWNS))
		return 0;

	if (!capable(CAP_SYS_ADMIN)) {
		err = -EPERM;
		goto out;
	}

	new_ns = dup_namespace(tsk, tsk->fs);
	if (!new_ns) {
		err = -ENOMEM;
		goto out;
	}

	tsk->namespace = new_ns;

out:
	put_namespace(namespace);
	return err;
}
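
/*
 * Illustrative userspace sketch (not part of this file): creating a
 * child with its own copy of the mount namespace, which reaches
 * copy_namespace() via clone(CLONE_NEWNS). Needs CAP_SYS_ADMIN; the
 * stack size is an assumption for the example only.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/wait.h>
 *
 *	static int child(void *arg)
 *	{
 *		// mounts made here are invisible to the parent
 *		return 0;
 *	}
 *
 *	int main(void)
 *	{
 *		char *stack = malloc(64 * 1024);
 *		pid_t pid = clone(child, stack + 64 * 1024,
 *				  CLONE_NEWNS | SIGCHLD, NULL);
 *		if (pid < 0) {
 *			perror("clone");
 *			return 1;
 *		}
 *		waitpid(pid, NULL, 0);
 *		return 0;
 *	}
 */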
asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
			  char __user * type, unsigned long flags,
			  void __user * data)
{
	int retval;
	unsigned long data_page;
	unsigned long type_page;
	unsigned long dev_page;
	char *dir_page;

	retval = copy_mount_options(type, &type_page);
	if (retval < 0)
		return retval;

	dir_page = getname(dir_name);
	retval = PTR_ERR(dir_page);
	if (IS_ERR(dir_page))
		goto out1;

	retval = copy_mount_options(dev_name, &dev_page);
	if (retval < 0)
		goto out2;

	retval = copy_mount_options(data, &data_page);
	if (retval < 0)
		goto out3;

	retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
			  flags, (void *)data_page);
	free_page(data_page);

out3:
	free_page(dev_page);
out2:
	putname(dir_page);
out1:
	free_page(type_page);
	return retval;
}
/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt,
		 struct dentry *dentry)
{
	struct dentry *old_root;
	struct vfsmount *old_rootmnt;

	write_lock(&fs->lock);
	old_root = fs->root;
	old_rootmnt = fs->rootmnt;
	fs->rootmnt = mntget(mnt);
	fs->root = dget(dentry);
	write_unlock(&fs->lock);

	if (old_root) {
		dput(old_root);
		mntput(old_rootmnt);
	}
}
/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
		struct dentry *dentry)
{
	struct dentry *old_pwd;
	struct vfsmount *old_pwdmnt;

	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	old_pwdmnt = fs->pwdmnt;
	fs->pwdmnt = mntget(mnt);
	fs->pwd = dget(dentry);
	write_unlock(&fs->lock);

	if (old_pwd) {
		dput(old_pwd);
		mntput(old_pwdmnt);
	}
}
static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			atomic_inc(&fs->count);
			task_unlock(p);
			if (fs->root == old_nd->dentry
			    && fs->rootmnt == old_nd->mnt)
				set_fs_root(fs, new_nd->mnt, new_nd->dentry);
			if (fs->pwd == old_nd->dentry
			    && fs->pwdmnt == old_nd->mnt)
				set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
			put_fs_struct(fs);
		} else
			task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root as the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
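
/*
 * Illustrative userspace sketch (not part of this file): calling
 * pivot_root(2) through syscall(2), since glibc provides no wrapper.
 * The directories are assumptions for the example only, and new_root
 * must already be a mountpoint as described above.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	int main(void)
 *	{
 *		if (syscall(SYS_pivot_root, "/new_root", "/new_root/old")) {
 *			perror("pivot_root");
 *			return 1;
 *		}
 *		chdir("/");
 *		return 0;
 *	}
 */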
asmlinkage long sys_pivot_root(const char __user * new_root,
			       const char __user * put_old)
{
	struct vfsmount *tmp;
	struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
			    &new_nd);
	if (error)
		goto out0;
	error = -EINVAL;
	if (!check_mnt(new_nd.mnt))
		goto out1;

	error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old_nd, &new_nd);
	if (error) {
		path_release(&old_nd);
		goto out1;
	}

	read_lock(&current->fs->lock);
	user_nd.mnt = mntget(current->fs->rootmnt);
	user_nd.dentry = dget(current->fs->root);
	read_unlock(&current->fs->lock);
	down_write(&namespace_sem);
	mutex_lock(&old_nd.dentry->d_inode->i_mutex);
	error = -EINVAL;
	if (IS_MNT_SHARED(old_nd.mnt) ||
		IS_MNT_SHARED(new_nd.mnt->mnt_parent) ||
		IS_MNT_SHARED(user_nd.mnt->mnt_parent))
		goto out2;
	if (!check_mnt(user_nd.mnt))
		goto out2;
	error = -ENOENT;
	if (IS_DEADDIR(new_nd.dentry->d_inode))
		goto out2;
	if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
		goto out2;
	if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
		goto out2;
	error = -EBUSY;
	if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
		goto out2; /* loop, on the same file system  */
	error = -EINVAL;
	if (user_nd.mnt->mnt_root != user_nd.dentry)
		goto out2; /* not a mountpoint */
	if (user_nd.mnt->mnt_parent == user_nd.mnt)
		goto out2; /* not attached */
	if (new_nd.mnt->mnt_root != new_nd.dentry)
		goto out2; /* not a mountpoint */
	if (new_nd.mnt->mnt_parent == new_nd.mnt)
		goto out2; /* not attached */
	tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
	spin_lock(&vfsmount_lock);
	if (tmp != new_nd.mnt) {
		for (;;) {
			if (tmp->mnt_parent == tmp)
				goto out3; /* already mounted on put_old */
			if (tmp->mnt_parent == new_nd.mnt)
				break;
			tmp = tmp->mnt_parent;
		}
		if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
			goto out3;
	} else if (!is_subdir(old_nd.dentry, new_nd.dentry))
		goto out3;
	detach_mnt(new_nd.mnt, &parent_nd);
	detach_mnt(user_nd.mnt, &root_parent);
	attach_mnt(user_nd.mnt, &old_nd);     /* mount old root on put_old */
	attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */
	touch_namespace(current->namespace);
	spin_unlock(&vfsmount_lock);
	chroot_fs_refs(&user_nd, &new_nd);
	security_sb_post_pivotroot(&user_nd, &new_nd);
	error = 0;
	path_release(&root_parent);
	path_release(&parent_nd);
out2:
	mutex_unlock(&old_nd.dentry->d_inode->i_mutex);
	up_write(&namespace_sem);
	path_release(&user_nd);
	path_release(&old_nd);
out1:
	path_release(&new_nd);
out0:
	return error;
out3:
	spin_unlock(&vfsmount_lock);
	goto out2;
}
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct namespace *namespace;
	struct task_struct *g, *p;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");
	namespace = kmalloc(sizeof(*namespace), GFP_KERNEL);
	if (!namespace)
		panic("Can't allocate initial namespace");
	atomic_set(&namespace->count, 1);
	INIT_LIST_HEAD(&namespace->list);
	init_waitqueue_head(&namespace->poll);
	namespace->event = 0;
	list_add(&mnt->mnt_list, &namespace->list);
	namespace->root = mnt;
	mnt->mnt_namespace = namespace;

	init_task.namespace = namespace;
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		get_namespace(namespace);
		p->namespace = namespace;
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
	set_fs_root(current->fs, namespace->root, namespace->root->mnt_root);
}
void __init mnt_init(unsigned long mempages)
{
	struct list_head *d;
	unsigned int nr_hash;
	int i;

	init_rwsem(&namespace_sem);

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);

	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);

	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	/*
	 * Find the power-of-two list-heads that can fit into the allocation..
	 * We don't guarantee that "sizeof(struct list_head)" is necessarily
	 * a power-of-two.
	 */
	nr_hash = PAGE_SIZE / sizeof(struct list_head);
	hash_bits = 0;
	do {
		hash_bits++;
	} while ((nr_hash >> hash_bits) != 0);
	hash_bits--;

	/*
	 * Re-calculate the actual number of entries and the mask
	 * from the number of bits we can fit.
	 */
	nr_hash = 1UL << hash_bits;
	hash_mask = nr_hash - 1;

	printk("Mount-cache hash table entries: %d\n", nr_hash);

	/* And initialize the newly allocated array */
	d = mount_hashtable;
	i = nr_hash;
	do {
		INIT_LIST_HEAD(d);
		d++;
		i--;
	} while (i);
	sysfs_init();
	subsystem_register(&fs_subsys);
	init_rootfs();
	init_mount_tree();
}
void __put_namespace(struct namespace *namespace)
{
	struct vfsmount *root = namespace->root;
	LIST_HEAD(umount_list);
	namespace->root = NULL;
	spin_unlock(&vfsmount_lock);
	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	umount_tree(root, 0, &umount_list);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	kfree(namespace);
}