/*
 *  (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */
#include <linux/config.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
extern int __init init_rootfs(void);

#ifdef CONFIG_SYSFS
extern int __init sysfs_init(void);
#else
static inline int sysfs_init(void)
{
	return 0;
}
#endif
/* spinlock for vfsmount related operations, inplace of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);

static int event;

static struct list_head *mount_hashtable;
static int hash_mask __read_mostly, hash_bits __read_mostly;
static kmem_cache_t *mnt_cache;
static struct rw_semaphore namespace_sem;

decl_subsys(fs, NULL, NULL);
EXPORT_SYMBOL_GPL(fs_subsys);
static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> hash_bits);
	return tmp & hash_mask;
}
struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		memset(mnt, 0, sizeof(struct vfsmount));
		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		if (name) {
			int size = strlen(name) + 1;
			char *newname = kmalloc(size, GFP_KERNEL);
			if (newname) {
				memcpy(newname, name, size);
				mnt->mnt_devname = newname;
			}
		}
	}
	return mnt;
}
void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	kmem_cache_free(mnt_cache, mnt);
}
/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set return the first mount else return the last mount.
 */
struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			      int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct vfsmount *p, *found = NULL;

	for (;;) {
		tmp = dir ? tmp->next : tmp->prev;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}
/*
 * lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *child_mnt;
	spin_lock(&vfsmount_lock);
	if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
		mntget(child_mnt);
	spin_unlock(&vfsmount_lock);
	return child_mnt;
}
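/*
 * Illustrative sketch of a typical caller (the names and exact cleanup order
 * here are an approximation of how path-walking code crosses a mountpoint,
 * not a quote of fs/namei.c):
 *
 *	struct vfsmount *mounted = lookup_mnt(*mnt, *dentry);
 *	if (mounted) {
 *		dput(*dentry);
 *		mntput(*mnt);
 *		*mnt = mounted;
 *		*dentry = dget(mounted->mnt_root);
 *	}
 *
 * The mntget() done inside lookup_mnt() is what keeps the returned vfsmount
 * pinned until the caller's eventual mntput().
 */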
static inline int check_mnt(struct vfsmount *mnt)
{
	return mnt->mnt_namespace == current->namespace;
}

static void touch_namespace(struct namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

static void __touch_namespace(struct namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}
static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
{
	old_nd->dentry = mnt->mnt_mountpoint;
	old_nd->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	old_nd->dentry->d_mounted--;
}
void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
			struct vfsmount *child_mnt)
{
	child_mnt->mnt_parent = mntget(mnt);
	child_mnt->mnt_mountpoint = dget(dentry);
	dentry->d_mounted++;
}

static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
{
	mnt_set_mountpoint(nd->mnt, nd->dentry, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
		      hash(nd->mnt, nd->dentry));
	list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
}
/*
 * the caller must hold vfsmount_lock
 */
static void commit_tree(struct vfsmount *mnt)
{
	struct vfsmount *parent = mnt->mnt_parent;
	struct vfsmount *m;
	LIST_HEAD(head);
	struct namespace *n = parent->mnt_namespace;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_namespace = n;
	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
		      hash(parent, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_namespace(n);
}
static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}
static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct vfsmount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}
static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
				  int flag)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		mnt->mnt_flags = old->mnt_flags;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;

		if (flag & CL_SLAVE) {
			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
			mnt->mnt_master = old;
			CLEAR_MNT_SHARED(mnt);
		} else {
			if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
				list_add(&mnt->mnt_share, &old->mnt_share);
			if (IS_MNT_SLAVE(old))
				list_add(&mnt->mnt_slave, &old->mnt_slave);
			mnt->mnt_master = old->mnt_master;
		}
		if (flag & CL_MAKE_SHARED)
			set_mnt_shared(mnt);

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		if (flag & CL_EXPIRE) {
			spin_lock(&vfsmount_lock);
			if (!list_empty(&old->mnt_expire))
				list_add(&mnt->mnt_expire, &old->mnt_expire);
			spin_unlock(&vfsmount_lock);
		}
	}
	return mnt;
}
static inline void __mntput(struct vfsmount *mnt)
{
	struct super_block *sb = mnt->mnt_sb;
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}
void mntput_no_expire(struct vfsmount *mnt)
{
repeat:
	if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
		if (likely(!mnt->mnt_pinned)) {
			spin_unlock(&vfsmount_lock);
			__mntput(mnt);
			return;
		}
		atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
		mnt->mnt_pinned = 0;
		spin_unlock(&vfsmount_lock);
		acct_auto_close_mnt(mnt);
		security_sb_umount_close(mnt);
		goto repeat;
	}
}

EXPORT_SYMBOL(mntput_no_expire);
void mnt_pin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_pinned++;
	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	if (mnt->mnt_pinned) {
		atomic_inc(&mnt->mnt_count);
		mnt->mnt_pinned--;
	}
	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL(mnt_unpin);
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct namespace *n = m->private;
	struct list_head *p;
	loff_t l = *pos;

	down_read(&namespace_sem);
	list_for_each(p, &n->list)
		if (!l--)
			return list_entry(p, struct vfsmount, mnt_list);
	return NULL;
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct namespace *n = m->private;
	struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
	(*pos)++;
	return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}
static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}
static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = v;
	int err = 0;
	static struct proc_fs_info {
		int flag;
		char *str;
	} fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ 0, NULL }
	};
	static struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ MNT_NOATIME, ",noatime" },
		{ MNT_NODIRATIME, ",nodiratime" },
		{ 0, NULL }
	};
	struct proc_fs_info *fs_infop;

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_sb->s_type->name);
	seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
	return err;
}

struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt
};
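/*
 * Illustrative example of one /proc/mounts record produced by show_vfsmnt()
 * above, "<devname> <mountpoint> <fstype> <options> 0 0" (the device, path
 * and options shown here are hypothetical):
 *
 *	/dev/sda1 /home ext3 rw,noatime,nodiratime 0 0
 */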
/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
	int actual_refs = 0;
	int minimum_refs = 0;
	struct vfsmount *p;

	spin_lock(&vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += atomic_read(&p->mnt_count);
		minimum_refs += 2;
	}
	spin_unlock(&vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);
/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	spin_lock(&vfsmount_lock);
	if (propagate_mount_busy(mnt, 2))
		ret = 0;
	spin_unlock(&vfsmount_lock);
	return ret;
}

EXPORT_SYMBOL(may_umount);
void release_mounts(struct list_head *head)
{
	struct vfsmount *mnt;
	while (!list_empty(head)) {
		mnt = list_entry(head->next, struct vfsmount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt->mnt_parent != mnt) {
			struct dentry *dentry;
			struct vfsmount *m;
			spin_lock(&vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt_root;
			mnt->mnt_parent = mnt;
			spin_unlock(&vfsmount_lock);
			dput(dentry);
			mntput(m);
		}
		mntput(mnt);
	}
}
void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
	struct vfsmount *p;

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		list_del(&p->mnt_hash);
		list_add(&p->mnt_hash, kill);
	}

	if (propagate)
		propagate_umount(kill);

	list_for_each_entry(p, kill, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_namespace(p->mnt_namespace);
		p->mnt_namespace = NULL;
		list_del_init(&p->mnt_child);
		if (p->mnt_parent != p)
			p->mnt_mountpoint->d_mounted--;
		change_mnt_propagation(p, MS_PRIVATE);
	}
}
static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt_sb;
	int retval;
	LIST_HEAD(umount_list);

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (mnt == current->fs->rootmnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		if (atomic_read(&mnt->mnt_count) != 2)
			return -EBUSY;

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee thats tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. Thats for the mount program to worry
	 * about for the moment.
	 */

	lock_kernel();
	if ((flags & MNT_FORCE) && sb->s_op->umount_begin)
		sb->s_op->umount_begin(sb);
	unlock_kernel();

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY)) {
			lock_kernel();
			DQUOT_OFF(sb);
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
			unlock_kernel();
		}
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	event++;

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1, &umount_list);
		retval = 0;
	}
	spin_unlock(&vfsmount_lock);
	if (retval)
		security_sb_umount_busy(mnt);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	return retval;
}
/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

asmlinkage long sys_umount(char __user * name, int flags)
{
	struct nameidata nd;
	int retval;

	retval = __user_walk(name, LOOKUP_FOLLOW, &nd);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (nd.dentry != nd.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(nd.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(nd.mnt, flags);
dput_and_out:
	path_release_on_umount(&nd);
out:
	return retval;
}
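/*
 * Illustrative userspace sketch: the flag handling above corresponds to the
 * umount2(2) interface (path is hypothetical):
 *
 *	umount2("/mnt/cdrom", 0);		// ordinary unmount
 *	umount2("/mnt/cdrom", MNT_FORCE);	// abort in-flight requests first
 *	umount2("/mnt/cdrom", MNT_DETACH);	// lazy: detach now, free later
 *	umount2("/mnt/cdrom", MNT_EXPIRE);	// only if unused since last mark
 */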
#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
asmlinkage long sys_oldumount(char __user * name)
{
	return sys_umount(name, 0);
}

#endif
static int mount_is_safe(struct nameidata *nd)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(nd->dentry->d_inode->i_mode))
		return -EPERM;
	if (nd->dentry->d_inode->i_mode & S_ISVTX) {
		if (current->uid != nd->dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (vfs_permission(nd, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}
static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
{
	while (1) {
		if (d == dentry)
			return 1;
		if (d == NULL || d == d->d_parent)
			return 0;
		d = d->d_parent;
	}
}
struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
			   int flag)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct nameidata nd;

	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
		return NULL;

	res = q = clone_mnt(mnt, dentry, flag);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			nd.mnt = q;
			nd.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root, flag);
			if (!q)
				goto Enomem;
			spin_lock(&vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &nd);
			spin_unlock(&vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(res, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}
	return NULL;
}
/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *                 store the parent mount and mountpoint dentry.
 *                 (done when source_mnt is moved)
 *
 *  NOTE: the table below explains the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared       |       private  |       slave    | unbindable |
 * | dest     |              |                |                |            |
 * |   v      |              |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)  |     shared (+) |     shared(+++)|  invalid   |
 * |          |              |                |                |            |
 * |non-shared| shared (+)   |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |                    MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared       |       private  |       slave    | unbindable |
 * | dest     |              |                |                |            |
 * |   v      |              |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |              |                |                |            |
 * |non-shared| shared (+*)  |      private   |      slave (*) | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree.
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
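/*
 * Illustrative userspace sketch for the "shared source, shared destination"
 * row of the bind table above, assuming /srv and /mnt are already mount
 * points (the paths are hypothetical):
 *
 *	mount(NULL, "/srv", NULL, MS_SHARED, NULL);	// source becomes shared
 *	mount(NULL, "/mnt", NULL, MS_SHARED, NULL);	// destination becomes shared
 *	mount("/srv", "/mnt/srv", NULL, MS_BIND, NULL);	// clone propagates (++)
 */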
static int attach_recursive_mnt(struct vfsmount *source_mnt,
				struct nameidata *nd, struct nameidata *parent_nd)
{
	LIST_HEAD(tree_list);
	struct vfsmount *dest_mnt = nd->mnt;
	struct dentry *dest_dentry = nd->dentry;
	struct vfsmount *child, *p;

	if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list))
		return -EINVAL;

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}

	spin_lock(&vfsmount_lock);
	if (parent_nd) {
		detach_mnt(source_mnt, parent_nd);
		attach_mnt(source_mnt, nd);
		touch_namespace(current->namespace);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	spin_unlock(&vfsmount_lock);
	return 0;
}
static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	    S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	mutex_lock(&nd->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out_unlock;

	err = security_sb_check_sb(mnt, nd);
	if (err)
		goto out_unlock;

	err = -ENOENT;
	if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry))
		err = attach_recursive_mnt(mnt, nd, NULL);
out_unlock:
	mutex_unlock(&nd->dentry->d_inode->i_mutex);
	if (!err)
		security_sb_post_addmount(mnt, nd);
	return err;
}
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct nameidata *nd, int flag)
{
	struct vfsmount *m, *mnt = nd->mnt;
	int recurse = flag & MS_REC;
	int type = flag & ~MS_REC;

	if (nd->dentry != nd->mnt->mnt_root)
		return -EINVAL;

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	return 0;
}
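/*
 * Illustrative userspace sketch: do_change_type() backs mount(2) calls whose
 * only purpose is to change propagation; the target must be the root of a
 * mount, and source/fstype are ignored (paths are hypothetical):
 *
 *	mount(NULL, "/mnt", NULL, MS_SHARED, NULL);		// this mount only
 *	mount(NULL, "/",    NULL, MS_REC | MS_PRIVATE, NULL);	// whole tree
 *	mount(NULL, "/mnt", NULL, MS_SLAVE, NULL);
 *	mount(NULL, "/mnt", NULL, MS_UNBINDABLE, NULL);
 */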
static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
{
	struct nameidata old_nd;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(nd);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old_nd.mnt))
		goto out;

	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
		goto out;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old_nd.mnt, old_nd.dentry, 0);
	else
		mnt = clone_mnt(old_nd.mnt, old_nd.dentry, 0);

	if (!mnt)
		goto out;

	err = graft_tree(mnt, nd);
	if (err) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}

out:
	up_write(&namespace_sem);
	path_release(&old_nd);
	return err;
}
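/*
 * Illustrative userspace sketch: the two branches above correspond to the
 * non-recursive and recursive bind mounts (paths are hypothetical):
 *
 *	mount("/srv/data", "/mnt/data", NULL, MS_BIND, NULL);		// clone_mnt()
 *	mount("/srv", "/mnt/srv", NULL, MS_BIND | MS_REC, NULL);	// copy_tree()
 */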
/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct nameidata *nd, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = nd->mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(nd->mnt))
		return -EINVAL;

	if (nd->dentry != nd->mnt->mnt_root)
		return -EINVAL;

	down_write(&sb->s_umount);
	err = do_remount_sb(sb, flags, data, 0);
	if (!err)
		nd->mnt->mnt_flags = mnt_flags;
	up_write(&sb->s_umount);
	if (!err)
		security_sb_post_remount(nd->mnt, flags, data);
	return err;
}
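/*
 * Illustrative userspace sketch: a remount only has to name the mountpoint;
 * the device and fs type are ignored, and any option string is passed through
 * as @data (paths and options are hypothetical):
 *
 *	mount(NULL, "/", NULL, MS_REMOUNT | MS_RDONLY, NULL);
 *	mount(NULL, "/home", NULL, MS_REMOUNT | MS_NOATIME, "errors=remount-ro");
 */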
static inline int tree_contains_unbindable(struct vfsmount *mnt)
{
	struct vfsmount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}
static int do_move_mount(struct nameidata *nd, char *old_name)
{
	struct nameidata old_nd, parent_nd;
	struct vfsmount *p;
	int err = 0;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
		goto out;

	err = -ENOENT;
	mutex_lock(&nd->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out1;

	if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
		goto out1;

	err = -EINVAL;
	if (old_nd.dentry != old_nd.mnt->mnt_root)
		goto out1;

	if (old_nd.mnt == old_nd.mnt->mnt_parent)
		goto out1;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	    S_ISDIR(old_nd.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (old_nd.mnt->mnt_parent && IS_MNT_SHARED(old_nd.mnt->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(nd->mnt) && tree_contains_unbindable(old_nd.mnt))
		goto out1;
	err = -ELOOP;
	for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_nd.mnt)
			goto out1;

	if ((err = attach_recursive_mnt(old_nd.mnt, nd, &parent_nd)))
		goto out1;

	spin_lock(&vfsmount_lock);
	/* if the mount is moved, it should no longer be expired
	 * automatically */
	list_del_init(&old_nd.mnt->mnt_expire);
	spin_unlock(&vfsmount_lock);
out1:
	mutex_unlock(&nd->dentry->d_inode->i_mutex);
out:
	up_write(&namespace_sem);
	if (!err)
		path_release(&parent_nd);
	path_release(&old_nd);
	return err;
}
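/*
 * Illustrative userspace sketch: moving an existing mount (the source must
 * itself be the root of an attached mount; paths are hypothetical):
 *
 *	mount("/mnt/olddir", "/mnt/newdir", NULL, MS_MOVE, NULL);
 */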
/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct nameidata *nd, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;

	if (!type || !memchr(type, 0, PAGE_SIZE))
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	return do_add_mount(mnt, nd, mnt_flags, NULL);
}
/*
 * add a mount into a namespace's mount tree
 * - provide the option of adding the new mount to an expiration list
 */
int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
		 int mnt_flags, struct list_head *fslist)
{
	int err;

	down_write(&namespace_sem);
	/* Something was mounted here while we slept */
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (nd->mnt->mnt_sb == newmnt->mnt_sb &&
	    nd->mnt->mnt_root == nd->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt_flags = mnt_flags;
	if ((err = graft_tree(newmnt, nd)))
		goto unlock;

	if (fslist) {
		/* add to the specified expiration list */
		spin_lock(&vfsmount_lock);
		list_add_tail(&newmnt->mnt_expire, fslist);
		spin_unlock(&vfsmount_lock);
	}
	up_write(&namespace_sem);
	return 0;

unlock:
	up_write(&namespace_sem);
	mntput(newmnt);
	return err;
}

EXPORT_SYMBOL_GPL(do_add_mount);
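/*
 * Illustrative sketch: an automounting filesystem would typically pass its
 * own expiry list here and prune it periodically; the list name below is
 * hypothetical:
 *
 *	static LIST_HEAD(example_automount_list);
 *
 *	err = do_add_mount(newmnt, nd, 0, &example_automount_list);
 *	...
 *	mark_mounts_for_expiry(&example_automount_list);  // e.g. from a timer
 */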
static void expire_mount(struct vfsmount *mnt, struct list_head *mounts,
			 struct list_head *umounts)
{
	spin_lock(&vfsmount_lock);

	/*
	 * Check if mount is still attached, if not, let whoever holds it deal
	 * with the sucker
	 */
	if (mnt->mnt_parent == mnt) {
		spin_unlock(&vfsmount_lock);
		return;
	}

	/*
	 * Check that it is still dead: the count should now be 2 - as
	 * contributed by the vfsmount parent and the mntget above
	 */
	if (!propagate_mount_busy(mnt, 2)) {
		/* delete from the namespace */
		touch_namespace(mnt->mnt_namespace);
		list_del_init(&mnt->mnt_list);
		mnt->mnt_namespace = NULL;
		umount_tree(mnt, 1, umounts);
		spin_unlock(&vfsmount_lock);
	} else {
		/*
		 * Someone brought it back to life whilst we didn't have any
		 * locks held so return it to the expiration list
		 */
		list_add_tail(&mnt->mnt_expire, mounts);
		spin_unlock(&vfsmount_lock);
	}
}
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct namespace *namespace;
	struct vfsmount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	spin_lock(&vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
		    atomic_read(&mnt->mnt_count) != 1)
			continue;

		mntget(mnt);
		list_move(&mnt->mnt_expire, &graveyard);
	}

	/*
	 * go through the vfsmounts we've just consigned to the graveyard to
	 * - check that they're still dead
	 * - delete the vfsmount from the appropriate namespace under lock
	 * - dispose of the corpse
	 */
	while (!list_empty(&graveyard)) {
		LIST_HEAD(umounts);
		mnt = list_entry(graveyard.next, struct vfsmount, mnt_expire);
		list_del_init(&mnt->mnt_expire);

		/* don't do anything if the namespace is dead - all the
		 * vfsmounts from it are going away anyway */
		namespace = mnt->mnt_namespace;
		if (!namespace || !namespace->root)
			continue;
		get_namespace(namespace);

		spin_unlock(&vfsmount_lock);
		down_write(&namespace_sem);
		expire_mount(mnt, mounts, &umounts);
		up_write(&namespace_sem);
		release_mounts(&umounts);
		mntput(mnt);
		put_namespace(namespace);
		spin_lock(&vfsmount_lock);
	}

	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}
* data
, unsigned long *where
)
1229 if (!(page
= __get_free_page(GFP_KERNEL
)))
1232 /* We only care that *some* data at the address the user
1233 * gave us is valid. Just in case, we'll zero
1234 * the remainder of the page.
1236 /* copy_from_user cannot cross TASK_SIZE ! */
1237 size
= TASK_SIZE
- (unsigned long)data
;
1238 if (size
> PAGE_SIZE
)
1241 i
= size
- exact_copy_from_user((void *)page
, data
, size
);
1247 memset((char *)page
+ i
, 0, PAGE_SIZE
- i
);
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
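/*
 * Illustrative userspace sketch: a typical mount(2) call hands in both the
 * per-mountpoint flags and an fs-specific option string as @data (device,
 * path and option string are hypothetical):
 *
 *	mount("/dev/sda2", "/mnt", "ext3", MS_NOSUID | MS_NODEV,
 *	      "errors=remount-ro");
 */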
long do_mount(char *dev_name, char *dir_name, char *type_page,
	      unsigned long flags, void *data_page)
{
	struct nameidata nd;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */

	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;
	if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
		return -EINVAL;

	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
		   MS_NOATIME | MS_NODIRATIME);

	/* ... and get the mountpoint */
	retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page);
	if (retval)
		goto dput_out;

	if (flags & MS_REMOUNT)
		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&nd, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&nd, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&nd, dev_name);
	else
		retval = do_new_mount(&nd, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_release(&nd);
	return retval;
}
int copy_namespace(int flags, struct task_struct *tsk)
{
	struct namespace *namespace = tsk->namespace;
	struct namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
	struct fs_struct *fs = tsk->fs;
	struct vfsmount *p, *q;

	if (!namespace)
		return 0;

	get_namespace(namespace);

	if (!(flags & CLONE_NEWNS))
		return 0;

	if (!capable(CAP_SYS_ADMIN)) {
		put_namespace(namespace);
		return -EPERM;
	}

	new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL);
	if (!new_ns)
		goto out;

	atomic_set(&new_ns->count, 1);
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;

	down_write(&namespace_sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root,
				 CL_COPY_ALL | CL_EXPIRE);
	if (!new_ns->root) {
		up_write(&namespace_sem);
		kfree(new_ns);
		goto out;
	}
	spin_lock(&vfsmount_lock);
	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
	spin_unlock(&vfsmount_lock);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace. We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = namespace->root;
	q = new_ns->root;
	while (p) {
		q->mnt_namespace = new_ns;
		if (fs) {
			if (p == fs->rootmnt) {
				rootmnt = p;
				fs->rootmnt = mntget(q);
			}
			if (p == fs->pwdmnt) {
				pwdmnt = p;
				fs->pwdmnt = mntget(q);
			}
			if (p == fs->altrootmnt) {
				altrootmnt = p;
				fs->altrootmnt = mntget(q);
			}
		}
		p = next_mnt(p, namespace->root);
		q = next_mnt(q, new_ns->root);
	}
	up_write(&namespace_sem);

	tsk->namespace = new_ns;

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);
	if (altrootmnt)
		mntput(altrootmnt);

	put_namespace(namespace);
	return 0;

out:
	put_namespace(namespace);
	return -ENOMEM;
}
asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
			  char __user * type, unsigned long flags,
			  void __user * data)
{
	int retval;
	unsigned long data_page;
	unsigned long type_page;
	unsigned long dev_page;
	char *dir_page;

	retval = copy_mount_options(type, &type_page);
	if (retval < 0)
		return retval;

	dir_page = getname(dir_name);
	retval = PTR_ERR(dir_page);
	if (IS_ERR(dir_page))
		goto out1;

	retval = copy_mount_options(dev_name, &dev_page);
	if (retval < 0)
		goto out2;

	retval = copy_mount_options(data, &data_page);
	if (retval < 0)
		goto out3;

	lock_kernel();
	retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
			  flags, (void *)data_page);
	unlock_kernel();
	free_page(data_page);

out3:
	free_page(dev_page);
out2:
	putname(dir_page);
out1:
	free_page(type_page);
	return retval;
}
/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt,
		 struct dentry *dentry)
{
	struct dentry *old_root;
	struct vfsmount *old_rootmnt;
	write_lock(&fs->lock);
	old_root = fs->root;
	old_rootmnt = fs->rootmnt;
	fs->rootmnt = mntget(mnt);
	fs->root = dget(dentry);
	write_unlock(&fs->lock);
	if (old_root) {
		dput(old_root);
		mntput(old_rootmnt);
	}
}
/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
		struct dentry *dentry)
{
	struct dentry *old_pwd;
	struct vfsmount *old_pwdmnt;

	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	old_pwdmnt = fs->pwdmnt;
	fs->pwdmnt = mntget(mnt);
	fs->pwd = dget(dentry);
	write_unlock(&fs->lock);

	if (old_pwd) {
		dput(old_pwd);
		mntput(old_pwdmnt);
	}
}
static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			atomic_inc(&fs->count);
			task_unlock(p);
			if (fs->root == old_nd->dentry
			    && fs->rootmnt == old_nd->mnt)
				set_fs_root(fs, new_nd->mnt, new_nd->dentry);
			if (fs->pwd == old_nd->dentry
			    && fs->pwdmnt == old_nd->mnt)
				set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
			put_fs_struct(fs);
		} else
			task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root as the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
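/*
 * Illustrative userspace sketch: the classic root-switch sequence that obeys
 * the rules above (directory names are hypothetical):
 *
 *	chdir("/new_root");
 *	pivot_root(".", "put_old");	// put_old is /new_root/put_old
 *	chroot(".");
 *	chdir("/");
 *	umount2("/put_old", MNT_DETACH);
 */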
asmlinkage long sys_pivot_root(const char __user * new_root,
			       const char __user * put_old)
{
	struct vfsmount *tmp;
	struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	lock_kernel();

	error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
			    &new_nd);
	if (error)
		goto out0;
	error = -EINVAL;
	if (!check_mnt(new_nd.mnt))
		goto out1;

	error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old_nd, &new_nd);
	if (error) {
		path_release(&old_nd);
		goto out1;
	}

	read_lock(&current->fs->lock);
	user_nd.mnt = mntget(current->fs->rootmnt);
	user_nd.dentry = dget(current->fs->root);
	read_unlock(&current->fs->lock);
	down_write(&namespace_sem);
	mutex_lock(&old_nd.dentry->d_inode->i_mutex);
	error = -EINVAL;
	if (IS_MNT_SHARED(old_nd.mnt) ||
	    IS_MNT_SHARED(new_nd.mnt->mnt_parent) ||
	    IS_MNT_SHARED(user_nd.mnt->mnt_parent))
		goto out2;
	if (!check_mnt(user_nd.mnt))
		goto out2;
	error = -ENOENT;
	if (IS_DEADDIR(new_nd.dentry->d_inode))
		goto out2;
	if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
		goto out2;
	if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
		goto out2;
	error = -EBUSY;
	if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
		goto out2; /* loop, on the same file system */
	error = -EINVAL;
	if (user_nd.mnt->mnt_root != user_nd.dentry)
		goto out2; /* not a mountpoint */
	if (user_nd.mnt->mnt_parent == user_nd.mnt)
		goto out2; /* not attached */
	if (new_nd.mnt->mnt_root != new_nd.dentry)
		goto out2; /* not a mountpoint */
	if (new_nd.mnt->mnt_parent == new_nd.mnt)
		goto out2; /* not attached */
	tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
	spin_lock(&vfsmount_lock);
	if (tmp != new_nd.mnt) {
		for (;;) {
			if (tmp->mnt_parent == tmp)
				goto out3; /* already mounted on put_old */
			if (tmp->mnt_parent == new_nd.mnt)
				break;
			tmp = tmp->mnt_parent;
		}
		if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
			goto out3;
	} else if (!is_subdir(old_nd.dentry, new_nd.dentry))
		goto out3;
	detach_mnt(new_nd.mnt, &parent_nd);
	detach_mnt(user_nd.mnt, &root_parent);
	attach_mnt(user_nd.mnt, &old_nd);     /* mount old root on put_old */
	attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */
	touch_namespace(current->namespace);
	spin_unlock(&vfsmount_lock);
	chroot_fs_refs(&user_nd, &new_nd);
	security_sb_post_pivotroot(&user_nd, &new_nd);
	error = 0;
	path_release(&root_parent);
	path_release(&parent_nd);
out2:
	mutex_unlock(&old_nd.dentry->d_inode->i_mutex);
	up_write(&namespace_sem);
	path_release(&user_nd);
	path_release(&old_nd);
out1:
	path_release(&new_nd);
out0:
	unlock_kernel();
	return error;
out3:
	spin_unlock(&vfsmount_lock);
	goto out2;
}
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct namespace *namespace;
	struct task_struct *g, *p;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");
	namespace = kmalloc(sizeof(*namespace), GFP_KERNEL);
	if (!namespace)
		panic("Can't allocate initial namespace");
	atomic_set(&namespace->count, 1);
	INIT_LIST_HEAD(&namespace->list);
	init_waitqueue_head(&namespace->poll);
	namespace->event = 0;
	list_add(&mnt->mnt_list, &namespace->list);
	namespace->root = mnt;
	mnt->mnt_namespace = namespace;

	init_task.namespace = namespace;
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		get_namespace(namespace);
		p->namespace = namespace;
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
	set_fs_root(current->fs, namespace->root, namespace->root->mnt_root);
}
void __init mnt_init(unsigned long mempages)
{
	struct list_head *d;
	unsigned int nr_hash;
	int i;

	init_rwsem(&namespace_sem);

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
				      0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);

	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);

	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	/*
	 * Find the power-of-two list-heads that can fit into the allocation..
	 * We don't guarantee that "sizeof(struct list_head)" is necessarily
	 * a power-of-two.
	 */
	nr_hash = PAGE_SIZE / sizeof(struct list_head);
	hash_bits = 0;
	do {
		hash_bits++;
	} while ((nr_hash >> hash_bits) != 0);
	hash_bits--;

	/*
	 * Re-calculate the actual number of entries and the mask
	 * from the number of bits we can fit.
	 */
	nr_hash = 1UL << hash_bits;
	hash_mask = nr_hash - 1;

	printk("Mount-cache hash table entries: %d\n", nr_hash);
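	/*
	 * Illustrative worked example, assuming PAGE_SIZE = 4096 and
	 * sizeof(struct list_head) = 16 (two 8-byte pointers on a 64-bit
	 * build): nr_hash starts at 256, the loop above ends with
	 * hash_bits = 8, so nr_hash becomes 1 << 8 = 256 and hash_mask = 0xff.
	 * With 8-byte list heads on a 32-bit build the same math gives
	 * 512 entries and a 9-bit hash.
	 */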
	/* And initialize the newly allocated array */
	d = mount_hashtable;
	i = nr_hash;
	do {
		INIT_LIST_HEAD(d);
		d++;
		i--;
	} while (i);
	sysfs_init();
	subsystem_register(&fs_subsys);
	init_rootfs();
	init_mount_tree();
}
void __put_namespace(struct namespace *namespace)
{
	struct vfsmount *root = namespace->root;
	LIST_HEAD(umount_list);
	namespace->root = NULL;
	spin_unlock(&vfsmount_lock);
	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	umount_tree(root, 0, &umount_list);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	kfree(namespace);
}