/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */
#include <linux/config.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
extern int __init init_rootfs(void);

#ifdef CONFIG_SYSFS
extern int __init sysfs_init(void);
#else
static inline int sysfs_init(void)
{
        return 0;
}
#endif
/* spinlock for vfsmount related operations, in place of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);

/* event counter bumped on every mount-tree change, for namespace polling */
static int event;

static struct list_head *mount_hashtable;
static int hash_mask __read_mostly, hash_bits __read_mostly;
static kmem_cache_t *mnt_cache;
static struct rw_semaphore namespace_sem;
static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
        unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
        tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
        tmp = tmp + (tmp >> hash_bits);
        return tmp & hash_mask;
}
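/*
 * Note on the hash above: dividing each pointer by L1_CACHE_BYTES discards
 * low bits that are always zero for cache-aligned objects, so both inputs
 * contribute entropy; adding (tmp >> hash_bits) folds the high bits back
 * into the low ones before masking. The (parent mount, mountpoint dentry)
 * pair hashed here is exactly the key that __lookup_mnt() below searches on.
 */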
struct vfsmount *alloc_vfsmnt(const char *name)
{
        struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
        if (mnt) {
                memset(mnt, 0, sizeof(struct vfsmount));
                atomic_set(&mnt->mnt_count, 1);
                INIT_LIST_HEAD(&mnt->mnt_hash);
                INIT_LIST_HEAD(&mnt->mnt_child);
                INIT_LIST_HEAD(&mnt->mnt_mounts);
                INIT_LIST_HEAD(&mnt->mnt_list);
                INIT_LIST_HEAD(&mnt->mnt_expire);
                INIT_LIST_HEAD(&mnt->mnt_share);
                INIT_LIST_HEAD(&mnt->mnt_slave_list);
                INIT_LIST_HEAD(&mnt->mnt_slave);
                if (name) {
                        int size = strlen(name) + 1;
                        char *newname = kmalloc(size, GFP_KERNEL);
                        if (newname) {
                                memcpy(newname, name, size);
                                mnt->mnt_devname = newname;
                        }
                }
        }
        return mnt;
}
void free_vfsmnt(struct vfsmount *mnt)
{
        kfree(mnt->mnt_devname);
        kmem_cache_free(mnt_cache, mnt);
}
/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set return the first mount else return the last mount.
 */
struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
                              int dir)
{
        struct list_head *head = mount_hashtable + hash(mnt, dentry);
        struct list_head *tmp = head;
        struct vfsmount *p, *found = NULL;

        for (;;) {
                tmp = dir ? tmp->next : tmp->prev;
                if (tmp == head)
                        break;
                p = list_entry(tmp, struct vfsmount, mnt_hash);
                if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
                        found = p;
                        break;
                }
        }
        return found;
}
/*
 * lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
        struct vfsmount *child_mnt;

        spin_lock(&vfsmount_lock);
        if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
                mntget(child_mnt);
        spin_unlock(&vfsmount_lock);
        return child_mnt;
}
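/*
 * Illustrative sketch (hypothetical caller, not part of this file): a path
 * walker crosses into whatever is mounted on a dentry roughly the way
 * follow_down() in fs/namei.c does:
 *
 *	struct vfsmount *mounted = lookup_mnt(nd->mnt, nd->dentry);
 *	if (mounted) {
 *		dput(nd->dentry);
 *		mntput(nd->mnt);
 *		nd->mnt = mounted;	(the reference came from lookup_mnt)
 *		nd->dentry = dget(mounted->mnt_root);
 *	}
 */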
static inline int check_mnt(struct vfsmount *mnt)
{
        return mnt->mnt_namespace == current->namespace;
}
static void touch_namespace(struct namespace *ns)
{
        if (ns) {
                ns->event = ++event;
                wake_up_interruptible(&ns->poll);
        }
}

static void __touch_namespace(struct namespace *ns)
{
        if (ns && ns->event != event) {
                ns->event = event;
                wake_up_interruptible(&ns->poll);
        }
}
static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
{
        old_nd->dentry = mnt->mnt_mountpoint;
        old_nd->mnt = mnt->mnt_parent;
        mnt->mnt_parent = mnt;
        mnt->mnt_mountpoint = mnt->mnt_root;
        list_del_init(&mnt->mnt_child);
        list_del_init(&mnt->mnt_hash);
        old_nd->dentry->d_mounted--;
}
void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
                        struct vfsmount *child_mnt)
{
        child_mnt->mnt_parent = mntget(mnt);
        child_mnt->mnt_mountpoint = dget(dentry);
        dentry->d_mounted++;
}
static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
{
        mnt_set_mountpoint(nd->mnt, nd->dentry, mnt);
        list_add_tail(&mnt->mnt_hash, mount_hashtable +
                        hash(nd->mnt, nd->dentry));
        list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
}
/*
 * the caller must hold vfsmount_lock
 */
static void commit_tree(struct vfsmount *mnt)
{
        struct vfsmount *parent = mnt->mnt_parent;
        struct vfsmount *m;
        LIST_HEAD(head);
        struct namespace *n = parent->mnt_namespace;

        BUG_ON(parent == mnt);

        list_add_tail(&head, &mnt->mnt_list);
        list_for_each_entry(m, &head, mnt_list)
                m->mnt_namespace = n;
        list_splice(&head, n->list.prev);

        list_add_tail(&mnt->mnt_hash, mount_hashtable +
                        hash(parent, mnt->mnt_mountpoint));
        list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
        touch_namespace(n);
}
static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
        struct list_head *next = p->mnt_mounts.next;
        if (next == &p->mnt_mounts) {
                while (1) {
                        if (p == root)
                                return NULL;
                        next = p->mnt_child.next;
                        if (next != &p->mnt_parent->mnt_mounts)
                                break;
                        p = p->mnt_parent;
                }
        }
        return list_entry(next, struct vfsmount, mnt_child);
}
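/*
 * next_mnt() yields a depth-first, pre-order walk of the mount tree rooted
 * at @root; the idiom used throughout this file is:
 *
 *	for (p = mnt; p; p = next_mnt(p, mnt))
 *		...visit p...
 *
 * It descends into p->mnt_mounts first, then steps to the next sibling,
 * climbing back toward @root when a subtree is exhausted.
 */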
static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
{
        struct list_head *prev = p->mnt_mounts.prev;
        while (prev != &p->mnt_mounts) {
                p = list_entry(prev, struct vfsmount, mnt_child);
                prev = p->mnt_mounts.prev;
        }
        return p;
}
static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
                                        int flag)
{
        struct super_block *sb = old->mnt_sb;
        struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

        if (mnt) {
                mnt->mnt_flags = old->mnt_flags;
                atomic_inc(&sb->s_active);
                mnt->mnt_sb = sb;
                mnt->mnt_root = dget(root);
                mnt->mnt_mountpoint = mnt->mnt_root;
                mnt->mnt_parent = mnt;

                if (flag & CL_SLAVE) {
                        list_add(&mnt->mnt_slave, &old->mnt_slave_list);
                        mnt->mnt_master = old;
                        CLEAR_MNT_SHARED(mnt);
                } else {
                        if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
                                list_add(&mnt->mnt_share, &old->mnt_share);
                        if (IS_MNT_SLAVE(old))
                                list_add(&mnt->mnt_slave, &old->mnt_slave);
                        mnt->mnt_master = old->mnt_master;
                }
                if (flag & CL_MAKE_SHARED)
                        set_mnt_shared(mnt);

                /* stick the duplicate mount on the same expiry list
                 * as the original if that was on one */
                if (flag & CL_EXPIRE) {
                        spin_lock(&vfsmount_lock);
                        if (!list_empty(&old->mnt_expire))
                                list_add(&mnt->mnt_expire, &old->mnt_expire);
                        spin_unlock(&vfsmount_lock);
                }
        }
        return mnt;
}
static inline void __mntput(struct vfsmount *mnt)
{
        struct super_block *sb = mnt->mnt_sb;
        dput(mnt->mnt_root);
        free_vfsmnt(mnt);
        deactivate_super(sb);
}
void mntput_no_expire(struct vfsmount *mnt)
{
repeat:
        if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
                if (likely(!mnt->mnt_pinned)) {
                        spin_unlock(&vfsmount_lock);
                        __mntput(mnt);
                        return;
                }
                atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
                mnt->mnt_pinned = 0;
                spin_unlock(&vfsmount_lock);
                acct_auto_close_mnt(mnt);
                security_sb_umount_close(mnt);
                goto repeat;
        }
}
EXPORT_SYMBOL(mntput_no_expire);
void mnt_pin(struct vfsmount *mnt)
{
        spin_lock(&vfsmount_lock);
        mnt->mnt_pinned++;
        spin_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_pin);
void mnt_unpin(struct vfsmount *mnt)
{
        spin_lock(&vfsmount_lock);
        if (mnt->mnt_pinned) {
                atomic_inc(&mnt->mnt_count);
                mnt->mnt_pinned--;
        }
        spin_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL(mnt_unpin);
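/*
 * mnt_pin()/mnt_unpin() let a long-lived holder (BSD process accounting in
 * kernel/acct.c is the expected user in this kernel) keep a mount alive
 * without an ordinary reference: when the last normal reference goes away,
 * mntput_no_expire() above folds the pinned count back into mnt_count and
 * gives acct_auto_close_mnt() a chance to drop the pin before retrying.
 */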
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct namespace *n = m->private;
        struct list_head *p;
        loff_t l = *pos;

        down_read(&namespace_sem);
        list_for_each(p, &n->list)
                if (!l--)
                        return list_entry(p, struct vfsmount, mnt_list);
        return NULL;
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct namespace *n = m->private;
        struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
        (*pos)++;
        return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
}
static void m_stop(struct seq_file *m, void *v)
{
        up_read(&namespace_sem);
}

static inline void mangle(struct seq_file *m, const char *s)
{
        seq_escape(m, s, " \t\n\\");
}
static int show_vfsmnt(struct seq_file *m, void *v)
{
        struct vfsmount *mnt = v;
        int err = 0;
        static struct proc_fs_info {
                int flag;
                char *str;
        } fs_info[] = {
                { MS_SYNCHRONOUS, ",sync" },
                { MS_DIRSYNC, ",dirsync" },
                { MS_MANDLOCK, ",mand" },
                { 0, NULL }
        };
        static struct proc_fs_info mnt_info[] = {
                { MNT_NOSUID, ",nosuid" },
                { MNT_NODEV, ",nodev" },
                { MNT_NOEXEC, ",noexec" },
                { MNT_NOATIME, ",noatime" },
                { MNT_NODIRATIME, ",nodiratime" },
                { 0, NULL }
        };
        struct proc_fs_info *fs_infop;

        mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
        seq_putc(m, ' ');
        seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
        seq_putc(m, ' ');
        mangle(m, mnt->mnt_sb->s_type->name);
        seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
        for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
                if (mnt->mnt_sb->s_flags & fs_infop->flag)
                        seq_puts(m, fs_infop->str);
        }
        for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
                if (mnt->mnt_flags & fs_infop->flag)
                        seq_puts(m, fs_infop->str);
        }
        if (mnt->mnt_sb->s_op->show_options)
                err = mnt->mnt_sb->s_op->show_options(m, mnt);
        seq_puts(m, " 0 0\n");
        return err;
}
struct seq_operations mounts_op = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_vfsmnt
};
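/*
 * Each show_vfsmnt() call emits one /proc/mounts line of the form
 *	<devname> <mountpoint> <fstype> <options> 0 0
 * e.g. (illustrative): "/dev/hda1 / ext3 rw 0 0". The trailing "0 0" are
 * placeholder dump/fsck-pass fields kept for /etc/mtab compatibility, and
 * mangle() escapes spaces, tabs, newlines and backslashes so each line
 * stays parseable.
 */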
/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
        int actual_refs = 0;
        int minimum_refs = 0;
        struct vfsmount *p;

        spin_lock(&vfsmount_lock);
        for (p = mnt; p; p = next_mnt(p, mnt)) {
                actual_refs += atomic_read(&p->mnt_count);
                minimum_refs += 2;
        }
        spin_unlock(&vfsmount_lock);

        if (actual_refs > minimum_refs)
                return 0;

        return 1;
}
EXPORT_SYMBOL(may_umount_tree);
/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
        int ret = 1;

        spin_lock(&vfsmount_lock);
        if (propagate_mount_busy(mnt, 2))
                ret = 0;
        spin_unlock(&vfsmount_lock);
        return ret;
}
EXPORT_SYMBOL(may_umount);
void release_mounts(struct list_head *head)
{
        struct vfsmount *mnt;

        while (!list_empty(head)) {
                mnt = list_entry(head->next, struct vfsmount, mnt_hash);
                list_del_init(&mnt->mnt_hash);
                if (mnt->mnt_parent != mnt) {
                        struct dentry *dentry;
                        struct vfsmount *m;

                        spin_lock(&vfsmount_lock);
                        dentry = mnt->mnt_mountpoint;
                        m = mnt->mnt_parent;
                        mnt->mnt_mountpoint = mnt->mnt_root;
                        mnt->mnt_parent = mnt;
                        spin_unlock(&vfsmount_lock);
                        dput(dentry);
                        mntput(m);
                }
                mntput(mnt);
        }
}
void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
        struct vfsmount *p;

        for (p = mnt; p; p = next_mnt(p, mnt)) {
                list_del(&p->mnt_hash);
                list_add(&p->mnt_hash, kill);
        }

        if (propagate)
                propagate_umount(kill);

        list_for_each_entry(p, kill, mnt_hash) {
                list_del_init(&p->mnt_expire);
                list_del_init(&p->mnt_list);
                __touch_namespace(p->mnt_namespace);
                p->mnt_namespace = NULL;
                list_del_init(&p->mnt_child);
                if (p->mnt_parent != p)
                        p->mnt_mountpoint->d_mounted--;
                change_mnt_propagation(p, MS_PRIVATE);
        }
}
static int do_umount(struct vfsmount *mnt, int flags)
{
        struct super_block *sb = mnt->mnt_sb;
        int retval;
        LIST_HEAD(umount_list);

        retval = security_sb_umount(mnt, flags);
        if (retval)
                return retval;

        /*
         * Allow userspace to request a mountpoint be expired rather than
         * unmounting unconditionally. Unmount only happens if:
         *  (1) the mark is already set (the mark is cleared by mntput())
         *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
         */
        if (flags & MNT_EXPIRE) {
                if (mnt == current->fs->rootmnt ||
                    flags & (MNT_FORCE | MNT_DETACH))
                        return -EINVAL;

                if (atomic_read(&mnt->mnt_count) != 2)
                        return -EBUSY;

                if (!xchg(&mnt->mnt_expiry_mark, 1))
                        return -EAGAIN;
        }

        /*
         * If we may have to abort operations to get out of this
         * mount, and they will themselves hold resources we must
         * allow the fs to do things. In the Unix tradition of
         * 'Gee thats tricky lets do it in userspace' the umount_begin
         * might fail to complete on the first run through as other tasks
         * must return, and the like. Thats for the mount program to worry
         * about for the moment.
         */
        lock_kernel();
        if ((flags & MNT_FORCE) && sb->s_op->umount_begin)
                sb->s_op->umount_begin(sb);
        unlock_kernel();

        /*
         * No sense to grab the lock for this test, but test itself looks
         * somewhat bogus. Suggestions for better replacement?
         * Ho-hum... In principle, we might treat that as umount + switch
         * to rootfs. GC would eventually take care of the old vfsmount.
         * Actually it makes sense, especially if rootfs would contain a
         * /reboot - static binary that would close all descriptors and
         * call reboot(9). Then init(8) could umount root and exec /reboot.
         */
        if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
                /*
                 * Special case for "unmounting" root ...
                 * we just try to remount it readonly.
                 */
                down_write(&sb->s_umount);
                if (!(sb->s_flags & MS_RDONLY)) {
                        lock_kernel();
                        DQUOT_OFF(sb);
                        retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
                        unlock_kernel();
                }
                up_write(&sb->s_umount);
                return retval;
        }

        down_write(&namespace_sem);
        spin_lock(&vfsmount_lock);
        event++;

        retval = -EBUSY;
        if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
                if (!list_empty(&mnt->mnt_list))
                        umount_tree(mnt, 1, &umount_list);
                retval = 0;
        }
        spin_unlock(&vfsmount_lock);
        if (retval)
                security_sb_umount_busy(mnt);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
        return retval;
}
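/*
 * Sketch of the MNT_EXPIRE protocol as seen from userspace (hypothetical
 * caller): umount2(path, MNT_EXPIRE) returns -EAGAIN on the first call and
 * merely sets mnt_expiry_mark; if nothing touches the mount in between
 * (any mntput() clears the mark), a second identical call finds the mark
 * still set and the count at 2, and actually performs the unmount.
 */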
/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */
asmlinkage long sys_umount(char __user * name, int flags)
{
        struct nameidata nd;
        int retval;

        retval = __user_walk(name, LOOKUP_FOLLOW, &nd);
        if (retval)
                goto out;
        retval = -EINVAL;
        if (nd.dentry != nd.mnt->mnt_root)
                goto dput_and_out;
        if (!check_mnt(nd.mnt))
                goto dput_and_out;

        retval = -EPERM;
        if (!capable(CAP_SYS_ADMIN))
                goto dput_and_out;

        retval = do_umount(nd.mnt, flags);
dput_and_out:
        path_release_on_umount(&nd);
out:
        return retval;
}
#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 * The 2.0 compatible umount. No flags.
 */
asmlinkage long sys_oldumount(char __user * name)
{
        return sys_umount(name, 0);
}

#endif
static int mount_is_safe(struct nameidata *nd)
{
        if (capable(CAP_SYS_ADMIN))
                return 0;
        return -EPERM;
#ifdef notyet
        if (S_ISLNK(nd->dentry->d_inode->i_mode))
                return -EPERM;
        if (nd->dentry->d_inode->i_mode & S_ISVTX) {
                if (current->uid != nd->dentry->d_inode->i_uid)
                        return -EPERM;
        }
        if (vfs_permission(nd, MAY_WRITE))
                return -EPERM;
        return 0;
#endif
}
static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
{
        while (1) {
                if (d == dentry)
                        return 1;
                if (d == NULL || d == d->d_parent)
                        return 0;
                d = d->d_parent;
        }
}
struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
                                        int flag)
{
        struct vfsmount *res, *p, *q, *r, *s;
        struct nameidata nd;

        if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
                return NULL;

        res = q = clone_mnt(mnt, dentry, flag);
        if (!q)
                goto Enomem;
        q->mnt_mountpoint = mnt->mnt_mountpoint;

        p = mnt;
        list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
                if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
                        continue;

                for (s = r; s; s = next_mnt(s, r)) {
                        if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
                                s = skip_mnt_tree(s);
                                continue;
                        }
                        while (p != s->mnt_parent) {
                                p = p->mnt_parent;
                                q = q->mnt_parent;
                        }
                        p = s;
                        nd.mnt = q;
                        nd.dentry = p->mnt_mountpoint;
                        q = clone_mnt(p, p->mnt_root, flag);
                        if (!q)
                                goto Enomem;
                        spin_lock(&vfsmount_lock);
                        list_add_tail(&q->mnt_list, &res->mnt_list);
                        attach_mnt(q, &nd);
                        spin_unlock(&vfsmount_lock);
                }
        }
        return res;
Enomem:
        if (res) {
                LIST_HEAD(umount_list);
                spin_lock(&vfsmount_lock);
                umount_tree(res, 0, &umount_list);
                spin_unlock(&vfsmount_lock);
                release_mounts(&umount_list);
        }
        return NULL;
}
/*
 * @source_mnt : mount tree to be attached
 * @nd         : place the mount tree @source_mnt is attached
 * @parent_nd  : if non-null, detach the source_mnt from its parent and
 *               store the parent mount and mountpoint dentry.
 *               (done when source_mnt is moved)
 *
 * NOTE: the table below explains the semantics when a source mount
 * of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 *       tree of the destination mount and the cloned mount is added to
 *       the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 *       source mount.
 *
 * ---------------------------------------------------------------------------
 * |                    MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |      slave (*) | unbindable |
 * ***************************************************************************
 *
 * (+)   the mount is moved to the destination. And is then propagated to
 *       all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++) the mount is moved to the destination and is then propagated to
 *       all the mounts belonging to the destination mount's propagation tree.
 *       the mount is marked as 'shared and slave'.
 * (*)   the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct vfsmount *source_mnt,
                        struct nameidata *nd, struct nameidata *parent_nd)
{
        LIST_HEAD(tree_list);
        struct vfsmount *dest_mnt = nd->mnt;
        struct dentry *dest_dentry = nd->dentry;
        struct vfsmount *child, *p;

        if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list))
                return -EINVAL;

        if (IS_MNT_SHARED(dest_mnt)) {
                for (p = source_mnt; p; p = next_mnt(p, source_mnt))
                        set_mnt_shared(p);
        }

        spin_lock(&vfsmount_lock);
        if (parent_nd) {
                detach_mnt(source_mnt, parent_nd);
                attach_mnt(source_mnt, nd);
                touch_namespace(current->namespace);
        } else {
                mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
                commit_tree(source_mnt);
        }

        list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
                list_del_init(&child->mnt_hash);
                commit_tree(child);
        }
        spin_unlock(&vfsmount_lock);
        return 0;
}
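/*
 * Note: propagate_mnt() lives in pnode.c; it allocates a clone of
 * @source_mnt for every mount the destination propagates to (its peers and
 * slaves) and strings the clones on @tree_list. The loop above then commits
 * each clone under vfsmount_lock once the primary attach has succeeded.
 */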
static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
{
        int err;

        if (mnt->mnt_sb->s_flags & MS_NOUSER)
                return -EINVAL;

        if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
              S_ISDIR(mnt->mnt_root->d_inode->i_mode))
                return -ENOTDIR;

        err = -ENOENT;
        mutex_lock(&nd->dentry->d_inode->i_mutex);
        if (IS_DEADDIR(nd->dentry->d_inode))
                goto out_unlock;

        err = security_sb_check_sb(mnt, nd);
        if (err)
                goto out_unlock;

        err = -ENOENT;
        if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry))
                err = attach_recursive_mnt(mnt, nd, NULL);
out_unlock:
        mutex_unlock(&nd->dentry->d_inode->i_mutex);
        if (!err)
                security_sb_post_addmount(mnt, nd);
        return err;
}
/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct nameidata *nd, int flag)
{
        struct vfsmount *m, *mnt = nd->mnt;
        int recurse = flag & MS_REC;
        int type = flag & ~MS_REC;

        if (nd->dentry != nd->mnt->mnt_root)
                return -EINVAL;

        down_write(&namespace_sem);
        spin_lock(&vfsmount_lock);
        for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
                change_mnt_propagation(m, type);
        spin_unlock(&vfsmount_lock);
        up_write(&namespace_sem);
        return 0;
}
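/*
 * For reference (sketch, util-linux syntax): this is the path taken by
 *	mount --make-shared /mnt	-> MS_SHARED
 *	mount --make-rshared /mnt	-> MS_SHARED | MS_REC
 *	mount --make-private /mnt	-> MS_PRIVATE
 *	mount --make-slave /mnt		-> MS_SLAVE
 *	mount --make-unbindable /mnt	-> MS_UNBINDABLE
 * as dispatched from do_mount() below.
 */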
/*
 * do loopback mount.
 */
static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
{
        struct nameidata old_nd;
        struct vfsmount *mnt = NULL;
        int err = mount_is_safe(nd);
        if (err)
                return err;
        if (!old_name || !*old_name)
                return -EINVAL;
        err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
        if (err)
                return err;

        down_write(&namespace_sem);
        err = -EINVAL;
        if (IS_MNT_UNBINDABLE(old_nd.mnt))
                goto out;

        if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
                goto out;

        err = -ENOMEM;
        if (recurse)
                mnt = copy_tree(old_nd.mnt, old_nd.dentry, 0);
        else
                mnt = clone_mnt(old_nd.mnt, old_nd.dentry, 0);

        if (!mnt)
                goto out;

        err = graft_tree(mnt, nd);
        if (err) {
                LIST_HEAD(umount_list);
                spin_lock(&vfsmount_lock);
                umount_tree(mnt, 0, &umount_list);
                spin_unlock(&vfsmount_lock);
                release_mounts(&umount_list);
        }

out:
        up_write(&namespace_sem);
        path_release(&old_nd);
        return err;
}
/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct nameidata *nd, int flags, int mnt_flags,
                      void *data)
{
        int err;
        struct super_block *sb = nd->mnt->mnt_sb;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (!check_mnt(nd->mnt))
                return -EINVAL;

        if (nd->dentry != nd->mnt->mnt_root)
                return -EINVAL;

        down_write(&sb->s_umount);
        err = do_remount_sb(sb, flags, data, 0);
        if (!err)
                nd->mnt->mnt_flags = mnt_flags;
        up_write(&sb->s_umount);
        if (!err)
                security_sb_post_remount(nd->mnt, flags, data);
        return err;
}
static inline int tree_contains_unbindable(struct vfsmount *mnt)
{
        struct vfsmount *p;
        for (p = mnt; p; p = next_mnt(p, mnt)) {
                if (IS_MNT_UNBINDABLE(p))
                        return 1;
        }
        return 0;
}
static int do_move_mount(struct nameidata *nd, char *old_name)
{
        struct nameidata old_nd, parent_nd;
        struct vfsmount *p;
        int err = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (!old_name || !*old_name)
                return -EINVAL;
        err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
        if (err)
                return err;

        down_write(&namespace_sem);
        while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
                ;
        err = -EINVAL;
        if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
                goto out;

        err = -ENOENT;
        mutex_lock(&nd->dentry->d_inode->i_mutex);
        if (IS_DEADDIR(nd->dentry->d_inode))
                goto out1;

        if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
                goto out1;

        err = -EINVAL;
        if (old_nd.dentry != old_nd.mnt->mnt_root)
                goto out1;

        if (old_nd.mnt == old_nd.mnt->mnt_parent)
                goto out1;

        if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
              S_ISDIR(old_nd.dentry->d_inode->i_mode))
                goto out1;
        /*
         * Don't move a mount residing in a shared parent.
         */
        if (old_nd.mnt->mnt_parent && IS_MNT_SHARED(old_nd.mnt->mnt_parent))
                goto out1;
        /*
         * Don't move a mount tree containing unbindable mounts to a destination
         * mount which is shared.
         */
        if (IS_MNT_SHARED(nd->mnt) && tree_contains_unbindable(old_nd.mnt))
                goto out1;
        err = -ELOOP;
        for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent)
                if (p == old_nd.mnt)
                        goto out1;

        if ((err = attach_recursive_mnt(old_nd.mnt, nd, &parent_nd)))
                goto out1;

        spin_lock(&vfsmount_lock);
        /* if the mount is moved, it should no longer be expired
         * automatically */
        list_del_init(&old_nd.mnt->mnt_expire);
        spin_unlock(&vfsmount_lock);
out1:
        mutex_unlock(&nd->dentry->d_inode->i_mutex);
out:
        up_write(&namespace_sem);
        if (!err)
                path_release(&parent_nd);
        path_release(&old_nd);
        return err;
}
/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct nameidata *nd, char *type, int flags,
                        int mnt_flags, char *name, void *data)
{
        struct vfsmount *mnt;

        if (!type || !memchr(type, 0, PAGE_SIZE))
                return -EINVAL;

        /* we need capabilities... */
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        mnt = do_kern_mount(type, flags, name, data);
        if (IS_ERR(mnt))
                return PTR_ERR(mnt);

        return do_add_mount(mnt, nd, mnt_flags, NULL);
}
/*
 * add a mount into a namespace's mount tree
 * - provide the option of adding the new mount to an expiration list
 */
int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
                 int mnt_flags, struct list_head *fslist)
{
        int err;

        down_write(&namespace_sem);
        /* Something was mounted here while we slept */
        while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
                ;
        err = -EINVAL;
        if (!check_mnt(nd->mnt))
                goto unlock;

        /* Refuse the same filesystem on the same mount point */
        err = -EBUSY;
        if (nd->mnt->mnt_sb == newmnt->mnt_sb &&
            nd->mnt->mnt_root == nd->dentry)
                goto unlock;

        err = -EINVAL;
        if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
                goto unlock;

        newmnt->mnt_flags = mnt_flags;
        if ((err = graft_tree(newmnt, nd)))
                goto unlock;

        if (fslist) {
                /* add to the specified expiration list */
                spin_lock(&vfsmount_lock);
                list_add_tail(&newmnt->mnt_expire, fslist);
                spin_unlock(&vfsmount_lock);
        }
        up_write(&namespace_sem);
        return 0;

unlock:
        up_write(&namespace_sem);
        mntput(newmnt);
        return err;
}
EXPORT_SYMBOL_GPL(do_add_mount);
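/*
 * Kernel-internal mounters are the expected users of @fslist: a filesystem
 * that auto-mounts submounts (NFS crossing into a child export, for
 * instance) passes its own expiration list here and arranges periodic calls
 * to mark_mounts_for_expiry() on that list, so unused automounts evaporate
 * on their own. (Illustrative description; see the callers of this export
 * for specifics.)
 */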
static void expire_mount(struct vfsmount *mnt, struct list_head *mounts,
                                struct list_head *umounts)
{
        spin_lock(&vfsmount_lock);

        /*
         * Check if mount is still attached, if not, let whoever holds it deal
         * with it
         */
        if (mnt->mnt_parent == mnt) {
                spin_unlock(&vfsmount_lock);
                return;
        }

        /*
         * Check that it is still dead: the count should now be 2 - as
         * contributed by the vfsmount parent and the mntget above
         */
        if (!propagate_mount_busy(mnt, 2)) {
                /* delete from the namespace */
                touch_namespace(mnt->mnt_namespace);
                list_del_init(&mnt->mnt_list);
                mnt->mnt_namespace = NULL;
                umount_tree(mnt, 1, umounts);
                spin_unlock(&vfsmount_lock);
        } else {
                /*
                 * Someone brought it back to life whilst we didn't have any
                 * locks held so return it to the expiration list
                 */
                list_add_tail(&mnt->mnt_expire, mounts);
                spin_unlock(&vfsmount_lock);
        }
}
/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
        struct namespace *namespace;
        struct vfsmount *mnt, *next;
        LIST_HEAD(graveyard);

        if (list_empty(mounts))
                return;

        spin_lock(&vfsmount_lock);

        /* extract from the expiration list every vfsmount that matches the
         * following criteria:
         * - only referenced by its parent vfsmount
         * - still marked for expiry (marked on the last call here; marks are
         *   cleared by mntput())
         */
        list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
                if (!xchg(&mnt->mnt_expiry_mark, 1) ||
                    atomic_read(&mnt->mnt_count) != 1)
                        continue;

                mntget(mnt);
                list_move(&mnt->mnt_expire, &graveyard);
        }

        /*
         * go through the vfsmounts we've just consigned to the graveyard to
         * - check that they're still dead
         * - delete the vfsmount from the appropriate namespace under lock
         * - dispose of the corpse
         */
        while (!list_empty(&graveyard)) {
                LIST_HEAD(umounts);
                mnt = list_entry(graveyard.next, struct vfsmount, mnt_expire);
                list_del_init(&mnt->mnt_expire);

                /* don't do anything if the namespace is dead - all the
                 * vfsmounts from it are going away anyway */
                namespace = mnt->mnt_namespace;
                if (!namespace || !namespace->root)
                        continue;
                get_namespace(namespace);

                spin_unlock(&vfsmount_lock);
                down_write(&namespace_sem);
                expire_mount(mnt, mounts, &umounts);
                up_write(&namespace_sem);
                release_mounts(&umounts);
                mntput(mnt);
                put_namespace(namespace);
                spin_lock(&vfsmount_lock);
        }

        spin_unlock(&vfsmount_lock);
}
EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
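/*
 * The xchg() above makes expiry a two-pass handshake: pass N sets
 * mnt_expiry_mark and moves on; any mntput() in the meantime clears the
 * mark; pass N+1 only reaps mounts whose mark survived untouched and whose
 * sole remaining reference is the parent's. A mount therefore has to stay
 * idle for a full interval between calls before it is unmounted.
 */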
/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault. But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
                                 unsigned long n)
{
        char *t = to;
        const char __user *f = from;
        char c;

        if (!access_ok(VERIFY_READ, from, n))
                return n;

        while (n) {
                if (__get_user(c, f)) {
                        memset(t, 0, n);
                        break;
                }
                *t++ = c;
                f++;
                n--;
        }
        return n;
}
int copy_mount_options(const void __user * data, unsigned long *where)
{
        int i;
        unsigned long page;
        unsigned long size;

        *where = 0;
        if (!data)
                return 0;

        if (!(page = __get_free_page(GFP_KERNEL)))
                return -ENOMEM;

        /* We only care that *some* data at the address the user
         * gave us is valid. Just in case, we'll zero
         * the remainder of the page.
         */
        /* copy_from_user cannot cross TASK_SIZE ! */
        size = TASK_SIZE - (unsigned long)data;
        if (size > PAGE_SIZE)
                size = PAGE_SIZE;

        i = size - exact_copy_from_user((void *)page, data, size);
        if (!i) {
                free_page(page);
                return -EFAULT;
        }
        if (i != PAGE_SIZE)
                memset((char *)page + i, 0, PAGE_SIZE - i);
        *where = page;
        return 0;
}
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(char *dev_name, char *dir_name, char *type_page,
                  unsigned long flags, void *data_page)
{
        struct nameidata nd;
        int retval = 0;
        int mnt_flags = 0;

        /* Discard magic */
        if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
                flags &= ~MS_MGC_MSK;

        /* Basic sanity checks */
        if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
                return -EINVAL;
        if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
                return -EINVAL;

        if (data_page)
                ((char *)data_page)[PAGE_SIZE - 1] = 0;

        /* Separate the per-mountpoint flags */
        if (flags & MS_NOSUID)
                mnt_flags |= MNT_NOSUID;
        if (flags & MS_NODEV)
                mnt_flags |= MNT_NODEV;
        if (flags & MS_NOEXEC)
                mnt_flags |= MNT_NOEXEC;
        if (flags & MS_NOATIME)
                mnt_flags |= MNT_NOATIME;
        if (flags & MS_NODIRATIME)
                mnt_flags |= MNT_NODIRATIME;

        flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
                   MS_NOATIME | MS_NODIRATIME);

        /* ... and get the mountpoint */
        retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
        if (retval)
                return retval;

        retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page);
        if (retval)
                goto dput_out;

        if (flags & MS_REMOUNT)
                retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
                                    data_page);
        else if (flags & MS_BIND)
                retval = do_loopback(&nd, dev_name, flags & MS_REC);
        else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
                retval = do_change_type(&nd, flags);
        else if (flags & MS_MOVE)
                retval = do_move_mount(&nd, dev_name);
        else
                retval = do_new_mount(&nd, type_page, flags, mnt_flags,
                                      dev_name, data_page);
dput_out:
        path_release(&nd);
        return retval;
}
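/*
 * Example (sketch) of a mount(2) call arriving here after sys_mount() has
 * copied the user strings in:
 *
 *	mount("/dev/hda2", "/home", "ext3", MS_NOSUID | MS_NODEV, "");
 *
 * MS_NOSUID and MS_NODEV are peeled off into mnt_flags above, since they
 * are per-mountpoint, while what remains of @flags travels down to the
 * filesystem via do_new_mount()/do_kern_mount().
 */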
int copy_namespace(int flags, struct task_struct *tsk)
{
        struct namespace *namespace = tsk->namespace;
        struct namespace *new_ns;
        struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
        struct fs_struct *fs = tsk->fs;
        struct vfsmount *p, *q;

        if (!namespace)
                return 0;

        get_namespace(namespace);

        if (!(flags & CLONE_NEWNS))
                return 0;

        if (!capable(CAP_SYS_ADMIN)) {
                put_namespace(namespace);
                return -EPERM;
        }

        new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL);
        if (!new_ns)
                goto out;

        atomic_set(&new_ns->count, 1);
        INIT_LIST_HEAD(&new_ns->list);
        init_waitqueue_head(&new_ns->poll);
        new_ns->event = 0;

        down_write(&namespace_sem);
        /* First pass: copy the tree topology */
        new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root,
                                        CL_COPY_ALL | CL_EXPIRE);
        if (!new_ns->root) {
                up_write(&namespace_sem);
                kfree(new_ns);
                goto out;
        }
        spin_lock(&vfsmount_lock);
        list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
        spin_unlock(&vfsmount_lock);

        /*
         * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
         * as belonging to new namespace. We have already acquired a private
         * fs_struct, so tsk->fs->lock is not needed.
         */
        p = namespace->root;
        q = new_ns->root;
        while (p) {
                q->mnt_namespace = new_ns;
                if (fs) {
                        if (p == fs->rootmnt) {
                                rootmnt = p;
                                fs->rootmnt = mntget(q);
                        }
                        if (p == fs->pwdmnt) {
                                pwdmnt = p;
                                fs->pwdmnt = mntget(q);
                        }
                        if (p == fs->altrootmnt) {
                                altrootmnt = p;
                                fs->altrootmnt = mntget(q);
                        }
                }
                p = next_mnt(p, namespace->root);
                q = next_mnt(q, new_ns->root);
        }
        up_write(&namespace_sem);

        tsk->namespace = new_ns;

        if (rootmnt)
                mntput(rootmnt);
        if (pwdmnt)
                mntput(pwdmnt);
        if (altrootmnt)
                mntput(altrootmnt);

        put_namespace(namespace);
        return 0;

out:
        put_namespace(namespace);
        return -ENOMEM;
}
asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
                          char __user * type, unsigned long flags,
                          void __user * data)
{
        int retval;
        unsigned long data_page;
        unsigned long type_page;
        unsigned long dev_page;
        char *dir_page;

        retval = copy_mount_options(type, &type_page);
        if (retval < 0)
                return retval;

        dir_page = getname(dir_name);
        retval = PTR_ERR(dir_page);
        if (IS_ERR(dir_page))
                goto out1;

        retval = copy_mount_options(dev_name, &dev_page);
        if (retval < 0)
                goto out2;

        retval = copy_mount_options(data, &data_page);
        if (retval < 0)
                goto out3;

        lock_kernel();
        retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
                          flags, (void *)data_page);
        unlock_kernel();
        free_page(data_page);

out3:
        free_page(dev_page);
out2:
        putname(dir_page);
out1:
        free_page(type_page);
        return retval;
}
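/*
 * Note on ownership: type_page/dev_page/data_page are whole pages handed
 * out by copy_mount_options() and freed here on every path; dir_page comes
 * from getname() and must go back through putname(). do_mount() only
 * borrows the buffers.
 */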
/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt,
                 struct dentry *dentry)
{
        struct dentry *old_root;
        struct vfsmount *old_rootmnt;

        write_lock(&fs->lock);
        old_root = fs->root;
        old_rootmnt = fs->rootmnt;
        fs->rootmnt = mntget(mnt);
        fs->root = dget(dentry);
        write_unlock(&fs->lock);
        if (old_root) {
                dput(old_root);
                mntput(old_rootmnt);
        }
}
/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
                struct dentry *dentry)
{
        struct dentry *old_pwd;
        struct vfsmount *old_pwdmnt;

        write_lock(&fs->lock);
        old_pwd = fs->pwd;
        old_pwdmnt = fs->pwdmnt;
        fs->pwdmnt = mntget(mnt);
        fs->pwd = dget(dentry);
        write_unlock(&fs->lock);

        if (old_pwd) {
                dput(old_pwd);
                mntput(old_pwdmnt);
        }
}
static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
{
        struct task_struct *g, *p;
        struct fs_struct *fs;

        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                task_lock(p);
                fs = p->fs;
                if (fs) {
                        atomic_inc(&fs->count);
                        task_unlock(p);
                        if (fs->root == old_nd->dentry
                            && fs->rootmnt == old_nd->mnt)
                                set_fs_root(fs, new_nd->mnt, new_nd->dentry);
                        if (fs->pwd == old_nd->dentry
                            && fs->pwdmnt == old_nd->mnt)
                                set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
                        put_fs_struct(fs);
                } else
                        task_unlock(p);
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);
}
/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and must not be on the
 * same file system as the current process root. The put_old must be
 * underneath new_root, i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
asmlinkage long sys_pivot_root(const char __user * new_root,
                               const char __user * put_old)
{
        struct vfsmount *tmp;
        struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        lock_kernel();

        error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
                            &new_nd);
        if (error)
                goto out0;
        error = -EINVAL;
        if (!check_mnt(new_nd.mnt))
                goto out1;

        error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
        if (error)
                goto out1;

        error = security_sb_pivotroot(&old_nd, &new_nd);
        if (error) {
                path_release(&old_nd);
                goto out1;
        }

        read_lock(&current->fs->lock);
        user_nd.mnt = mntget(current->fs->rootmnt);
        user_nd.dentry = dget(current->fs->root);
        read_unlock(&current->fs->lock);
        down_write(&namespace_sem);
        mutex_lock(&old_nd.dentry->d_inode->i_mutex);
        error = -EINVAL;
        if (IS_MNT_SHARED(old_nd.mnt) ||
                IS_MNT_SHARED(new_nd.mnt->mnt_parent) ||
                IS_MNT_SHARED(user_nd.mnt->mnt_parent))
                goto out2;
        if (!check_mnt(user_nd.mnt))
                goto out2;
        error = -ENOENT;
        if (IS_DEADDIR(new_nd.dentry->d_inode))
                goto out2;
        if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
                goto out2;
        if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
                goto out2;
        error = -EBUSY;
        if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
                goto out2; /* loop, on the same file system  */
        error = -EINVAL;
        if (user_nd.mnt->mnt_root != user_nd.dentry)
                goto out2; /* not a mountpoint */
        if (user_nd.mnt->mnt_parent == user_nd.mnt)
                goto out2; /* not attached */
        if (new_nd.mnt->mnt_root != new_nd.dentry)
                goto out2; /* not a mountpoint */
        if (new_nd.mnt->mnt_parent == new_nd.mnt)
                goto out2; /* not attached */
        tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
        spin_lock(&vfsmount_lock);
        if (tmp != new_nd.mnt) {
                for (;;) {
                        if (tmp->mnt_parent == tmp)
                                goto out3; /* already mounted on put_old */
                        if (tmp->mnt_parent == new_nd.mnt)
                                break;
                        tmp = tmp->mnt_parent;
                }
                if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
                        goto out3;
        } else if (!is_subdir(old_nd.dentry, new_nd.dentry))
                goto out3;
        detach_mnt(new_nd.mnt, &parent_nd);
        detach_mnt(user_nd.mnt, &root_parent);
        attach_mnt(user_nd.mnt, &old_nd);     /* mount old root on put_old */
        attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */
        touch_namespace(current->namespace);
        spin_unlock(&vfsmount_lock);
        chroot_fs_refs(&user_nd, &new_nd);
        security_sb_post_pivotroot(&user_nd, &new_nd);
        error = 0;
        path_release(&root_parent);
        path_release(&parent_nd);
out2:
        mutex_unlock(&old_nd.dentry->d_inode->i_mutex);
        up_write(&namespace_sem);
        path_release(&user_nd);
        path_release(&old_nd);
out1:
        path_release(&new_nd);
out0:
        unlock_kernel();
        return error;
out3:
        spin_unlock(&vfsmount_lock);
        goto out2;
}
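/*
 * Sketch of the canonical userspace sequence this supports (from the
 * pivot_root(8) tradition; illustrative, not part of this file):
 *
 *	chdir("/new_root");
 *	pivot_root(".", "put_old");
 *	chroot(".");
 *	umount2("/put_old", MNT_DETACH);
 *
 * After the call, every process whose root/cwd sat on the old root has
 * been switched to new_root by chroot_fs_refs() above.
 */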
static void __init init_mount_tree(void)
{
        struct vfsmount *mnt;
        struct namespace *namespace;
        struct task_struct *g, *p;

        mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
        if (IS_ERR(mnt))
                panic("Can't create rootfs");
        namespace = kmalloc(sizeof(*namespace), GFP_KERNEL);
        if (!namespace)
                panic("Can't allocate initial namespace");
        atomic_set(&namespace->count, 1);
        INIT_LIST_HEAD(&namespace->list);
        init_waitqueue_head(&namespace->poll);
        namespace->event = 0;
        list_add(&mnt->mnt_list, &namespace->list);
        namespace->root = mnt;
        mnt->mnt_namespace = namespace;

        init_task.namespace = namespace;
        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                get_namespace(namespace);
                p->namespace = namespace;
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);

        set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
        set_fs_root(current->fs, namespace->root, namespace->root->mnt_root);
}
void __init mnt_init(unsigned long mempages)
{
        struct list_head *d;
        unsigned int nr_hash;
        int i;

        init_rwsem(&namespace_sem);

        mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
                        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);

        mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);

        if (!mount_hashtable)
                panic("Failed to allocate mount hash table\n");

        /*
         * Find the power-of-two list-heads that can fit into the allocation..
         * We don't guarantee that "sizeof(struct list_head)" is necessarily
         * a power-of-two.
         */
        nr_hash = PAGE_SIZE / sizeof(struct list_head);
        hash_bits = 0;
        do {
                hash_bits++;
        } while ((nr_hash >> hash_bits) != 0);
        hash_bits--;

        /*
         * Re-calculate the actual number of entries and the mask
         * from the number of bits we can fit.
         */
        nr_hash = 1UL << hash_bits;
        hash_mask = nr_hash - 1;

        printk("Mount-cache hash table entries: %d\n", nr_hash);

        /* And initialize the newly allocated array */
        d = mount_hashtable;
        i = nr_hash;
        do {
                INIT_LIST_HEAD(d);
                d++;
                i--;
        } while (i);
        sysfs_init();
        init_rootfs();
        init_mount_tree();
}
void __put_namespace(struct namespace *namespace)
{
        struct vfsmount *root = namespace->root;
        LIST_HEAD(umount_list);

        namespace->root = NULL;
        spin_unlock(&vfsmount_lock);
        down_write(&namespace_sem);
        spin_lock(&vfsmount_lock);
        umount_tree(root, 0, &umount_list);
        spin_unlock(&vfsmount_lock);
        up_write(&namespace_sem);
        release_mounts(&umount_list);
        kfree(namespace);
}
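/*
 * Note on the odd-looking unlock at the top of __put_namespace(): the
 * function is entered with vfsmount_lock held, taken by atomic_dec_and_lock()
 * in the caller. A sketch of that caller contract, assuming the
 * put_namespace() of this era in include/linux/namespace.h:
 *
 *	static inline void put_namespace(struct namespace *namespace)
 *	{
 *		if (atomic_dec_and_lock(&namespace->count, &vfsmount_lock))
 *			__put_namespace(namespace);
 *	}
 *
 * so the lock is dropped here before taking namespace_sem, which can sleep.
 */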