/*
 *  (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/acct.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <asm/uaccess.h>

extern int __init init_rootfs(void);
extern int __init sysfs_init(void);

static struct list_head *mount_hashtable;
static int hash_mask, hash_bits;
static kmem_cache_t *mnt_cache;

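/*
 * Hash a (parent mount, mountpoint dentry) pair into mount_hashtable.
 * Both pointers are divided by L1_CACHE_BYTES to discard the low bits
 * that are always zero due to allocator alignment, then folded with
 * their own high bits and masked down to the table size.
 */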
static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long) mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long) dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> hash_bits);
	return tmp & hash_mask;
}

struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		memset(mnt, 0, sizeof(struct vfsmount));
		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		if (name) {
			int size = strlen(name) + 1;
			char *newname = kmalloc(size, GFP_KERNEL);
			if (newname) {
				memcpy(newname, name, size);
				mnt->mnt_devname = newname;
			}
		}
	}
	return mnt;
}

void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	kmem_cache_free(mnt_cache, mnt);
}

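/*
 * Look up the mount hashed at (mnt, dentry): the child vfsmount, if
 * any, that is mounted on that dentry within that parent mount.
 */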
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct vfsmount *p, *found = NULL;

	spin_lock(&dcache_lock);
	for (;;) {
		tmp = tmp->next;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = mntget(p);
			break;
		}
	}
	spin_unlock(&dcache_lock);
	return found;
}

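/*
 * Sanity check: walk up to the root of the mount tree and verify that
 * we end up at the root of the caller's namespace, i.e. that the mount
 * is attached to the current namespace at all.
 */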
static int check_mnt(struct vfsmount *mnt)
{
	spin_lock(&dcache_lock);
	while (mnt->mnt_parent != mnt)
		mnt = mnt->mnt_parent;
	spin_unlock(&dcache_lock);
	return mnt == current->namespace->root;
}

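/*
 * detach_mnt() unhooks a mount from its parent, remembering the old
 * location in *old_nd; attach_mnt() hooks a mount in at nd.  Both only
 * touch the hash chain and the parent/child lists, so callers must
 * hold dcache_lock (as every call site in this file does).
 */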
static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
{
	old_nd->dentry = mnt->mnt_mountpoint;
	old_nd->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	old_nd->dentry->d_mounted--;
}

static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
{
	mnt->mnt_parent = mntget(nd->mnt);
	mnt->mnt_mountpoint = dget(nd->dentry);
	list_add(&mnt->mnt_hash, mount_hashtable + hash(nd->mnt, nd->dentry));
	list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
	nd->dentry->d_mounted++;
}

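/*
 * Depth-first successor of p in the mount tree rooted at root: descend
 * into p's first submount if there is one, otherwise climb back up
 * until a sibling is found.  Returns NULL once the tree is exhausted.
 */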
static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}

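/*
 * Make a new vfsmount on the same superblock as 'old', rooted at the
 * given dentry.  The new mount starts out detached (it is its own
 * parent) and takes an extra active reference on the superblock.
 */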
static struct vfsmount *
clone_mnt(struct vfsmount *old, struct dentry *root)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		mnt->mnt_flags = old->mnt_flags;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;
	}
	return mnt;
}

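/*
 * Final teardown of a vfsmount: reached (via mntput()) once the last
 * reference is gone.  Drops the root dentry, frees the vfsmount and
 * releases the active superblock reference taken at clone/mount time.
 */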
void __mntput(struct vfsmount *mnt)
{
	struct super_block *sb = mnt->mnt_sb;
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}

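/*
 * seq_file iterator for /proc/mounts: walk the mount list of the
 * namespace stashed in m->private, holding the namespace semaphore
 * for reading across the walk.
 */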
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct namespace *n = m->private;
	struct list_head *p;
	loff_t l = *pos;

	down_read(&n->sem);
	list_for_each(p, &n->list)
		if (!l--)
			return list_entry(p, struct vfsmount, mnt_list);
	return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct namespace *n = m->private;
	struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
	(*pos)++;
	return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
}

static void m_stop(struct seq_file *m, void *v)
{
	struct namespace *n = m->private;
	up_read(&n->sem);
}

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = v;
	int err = 0;
	static struct proc_fs_info {
		int flag;
		char *str;
	} fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ MS_NOATIME, ",noatime" },
		{ MS_NODIRATIME, ",nodiratime" },
		{ 0, NULL }
	};
	static struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ 0, NULL }
	};
	struct proc_fs_info *fs_infop;

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_sb->s_type->name);
	seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
	return err;
}

struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt
};

/*
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	if (atomic_read(&mnt->mnt_count) > 2)
		return -EBUSY;
	return 0;
}

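/*
 * Tear down the tree of mounts rooted at mnt: move every mount in the
 * subtree onto a private kill list, then detach and drop each one in
 * turn.  The caller holds dcache_lock; it is dropped and retaken around
 * the blocking path_release()/mntput() calls.
 */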
void umount_tree(struct vfsmount *mnt)
{
	struct vfsmount *p;
	LIST_HEAD(kill);

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		list_del(&p->mnt_list);
		list_add(&p->mnt_list, &kill);
	}

	while (!list_empty(&kill)) {
		mnt = list_entry(kill.next, struct vfsmount, mnt_list);
		list_del_init(&mnt->mnt_list);
		if (mnt->mnt_parent == mnt) {
			spin_unlock(&dcache_lock);
		} else {
			struct nameidata old_nd;
			detach_mnt(mnt, &old_nd);
			spin_unlock(&dcache_lock);
			path_release(&old_nd);
		}
		mntput(mnt);
		spin_lock(&dcache_lock);
	}
}

static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt_sb;
	int retval;

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee that's tricky, let's do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */
	lock_kernel();
	if ((flags & MNT_FORCE) && sb->s_op->umount_begin)
		sb->s_op->umount_begin(sb);
	unlock_kernel();

	/*
	 * No sense to grab the lock for this test, but the test itself looks
	 * somewhat bogus. Suggestions for a better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - a static binary that would close all descriptors and
	 * call reboot(2). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY)) {
			lock_kernel();
			retval = do_remount_sb(sb, MS_RDONLY, 0, 0);
			unlock_kernel();
		}
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&current->namespace->sem);
	spin_lock(&dcache_lock);

	if (atomic_read(&sb->s_active) == 1) {
		/* last instance - try to be smart */
		spin_unlock(&dcache_lock);
		lock_kernel();
		DQUOT_OFF(sb);
		acct_auto_close(sb);
		unlock_kernel();
		security_sb_umount_close(mnt);
		spin_lock(&dcache_lock);
	}
	retval = -EBUSY;
	if (atomic_read(&mnt->mnt_count) == 2 || flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt);
		retval = 0;
	}
	spin_unlock(&dcache_lock);
	if (retval)
		security_sb_umount_busy(mnt);
	up_write(&current->namespace->sem);
	return retval;
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */
asmlinkage long sys_umount(char __user *name, int flags)
{
	struct nameidata nd;
	int retval;

	retval = __user_walk(name, LOOKUP_FOLLOW, &nd);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (nd.dentry != nd.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(nd.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(nd.mnt, flags);
dput_and_out:
	path_release(&nd);
out:
	return retval;
}

/*
 * The 2.0-compatible umount. No flags.
 */
asmlinkage long sys_oldumount(char __user *name)
{
	return sys_umount(name, 0);
}

static int mount_is_safe(struct nameidata *nd)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(nd->dentry->d_inode->i_mode))
		return -EPERM;
	if (nd->dentry->d_inode->i_mode & S_ISVTX) {
		if (current->uid != nd->dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (permission(nd->dentry->d_inode, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}

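/*
 * Does d live below dentry within the same filesystem?  Walks d_parent
 * upwards; used to decide which submounts copy_tree() should clone.
 */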
static int
lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
{
	while (1) {
		if (d == dentry)
			return 1;
		if (d == NULL || d == d->d_parent)
			return 0;
		d = d->d_parent;
	}
}

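/*
 * Recursively clone the tree of mounts hanging off mnt, starting from
 * the subtree visible below 'dentry'.  Clones are wired together with
 * attach_mnt() and collected on the mnt_list of the result; on ENOMEM,
 * whatever was built so far is torn down again with umount_tree().
 */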
static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct list_head *h;
	struct nameidata nd;

	res = q = clone_mnt(mnt, dentry);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	for (h = mnt->mnt_mounts.next; h != &mnt->mnt_mounts; h = h->next) {
		r = list_entry(h, struct vfsmount, mnt_child);
		if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			nd.mnt = q;
			nd.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root);
			if (!q)
				goto Enomem;
			spin_lock(&dcache_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &nd);
			spin_unlock(&dcache_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		spin_lock(&dcache_lock);
		umount_tree(res);
		spin_unlock(&dcache_lock);
	}
	return NULL;
}

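/*
 * Attach mnt (or the tree hanging off it) at the location given by nd
 * and splice it into the current namespace's mount list.  Fails if the
 * superblock forbids user mounts, if a directory would be grafted onto
 * a non-directory (or vice versa), or if the mountpoint went away.
 */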
static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	down(&nd->dentry->d_inode->i_sem);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out_unlock;

	err = security_sb_check_sb(mnt, nd);
	if (err)
		goto out_unlock;

	err = -ENOENT;
	spin_lock(&dcache_lock);
	if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry)) {
		struct list_head head;

		attach_mnt(mnt, nd);
		list_add_tail(&head, &mnt->mnt_list);
		list_splice(&head, current->namespace->list.prev);
		mntget(mnt);
		err = 0;
	}
	spin_unlock(&dcache_lock);
out_unlock:
	up(&nd->dentry->d_inode->i_sem);
	if (!err)
		security_sb_post_addmount(mnt, nd);
	return err;
}

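/*
 * do_loopback() implements MS_BIND: make the subtree at old_name
 * visible at nd as well, cloning recursively when MS_REC is set.
 */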
static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
{
	struct nameidata old_nd;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(nd);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&current->namespace->sem);
	err = -EINVAL;
	if (check_mnt(nd->mnt) && (!recurse || check_mnt(old_nd.mnt))) {
		err = -ENOMEM;
		if (recurse)
			mnt = copy_tree(old_nd.mnt, old_nd.dentry);
		else
			mnt = clone_mnt(old_nd.mnt, old_nd.dentry);
	}

	if (mnt) {
		err = graft_tree(mnt, nd);
		if (err) {
			spin_lock(&dcache_lock);
			umount_tree(mnt);
			spin_unlock(&dcache_lock);
		} else
			mntput(mnt);
	}

	up_write(&current->namespace->sem);
	path_release(&old_nd);
	return err;
}

/*
 * Change filesystem flags. dir should be a physical root of the
 * filesystem.  If you've mounted a non-root directory somewhere and
 * want to do a remount on it - tough luck.
 */
static int do_remount(struct nameidata *nd, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = nd->mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(nd->mnt))
		return -EINVAL;

	if (nd->dentry != nd->mnt->mnt_root)
		return -EINVAL;

	down_write(&sb->s_umount);
	err = do_remount_sb(sb, flags, data, 0);
	if (!err)
		nd->mnt->mnt_flags = mnt_flags;
	up_write(&sb->s_umount);
	if (!err)
		security_sb_post_remount(nd->mnt, flags, data);
	return err;
}

static int do_move_mount(struct nameidata *nd, char *old_name)
{
	struct nameidata old_nd, parent_nd;
	struct vfsmount *p;
	int err = 0;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&current->namespace->sem);
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
		goto out;

	err = -ENOENT;
	down(&nd->dentry->d_inode->i_sem);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out1;

	spin_lock(&dcache_lock);
	if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
		goto out2;

	err = -EINVAL;
	if (old_nd.dentry != old_nd.mnt->mnt_root)
		goto out2;

	if (old_nd.mnt == old_nd.mnt->mnt_parent)
		goto out2;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	      S_ISDIR(old_nd.dentry->d_inode->i_mode))
		goto out2;

	err = -ELOOP;
	for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_nd.mnt)
			goto out2;
	err = 0;

	detach_mnt(old_nd.mnt, &parent_nd);
	attach_mnt(old_nd.mnt, nd);
out2:
	spin_unlock(&dcache_lock);
out1:
	up(&nd->dentry->d_inode->i_sem);
out:
	up_write(&current->namespace->sem);
	if (!err)
		path_release(&parent_nd);
	path_release(&old_nd);
	return err;
}

static int do_add_mount(struct nameidata *nd, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;
	int err;

	if (!type || !memchr(type, 0, PAGE_SIZE))
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	err = PTR_ERR(mnt);
	if (IS_ERR(mnt))
		goto out;

	down_write(&current->namespace->sem);
	/* Something was mounted here while we slept */
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (nd->mnt->mnt_sb == mnt->mnt_sb && nd->mnt->mnt_root == nd->dentry)
		goto unlock;

	mnt->mnt_flags = mnt_flags;
	err = graft_tree(mnt, nd);
unlock:
	up_write(&current->namespace->sem);
	mntput(mnt);
out:
	return err;
}

static int copy_mount_options(const void __user *data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid. Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

/*
 * Flags is a 32-bit value that allows up to 31 non-fs-dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(char *dev_name, char *dir_name, char *type_page,
	      unsigned long flags, void *data_page)
{
	struct nameidata nd;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */
	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;
	if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
		return -EINVAL;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	flags &= ~(MS_NOSUID|MS_NOEXEC|MS_NODEV);

	/* ... and get the mountpoint */
	retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page);
	if (retval)
		goto dput_out;

	if (flags & MS_REMOUNT)
		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&nd, dev_name, flags & MS_REC);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&nd, dev_name);
	else
		retval = do_add_mount(&nd, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_release(&nd);
	return retval;
}

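/*
 * Give a CLONE_NEWNS child its own private copy of the parent's mount
 * tree: clone the topology in one pass, then retarget the task's
 * fs_struct root/pwd/altroot pointers at the corresponding clones.
 */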
int copy_namespace(int flags, struct task_struct *tsk)
{
	struct namespace *namespace = tsk->namespace;
	struct namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
	struct fs_struct *fs = tsk->fs;

	if (!namespace)
		return 0;

	get_namespace(namespace);

	if (!(flags & CLONE_NEWNS))
		return 0;

	if (!capable(CAP_SYS_ADMIN)) {
		put_namespace(namespace);
		return -EPERM;
	}

	new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL);
	if (!new_ns)
		goto out;

	atomic_set(&new_ns->count, 1);
	init_rwsem(&new_ns->sem);
	INIT_LIST_HEAD(&new_ns->list);

	down_write(&tsk->namespace->sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root);
	spin_lock(&dcache_lock);
	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
	spin_unlock(&dcache_lock);

	/* Second pass: switch the tsk->fs->* elements */
	if (fs) {
		struct vfsmount *p, *q;
		write_lock(&fs->lock);

		p = namespace->root;
		q = new_ns->root;
		while (p) {
			if (p == fs->rootmnt) {
				rootmnt = p;
				fs->rootmnt = mntget(q);
			}
			if (p == fs->pwdmnt) {
				pwdmnt = p;
				fs->pwdmnt = mntget(q);
			}
			if (p == fs->altrootmnt) {
				altrootmnt = p;
				fs->altrootmnt = mntget(q);
			}
			p = next_mnt(p, namespace->root);
			q = next_mnt(q, new_ns->root);
		}
		write_unlock(&fs->lock);
	}
	up_write(&tsk->namespace->sem);

	tsk->namespace = new_ns;

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);
	if (altrootmnt)
		mntput(altrootmnt);

	put_namespace(namespace);
	return 0;

out:
	put_namespace(namespace);
	return -ENOMEM;
}

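/*
 * The userland entry point: copy the string and data arguments into
 * kernel memory (getname() for the directory, one page each for the
 * type, device and data arguments) and hand off to do_mount().
 */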
asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
			  char __user *type, unsigned long flags,
			  void __user *data)
{
	int retval;
	unsigned long data_page;
	unsigned long type_page;
	unsigned long dev_page;
	char *dir_page;

	retval = copy_mount_options(type, &type_page);
	if (retval < 0)
		return retval;

	dir_page = getname(dir_name);
	retval = PTR_ERR(dir_page);
	if (IS_ERR(dir_page))
		goto out1;

	retval = copy_mount_options(dev_name, &dev_page);
	if (retval < 0)
		goto out2;

	retval = copy_mount_options(data, &data_page);
	if (retval < 0)
		goto out3;

	lock_kernel();
	retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
			  flags, (void *)data_page);
	unlock_kernel();
	free_page(data_page);

out3:
	free_page(dev_page);
out2:
	putname(dir_page);
out1:
	free_page(type_page);
	return retval;
}

/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt,
		 struct dentry *dentry)
{
	struct dentry *old_root;
	struct vfsmount *old_rootmnt;
	write_lock(&fs->lock);
	old_root = fs->root;
	old_rootmnt = fs->rootmnt;
	fs->rootmnt = mntget(mnt);
	fs->root = dget(dentry);
	write_unlock(&fs->lock);
	if (old_root) {
		dput(old_root);
		mntput(old_rootmnt);
	}
}

/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
		struct dentry *dentry)
{
	struct dentry *old_pwd;
	struct vfsmount *old_pwdmnt;

	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	old_pwdmnt = fs->pwdmnt;
	fs->pwdmnt = mntget(mnt);
	fs->pwd = dget(dentry);
	write_unlock(&fs->lock);

	if (old_pwd) {
		dput(old_pwd);
		mntput(old_pwdmnt);
	}
}

static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			atomic_inc(&fs->count);
			task_unlock(p);
			if (fs->root == old_nd->dentry
			    && fs->rootmnt == old_nd->mnt)
				set_fs_root(fs, new_nd->mnt, new_nd->dentry);
			if (fs->pwd == old_nd->dentry
			    && fs->pwdmnt == old_nd->mnt)
				set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
			put_fs_struct(fs);
		} else
			task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}

/*
 * pivot_root semantics:
 * Moves the current root to put_old, and sets the root/cwd of all
 * processes which had them on the old root to new_root.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
asmlinkage long sys_pivot_root(const char __user *new_root,
			       const char __user *put_old)
{
	struct vfsmount *tmp;
	struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	lock_kernel();

	error = __user_walk(new_root, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd);
	if (error)
		goto out0;
	error = -EINVAL;
	if (!check_mnt(new_nd.mnt))
		goto out1;

	error = __user_walk(put_old, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &old_nd);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old_nd, &new_nd);
	if (error) {
		path_release(&old_nd);
		goto out1;
	}

	read_lock(&current->fs->lock);
	user_nd.mnt = mntget(current->fs->rootmnt);
	user_nd.dentry = dget(current->fs->root);
	read_unlock(&current->fs->lock);
	down_write(&current->namespace->sem);
	down(&old_nd.dentry->d_inode->i_sem);
	error = -EINVAL;
	if (!check_mnt(user_nd.mnt))
		goto out2;
	error = -ENOENT;
	if (IS_DEADDIR(new_nd.dentry->d_inode))
		goto out2;
	if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
		goto out2;
	if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
		goto out2;
	error = -EBUSY;
	if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
		goto out2; /* loop */
	error = -EINVAL;
	if (user_nd.mnt->mnt_root != user_nd.dentry)
		goto out2; /* not a mountpoint */
	if (new_nd.mnt->mnt_root != new_nd.dentry)
		goto out2; /* not a mountpoint */
	tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
	spin_lock(&dcache_lock);
	if (tmp != new_nd.mnt) {
		for (;;) {
			if (tmp->mnt_parent == tmp)
				goto out3; /* already mounted on put_old */
			if (tmp->mnt_parent == new_nd.mnt)
				break;
			tmp = tmp->mnt_parent;
		}
		if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
			goto out3;
	} else if (!is_subdir(old_nd.dentry, new_nd.dentry))
		goto out3;
	detach_mnt(new_nd.mnt, &parent_nd);
	detach_mnt(user_nd.mnt, &root_parent);
	attach_mnt(user_nd.mnt, &old_nd);	/* mount old root on put_old */
	attach_mnt(new_nd.mnt, &root_parent);	/* mount new_root on / */
	spin_unlock(&dcache_lock);
	chroot_fs_refs(&user_nd, &new_nd);
	security_sb_post_pivotroot(&user_nd, &new_nd);
	error = 0;
	path_release(&root_parent);
	path_release(&parent_nd);
out2:
	up(&old_nd.dentry->d_inode->i_sem);
	up_write(&current->namespace->sem);
	path_release(&user_nd);
	path_release(&old_nd);
out1:
	path_release(&new_nd);
out0:
	unlock_kernel();
	return error;
out3:
	spin_unlock(&dcache_lock);
	goto out2;
}

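/*
 * Boot-time setup: mount rootfs, build the initial namespace around it
 * and point every existing task (essentially just the init thread at
 * this point), plus current's root and cwd, at it.
 */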
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct namespace *namespace;
	struct task_struct *g, *p;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");
	namespace = kmalloc(sizeof(*namespace), GFP_KERNEL);
	if (!namespace)
		panic("Can't allocate initial namespace");
	atomic_set(&namespace->count, 1);
	INIT_LIST_HEAD(&namespace->list);
	init_rwsem(&namespace->sem);
	list_add(&mnt->mnt_list, &namespace->list);
	namespace->root = mnt;

	init_task.namespace = namespace;
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		get_namespace(namespace);
		p->namespace = namespace;
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
	set_fs_root(current->fs, namespace->root, namespace->root->mnt_root);
}

void __init mnt_init(unsigned long mempages)
{
	struct list_head *d;
	unsigned long order;
	unsigned int nr_hash;
	int i;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
			0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!mnt_cache)
		panic("Cannot create vfsmount cache");

	order = 0;
	mount_hashtable = (struct list_head *)
		__get_free_pages(GFP_ATOMIC, order);

	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	/*
	 * Find the power-of-two list-heads that can fit into the allocation..
	 * We don't guarantee that "sizeof(struct list_head)" is necessarily
	 * a power-of-two.
	 */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct list_head);
	hash_bits = 0;
	do {
		hash_bits++;
	} while ((nr_hash >> hash_bits) != 0);
	hash_bits--;

	/*
	 * Re-calculate the actual number of entries and the mask
	 * from the number of bits we can fit.
	 */
	nr_hash = 1UL << hash_bits;
	hash_mask = nr_hash - 1;

	printk("Mount-cache hash table entries: %d (order: %ld, %ld bytes)\n",
			nr_hash, order, (PAGE_SIZE << order));

	/* And initialize the newly allocated array */
	d = mount_hashtable;
	i = nr_hash;
	do {
		INIT_LIST_HEAD(d);
		d++;
		i--;
	} while (i);

	sysfs_init();
	init_rootfs();
	init_mount_tree();
}