/*
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/acct.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <asm/uaccess.h>

extern int __init init_rootfs(void);
extern int __init sysfs_init(void);

/* spinlock for vfsmount related operations, in place of dcache_lock */
spinlock_t vfsmount_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

static struct list_head *mount_hashtable;
static int hash_mask, hash_bits;
static kmem_cache_t *mnt_cache;

static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long) mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long) dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> hash_bits);
	return tmp & hash_mask;
}

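/*
 * Illustrative arithmetic (hypothetical pointer values, not from the
 * original source): with L1_CACHE_BYTES == 32, hash_bits == 9 and
 * hash_mask == 511, mnt == 0xc12e4000 and dentry == 0xc0f81280 give
 * tmp = 0x06097200 + 0x0607c094 = 0x0c113294; adding (tmp >> 9) folds
 * the high bits in, and the mask leaves an index in [0, 511].
 */
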
struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		memset(mnt, 0, sizeof(struct vfsmount));
		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		if (name) {
			int size = strlen(name) + 1;
			char *newname = kmalloc(size, GFP_KERNEL);
			if (newname) {
				memcpy(newname, name, size);
				mnt->mnt_devname = newname;
			}
		}
	}
	return mnt;
}

void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	kmem_cache_free(mnt_cache, mnt);
}

/*
 * Now, lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct list_head * head = mount_hashtable + hash(mnt, dentry);
	struct list_head * tmp = head;
	struct vfsmount *p, *found = NULL;

	spin_lock(&vfsmount_lock);
	for (;;) {
		tmp = tmp->next;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = mntget(p);
			break;
		}
	}
	spin_unlock(&vfsmount_lock);
	return found;
}

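/*
 * Typical use (sketch): when path walking hits a dentry whose d_mounted
 * count is non-zero, follow_down()-style code calls lookup_mnt() with
 * the current vfsmount and that dentry to find what is mounted there,
 * then steps onto the returned mount's mnt_root. The reference taken
 * above is dropped by the caller with mntput().
 */
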
static int check_mnt(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	while (mnt->mnt_parent != mnt)
		mnt = mnt->mnt_parent;
	spin_unlock(&vfsmount_lock);
	return mnt == current->namespace->root;
}

static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
{
	old_nd->dentry = mnt->mnt_mountpoint;
	old_nd->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	old_nd->dentry->d_mounted--;
}

static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
{
	mnt->mnt_parent = mntget(nd->mnt);
	mnt->mnt_mountpoint = dget(nd->dentry);
	list_add(&mnt->mnt_hash, mount_hashtable + hash(nd->mnt, nd->dentry));
	list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
	nd->dentry->d_mounted++;
}

static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}

static struct vfsmount *
clone_mnt(struct vfsmount *old, struct dentry *root)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		mnt->mnt_flags = old->mnt_flags;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;
	}
	return mnt;
}

void __mntput(struct vfsmount *mnt)
{
	struct super_block *sb = mnt->mnt_sb;
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct namespace *n = m->private;
	struct list_head *p;
	loff_t l = *pos;

	down_read(&n->sem);
	list_for_each(p, &n->list)
		if (!l--)
			return list_entry(p, struct vfsmount, mnt_list);
	return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct namespace *n = m->private;
	struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
	(*pos)++;
	return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
}

static void m_stop(struct seq_file *m, void *v)
{
	struct namespace *n = m->private;
	up_read(&n->sem);
}

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = v;
	int err = 0;
	static struct proc_fs_info {
		int flag;
		char *str;
	} fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ MS_NOATIME, ",noatime" },
		{ MS_NODIRATIME, ",nodiratime" },
		{ 0, NULL }
	};
	static struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ 0, NULL }
	};
	struct proc_fs_info *fs_infop;

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_sb->s_type->name);
	seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
	return err;
}

struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt
};

/*
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	if (atomic_read(&mnt->mnt_count) > 2)
		return -EBUSY;
	return 0;
}

void umount_tree(struct vfsmount *mnt)
{
	struct vfsmount *p;
	LIST_HEAD(kill);

	for (p = mnt; p; p = next_mnt(p, mnt)) {
		list_del(&p->mnt_list);
		list_add(&p->mnt_list, &kill);
	}

	while (!list_empty(&kill)) {
		mnt = list_entry(kill.next, struct vfsmount, mnt_list);
		list_del_init(&mnt->mnt_list);
		if (mnt->mnt_parent == mnt) {
			spin_unlock(&vfsmount_lock);
		} else {
			struct nameidata old_nd;
			detach_mnt(mnt, &old_nd);
			spin_unlock(&vfsmount_lock);
			path_release(&old_nd);
		}
		mntput(mnt);
		spin_lock(&vfsmount_lock);
	}
}

static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block * sb = mnt->mnt_sb;
	int retval;

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
	 * 'Gee that's tricky lets do it in userspace' the umount_begin
	 * might fail to complete on the first run through as other tasks
	 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	if ((flags & MNT_FORCE) && sb->s_op->umount_begin)
		sb->s_op->umount_begin(sb);

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(2). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY))
			retval = do_remount_sb(sb, MS_RDONLY, 0, 0);
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&current->namespace->sem);
	spin_lock(&vfsmount_lock);

	if (atomic_read(&sb->s_active) == 1) {
		/* last instance - try to be smart */
		spin_unlock(&vfsmount_lock);
		security_sb_umount_close(mnt);
		spin_lock(&vfsmount_lock);
	}
	retval = -EBUSY;
	if (atomic_read(&mnt->mnt_count) == 2 || flags & MNT_DETACH) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt);
		retval = 0;
	}
	spin_unlock(&vfsmount_lock);
	if (retval)
		security_sb_umount_busy(mnt);
	up_write(&current->namespace->sem);
	return retval;
}

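/*
 * Userspace mapping (sketch): MNT_FORCE is what "umount -f" passes and
 * is what triggers the ->umount_begin() path above; MNT_DETACH
 * ("umount -l", lazy umount) skips the busy check and just detaches the
 * tree, letting the remaining references drain later.
 */
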
/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

asmlinkage long sys_umount(char __user * name, int flags)
{
	struct nameidata nd;
	int retval;

	retval = __user_walk(name, LOOKUP_FOLLOW, &nd);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (nd.dentry != nd.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(nd.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(nd.mnt, flags);
dput_and_out:
	path_release(&nd);
out:
	return retval;
}

/*
 * The 2.0 compatible umount. No flags.
 */
asmlinkage long sys_oldumount(char __user * name)
{
	return sys_umount(name, 0);
}

static int mount_is_safe(struct nameidata *nd)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(nd->dentry->d_inode->i_mode))
		return -EPERM;
	if (nd->dentry->d_inode->i_mode & S_ISVTX) {
		if (current->uid != nd->dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (permission(nd->dentry->d_inode, MAY_WRITE, nd))
		return -EPERM;
	return 0;
#endif
}

static int
lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
{
	while (1) {
		if (d == dentry)
			return 1;
		if (d == NULL || d == d->d_parent)
			return 0;
		d = d->d_parent;
	}
}

static struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct list_head *h;
	struct nameidata nd;

	res = q = clone_mnt(mnt, dentry);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	for (h = mnt->mnt_mounts.next; h != &mnt->mnt_mounts; h = h->next) {
		r = list_entry(h, struct vfsmount, mnt_child);
		if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			nd.mnt = q;
			nd.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root);
			if (!q)
				goto Enomem;
			spin_lock(&vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &nd);
			spin_unlock(&vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		spin_lock(&vfsmount_lock);
		umount_tree(res);
		spin_unlock(&vfsmount_lock);
	}
	return NULL;
}

static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	down(&nd->dentry->d_inode->i_sem);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out_unlock;

	err = security_sb_check_sb(mnt, nd);
	if (err)
		goto out_unlock;

	err = -ENOENT;
	spin_lock(&vfsmount_lock);
	if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry)) {
		struct list_head head;

		attach_mnt(mnt, nd);
		list_add_tail(&head, &mnt->mnt_list);
		list_splice(&head, current->namespace->list.prev);
		mntget(mnt);
		err = 0;
	}
	spin_unlock(&vfsmount_lock);
out_unlock:
	up(&nd->dentry->d_inode->i_sem);
	if (!err)
		security_sb_post_addmount(mnt, nd);
	return err;
}

static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
{
	struct nameidata old_nd;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(nd);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&current->namespace->sem);
	err = -EINVAL;
	if (check_mnt(nd->mnt) && (!recurse || check_mnt(old_nd.mnt))) {
		err = -ENOMEM;
		if (recurse)
			mnt = copy_tree(old_nd.mnt, old_nd.dentry);
		else
			mnt = clone_mnt(old_nd.mnt, old_nd.dentry);
	}

	if (mnt) {
		err = graft_tree(mnt, nd);
		if (err) {
			spin_lock(&vfsmount_lock);
			umount_tree(mnt);
			spin_unlock(&vfsmount_lock);
		} else
			mntput(mnt);
	}

	up_write(&current->namespace->sem);
	path_release(&old_nd);
	return err;
}

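/*
 * Userspace mapping (sketch): "mount --bind old new" reaches this with
 * recurse == 0 (MS_BIND alone), cloning a single vfsmount; "mount
 * --rbind old new" passes MS_BIND|MS_REC and clones the whole subtree
 * via copy_tree().
 */
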
/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct nameidata *nd, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block * sb = nd->mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(nd->mnt))
		return -EINVAL;

	if (nd->dentry != nd->mnt->mnt_root)
		return -EINVAL;

	down_write(&sb->s_umount);
	err = do_remount_sb(sb, flags, data, 0);
	if (!err)
		nd->mnt->mnt_flags = mnt_flags;
	up_write(&sb->s_umount);
	if (!err)
		security_sb_post_remount(nd->mnt, flags, data);
	return err;
}

static int do_move_mount(struct nameidata *nd, char *old_name)
{
	struct nameidata old_nd, parent_nd;
	struct vfsmount *p;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&current->namespace->sem);
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
		goto out;

	err = -ENOENT;
	down(&nd->dentry->d_inode->i_sem);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out1;

	spin_lock(&vfsmount_lock);
	if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
		goto out2;

	err = -EINVAL;
	if (old_nd.dentry != old_nd.mnt->mnt_root)
		goto out2;

	if (old_nd.mnt == old_nd.mnt->mnt_parent)
		goto out2;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	      S_ISDIR(old_nd.dentry->d_inode->i_mode))
		goto out2;

	err = -ELOOP;
	for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_nd.mnt)
			goto out2;
	err = 0;

	detach_mnt(old_nd.mnt, &parent_nd);
	attach_mnt(old_nd.mnt, nd);
out2:
	spin_unlock(&vfsmount_lock);
out1:
	up(&nd->dentry->d_inode->i_sem);
out:
	up_write(&current->namespace->sem);
	if (!err)
		path_release(&parent_nd);
	path_release(&old_nd);
	return err;
}

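/*
 * Userspace mapping (sketch): "mount --move old new" issues mount(2)
 * with MS_MOVE; the subtree rooted at old_nd is detached from its old
 * parent and re-attached under nd atomically under vfsmount_lock.
 */
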
static int do_add_mount(struct nameidata *nd, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;
	int err;

	if (!type || !memchr(type, 0, PAGE_SIZE))
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	err = PTR_ERR(mnt);
	if (IS_ERR(mnt))
		goto out;

	down_write(&current->namespace->sem);
	/* Something was mounted here while we slept */
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (nd->mnt->mnt_sb == mnt->mnt_sb && nd->mnt->mnt_root == nd->dentry)
		goto unlock;

	mnt->mnt_flags = mnt_flags;
	err = graft_tree(mnt, nd);
unlock:
	up_write(&current->namespace->sem);
	mntput(mnt);
out:
	return err;
}

static int copy_mount_options(const void __user *data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid. Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

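/*
 * Example (hypothetical numbers): if data sits 100 bytes below
 * TASK_SIZE, only those 100 bytes are copied, so the copy can never
 * fault past the end of the user address space, and the remaining
 * PAGE_SIZE - 100 bytes of the kernel page are zeroed so fs-specific
 * option parsers see a clean, NUL-terminated buffer.
 */
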
/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */

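/*
 * Illustration (hypothetical userspace calls): once the magic is
 * discarded, these two are equivalent:
 *	mount("/dev/hda1", "/mnt", "ext2", 0xC0ED0000 | MS_RDONLY, NULL);
 *	mount("/dev/hda1", "/mnt", "ext2", MS_RDONLY, NULL);
 * since MS_MGC_VAL == 0xC0ED0000 and the whole MS_MGC_MSK half is cleared.
 */
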
long do_mount(char * dev_name, char * dir_name, char *type_page,
		  unsigned long flags, void *data_page)
{
	struct nameidata nd;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */
	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
		return -EINVAL;
	if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
		return -EINVAL;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	flags &= ~(MS_NOSUID|MS_NOEXEC|MS_NODEV);

	/* ... and get the mountpoint */
	retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page);
	if (retval)
		goto dput_out;

	if (flags & MS_REMOUNT)
		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&nd, dev_name, flags & MS_REC);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&nd, dev_name);
	else
		retval = do_add_mount(&nd, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_release(&nd);
	return retval;
}

int copy_namespace(int flags, struct task_struct *tsk)
{
	struct namespace *namespace = tsk->namespace;
	struct namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
	struct fs_struct *fs = tsk->fs;

	if (!namespace)
		return 0;

	get_namespace(namespace);

	if (!(flags & CLONE_NEWNS))
		return 0;

	if (!capable(CAP_SYS_ADMIN)) {
		put_namespace(namespace);
		return -EPERM;
	}

	new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL);
	if (!new_ns)
		goto out;

	atomic_set(&new_ns->count, 1);
	init_rwsem(&new_ns->sem);
	INIT_LIST_HEAD(&new_ns->list);

	down_write(&tsk->namespace->sem);
	/* First pass: copy the tree topology */
	new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root);
	spin_lock(&vfsmount_lock);
	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
	spin_unlock(&vfsmount_lock);

	/* Second pass: switch the tsk->fs->* elements */
	if (fs) {
		struct vfsmount *p, *q;
		write_lock(&fs->lock);
		p = namespace->root;
		q = new_ns->root;
		while (p) {
			if (p == fs->rootmnt) {
				rootmnt = p;
				fs->rootmnt = mntget(q);
			}
			if (p == fs->pwdmnt) {
				pwdmnt = p;
				fs->pwdmnt = mntget(q);
			}
			if (p == fs->altrootmnt) {
				altrootmnt = p;
				fs->altrootmnt = mntget(q);
			}
			p = next_mnt(p, namespace->root);
			q = next_mnt(q, new_ns->root);
		}
		write_unlock(&fs->lock);
	}
	up_write(&tsk->namespace->sem);

	tsk->namespace = new_ns;

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);
	if (altrootmnt)
		mntput(altrootmnt);

	put_namespace(namespace);
	return 0;

out:
	put_namespace(namespace);
	return -ENOMEM;
}

asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
			  char __user * type, unsigned long flags,
			  void __user * data)
{
	int retval;
	unsigned long data_page;
	unsigned long type_page;
	unsigned long dev_page;
	char *dir_page;

	retval = copy_mount_options(type, &type_page);
	if (retval < 0)
		return retval;

	dir_page = getname(dir_name);
	retval = PTR_ERR(dir_page);
	if (IS_ERR(dir_page))
		goto out1;

	retval = copy_mount_options(dev_name, &dev_page);
	if (retval < 0)
		goto out2;

	retval = copy_mount_options(data, &data_page);
	if (retval < 0)
		goto out3;

	retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
			  flags, (void *)data_page);
	free_page(data_page);

out3:
	free_page(dev_page);
out2:
	putname(dir_page);
out1:
	free_page(type_page);
	return retval;
}

/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt,
		 struct dentry *dentry)
{
	struct dentry *old_root;
	struct vfsmount *old_rootmnt;

	write_lock(&fs->lock);
	old_root = fs->root;
	old_rootmnt = fs->rootmnt;
	fs->rootmnt = mntget(mnt);
	fs->root = dget(dentry);
	write_unlock(&fs->lock);
	if (old_root) {
		dput(old_root);
		mntput(old_rootmnt);
	}
}

/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block. Requires the big lock held.
 */
void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
		struct dentry *dentry)
{
	struct dentry *old_pwd;
	struct vfsmount *old_pwdmnt;

	write_lock(&fs->lock);
	old_pwd = fs->pwd;
	old_pwdmnt = fs->pwdmnt;
	fs->pwdmnt = mntget(mnt);
	fs->pwd = dget(dentry);
	write_unlock(&fs->lock);
	if (old_pwd) {
		dput(old_pwd);
		mntput(old_pwdmnt);
	}
}

static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			atomic_inc(&fs->count);
			task_unlock(p);
			if (fs->root == old_nd->dentry
			    && fs->rootmnt == old_nd->mnt)
				set_fs_root(fs, new_nd->mnt, new_nd->dentry);
			if (fs->pwd == old_nd->dentry
			    && fs->pwdmnt == old_nd->mnt)
				set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
			put_fs_struct(fs);
		} else
			task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}

/*
 * pivot_root semantics: moves the current root to put_old, and sets
 * root/cwd of all processes which had them on the old root to new_root.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */

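/*
 * Typical usage from userspace (illustrative sketch, not from this file):
 *	mount --bind /new_root /new_root    (ensure it is a mountpoint)
 *	cd /new_root
 *	pivot_root . old_root
 *	exec chroot . /sbin/init
 */
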
asmlinkage long sys_pivot_root(const char __user *new_root,
			       const char __user *put_old)
{
	struct vfsmount *tmp;
	struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = __user_walk(new_root, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd);
	if (error)
		goto out0;
	error = -EINVAL;
	if (!check_mnt(new_nd.mnt))
		goto out1;

	error = __user_walk(put_old, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &old_nd);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old_nd, &new_nd);
	if (error) {
		path_release(&old_nd);
		goto out1;
	}

	read_lock(&current->fs->lock);
	user_nd.mnt = mntget(current->fs->rootmnt);
	user_nd.dentry = dget(current->fs->root);
	read_unlock(&current->fs->lock);
	down_write(&current->namespace->sem);
	down(&old_nd.dentry->d_inode->i_sem);
	error = -EINVAL;
	if (!check_mnt(user_nd.mnt))
		goto out2;
	error = -ENOENT;
	if (IS_DEADDIR(new_nd.dentry->d_inode))
		goto out2;
	if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
		goto out2;
	if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
		goto out2;
	error = -EBUSY;
	if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
		goto out2; /* loop */
	error = -EINVAL;
	if (user_nd.mnt->mnt_root != user_nd.dentry)
		goto out2; /* not a mountpoint */
	if (new_nd.mnt->mnt_root != new_nd.dentry)
		goto out2; /* not a mountpoint */
	tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
	spin_lock(&vfsmount_lock);
	if (tmp != new_nd.mnt) {
		for (;;) {
			if (tmp->mnt_parent == tmp)
				goto out3;
			if (tmp->mnt_parent == new_nd.mnt)
				break;
			tmp = tmp->mnt_parent;
		}
		if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
			goto out3;
	} else if (!is_subdir(old_nd.dentry, new_nd.dentry))
		goto out3;
	detach_mnt(new_nd.mnt, &parent_nd);
	detach_mnt(user_nd.mnt, &root_parent);
	attach_mnt(user_nd.mnt, &old_nd);
	attach_mnt(new_nd.mnt, &root_parent);
	spin_unlock(&vfsmount_lock);
	chroot_fs_refs(&user_nd, &new_nd);
	security_sb_post_pivotroot(&user_nd, &new_nd);
	error = 0;
	path_release(&root_parent);
	path_release(&parent_nd);
out2:
	up(&old_nd.dentry->d_inode->i_sem);
	up_write(&current->namespace->sem);
	path_release(&user_nd);
	path_release(&old_nd);
out1:
	path_release(&new_nd);
out0:
	return error;
out3:
	spin_unlock(&vfsmount_lock);
	goto out2;
}

static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct namespace *namespace;
	struct task_struct *g, *p;

	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");
	namespace = kmalloc(sizeof(*namespace), GFP_KERNEL);
	if (!namespace)
		panic("Can't allocate initial namespace");
	atomic_set(&namespace->count, 1);
	INIT_LIST_HEAD(&namespace->list);
	init_rwsem(&namespace->sem);
	list_add(&mnt->mnt_list, &namespace->list);
	namespace->root = mnt;

	init_task.namespace = namespace;
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		get_namespace(namespace);
		p->namespace = namespace;
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
	set_fs_root(current->fs, namespace->root, namespace->root->mnt_root);
}

void __init mnt_init(unsigned long mempages)
{
	struct list_head *d;
	unsigned long order;
	unsigned int nr_hash;
	int i;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
			0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!mnt_cache)
		panic("Cannot create vfsmount cache");

	order = 0;
	mount_hashtable = (struct list_head *)
		__get_free_pages(GFP_ATOMIC, order);

	if (!mount_hashtable)
		panic("Failed to allocate mount hash table\n");

	/*
	 * Find the power-of-two list-heads that can fit into the allocation..
	 * We don't guarantee that "sizeof(struct list_head)" is necessarily
	 * a power-of-two.
	 */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct list_head);
	hash_bits = 0;
	do {
		hash_bits++;
	} while ((nr_hash >> hash_bits) != 0);
	hash_bits--;

	/*
	 * Re-calculate the actual number of entries and the mask
	 * from the number of bits we can fit.
	 */
	nr_hash = 1UL << hash_bits;
	hash_mask = nr_hash - 1;

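	/*
	 * Worked example (typical x86 values, for illustration): with
	 * order == 0, PAGE_SIZE == 4096 and sizeof(struct list_head) == 8,
	 * nr_hash starts at 512; the loop above finds hash_bits == 9, so
	 * nr_hash is re-set to 512 and hash_mask to 511.
	 */
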
	printk("Mount-cache hash table entries: %d (order: %ld, %ld bytes)\n",
			nr_hash, order, (PAGE_SIZE << order));

	/* And initialize the newly allocated array */
	d = mount_hashtable;
	i = nr_hash;
	do {
		INIT_LIST_HEAD(d);
		d++;
		i--;
	} while (i);
}