hwmon: (via-cputemp) sync hotplug handling with coretemp/pkgtemp
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / fs / fs_struct.c
blob68ca487bedb18e4d39b756659e6be33e6e633451
1 #include <linux/module.h>
2 #include <linux/sched.h>
3 #include <linux/fs.h>
4 #include <linux/path.h>
5 #include <linux/slab.h>
6 #include <linux/fs_struct.h>
/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_root(struct fs_struct *fs, struct path *path)
{
	struct path old_root;

	/*
	 * The swap is done under fs->lock and inside a seqcount write
	 * section so lockless readers of fs->root retry rather than see
	 * a half-updated path.
	 */
	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_root = fs->root;
	fs->root = *path;
	path_get_long(path);	/* new reference taken before anyone can see it */
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);
	/*
	 * Drop the old reference only after the spinlock is released:
	 * path_put_long() may block (see the header comment).
	 */
	if (old_root.dentry)
		path_put_long(&old_root);
}
/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
	struct path old_pwd;

	/*
	 * Same protocol as set_fs_root(): update under fs->lock within a
	 * seqcount write section, take the new reference while holding
	 * the lock, and defer the (possibly blocking) put of the old
	 * path until the lock is dropped.
	 */
	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get_long(path);
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put_long(&old_pwd);
}
/*
 * Retarget every task whose root and/or pwd is exactly *old_root onto
 * *new_root.  Presumably called on behalf of pivot_root-style mount
 * namespace surgery — TODO confirm against callers (not visible here).
 */
void chroot_fs_refs(struct path *old_root, struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;
	int count = 0;		/* number of old_root references we displaced */

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			/* Same per-fs locking protocol as set_fs_root/pwd. */
			spin_lock(&fs->lock);
			write_seqcount_begin(&fs->seq);
			if (fs->root.dentry == old_root->dentry
			    && fs->root.mnt == old_root->mnt) {
				path_get_long(new_root);
				fs->root = *new_root;
				count++;
			}
			if (fs->pwd.dentry == old_root->dentry
			    && fs->pwd.mnt == old_root->mnt) {
				path_get_long(new_root);
				fs->pwd = *new_root;
				count++;
			}
			write_seqcount_end(&fs->seq);
			spin_unlock(&fs->lock);
		}
		task_unlock(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	/*
	 * Each replacement above removed one reference the fs_struct held
	 * on old_root; drop them now that no locks are held, since
	 * path_put_long() can block.
	 */
	while (count--)
		path_put_long(old_root);
}
82 void free_fs_struct(struct fs_struct *fs)
84 path_put_long(&fs->root);
85 path_put_long(&fs->pwd);
86 kmem_cache_free(fs_cachep, fs);
/*
 * Detach @tsk from its fs_struct on exit, freeing the fs_struct when
 * the last user goes away.
 */
void exit_fs(struct task_struct *tsk)
{
	struct fs_struct *fs = tsk->fs;

	if (fs) {
		int kill;
		task_lock(tsk);
		spin_lock(&fs->lock);
		write_seqcount_begin(&fs->seq);
		/*
		 * Clear the pointer inside the seqcount write section so
		 * lockless readers retry instead of following a stale
		 * tsk->fs.
		 */
		tsk->fs = NULL;
		kill = !--fs->users;	/* true iff we were the last user */
		write_seqcount_end(&fs->seq);
		spin_unlock(&fs->lock);
		task_unlock(tsk);
		/* Free (which can block) only after all locks are dropped. */
		if (kill)
			free_fs_struct(fs);
	}
}
108 struct fs_struct *copy_fs_struct(struct fs_struct *old)
110 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
111 /* We don't need to lock fs - think why ;-) */
112 if (fs) {
113 fs->users = 1;
114 fs->in_exec = 0;
115 spin_lock_init(&fs->lock);
116 seqcount_init(&fs->seq);
117 fs->umask = old->umask;
119 spin_lock(&old->lock);
120 fs->root = old->root;
121 path_get_long(&fs->root);
122 fs->pwd = old->pwd;
123 path_get_long(&fs->pwd);
124 spin_unlock(&old->lock);
126 return fs;
129 int unshare_fs_struct(void)
131 struct fs_struct *fs = current->fs;
132 struct fs_struct *new_fs = copy_fs_struct(fs);
133 int kill;
135 if (!new_fs)
136 return -ENOMEM;
138 task_lock(current);
139 spin_lock(&fs->lock);
140 kill = !--fs->users;
141 current->fs = new_fs;
142 spin_unlock(&fs->lock);
143 task_unlock(current);
145 if (kill)
146 free_fs_struct(fs);
148 return 0;
150 EXPORT_SYMBOL_GPL(unshare_fs_struct);
152 int current_umask(void)
154 return current->fs->umask;
156 EXPORT_SYMBOL(current_umask);
/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
	.users	= 1,	/* the init task itself */
	.lock	= __SPIN_LOCK_UNLOCKED(init_fs.lock),
	.seq	= SEQCNT_ZERO,
	.umask	= 0022,
	/*
	 * NOTE(review): .root and .pwd are left zeroed here; presumably
	 * they are filled in once the root filesystem is mounted — not
	 * visible in this file, confirm against early boot code.
	 */
};
/*
 * Switch the current task over to the shared init_fs, dropping its own
 * fs_struct reference (and freeing the fs_struct if it was the last
 * user).  Presumably used when a task daemonizes — confirm with callers.
 */
void daemonize_fs_struct(void)
{
	struct fs_struct *fs = current->fs;

	if (fs) {
		int kill;

		task_lock(current);

		/*
		 * The two spinlocks below are taken sequentially, never
		 * nested, so there is no lock-ordering issue between
		 * init_fs.lock and fs->lock.
		 */
		spin_lock(&init_fs.lock);
		init_fs.users++;
		spin_unlock(&init_fs.lock);

		spin_lock(&fs->lock);
		current->fs = &init_fs;
		kill = !--fs->users;	/* were we the last user? */
		spin_unlock(&fs->lock);

		task_unlock(current);
		/* free_fs_struct() can block - call it lock-free. */
		if (kill)
			free_fs_struct(fs);
	}
}