kernel/user.c
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
#include "cred-internals.h"
struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(1),
	},
	.creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
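/*
 * Illustrative only: UIDHASH_BITS and UIDHASH_SZ are defined elsewhere
 * (in <linux/sched.h>) and depend on CONFIG_BASE_SMALL.  Assuming the
 * common UIDHASH_BITS = 7 (so UIDHASH_SZ = 128, UIDHASH_MASK = 127),
 * the bucket for uid 1000 works out as:
 *
 *	__uidhashfn(1000) = ((1000 >> 7) + 1000) & 127
 *	                  = (7 + 1000) & 127
 *	                  = 1007 & 127
 *	                  = 111
 *
 * i.e. uid 1000 lands in uidhash_table[111] of its namespace.  Folding
 * the high bits back in keeps runs of sequential uids from clustering
 * in the low buckets.
 */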
static struct kmem_cache *uid_cachep;
/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
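/*
 * A minimal sketch of the rule above, not part of this file: because the
 * lock can be taken with interrupts already disabled, users of
 * uidhash_lock stick to the irqsave/irqrestore variants rather than the
 * _bh ones.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);	// safe in any context
 *	...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 *	spin_lock_bh(&uidhash_lock);	// NOT safe here: the unlock path
 *	...				// runs local_bh_enable(), which may
 *	spin_unlock_bh(&uidhash_lock);	// execute softirqs and re-enable
 *					// interrupts behind the caller's back
 */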
/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(2),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.user_ns	= &init_user_ns,
#ifdef CONFIG_USER_SCHED
	.tg		= &init_task_group,
#endif
};
/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	set_tg_uid(up);

	return rc;
}

#else	/* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }

#endif	/* CONFIG_USER_SCHED */
#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}
/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	sscanf(buf, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif
#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_runtime;
	int rc;

	sscanf(buf, "%ld", &rt_runtime);

	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	sscanf(buf, "%lu", &rt_period);

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif
/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	&cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};
/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
	return;
}

static struct kobj_type uids_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = uids_attributes,
	.release = uids_release,
};
/*
 * Create /sys/kernel/uids/<uid>/cpu_share file for this user
 * We do not create this file for users in a user namespace (until
 * sysfs tagging is implemented).
 *
 * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
 */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	if (up->user_ns != &init_user_ns)
		return 0;
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}
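/*
 * For illustration: with CONFIG_FAIR_GROUP_SCHED and CONFIG_RT_GROUP_SCHED
 * both enabled, a successful uids_user_create() for uid 1000 (an example
 * value, not anything special) produces:
 *
 *	/sys/kernel/uids/1000/cpu_share
 *	/sys/kernel/uids/1000/cpu_rt_runtime
 *	/sys/kernel/uids/1000/cpu_rt_period
 *
 * all backed by the attribute show/store handlers above via uids_ktype.
 */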
/* create these entries in sysfs:
 * 	"/sys/kernel/uids" directory
 * 	"/sys/kernel/uids/0" directory (for root user)
 * 	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}
/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	if (up->user_ns != &init_user_ns)
		return;
	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	kobject_uevent(&up->kobj, KOBJ_REMOVE);
	kobject_del(&up->kobj);
	kobject_put(&up->kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}
/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore back the count */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	put_user_ns(up->user_ns);
	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}
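/*
 * Sketch of the resulting teardown flow (an assumed typical path, for
 * orientation only):
 *
 *	free_uid()                        // possibly with IRQs disabled
 *	  -> atomic_dec_and_lock()        // __count hit 0, lock taken
 *	  -> free_user()                  // re-inc __count, drop the lock
 *	       -> schedule_work(&up->work)
 *	            ... later, in keventd's process context ...
 *	            -> remove_user_sysfs_dir()
 *	                 -> drop the final reference again, then
 *	                    uid_hash_remove(), kobject_del(),
 *	                    sched_destroy_user(), kmem_cache_free()
 *
 * The detour through a workqueue exists because kobject teardown can
 * sleep, while the final free_uid() may run with interrupts off.
 */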
#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }
/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	put_user_ns(up->user_ns);
	kmem_cache_free(uid_cachep, up);
}

#endif
/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current_user_ns();

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
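/*
 * A minimal usage sketch, not part of this file: check_user_busy() is a
 * hypothetical caller showing the find_user()/free_uid() refcount
 * discipline the comment above requires.
 */
#if 0
static int check_user_busy(uid_t uid)
{
	struct user_struct *user;
	int busy = 0;

	user = find_user(uid);		/* takes a reference, or NULL */
	if (!user)
		return -ENOENT;

	if (atomic_read(&user->processes) > 0)
		busy = 1;

	free_uid(user);			/* undo find_user()'s reference */
	return busy;
}
#endif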
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		if (sched_create_user(new) < 0)
			goto out_free_user;

		new->user_ns = get_user_ns(ns);

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
	put_user_ns(new->user_ns);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}
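/*
 * A minimal usage sketch, not part of this file and only loosely
 * modelled on in-tree callers; example_switch_uid() and its error
 * handling are assumptions, not this file's API contract.
 */
#if 0
static int example_switch_uid(uid_t uid)
{
	struct user_struct *new_user;

	/* Returns an existing user_struct with a reference taken, or a
	 * freshly allocated and hashed one; NULL means allocation (or
	 * sched/sysfs setup) failed. */
	new_user = alloc_uid(current_user_ns(), uid);
	if (!new_user)
		return -EAGAIN;

	/* ... install new_user in the task's credentials, which would
	 * take its own reference ... */

	free_uid(new_user);	/* drop the reference alloc_uid() returned */
	return 0;
}
#endif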
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for(n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);