/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

struct user_namespace init_user_ns = {
        .kref = {
                .refcount       = ATOMIC_INIT(2),
        },
        .root_user = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_MASK            (UIDHASH_SZ - 1)
#define __uidhashfn(uid)        (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)   ((ns)->uidhash_table + __uidhashfn((uid)))
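
/*
 * __uidhashfn() folds the bits above UIDHASH_BITS back into the low
 * bits before masking, so UIDs that share their low UIDHASH_BITS bits
 * (e.g. 0 and UIDHASH_SZ) still land in different buckets.
 */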

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

struct user_struct root_user = {
        .__count        = ATOMIC_INIT(1),
        .processes      = ATOMIC_INIT(1),
        .files          = ATOMIC_INIT(0),
        .sigpending     = ATOMIC_INIT(0),
        .locked_shm     = 0,
#ifdef CONFIG_USER_SCHED
        .tg             = &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
        hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
        hlist_del_init(&up->uidhash_node);
}

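/*
 * Look up @uid on @hashent and take a reference on the user_struct
 * found; the caller must drop that reference with free_uid().
 */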
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
        struct user_struct *user;
        struct hlist_node *h;

        hlist_for_each_entry(user, h, hashent, uidhash_node) {
                if (user->uid == uid) {
                        atomic_inc(&user->__count);
                        return user;
                }
        }

        return NULL;
}

#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
        sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
        int rc = 0;

        up->tg = sched_create_group(&root_task_group);
        if (IS_ERR(up->tg))
                rc = -ENOMEM;

        set_tg_uid(up);

        return rc;
}

static void sched_switch_user(struct task_struct *p)
{
        sched_move_task(p);
}

#else   /* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif  /* CONFIG_USER_SCHED */

#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);
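
/*
 * Trivial wrappers around uids_mutex, so the !(CONFIG_USER_SCHED &&
 * CONFIG_SYSFS) build below can replace them with empty stubs.
 */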
static inline void uids_mutex_lock(void)
{
        mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
        mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
                               struct kobj_attribute *attr,
                               char *buf)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);

        return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
                                struct kobj_attribute *attr,
                                const char *buf, size_t size)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);
        unsigned long shares;
        int rc;

        if (sscanf(buf, "%lu", &shares) != 1)
                return -EINVAL;

        rc = sched_group_set_shares(up->tg, shares);

        return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
        __ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif

#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   char *buf)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);

        return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    const char *buf, size_t size)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);
        unsigned long rt_runtime;
        int rc;

        if (sscanf(buf, "%ld", &rt_runtime) != 1)
                return -EINVAL;

        rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

        return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
        __ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
                                  struct kobj_attribute *attr,
                                  char *buf)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);

        return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t size)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);
        unsigned long rt_period;
        int rc;

        if (sscanf(buf, "%lu", &rt_period) != 1)
                return -EINVAL;

        rc = sched_group_set_rt_period(up->tg, rt_period);

        return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
        __ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
        &cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
        &cpu_rt_runtime_attr.attr,
        &cpu_rt_period_attr.attr,
#endif
        NULL
};

/* the lifetime of user_struct is not managed by the kobject core (for
 * now), so the release method has nothing to do */
static void uids_release(struct kobject *kobj)
{
        return;
}

static struct kobj_type uids_ktype = {
        .sysfs_ops = &kobj_sysfs_ops,
        .default_attrs = uids_attributes,
        .release = uids_release,
};

/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
static int uids_user_create(struct user_struct *up)
{
        struct kobject *kobj = &up->kobj;
        int error;

        memset(kobj, 0, sizeof(struct kobject));
        kobj->kset = uids_kset;
        error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
        if (error) {
                kobject_put(kobj);
                goto done;
        }

        kobject_uevent(kobj, KOBJ_ADD);
done:
        return error;
}

/* create these entries in sysfs:
 *      "/sys/kernel/uids" directory
 *      "/sys/kernel/uids/0" directory (for root user)
 *      "/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
        uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
        if (!uids_kset)
                return -ENOMEM;

        return uids_user_create(&root_user);
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
        struct user_struct *up = container_of(w, struct user_struct, work);
        unsigned long flags;
        int remove_user = 0;

        /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
         * atomic.
         */
        uids_mutex_lock();

        local_irq_save(flags);

        if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
                uid_hash_remove(up);
                remove_user = 1;
                spin_unlock_irqrestore(&uidhash_lock, flags);
        } else {
                local_irq_restore(flags);
        }

        if (!remove_user)
                goto done;

        kobject_uevent(&up->kobj, KOBJ_REMOVE);
        kobject_del(&up->kobj);
        kobject_put(&up->kobj);

        sched_destroy_user(up);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);

done:
        uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
        /* restore the count; remove_user_sysfs_dir() drops it again */
        atomic_inc(&up->__count);
        spin_unlock_irqrestore(&uidhash_lock, flags);

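        /* defer the actual teardown to process context: it takes
         * uids_mutex and removes kobjects, both of which may sleep */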
        INIT_WORK(&up->work, remove_user_sysfs_dir);
        schedule_work(&up->work);
}

#else   /* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
        uid_hash_remove(up);
        spin_unlock_irqrestore(&uidhash_lock, flags);
        sched_destroy_user(up);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);
}

#endif

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
        struct user_struct *ret;
        unsigned long flags;
        struct user_namespace *ns = current->nsproxy->user_ns;

        spin_lock_irqsave(&uidhash_lock, flags);
        ret = uid_hash_find(uid, uidhashentry(ns, uid));
        spin_unlock_irqrestore(&uidhash_lock, flags);
        return ret;
}

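/*
 * A minimal usage sketch (illustrative only; real callers live
 * elsewhere in the kernel):
 *
 *      struct user_struct *u = find_user(uid);
 *      if (u) {
 *              ... read u->processes, u->files, etc. ...
 *              free_uid(u);    <- drops the ref find_user() took
 *      }
 */

/*
 * Drop a reference on a user_struct.  The last reference unhashes the
 * entry and frees it (via deferred work in the sysfs case above).
 */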
void free_uid(struct user_struct *up)
{
        unsigned long flags;

        if (!up)
                return;

        local_irq_save(flags);
        if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
                free_user(up, flags);
        else
                local_irq_restore(flags);
}

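/*
 * Return the user_struct for @uid in @ns, creating, hashing and
 * reference-counting a new one if none exists yet.  Returns NULL on
 * allocation failure.
 */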
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
        struct hlist_head *hashent = uidhashentry(ns, uid);
        struct user_struct *up, *new;

        /* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
         * atomic.
         */
        uids_mutex_lock();

        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock_irq(&uidhash_lock);

        if (!up) {
                new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
                if (!new)
                        goto out_unlock;

                new->uid = uid;
                atomic_set(&new->__count, 1);

                if (sched_create_user(new) < 0)
                        goto out_free_user;

                if (uids_user_create(new))
                        goto out_destroy_sched;

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
                spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        /* This case is not possible when CONFIG_USER_SCHED
                         * is defined, since we serialize alloc_uid() using
                         * uids_mutex. Hence no need to call
                         * sched_destroy_user() or remove_user_sysfs_dir().
                         */
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock_irq(&uidhash_lock);
        }

        uids_mutex_unlock();

        return up;

out_destroy_sched:
        sched_destroy_user(new);
out_free_user:
        kmem_cache_free(uid_cachep, new);
out_unlock:
        uids_mutex_unlock();
        return NULL;
}

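/*
 * Rewire the per-user accounting of 'current' from its old user_struct
 * to @new_user, e.g. when a task changes its UID via setuid() and
 * friends.
 */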
void switch_uid(struct user_struct *new_user)
{
        struct user_struct *old_user;

        /* What if a process setreuid()'s and this brings the
         * new uid over its NPROC rlimit?  We can check this now
         * cheaply with the new uid cache, so if it matters
         * we should be checking for it.  -DaveM
         */
        old_user = current->user;
        atomic_inc(&new_user->processes);
        atomic_dec(&old_user->processes);
        switch_uid_keyring(new_user);
        current->user = new_user;
        sched_switch_user(current);

        /*
         * We need to synchronize with __sigqueue_alloc()
         * doing a get_uid(p->user)..  If that saw the old
         * user value, we need to wait until it has exited
         * its critical region before we can free the old
         * structure.
         */
        smp_mb();
        spin_unlock_wait(&current->sighand->siglock);

        free_uid(old_user);
        suid_keys(current);
}

#ifdef CONFIG_USER_NS
void release_uids(struct user_namespace *ns)
{
        int i;
        unsigned long flags;
        struct hlist_head *head;
        struct hlist_node *nd;

        spin_lock_irqsave(&uidhash_lock, flags);
        /*
         * collapse the chains so that the user_struct-s will
         * still be alive, but not in the hashes.  Subsequent free_uid()
         * will free them.
         */
        for (i = 0; i < UIDHASH_SZ; i++) {
                head = ns->uidhash_table + i;
                while (!hlist_empty(head)) {
                        nd = head->first;
                        hlist_del_init(nd);
                }
        }
        spin_unlock_irqrestore(&uidhash_lock, flags);

        free_uid(ns->root_user);
}
#endif

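/*
 * Boot-time setup.  SLAB_PANIC makes kmem_cache_create() panic on
 * failure, so its return value needs no check here.
 */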
static int __init uid_cache_init(void)
{
        int n;

        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        for (n = 0; n < UIDHASH_SZ; ++n)
                INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

        /* Insert the root user immediately (init already runs as root) */
        spin_lock_irq(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
        spin_unlock_irq(&uidhash_lock);

        return 0;
}

module_init(uid_cache_init);