kernel/user.c
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(3),
	},
	.creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
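/*
 * __uidhashfn() folds the bits above UIDHASH_BITS back onto the low bits
 * before masking, so UIDs that differ only in their high bits are still
 * spread across buckets instead of all mapping to one slot.  UIDHASH_SZ
 * (1 << UIDHASH_BITS) and UIDHASH_BITS come from <linux/sched.h>.
 */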
static struct kmem_cache *uid_cachep;
/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
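/*
 * In practice that means process-context users take the lock with
 * spin_lock_irq()/spin_lock_irqsave() rather than spin_lock_bh(), e.g.:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 * which stays correct even when the caller already has IRQs disabled.
 */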
/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(2),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.user_ns	= &init_user_ns,
};
/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
	put_user_ns(up->user_ns);
}
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
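/*
 * Taking the reference inside uid_hash_find(), i.e. under uidhash_lock,
 * is what pairs with the atomic_dec_and_lock() in free_uid(): the final
 * reference can only be dropped with uidhash_lock held, so a concurrent
 * lookup can never hand out a user_struct that is already being freed.
 */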
/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}
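/*
 * The __releases() annotation tells sparse about the asymmetric locking:
 * free_user() is entered with uidhash_lock held and returns with it
 * released.  Note the unlock happens before the key_put() calls, so the
 * key teardown runs outside the softirq-safe critical section.
 */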
/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current_user_ns();

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
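/*
 * Typical caller pattern (sketch):
 *
 *	struct user_struct *user = find_user(uid);
 *	if (user) {
 *		... use it, e.g. atomic_read(&user->processes) ...
 *		free_uid(user);
 *	}
 */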
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
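/*
 * atomic_dec_and_lock() only returns true, with uidhash_lock held, when
 * the decrement takes __count to zero; every other put is a lock-free
 * atomic decrement.  Interrupts are disabled by hand around it because
 * the helper uses a plain spin_lock() internally, and uidhash_lock must
 * always be acquired with IRQs (and hence softirqs) disabled.
 */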
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		new->user_ns = get_user_ns(ns);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			put_user_ns(ns);
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:
	return NULL;
}
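/*
 * The allocation is done optimistically, outside uidhash_lock: GFP_KERNEL
 * may sleep, so kmem_cache_zalloc() cannot run under a spinlock.  The
 * second uid_hash_find() under the lock then resolves the race where two
 * tasks allocate for the same UID; the loser frees its copy and returns a
 * reference to the winner's entry instead.
 */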
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);