Import 2.4.0-test6pre2
[davej-history.git] / kernel / user.c
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

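/*
 * Illustrative sketch only (not part of this file): a setuid()-style
 * caller is expected to swap accounting structs roughly like this,
 * assuming the task keeps its user_struct in current->user as in
 * kernels of this era:
 *
 *	struct user_struct *new_user = alloc_uid(new_uid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	free_uid(current->user);
 *	current->user = new_user;
 */
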
#define UIDHASH_SZ	(256)

static struct user_struct *uidhash[UIDHASH_SZ];

spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;

struct user_struct root_user = {
	__count:	ATOMIC_INIT(1),
	processes:	ATOMIC_INIT(1),
	files:		ATOMIC_INIT(0)
};

static kmem_cache_t *uid_cachep;

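/*
 * Hash a uid into one of the UIDHASH_SZ buckets: fold the high byte
 * into the low byte before masking, so uids that differ only above
 * bit 7 do not all land in the same bucket.
 */
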
#define uidhashfn(uid)	(((uid >> 8) ^ uid) & (UIDHASH_SZ - 1))

/*
 * These routines must be called with the uidhash spinlock held!
 */

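/*
 * Link 'up' at the head of its hash chain.  The pprev back-pointer
 * (the address of whatever points at this entry) lets uid_hash_remove()
 * unlink it without rescanning the chain.
 */
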
static inline void uid_hash_insert(struct user_struct *up, unsigned int hashent)
{
	if((up->next = uidhash[hashent]) != NULL)
		uidhash[hashent]->pprev = &up->next;
	up->pprev = &uidhash[hashent];
	uidhash[hashent] = up;
}

static inline void uid_hash_remove(struct user_struct *up)
{
	if(up->next)
		up->next->pprev = up->pprev;
	*up->pprev = up->next;
}

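/*
 * Walk the chain for this bucket; on a match, take a reference with
 * atomic_inc(&up->__count) and return the entry, otherwise return NULL.
 */
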
static inline struct user_struct *uid_hash_find(unsigned short uid, unsigned int hashent)
{
	struct user_struct *up, *next;

	next = uidhash[hashent];
	for (;;) {
		up = next;
		if (next) {
			next = up->next;
			if (up->uid != uid)
				continue;
			atomic_inc(&up->__count);
		}
		break;
	}
	return up;
}

/*
 * For SMP, we need to re-test the user struct counter
 * after having acquired the spinlock. This allows us to do
 * the common case (not freeing anything) without having
 * any locking.
 */

#ifdef CONFIG_SMP
#define uid_hash_free(up)	(!atomic_read(&(up)->__count))
#else
#define uid_hash_free(up)	(1)
#endif

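/*
 * Drop one reference.  Only when the count reaches zero is the lock taken,
 * and on SMP the count is re-checked under the lock (uid_hash_free) before
 * the entry is unhashed and returned to the slab cache.
 */
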
void free_uid(struct user_struct *up)
{
	if (up) {
		if (atomic_dec_and_test(&up->__count)) {
			spin_lock(&uidhash_lock);
			if (uid_hash_free(up)) {
				uid_hash_remove(up);
				kmem_cache_free(uid_cachep, up);
			}
			spin_unlock(&uidhash_lock);
		}
	}
}

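/*
 * Return the user_struct for 'uid', creating one if necessary.  The slab
 * allocation happens with uidhash_lock dropped, so the lookup is repeated
 * under the lock before inserting, in case another task added the same
 * uid in the meantime (the losing copy is simply freed again).
 */
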
struct user_struct * alloc_uid(uid_t uid)
{
	unsigned int hashent = uidhashfn(uid);
	struct user_struct *up;

	spin_lock(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock(&uidhash_lock);
	}
	return up;
}

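/*
 * Boot-time setup: create the slab cache, clear the hash table and hash
 * the statically allocated root_user so uid 0 never needs allocating.
 */
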
static int __init uid_cache_init(void)
{
	int i;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
				       0,
				       SLAB_HWCACHE_ALIGN, NULL, NULL);
	if(!uid_cachep)
		panic("Cannot create uid taskcount SLAB cache\n");

	for(i = 0; i < UIDHASH_SZ; i++)
		uidhash[i] = 0;

	/* Insert the root user immediately - init already runs with this */
	uid_hash_insert(&root_user, uidhashfn(0));
	return 0;
}

module_init(uid_cache_init);