Ok. I didn't make 2.4.0 in 2000. Tough. I tried, but we had some
[davej-history.git] / kernel / user.c
blobbe99b110e745a39fc1c0d433cf37960ffc8ef058
1 /*
2 * The "user cache".
4 * (C) Copyright 1991-2000 Linus Torvalds
6 * We have a per-user structure to keep track of how many
7 * processes, files etc the user has claimed, in order to be
8 * able to have per-user limits for system resources.
9 */
11 #include <linux/init.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */
#define UIDHASH_BITS	8
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
/* Fold the high byte into the low byte; argument fully parenthesized so
 * that low-precedence expressions (e.g. a ternary) hash correctly. */
#define __uidhashfn(uid)	(((((uid) >> UIDHASH_BITS)) ^ (uid)) & UIDHASH_MASK)
/* Address of the hash-chain head for this uid. */
#define uidhashentry(uid)	(uidhash_table + __uidhashfn(uid))
25 static kmem_cache_t *uid_cachep;
26 static struct user_struct *uidhash_table[UIDHASH_SZ];
27 static spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
29 struct user_struct root_user = {
30 __count: ATOMIC_INIT(1),
31 processes: ATOMIC_INIT(1),
32 files: ATOMIC_INIT(0)
36 * These routines must be called with the uidhash spinlock held!
38 static inline void uid_hash_insert(struct user_struct *up, struct user_struct **hashent)
40 struct user_struct *next = *hashent;
42 up->next = next;
43 if (next)
44 next->pprev = &up->next;
45 up->pprev = hashent;
46 *hashent = up;
49 static inline void uid_hash_remove(struct user_struct *up)
51 struct user_struct *next = up->next;
52 struct user_struct **pprev = up->pprev;
54 if (next)
55 next->pprev = pprev;
56 *pprev = next;
59 static inline struct user_struct *uid_hash_find(uid_t uid, struct user_struct **hashent)
61 struct user_struct *next;
63 next = *hashent;
64 for (;;) {
65 struct user_struct *up = next;
66 if (next) {
67 next = up->next;
68 if (up->uid != uid)
69 continue;
70 atomic_inc(&up->__count);
72 return up;
76 void free_uid(struct user_struct *up)
78 if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
79 uid_hash_remove(up);
80 kmem_cache_free(uid_cachep, up);
81 spin_unlock(&uidhash_lock);
85 struct user_struct * alloc_uid(uid_t uid)
87 struct user_struct **hashent = uidhashentry(uid);
88 struct user_struct *up;
90 spin_lock(&uidhash_lock);
91 up = uid_hash_find(uid, hashent);
92 spin_unlock(&uidhash_lock);
94 if (!up) {
95 struct user_struct *new;
97 new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
98 if (!new)
99 return NULL;
100 new->uid = uid;
101 atomic_set(&new->__count, 1);
102 atomic_set(&new->processes, 0);
103 atomic_set(&new->files, 0);
106 * Before adding this, check whether we raced
107 * on adding the same user already..
109 spin_lock(&uidhash_lock);
110 up = uid_hash_find(uid, hashent);
111 if (up) {
112 kmem_cache_free(uid_cachep, new);
113 } else {
114 uid_hash_insert(new, hashent);
115 up = new;
117 spin_unlock(&uidhash_lock);
120 return up;
124 static int __init uid_cache_init(void)
126 uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
128 SLAB_HWCACHE_ALIGN, NULL, NULL);
129 if(!uid_cachep)
130 panic("Cannot create uid taskcount SLAB cache\n");
132 /* Insert the root user immediately - init already runs with this */
133 uid_hash_insert(&root_user, uidhashentry(0));
134 return 0;
137 module_init(uid_cache_init);