Kill st_fstype member.
[linux-2.6/linux-mips.git] / kernel / user.c
blob daae4dff490d57db0866365336ca0eb17fa3a972
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */
#define UIDHASH_SZ	(256)

static struct user_struct *uidhash[UIDHASH_SZ];

spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
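/*
 * Note: uidhash_lock protects the uidhash[] chains; free_uid() also
 * re-tests __count under it before freeing an entry (see below).
 */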
struct user_struct root_user = {
	__count:	ATOMIC_INIT(1),
	processes:	ATOMIC_INIT(1),
	files:		ATOMIC_INIT(0)
};

static kmem_cache_t *uid_cachep;

#define uidhashfn(uid)	(((uid >> 8) ^ uid) & (UIDHASH_SZ - 1))
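/*
 * Note: uidhashfn() xors the second byte of the uid into the low byte
 * before masking, so uids that differ only above the low 8 bits do not
 * all collide in the same bucket.
 */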
/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, unsigned int hashent)
{
	if((up->next = uidhash[hashent]) != NULL)
		uidhash[hashent]->pprev = &up->next;
	up->pprev = &uidhash[hashent];
	uidhash[hashent] = up;
}
static inline void uid_hash_remove(struct user_struct *up)
{
	if(up->next)
		up->next->pprev = up->pprev;
	*up->pprev = up->next;
}
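/*
 * Look up a uid in one hash chain.  Returns the matching entry with its
 * reference count bumped, or NULL if the uid is not hashed yet.
 */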
static inline struct user_struct *uid_hash_find(unsigned short uid, unsigned int hashent)
{
	struct user_struct *up, *next;

	next = uidhash[hashent];
	for (;;) {
		up = next;
		if (next) {
			next = up->next;
			if (up->uid != uid)
				continue;
			atomic_inc(&up->__count);
		}
		break;
	}
	return up;
}
/*
 * For SMP, we need to re-test the user struct counter
 * after having acquired the spinlock. This allows us to do
 * the common case (not freeing anything) without having
 * any locking.
 */
#ifdef CONFIG_SMP
#define uid_hash_free(up)	(!atomic_read(&(up)->__count))
#else
#define uid_hash_free(up)	(1)
#endif
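/*
 * The race being guarded against: free_uid() drops __count to zero
 * without holding uidhash_lock, so a concurrent alloc_uid() may find
 * the entry and re-increment __count before we get the lock.  On SMP,
 * uid_hash_free() therefore re-checks the count under the lock; on UP
 * nothing can sneak in between, so the macro is simply (1).
 */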
void free_uid(struct user_struct *up)
{
	if (up) {
		if (atomic_dec_and_test(&up->__count)) {
			spin_lock(&uidhash_lock);
			if (uid_hash_free(up)) {
				uid_hash_remove(up);
				kmem_cache_free(uid_cachep, up);
			}
			spin_unlock(&uidhash_lock);
		}
	}
}
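/*
 * alloc_uid - get the user_struct for @uid, creating and hashing a new
 * one if needed.  The reference returned here must eventually be
 * dropped with free_uid().
 */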
struct user_struct * alloc_uid(uid_t uid)
{
	unsigned int hashent = uidhashfn(uid);
	struct user_struct *up;

	spin_lock(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock(&uidhash_lock);
	}
	return up;
}
static int __init uid_cache_init(void)
{
	int i;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
				       0,
				       SLAB_HWCACHE_ALIGN, NULL, NULL);
	if(!uid_cachep)
		panic("Cannot create uid taskcount SLAB cache\n");

	for(i = 0; i < UIDHASH_SZ; i++)
		uidhash[i] = 0;

	/* Insert the root user immediately - init already runs with this */
	uid_hash_insert(&root_user, uidhashfn(0));
	return 0;
}

module_init(uid_cache_init);
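/*
 * Illustrative sketch (not part of this file): a typical caller, such
 * as the setuid() path, is expected to pair alloc_uid() with free_uid()
 * and to move the per-user process count by hand, roughly like this:
 *
 *	struct user_struct *new_user, *old_user;
 *
 *	new_user = alloc_uid(new_ruid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	old_user = current->user;
 *	atomic_dec(&old_user->processes);
 *	atomic_inc(&new_user->processes);
 *	current->user = new_user;
 *	free_uid(old_user);
 */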