/*
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */
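/*
 * Sketch of the fields this file relies on (explanatory, not from this
 * source file; the authoritative definition of struct user_struct lives
 * in <linux/sched.h>, and the layout below is only illustrative):
 *
 *	struct user_struct {
 *		atomic_t __count;	reference count
 *		atomic_t processes;	processes owned by this user
 *		atomic_t files;		open files owned by this user
 *		struct user_struct *next, **pprev;   hash chain links
 *		unsigned int uid;
 *	};
 */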
#include <linux/config.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */
#define UIDHASH_SZ	(256)
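/*
 * Note (not in the original source): UIDHASH_SZ must remain a power of
 * two, because uidhashfn() below reduces the hash with a mask of
 * UIDHASH_SZ - 1 rather than a modulo.
 */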
static struct user_struct *uidhash[UIDHASH_SZ];

spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;
struct user_struct root_user = {
	__count:	ATOMIC_INIT(1),
	processes:	ATOMIC_INIT(1),
	files:		ATOMIC_INIT(0)
};
static kmem_cache_t *uid_cachep;
#define uidhashfn(uid)	(((uid >> 8) ^ uid) & (UIDHASH_SZ - 1))
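/*
 * Worked example (explanatory, not from the original source):
 * uidhashfn(0) = ((0 >> 8) ^ 0) & 255 = 0, so root lands in bucket 0;
 * uidhashfn(1000) = (3 ^ 1000) & 255 = 1003 & 255 = 235. Folding the
 * high byte in with the XOR spreads clustered UIDs across the buckets.
 */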
/*
 * These routines must be called with the uidhash spinlock held!
 */
static inline void uid_hash_insert(struct user_struct *up, unsigned int hashent)
{
	if((up->next = uidhash[hashent]) != NULL)
		uidhash[hashent]->pprev = &up->next;
	up->pprev = &uidhash[hashent];
	uidhash[hashent] = up;
}
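/*
 * Note on the chain links (explanatory, not from the original source):
 * pprev always points at whatever pointer currently points at this
 * element, i.e. either a uidhash[] bucket slot or the previous
 * element's next field. That is what lets uid_hash_remove() unlink an
 * element without a special case for the head of the chain.
 */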
static inline void uid_hash_remove(struct user_struct *up)
{
	if(up->next)
		up->next->pprev = up->pprev;
	*up->pprev = up->next;
}
static inline struct user_struct *uid_hash_find(unsigned short uid, unsigned int hashent)
{
	struct user_struct *up, *next;

	next = uidhash[hashent];
	for (;;) {
		up = next;
		if (next) {
			next = up->next;
			if (up->uid != uid)
				continue;
			atomic_inc(&up->__count);
		}
		break;
	}
	return up;
}
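/*
 * Reminder for callers (not in the original source): a hit has already
 * had its reference count bumped by the atomic_inc() above, so every
 * successful lookup must eventually be paired with a free_uid().
 */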
/*
 * For SMP, we need to re-test the user struct counter
 * after having acquired the spinlock. This allows us to do
 * the common case (not freeing anything) without having
 * to lock anything.
 */
#ifdef CONFIG_SMP
  #define uid_hash_free(up)	(!atomic_read(&(up)->__count))
#else
  #define uid_hash_free(up)	(1)
#endif
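/*
 * Why the re-test works (explanatory, not from the original source):
 * another CPU may call alloc_uid() for the same uid between our
 * atomic_dec_and_test() reaching zero and our taking uidhash_lock;
 * uid_hash_find() would then bump __count back up under the lock.
 * Re-reading the counter under the lock catches that resurrection, so
 * the struct is only freed if it is still unreferenced.
 */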
void free_uid(struct user_struct *up)
{
	if (atomic_dec_and_test(&up->__count)) {
		spin_lock(&uidhash_lock);
		if (uid_hash_free(up)) {
			uid_hash_remove(up);
			kmem_cache_free(uid_cachep, up);
		}
		spin_unlock(&uidhash_lock);
	}
}
struct user_struct * alloc_uid(uid_t uid)
{
	unsigned int hashent = uidhashfn(uid);
	struct user_struct *up;

	spin_lock(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
		if (!new)
			return NULL;
		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock(&uidhash_lock);
	}
	return up;
}
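/*
 * Usage sketch (hedged; modeled on how the setuid() paths of this
 * kernel generation switch per-user accounting, not code from this
 * file):
 *
 *	struct user_struct *new_user = alloc_uid(new_ruid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	atomic_dec(&current->user->processes);
 *	atomic_inc(&new_user->processes);
 *	free_uid(current->user);
 *	current->user = new_user;
 *
 * alloc_uid() returns with a reference held, so the old struct is
 * released with free_uid() once the task points at the new one.
 */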
static int __init uid_cache_init(void)
{
	int i;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
				       0,
				       SLAB_HWCACHE_ALIGN, NULL, NULL);
	if(!uid_cachep)
		panic("Cannot create uid taskcount SLAB cache\n");

	for(i = 0; i < UIDHASH_SZ; i++)
		uidhash[i] = NULL;

	/* Insert the root user immediately - init already runs with this */
	uid_hash_insert(&root_user, uidhashfn(0));
	return 0;
}

module_init(uid_cache_init);
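/*
 * Note (not in the original source): for code built into the kernel,
 * module_init() expands to an initcall, so uid_cache_init() runs once
 * during boot, before userspace can reach the setuid() paths that need
 * this cache.
 */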