/*
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */
11 #include <linux/init.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */
19 #define UIDHASH_SZ (256)
21 static struct user_struct
*uidhash
[UIDHASH_SZ
];
23 spinlock_t uidhash_lock
= SPIN_LOCK_UNLOCKED
;
25 struct user_struct root_user
= {
26 __count
: ATOMIC_INIT(1),
27 processes
: ATOMIC_INIT(1),
31 static kmem_cache_t
*uid_cachep
;
33 #define uidhashfn(uid) (((uid >> 8) ^ uid) & (UIDHASH_SZ - 1))
36 * These routines must be called with the uidhash spinlock held!
38 static inline void uid_hash_insert(struct user_struct
*up
, unsigned int hashent
)
40 if((up
->next
= uidhash
[hashent
]) != NULL
)
41 uidhash
[hashent
]->pprev
= &up
->next
;
42 up
->pprev
= &uidhash
[hashent
];
43 uidhash
[hashent
] = up
;
46 static inline void uid_hash_remove(struct user_struct
*up
)
49 up
->next
->pprev
= up
->pprev
;
50 *up
->pprev
= up
->next
;
53 static inline struct user_struct
*uid_hash_find(unsigned short uid
, unsigned int hashent
)
55 struct user_struct
*up
, *next
;
57 next
= uidhash
[hashent
];
64 atomic_inc(&up
->__count
);
/*
 * For SMP, we need to re-test the user struct counter
 * after having acquired the spinlock. This allows us to do
 * the common case (not freeing anything) without having
 * to take the lock on every put.
 *
 * As written, the two #defines collided (macro redefinition); they are
 * the SMP and UP variants and must be selected by CONFIG_SMP.
 */
#ifdef CONFIG_SMP
  #define uid_hash_free(up)	(!atomic_read(&(up)->__count))
#else
  #define uid_hash_free(up)	(1)
#endif
83 void free_uid(struct user_struct
*up
)
86 if (atomic_dec_and_test(&up
->__count
)) {
87 spin_lock(&uidhash_lock
);
88 if (uid_hash_free(up
)) {
90 kmem_cache_free(uid_cachep
, up
);
92 spin_unlock(&uidhash_lock
);
97 struct user_struct
* alloc_uid(uid_t uid
)
99 unsigned int hashent
= uidhashfn(uid
);
100 struct user_struct
*up
;
102 spin_lock(&uidhash_lock
);
103 up
= uid_hash_find(uid
, hashent
);
104 spin_unlock(&uidhash_lock
);
107 struct user_struct
*new;
109 new = kmem_cache_alloc(uid_cachep
, SLAB_KERNEL
);
113 atomic_set(&new->__count
, 1);
114 atomic_set(&new->processes
, 0);
115 atomic_set(&new->files
, 0);
118 * Before adding this, check whether we raced
119 * on adding the same user already..
121 spin_lock(&uidhash_lock
);
122 up
= uid_hash_find(uid
, hashent
);
124 kmem_cache_free(uid_cachep
, new);
126 uid_hash_insert(new, hashent
);
129 spin_unlock(&uidhash_lock
);
136 static int __init
uid_cache_init(void)
140 uid_cachep
= kmem_cache_create("uid_cache", sizeof(struct user_struct
),
142 SLAB_HWCACHE_ALIGN
, NULL
, NULL
);
144 panic("Cannot create uid taskcount SLAB cache\n");
146 for(i
= 0; i
< UIDHASH_SZ
; i
++)
149 /* Insert the root user immediately - init already runs with this */
150 uid_hash_insert(&root_user
, uidhashfn(0));
154 module_init(uid_cache_init
);