/*
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
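/*
 * Worked example (a sketch; UIDHASH_BITS == 8, and hence UIDHASH_SZ == 256
 * and UIDHASH_MASK == 255, are assumptions - the real values are
 * config-dependent): uid 1000 hashes to
 *	((1000 >> 8) + 1000) & 255 = (3 + 1000) & 255 = 235.
 * Folding the high bits back into the sum spreads uids that differ only
 * above the hash width across different buckets.
 */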
static struct kmem_cache *uid_cachep;
/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
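/*
 * Illustration of the constraint above (a sketch, not a real call site):
 * if this file used spin_lock_bh(&uidhash_lock), the matching
 * spin_unlock_bh() would run local_bh_enable(). With a caller like
 *
 *	local_irq_save(flags);
 *	free_uid(up);
 *	local_irq_restore(flags);
 *
 * that could end up processing pending softirqs while interrupts are
 * still disabled. Hence the irqsave/irqrestore lock variants are used
 * throughout instead.
 */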
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
#ifdef CONFIG_KEYS
	.uid_keyring	= &root_user_keyring,
	.session_keyring = &root_session_keyring,
#endif
#ifdef CONFIG_FAIR_USER_SCHED
	.tg		= &init_task_group,
#endif
};
/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
#ifdef CONFIG_FAIR_USER_SCHED

static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
static DEFINE_MUTEX(uids_mutex);
static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group();
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

static void sched_switch_user(struct task_struct *p)
{
	sched_move_task(p);
}
static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}
/* return cpu shares held by the user */
ssize_t cpu_shares_show(struct kset *kset, char *buffer)
{
	struct user_struct *up = container_of(kset, struct user_struct, kset);

	return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
}
/* modify cpu shares held by the user */
ssize_t cpu_shares_store(struct kset *kset, const char *buffer, size_t size)
{
	struct user_struct *up = container_of(kset, struct user_struct, kset);
	unsigned long shares;
	int rc;

	sscanf(buffer, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}
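/*
 * Example of tuning a user's shares from userspace (a sketch; uid 1000 and
 * the default share value of 1024 are assumptions, not guarantees):
 *
 *	# cat /sys/kernel/uids/1000/cpu_share
 *	1024
 *	# echo 2048 > /sys/kernel/uids/1000/cpu_share
 *
 * After the write, uid 1000's task group is entitled to twice the CPU
 * bandwidth of a user still at the default share value.
 */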
static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
{
	sa->attr.name = name;
	sa->attr.mode = mode;
	sa->show = cpu_shares_show;
	sa->store = cpu_shares_store;
}
/* Create "/sys/kernel/uids/<uid>" directory and
 * "/sys/kernel/uids/<uid>/cpu_share" file for this user.
 */
static int user_kobject_create(struct user_struct *up)
{
	struct kset *kset = &up->kset;
	struct kobject *kobj = &kset->kobj;
	int error;

	memset(kset, 0, sizeof(struct kset));
	kobj->parent = &uids_kobject;	/* create under /sys/kernel/uids dir */
	kobject_set_name(kobj, "%d", up->uid);
	kset_init(kset);
	user_attr_init(&up->user_attr, "cpu_share", 0644);

	error = kobject_add(kobj);
	if (error)
		goto done;

	error = sysfs_create_file(kobj, &up->user_attr.attr);
	if (error)
		kobject_del(kobj);

	kobject_uevent(kobj, KOBJ_ADD);

done:
	return error;
}
/* create these in sysfs filesystem:
 * 	"/sys/kernel/uids" directory
 * 	"/sys/kernel/uids/0" directory (for root user)
 * 	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_kobject_init(void)
{
	int error;

	/* create under /sys/kernel dir */
	uids_kobject.parent = &kernel_subsys.kobj;
	uids_kobject.kset = &kernel_subsys;
	kobject_set_name(&uids_kobject, "uids");
	kobject_init(&uids_kobject);

	error = kobject_add(&uids_kobject);
	if (!error)
		error = user_kobject_create(&root_user);

	return error;
}
/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	struct kobject *kobj = &up->kset.kobj;
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	sysfs_remove_file(kobj, &up->user_attr.attr);
	kobject_uevent(kobj, KOBJ_REMOVE);
	kobject_del(kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}
/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore back the count */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}
#else	/* CONFIG_FAIR_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }
static inline int user_kobject_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }
/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif	/* CONFIG_FAIR_USER_SCHED */
/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
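/*
 * Typical find_user() usage (a sketch; the body is illustrative only).
 * free_uid() drops the reference that find_user() took:
 *
 *	struct user_struct *user = find_user(uid);
 *	if (user) {
 *		... use the counters, e.g. atomic_read(&user->processes) ...
 *		free_uid(user);
 *	}
 */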
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up;

	/* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		struct user_struct *new;

		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
		if (!new) {
			uids_mutex_unlock();
			return NULL;
		}

		new->uid = uid;
		atomic_set(&new->__count, 1);
		atomic_set(&new->processes, 0);
		atomic_set(&new->files, 0);
		atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
		atomic_set(&new->inotify_watches, 0);
		atomic_set(&new->inotify_devs, 0);
#endif
#ifdef CONFIG_POSIX_MQUEUE
		new->mq_bytes = 0;
#endif
		new->locked_shm = 0;

		if (alloc_uid_keyring(new, current) < 0) {
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		if (sched_create_user(new) < 0) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		if (user_kobject_create(new)) {
			sched_destroy_user(new);
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
			uids_mutex_unlock();
			return NULL;
		}

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_FAIR_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;
}
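/*
 * Sketch of a typical caller, modelled loosely on set_user() in
 * kernel/sys.c (details there may differ; this is illustrative only):
 *
 *	struct user_struct *new_user;
 *
 *	new_user = alloc_uid(current->nsproxy->user_ns, new_ruid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	switch_uid(new_user);
 *
 * alloc_uid() hands back a referenced user_struct; switch_uid() (below)
 * moves the task onto it and drops the old user's reference.
 */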
void switch_uid(struct user_struct *new_user)
{
	struct user_struct *old_user;

	/* What if a process setreuid()'s and this brings the
	 * new uid over his NPROC rlimit? We can check this now
	 * cheaply with the new uid cache, so if it matters
	 * we should be checking for it. -DaveM
	 */
	old_user = current->user;
	atomic_inc(&new_user->processes);
	atomic_dec(&old_user->processes);
	switch_uid_keyring(new_user);
	current->user = new_user;
	sched_switch_user(current);

	/*
	 * We need to synchronize with __sigqueue_alloc()
	 * doing a get_uid(p->user).. If that saw the old
	 * user value, we need to wait until it has exited
	 * its critical region before we can free the old
	 * structure.
	 */
	smp_mb();
	spin_unlock_wait(&current->sighand->siglock);

	free_uid(old_user);
	suid_keys(current);
}
void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * collapse the chains so that the user_struct-s will
	 * be still alive, but not in hashes. subsequent free_uid()
	 * will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);