/*
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK (UIDHASH_SZ - 1)
#define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid) ((ns)->uidhash_table + __uidhashfn((uid)))
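
/*
 * Illustration (assuming UIDHASH_BITS == 8, so UIDHASH_SZ == 256):
 * uid 1000 hashes to ((1000 >> 8) + 1000) & 255 = (3 + 1000) & 255 = 235.
 * Folding the upper bits back in keeps uids that are equal modulo
 * UIDHASH_SZ from always landing in the same bucket.
 */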

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
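
/*
 * Concretely, free_uid() below therefore disables interrupts itself and uses
 * atomic_dec_and_lock() on uidhash_lock rather than spin_lock_bh(), so no
 * softirq processing is triggered when the lock is released.
 */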

struct user_struct root_user = {
        .__count        = ATOMIC_INIT(1),
        .processes      = ATOMIC_INIT(1),
        .files          = ATOMIC_INIT(0),
        .sigpending     = ATOMIC_INIT(0),
#ifdef CONFIG_KEYS
        .uid_keyring    = &root_user_keyring,
        .session_keyring = &root_session_keyring,
#endif
#ifdef CONFIG_FAIR_USER_SCHED
        .tg             = &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
        hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
        hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
        struct user_struct *user;
        struct hlist_node *h;

        hlist_for_each_entry(user, h, hashent, uidhash_node) {
                if (user->uid == uid) {
                        atomic_inc(&user->__count);
                        return user;
                }
        }

        return NULL;
}

#ifdef CONFIG_FAIR_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
        sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
        int rc = 0;

        up->tg = sched_create_group();
        if (IS_ERR(up->tg))
                rc = -ENOMEM;

        return rc;
}

static void sched_switch_user(struct task_struct *p)
{
        sched_move_task(p);
}

#else /* CONFIG_FAIR_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }
static void sched_switch_user(struct task_struct *p) { }

#endif /* CONFIG_FAIR_USER_SCHED */

#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
        mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
        mutex_unlock(&uids_mutex);
}

/* return cpu shares held by the user */
static ssize_t cpu_shares_show(struct kset *kset, char *buffer)
{
        struct user_struct *up = container_of(kset, struct user_struct, kset);

        return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
}
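
/* Reads back as a decimal string, e.g. "cat /sys/kernel/uids/<uid>/cpu_share". */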

/* modify cpu shares held by the user */
static ssize_t cpu_shares_store(struct kset *kset, const char *buffer,
                                size_t size)
{
        struct user_struct *up = container_of(kset, struct user_struct, kset);
        unsigned long shares;
        int rc;

        sscanf(buffer, "%lu", &shares);

        rc = sched_group_set_shares(up->tg, shares);

        return (rc ? rc : size);
}
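
/*
 * Root can then adjust a user's cpu share with, for example,
 * "echo 2048 > /sys/kernel/uids/1000/cpu_share" (uid and value chosen purely
 * for illustration); a non-zero return from sched_group_set_shares() is
 * propagated back to the writer as an error.
 */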

static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
{
        sa->attr.name = name;
        sa->attr.mode = mode;
        sa->show = cpu_shares_show;
        sa->store = cpu_shares_store;
}

/* Create "/sys/kernel/uids/<uid>" directory and
 * "/sys/kernel/uids/<uid>/cpu_share" file for this user.
 */
static int user_kobject_create(struct user_struct *up)
{
        struct kset *kset = &up->kset;
        struct kobject *kobj = &kset->kobj;
        int error;

        memset(kset, 0, sizeof(struct kset));
        kobj->parent = &uids_kobject; /* create under /sys/kernel/uids dir */
        kobject_set_name(kobj, "%d", up->uid);
        kset_init(kset);
        user_attr_init(&up->user_attr, "cpu_share", 0644);

        error = kobject_add(kobj);
        if (error)
                goto done;

        error = sysfs_create_file(kobj, &up->user_attr.attr);
        if (error)
                kobject_del(kobj);

        kobject_uevent(kobj, KOBJ_ADD);

done:
        return error;
}

/* create these in sysfs filesystem:
 * "/sys/kernel/uids" directory
 * "/sys/kernel/uids/0" directory (for root user)
 * "/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_kobject_init(void)
{
        int error;

        /* create under /sys/kernel dir */
        uids_kobject.parent = &kernel_subsys.kobj;
        uids_kobject.kset = &kernel_subsys;
        kobject_set_name(&uids_kobject, "uids");
        kobject_init(&uids_kobject);

        error = kobject_add(&uids_kobject);
        if (!error)
                error = user_kobject_create(&root_user);

        return error;
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
        struct user_struct *up = container_of(w, struct user_struct, work);
        struct kobject *kobj = &up->kset.kobj;
        unsigned long flags;
        int remove_user = 0;

        /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
         * atomic.
         */
        uids_mutex_lock();

        local_irq_save(flags);

        if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
                uid_hash_remove(up);
                remove_user = 1;
                spin_unlock_irqrestore(&uidhash_lock, flags);
        } else {
                local_irq_restore(flags);
        }

        if (!remove_user)
                goto done;

        sysfs_remove_file(kobj, &up->user_attr.attr);
        kobject_uevent(kobj, KOBJ_REMOVE);
        kobject_del(kobj);

        sched_destroy_user(up);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);

done:
        uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
        /* restore back the count */
        atomic_inc(&up->__count);
        spin_unlock_irqrestore(&uidhash_lock, flags);

        INIT_WORK(&up->work, remove_user_sysfs_dir);
        schedule_work(&up->work);
}

#else /* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */

static inline int user_kobject_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
        uid_hash_remove(up);
        spin_unlock_irqrestore(&uidhash_lock, flags);
        sched_destroy_user(up);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);
}

#endif /* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
        struct user_struct *ret;
        unsigned long flags;
        struct user_namespace *ns = current->nsproxy->user_ns;

        spin_lock_irqsave(&uidhash_lock, flags);
        ret = uid_hash_find(uid, uidhashentry(ns, uid));
        spin_unlock_irqrestore(&uidhash_lock, flags);
        return ret;
}

void free_uid(struct user_struct *up)
{
        unsigned long flags;

        if (!up)
                return;

        local_irq_save(flags);
        if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
                free_user(up, flags);
        else
                local_irq_restore(flags);
}

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
        struct hlist_head *hashent = uidhashentry(ns, uid);
        struct user_struct *up;

        /* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
         * atomic.
         */
        uids_mutex_lock();

        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock_irq(&uidhash_lock);
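
        /*
         * uidhash_lock is dropped before the allocation below because
         * kmem_cache_alloc(..., GFP_KERNEL) may sleep, which is not allowed
         * under a spinlock. A race with a concurrent alloc_uid() for the
         * same uid is handled by re-checking the hash under the lock before
         * inserting (see below).
         */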

        if (!up) {
                struct user_struct *new;

                new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
                if (!new) {
                        uids_mutex_unlock();
                        return NULL;
                }

                new->uid = uid;
                atomic_set(&new->__count, 1);
                atomic_set(&new->processes, 0);
                atomic_set(&new->files, 0);
                atomic_set(&new->sigpending, 0);
#ifdef CONFIG_INOTIFY_USER
                atomic_set(&new->inotify_watches, 0);
                atomic_set(&new->inotify_devs, 0);
#endif
#ifdef CONFIG_POSIX_MQUEUE
                new->mq_bytes = 0;
#endif

                if (alloc_uid_keyring(new, current) < 0) {
                        kmem_cache_free(uid_cachep, new);
                        uids_mutex_unlock();
                        return NULL;
                }

                if (sched_create_user(new) < 0) {
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                        uids_mutex_unlock();
                        return NULL;
                }

                if (user_kobject_create(new)) {
                        sched_destroy_user(new);
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                        uids_mutex_unlock();
                        return NULL;
                }

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
                spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        /* This case is not possible when CONFIG_FAIR_USER_SCHED
                         * is defined, since we serialize alloc_uid() using
                         * uids_mutex. Hence no need to call
                         * sched_destroy_user() or remove_user_sysfs_dir().
                         */
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock_irq(&uidhash_lock);
        }

        uids_mutex_unlock();

        return up;
}

void switch_uid(struct user_struct *new_user)
{
        struct user_struct *old_user;

        /* What if a process setreuid()'s and this brings the
         * new uid over his NPROC rlimit? We can check this now
         * cheaply with the new uid cache, so if it matters
         * we should be checking for it. -DaveM
         */
        old_user = current->user;
        atomic_inc(&new_user->processes);
        atomic_dec(&old_user->processes);
        switch_uid_keyring(new_user);
        current->user = new_user;
        sched_switch_user(current);

        /*
         * We need to synchronize with __sigqueue_alloc()
         * doing a get_uid(p->user).. If that saw the old
         * user value, we need to wait until it has exited
         * its critical region before we can free the old
         * structure.
         */
        smp_mb();
        spin_unlock_wait(&current->sighand->siglock);

        free_uid(old_user);
        suid_keys(current);
}

void release_uids(struct user_namespace *ns)
{
        int i;
        unsigned long flags;
        struct hlist_head *head;
        struct hlist_node *nd;

        spin_lock_irqsave(&uidhash_lock, flags);
        /*
         * collapse the chains so that the user_struct-s will
         * be still alive, but not in hashes. subsequent free_uid()
         * will free them.
         */
        for (i = 0; i < UIDHASH_SZ; i++) {
                head = ns->uidhash_table + i;
                while (!hlist_empty(head)) {
                        nd = head->first;
                        hlist_del_init(nd);
                }
        }
        spin_unlock_irqrestore(&uidhash_lock, flags);

        free_uid(ns->root_user);
}

static int __init uid_cache_init(void)
{
        int n;

        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        for (n = 0; n < UIDHASH_SZ; ++n)
                INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

        /* Insert the root user immediately (init already runs as root) */
        spin_lock_irq(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
        spin_unlock_irq(&uidhash_lock);

        return 0;
}

module_init(uid_cache_init);