/*
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
#include "cred-internals.h"
struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(1),
	},
	.creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))

static struct kmem_cache *uid_cachep;
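
/*
 * Example (illustrative, assuming the default UIDHASH_BITS of 7, i.e.
 * UIDHASH_SZ == 128): uid 1000 hashes to ((1000 >> 7) + 1000) & 127
 * == (7 + 1000) & 127 == 111. Folding the high bits into the low bits
 * before masking spreads clustered uids across the table.
 */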
/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
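
/*
 * Consequently, all takers of uidhash_lock below use the _irqsave/_irq
 * spinlock variants rather than the _bh ones.
 */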
/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(2),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.user_ns	= &init_user_ns,
#ifdef CONFIG_USER_SCHED
	.tg		= &init_task_group,
#endif
};
/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
	put_user_ns(up->user_ns);
}
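
/*
 * A successful lookup returns the user_struct with its reference count
 * already raised; every caller must drop that reference via free_uid().
 */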
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

#else	/* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }

#endif	/* CONFIG_USER_SCHED */
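
/*
 * With CONFIG_USER_SCHED, every user_struct owns a scheduler task group,
 * so CPU time is divided fairly between users rather than between
 * individual tasks; see Documentation/scheduler/sched-design-CFS.txt.
 */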
#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}
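
/*
 * uids_mutex serializes the find/create/insert sequence in alloc_uid()
 * against the sysfs directory creation and removal done below, since
 * those steps cannot all be performed under the uidhash spinlock.
 */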
/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}
static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	sscanf(buf, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif
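
/*
 * Usage sketch (from userspace, illustrative): writing
 *	echo 2048 > /sys/kernel/uids/1000/cpu_share
 * would give uid 1000 twice the default share (1024) of CPU time
 * relative to other users.
 */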
#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_runtime;
	int rc;

	sscanf(buf, "%ld", &rt_runtime);

	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);
static ssize_t cpu_rt_period_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	sscanf(buf, "%lu", &rt_period);

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif
/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	&cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};
/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
}

static struct kobj_type uids_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_attrs	= uids_attributes,
	.release	= uids_release,
};
/*
 * Create /sys/kernel/uids/<uid>/cpu_share file for this user
 * We do not create this file for users in a user namespace (until
 * sysfs tagging is implemented).
 *
 * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
 */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	if (up->user_ns != &init_user_ns)
		return 0;
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}
/* create these entries in sysfs:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}
/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void cleanup_user_struct(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	if (up->user_ns == &init_user_ns) {
		kobject_uevent(&up->kobj, KOBJ_REMOVE);
		kobject_del(&up->kobj);
		kobject_put(&up->kobj);
	}

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}
/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore back the count */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	INIT_WORK(&up->work, cleanup_user_struct);
	schedule_work(&up->work);
}
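
/*
 * The teardown is punted to a workqueue because it takes uids_mutex and
 * calls kobject/sysfs routines that may sleep, while free_uid() can be
 * reached with interrupts disabled. The atomic_inc() above keeps the
 * count valid until cleanup_user_struct() redoes the final decrement
 * under uidhash_lock.
 */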
#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }
/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif
#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
/*
 * We need to check if a setuid can take place. This function should be called
 * before successfully completing the setuid.
 */
int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
{
	return sched_rt_can_attach(up->tg, tsk);
}
#else
int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
{
	return 1;
}
#endif
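
/*
 * sched_rt_can_attach() refuses the move when tsk is a realtime task but
 * the target user's task group has no realtime runtime allocated, so a
 * setuid cannot strand an RT task in a group that may never run it.
 */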
/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current_user_ns();

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
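
/*
 * atomic_dec_and_lock() takes uidhash_lock only when the count actually
 * drops to zero, so the common case is a lock-free decrement; the zero
 * case enters free_user() with the lock held, preventing a concurrent
 * alloc_uid() from resurrecting the entry while it is unhashed.
 */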
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		if (sched_create_user(new) < 0)
			goto out_free_user;

		new->user_ns = get_user_ns(ns);

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
	put_user_ns(new->user_ns);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}
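
/*
 * Note the optimistic pattern above: the new user_struct is allocated
 * and set up without uidhash_lock held (those steps can sleep), then the
 * hash is re-checked under the lock, and the fresh copy is discarded if
 * another task inserted the same uid in the meantime.
 */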
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for(n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
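
/*
 * Note: SLAB_PANIC makes kmem_cache_create() panic rather than return
 * NULL on failure, which is why uid_cache_init() does not check the
 * result; the system cannot run without the uid cache.
 */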
module_init(uid_cache_init);