/*
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
#include "cred-internals.h"
struct user_namespace init_user_ns = {
        .kref = {
                .refcount       = ATOMIC_INIT(2),
        },
        .creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK            (UIDHASH_SZ - 1)
#define __uidhashfn(uid)        (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)   ((ns)->uidhash_table + __uidhashfn((uid)))
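/*
 * Illustrative example (the exact values depend on the configured hash size):
 * assuming UIDHASH_BITS is 7, UIDHASH_SZ is 128 and UIDHASH_MASK is 127, so
 * uid 1000 hashes to ((1000 >> 7) + 1000) & 127 = 1007 & 127 = bucket 111 of
 * ns->uidhash_table.
 */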
static struct kmem_cache *uid_cachep;
/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
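/*
 * Consequence of the above (sketch): takers of uidhash_lock use the
 * irqsave/irqrestore or _irq variants rather than the _bh ones, e.g.
 *
 *      spin_lock_irqsave(&uidhash_lock, flags);
 *      ...
 *      spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 * and free_uid() below pairs local_irq_save() with atomic_dec_and_lock()
 * for the same reason.
 */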
/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
        .__count        = ATOMIC_INIT(2),
        .processes      = ATOMIC_INIT(1),
        .files          = ATOMIC_INIT(0),
        .sigpending     = ATOMIC_INIT(0),
        .user_ns        = &init_user_ns,
#ifdef CONFIG_USER_SCHED
        .tg             = &init_task_group,
#endif
};
/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
        hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
        hlist_del_init(&up->uidhash_node);
        put_user_ns(up->user_ns);
}
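/*
 * Typical usage pattern (see uid_cache_init() and alloc_uid() below):
 *
 *      spin_lock_irq(&uidhash_lock);
 *      uid_hash_insert(up, uidhashentry(ns, uid));
 *      spin_unlock_irq(&uidhash_lock);
 */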
#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
        sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
        int rc = 0;

        up->tg = sched_create_group(&root_task_group);
        if (IS_ERR(up->tg))
                rc = -ENOMEM;

        return rc;
}

#else   /* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }

#endif  /* CONFIG_USER_SCHED */
#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
        struct user_struct *user;
        struct hlist_node *h;

        hlist_for_each_entry(user, h, hashent, uidhash_node) {
                if (user->uid == uid) {
                        /* possibly resurrect an "almost deleted" object */
                        if (atomic_inc_return(&user->__count) == 1)
                                cancel_delayed_work(&user->work);
                        return user;
                }
        }

        return NULL;
}
static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
        mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
        mutex_unlock(&uids_mutex);
}
/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
                               struct kobj_attribute *attr,
                               char *buf)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);

        return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
                                struct kobj_attribute *attr,
                                const char *buf, size_t size)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);
        unsigned long shares;
        int rc;

        sscanf(buf, "%lu", &shares);

        rc = sched_group_set_shares(up->tg, shares);

        return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
        __ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif
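/*
 * From userspace, this attribute shows up as /sys/kernel/uids/<uid>/cpu_share
 * (created by uids_user_create() below) and is driven with plain file I/O,
 * e.g. (illustrative shell session):
 *
 *      cat /sys/kernel/uids/1000/cpu_share
 *      echo 512 > /sys/kernel/uids/1000/cpu_share
 */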
#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   char *buf)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);

        return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    const char *buf, size_t size)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);
        unsigned long rt_runtime;
        int rc;

        sscanf(buf, "%ld", &rt_runtime);

        rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

        return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
        __ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
                                  struct kobj_attribute *attr,
                                  char *buf)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);

        return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t size)
{
        struct user_struct *up = container_of(kobj, struct user_struct, kobj);
        unsigned long rt_period;
        int rc;

        sscanf(buf, "%lu", &rt_period);

        rc = sched_group_set_rt_period(up->tg, rt_period);

        return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
        __ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif
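/*
 * When CONFIG_RT_GROUP_SCHED is enabled, each uid directory also carries
 * cpu_rt_runtime and cpu_rt_period files following the same pattern as
 * cpu_share above: the written value is parsed with sscanf() and handed to
 * the group scheduler setters, whose return code is propagated back to the
 * writer.
 */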
/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
        &cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
        &cpu_rt_runtime_attr.attr,
        &cpu_rt_period_attr.attr,
#endif
        NULL
};

/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
}

static struct kobj_type uids_ktype = {
        .sysfs_ops      = &kobj_sysfs_ops,
        .default_attrs  = uids_attributes,
        .release        = uids_release,
};
/*
 * Create /sys/kernel/uids/<uid>/cpu_share file for this user
 * We do not create this file for users in a user namespace (until
 * sysfs tagging is implemented).
 *
 * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
 */
static int uids_user_create(struct user_struct *up)
{
        struct kobject *kobj = &up->kobj;
        int error;

        memset(kobj, 0, sizeof(struct kobject));
        if (up->user_ns != &init_user_ns)
                return 0;
        kobj->kset = uids_kset;
        error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
        if (error)
                goto done;

        kobject_uevent(kobj, KOBJ_ADD);

done:
        return error;
}
/* create these entries in sysfs:
 *      "/sys/kernel/uids" directory
 *      "/sys/kernel/uids/0" directory (for root user)
 *      "/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
        uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
        if (!uids_kset)
                return -ENOMEM;

        return uids_user_create(&root_user);
}
/* delayed work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void cleanup_user_struct(struct work_struct *w)
{
        struct user_struct *up = container_of(w, struct user_struct, work.work);
        unsigned long flags;
        int remove_user = 0;

        /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
         * atomic.
         */
        uids_mutex_lock();

        spin_lock_irqsave(&uidhash_lock, flags);
        if (atomic_read(&up->__count) == 0) {
                uid_hash_remove(up);
                remove_user = 1;
        }
        spin_unlock_irqrestore(&uidhash_lock, flags);

        if (!remove_user)
                goto done;

        if (up->user_ns == &init_user_ns) {
                kobject_uevent(&up->kobj, KOBJ_REMOVE);
                kobject_del(&up->kobj);
                kobject_put(&up->kobj);
        }

        sched_destroy_user(up);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);

done:
        uids_mutex_unlock();
}
/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
        spin_unlock_irqrestore(&uidhash_lock, flags);
        INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
        schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
}
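/*
 * Rather than freeing the user_struct right away, the final free_uid() only
 * queues cleanup_user_struct() with a one second delay.  If the same uid is
 * looked up again within that window, uid_hash_find() above bumps __count
 * back from zero and cancels the pending work, so a user that logs out and
 * straight back in does not churn through a destroy/recreate cycle of its
 * sysfs directory.
 */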
#else   /* CONFIG_USER_SCHED && CONFIG_SYSFS */

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
        struct user_struct *user;
        struct hlist_node *h;

        hlist_for_each_entry(user, h, hashent, uidhash_node) {
                if (user->uid == uid) {
                        atomic_inc(&user->__count);
                        return user;
                }
        }

        return NULL;
}
int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }
/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
        uid_hash_remove(up);
        spin_unlock_irqrestore(&uidhash_lock, flags);
        sched_destroy_user(up);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);
}

#endif  /* CONFIG_USER_SCHED && CONFIG_SYSFS */
#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
/*
 * We need to check if a setuid can take place. This function should be called
 * before successfully completing the setuid.
 */
int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
{
        return sched_rt_can_attach(up->tg, tsk);
}
#else
int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
{
        return 1;
}
#endif
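/*
 * Illustrative caller-side use (the setuid paths live outside this file and
 * are assumed here, not quoted): before switching a task's user, a caller
 * would do something like
 *
 *      if (!task_can_switch_user(new_user, current))
 *              return -EINVAL;
 *
 * so that a realtime task cannot be moved under a user whose task group has
 * no RT runtime allocated.
 */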
/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
        struct user_struct *ret;
        unsigned long flags;
        struct user_namespace *ns = current_user_ns();

        spin_lock_irqsave(&uidhash_lock, flags);
        ret = uid_hash_find(uid, uidhashentry(ns, uid));
        spin_unlock_irqrestore(&uidhash_lock, flags);
        return ret;
}
void free_uid(struct user_struct *up)
{
        unsigned long flags;

        if (!up)
                return;

        local_irq_save(flags);
        if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
                free_user(up, flags);
        else
                local_irq_restore(flags);
}
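/*
 * Reference counting sketch: find_user() takes a reference that the caller
 * must later drop with free_uid(), e.g.
 *
 *      struct user_struct *user = find_user(uid);
 *      if (user) {
 *              ... use user ...
 *              free_uid(user);
 *      }
 */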
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
        struct hlist_head *hashent = uidhashentry(ns, uid);
        struct user_struct *up, *new;

        /* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
         * atomic.
         */
        uids_mutex_lock();

        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock_irq(&uidhash_lock);

        if (!up) {
                new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
                if (!new)
                        goto out_unlock;

                new->uid = uid;
                atomic_set(&new->__count, 1);

                if (sched_create_user(new) < 0)
                        goto out_free_user;

                new->user_ns = get_user_ns(ns);

                if (uids_user_create(new))
                        goto out_destoy_sched;

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
                spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        /* This case is not possible when CONFIG_USER_SCHED
                         * is defined, since we serialize alloc_uid() using
                         * uids_mutex. Hence no need to call
                         * sched_destroy_user() or remove_user_sysfs_dir().
                         */
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock_irq(&uidhash_lock);
        }

        uids_mutex_unlock();

        return up;

out_destoy_sched:
        sched_destroy_user(new);
        put_user_ns(new->user_ns);
out_free_user:
        kmem_cache_free(uid_cachep, new);
out_unlock:
        uids_mutex_unlock();
        return NULL;
}
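/*
 * Note on the double lookup in alloc_uid(): the hash is probed once before
 * the (possibly sleeping) allocation and again after the new user_struct has
 * been fully set up, so two tasks racing on the same uid still converge on a
 * single user_struct; the loser simply frees its freshly built copy.
 */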
static int __init uid_cache_init(void)
{
        int n;

        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        for(n = 0; n < UIDHASH_SZ; ++n)
                INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

        /* Insert the root user immediately (init already runs as root) */
        spin_lock_irq(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
        spin_unlock_irq(&uidhash_lock);

        return 0;
}

module_init(uid_cache_init);