/*
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.refcount	= ATOMIC_INIT(3),
	.owner		= GLOBAL_ROOT_UID,
	.group		= GLOBAL_ROOT_GID,
};
EXPORT_SYMBOL_GPL(init_user_ns);
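
/*
 * Editorial note (assumption, not in the original file): references on
 * init_user_ns beyond this initial count are taken and dropped with
 * get_user_ns()/put_user_ns() from <linux/user_namespace.h>.
 */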

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
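
/*
 * Worked example (editorial, not in the original file): with the
 * non-small config, UIDHASH_BITS is 7, so UIDHASH_SZ is 128 and
 * UIDHASH_MASK is 127.  A uid of 1000 then hashes to bucket
 * ((1000 >> 7) + 1000) & 127 == (7 + 1000) & 127 == 111.
 */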

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
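
/*
 * Editorial sketch (assumption, not in the original file): the rule
 * above is why the process-context callers below use the _irq/_irqsave
 * lock variants rather than spin_lock_bh(), e.g.
 *
 *	unsigned long flags;
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	... look up or modify uidhash_table ...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 *
 * Disabling local interrupts also keeps softirqs off this CPU, and
 * spin_unlock_irqrestore(), unlike local_bh_enable(), is safe to call
 * with interrupts already disabled.
 */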

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.uid		= GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			/* Found: take a reference for the caller. */
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
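
/*
 * Illustrative usage (editorial, not in the original file): callers
 * bracket the helpers above with the hash lock, for example:
 *
 *	spin_lock_irq(&uidhash_lock);
 *	up = uid_hash_find(uid, uidhashentry(uid));
 *	spin_unlock_irq(&uidhash_lock);
 */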

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
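
/*
 * Illustrative usage (editorial, not in the original file):
 *
 *	struct user_struct *u = find_user(uid);
 *	if (u) {
 *		... inspect u ...
 *		free_uid(u);	(drops the reference find_user() took)
 *	}
 */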

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
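
/*
 * Editorial note (not in the original file): atomic_dec_and_lock()
 * returns true, with uidhash_lock held, only when the decrement takes
 * __count to zero.  On that path free_user() both drops the lock and
 * restores the saved IRQ state, so local_irq_restore() must run only
 * on the non-zero path - hence the else.
 */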

struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* Lost the race: discard our copy, use the winner's. */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;
}
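
/*
 * Editorial note (not in the original file): alloc_uid() is a
 * double-checked allocation pattern.  The blocking GFP_KERNEL
 * allocation cannot happen under a spinlock, so the lock is dropped
 * for the allocation and the lookup is repeated under the lock; if a
 * concurrent alloc_uid() won the race, the loser frees its copy and
 * returns the winner's entry.
 */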

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);
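
/*
 * Editorial note (not in the original file): this file is built into
 * the kernel, so module_init() here expands to an initcall that runs
 * during boot, well before the first setuid() caller can reach
 * alloc_uid().
 */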