kernel/pid.c
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario is when all but one of the 1 million possible PIDs
 * are already allocated: scanning 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pspace.h>
#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
static kmem_cache_t *pid_cachep;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
static inline int mk_pid(struct pspace *pspace, struct pidmap *map, int off)
{
	return (map - pspace->pidmap)*BITS_PER_PAGE + off;
}

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales to up to 4 million PIDs, runtime.
 */
struct pspace init_pspace = {
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0
};
/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
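
/*
 * Return a PID to its bitmap page: clear the bit and bump the page's
 * free counter. Like allocation, this needs no lock.
 */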
static fastcall void free_pidmap(struct pspace *pspace, int pid)
{
	struct pidmap *map = pspace->pidmap + pid / BITS_PER_PAGE;
	int offset = pid & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}
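
/*
 * Find and claim a free PID, starting just after the last one handed
 * out and wrapping around to RESERVED_PIDS at pid_max. Bitmap pages
 * are allocated on first use. Returns the new PID, or -1 when the
 * whole PID space is exhausted.
 */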
static int alloc_pidmap(struct pspace *pspace)
{
	int i, offset, max_scan, pid, last = pspace->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pspace->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				kfree(page);
			else
				map->page = page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					pspace->last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pspace, map, offset);
			/*
			 * find_next_offset() found a bit, the pid from it
			 * is in-bounds, and if we fell back to the last
			 * bitmap block and the final block was the same
			 * as the starting point, pid is before last_pid.
			 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pspace->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pspace->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pspace, map, offset);
	}
	return -1;
}
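
/*
 * Find the first allocated PID greater than @last, or -1 if there is
 * none. Used by find_ge_pid() below.
 */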
static int next_pidmap(struct pspace *pspace, int last)
{
	int offset;
	struct pidmap *map, *end;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pspace->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pspace->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pspace, map, offset);
	}
	return -1;
}
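
/*
 * Drop a reference on a struct pid and free it once the last
 * reference is gone. A NULL pid is silently ignored.
 */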
fastcall void put_pid(struct pid *pid)
{
	if (!pid)
		return;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count))
		kmem_cache_free(pid_cachep, pid);
}
EXPORT_SYMBOL_GPL(put_pid);
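
/* RCU callback used by free_pid() to drop its reference after a grace period. */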
static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}
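
/*
 * Unhash the pid, return its number to the bitmap and defer the final
 * put_pid() until after an RCU grace period, so concurrent lookups in
 * find_pid() remain safe.
 */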
fastcall void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	hlist_del_rcu(&pid->pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	free_pidmap(&init_pspace, pid->nr);
	call_rcu(&pid->rcu, delayed_put_pid);
}
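
/*
 * Allocate a struct pid, assign it a free PID number and add it to
 * the pid hash. Returns NULL if either the slab allocation or the
 * bitmap allocation fails.
 */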
struct pid *alloc_pid(void)
{
	struct pid *pid;
	enum pid_type type;
	int nr = -1;

	pid = kmem_cache_alloc(pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	nr = alloc_pidmap(&init_pspace);
	if (nr < 0)
		goto out_free;

	atomic_set(&pid->count, 1);
	pid->nr = nr;
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]);
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	kmem_cache_free(pid_cachep, pid);
	pid = NULL;
	goto out;
}
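
/*
 * Look up a struct pid by number. The hash chain is walked under RCU;
 * see the locking rule noted at find_task_by_pid_type() below.
 */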
struct pid * fastcall find_pid(int nr)
{
	struct hlist_node *elem;
	struct pid *pid;

	hlist_for_each_entry_rcu(pid, elem,
			&pid_hash[pid_hashfn(nr)], pid_chain) {
		if (pid->nr == nr)
			return pid;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid);
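
/*
 * Add @task to the list of tasks using the pid identified by @nr for
 * the given pid type.
 */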
int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr)
{
	struct pid_link *link;
	struct pid *pid;

	link = &task->pids[type];
	link->pid = pid = find_pid(nr);
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);

	return 0;
}
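
/*
 * Remove @task from the pid's task list for @type. If no type is left
 * using this pid, release it with free_pid().
 */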
void fastcall detach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = NULL;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}
/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
	old->pids[type].pid = NULL;
}
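
/*
 * Return the first task using this pid for the given type, or NULL.
 * The result is only stable under rcu_read_lock(); use get_pid_task()
 * below to take a reference instead.
 */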
struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference(pid->tasks[type].first);
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type(int type, int nr)
{
	return pid_task(find_pid(nr), type);
}

EXPORT_SYMBOL(find_task_by_pid_type);
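
/* Take a reference on the pid that @task uses for @type. */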
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}
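
/*
 * Like pid_task(), but returns the task with a reference held, so the
 * result can be used outside the RCU read-side critical section.
 */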
struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}
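
/* Look up a pid by number and take a reference on it. */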
struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_pid(nr));
	rcu_read_unlock();

	return pid;
}
/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid.
 */
struct pid *find_ge_pid(int nr)
{
	struct pid *pid;

	do {
		pid = find_pid(nr);
		if (pid)
			break;
		nr = next_pidmap(&init_pspace, nr);
	} while (nr > 0);

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);
/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	int i, pidhash_size;
	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct hlist_head));

	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}
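
/*
 * Set up the initial pid space: allocate the first bitmap page, reserve
 * PID 0 (it is never freed) and create the slab cache for struct pid.
 */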
void __init pidmap_init(void)
{
	init_pspace.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pspace.pidmap[0].page);
	atomic_dec(&init_pspace.pidmap[0].nr_free);

	pid_cachep = kmem_cache_create("pid", sizeof(struct pid),
					__alignof__(struct pid),
					SLAB_PANIC, NULL, NULL);
}