/*
 * kernel/pid.c
 *
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario when all but one out of 1 million PIDs possible are
 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *    Many thanks to Oleg Nesterov for comments and help
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
struct pid init_struct_pid = INIT_STRUCT_PID;
static struct kmem_cache *pid_ns_cachep;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE * 8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)

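/*
 * Each pidmap page covers BITS_PER_PAGE PIDs. mk_pid() maps a (bitmap
 * page, bit offset) pair back to a PID number by scaling the page's
 * index within pid_ns->pidmap by BITS_PER_PAGE and adding the offset.
 */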
static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap) * BITS_PER_PAGE + off;
}

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount = ATOMIC_INIT(2),
	},
	.pidmap = {
		[0 ... PIDMAP_ENTRIES - 1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.level = 0,
	.child_reaper = &init_task,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

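/*
 * A task is the "container init" if it is PID 1 in its own pid
 * namespace, i.e. the PID number recorded at the namespace's own level
 * is 1.
 */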
int is_container_init(struct task_struct *tsk)
{
	int ret = 0;
	struct pid *pid;

	rcu_read_lock();
	pid = task_pid(tsk);
	if (pid != NULL && pid->numbers[pid->level].nr == 1)
		ret = 1;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(is_container_init);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid)
{
	struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
	int offset = pid & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

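/*
 * alloc_pidmap() scans the bitmaps for a free bit starting just past
 * last_pid, wrapping around to RESERVED_PIDS so that low PIDs stay
 * reserved. Bitmap pages are allocated lazily under pidmap_lock the
 * first time a PID in their range is requested. Returns the new PID,
 * or -1 if no free PID (or bitmap page) could be found.
 */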
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				kfree(page);
			else
				map->page = page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					pid_ns->last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			/*
			 * find_next_offset() found a bit, the pid from it
			 * is in-bounds, and if we fell back to the last
			 * bitmap block and the final block was the same
			 * as the starting point, pid is before last_pid.
			 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last + 1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pid_ns->pidmap[(pid_max - 1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}

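/*
 * next_pidmap() returns the first allocated PID number greater than
 * @last in @pid_ns, or -1 if there is none; it is the iteration
 * primitive behind find_ge_pid() and zap_pid_ns_processes() below.
 */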
static int next_pidmap(struct pid_namespace *pid_ns, int last)
{
	int offset;
	struct pidmap *map, *end;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

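/*
 * put_pid() drops one reference; when the last reference goes away the
 * struct pid is returned to its namespace's slab cache and the
 * namespace reference taken by alloc_pid() is released.
 */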
fastcall void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

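/*
 * free_pid() unhashes every level of the pid under pidmap_lock, frees
 * the PID numbers in each namespace, and defers the final put_pid()
 * through RCU so that concurrent lockless hash lookups remain safe.
 */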
fastcall void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++)
		hlist_del_rcu(&pid->numbers[i].pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);

	call_rcu(&pid->rcu, delayed_put_pid);
}

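/*
 * alloc_pid() allocates a struct pid for a task created in @ns. A
 * namespace at level N needs N+1 PID numbers: one from @ns itself and
 * one from each ancestor namespace up to init_pid_ns. On failure the
 * PID numbers already allocated are rolled back via out_free.
 */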
struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	get_pid_ns(ns);
	pid->level = ns->level;
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	for (i = ns->level; i >= 0; i--) {
		upid = &pid->numbers[i];
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	for (i++; i <= ns->level; i++)
		free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}

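/*
 * find_pid_ns() returns the struct pid that carries number @nr in
 * namespace @ns, or NULL. It walks the hash chain locklessly, so the
 * caller must hold rcu_read_lock(); find_get_pid() below shows the
 * usual pattern of taking a reference before dropping the read lock.
 */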
struct pid * fastcall find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct hlist_node *elem;
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr, elem,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(find_vpid);

struct pid *find_pid(int nr)
{
	return find_pid_ns(nr, &init_pid_ns);
}
EXPORT_SYMBOL_GPL(find_pid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
int fastcall attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);

	return 0;
}

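/*
 * detach_pid() severs the task's link for @type; if no task of any
 * type still hangs off the pid afterwards, the pid itself is freed.
 * Like attach_pid(), it runs with the tasklist_lock write-held.
 */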
void fastcall detach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = NULL;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
	old->pids[type].pid = NULL;
}

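/*
 * pid_task() returns the first task using @pid as an ID of the given
 * type, or NULL. Per the comment below, callers must be inside an RCU
 * read-side critical section or hold the tasklist_lock.
 */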
struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference(pid->tasks[type].first);
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type_ns(int type, int nr,
		struct pid_namespace *ns)
{
	return pid_task(find_pid_ns(nr, ns), type);
}
EXPORT_SYMBOL(find_task_by_pid_type_ns);

struct task_struct *find_task_by_pid(pid_t nr)
{
	return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns);
}
EXPORT_SYMBOL(find_task_by_pid);

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_type_ns(PIDTYPE_PID, vnr,
			current->nsproxy->pid_ns);
}
EXPORT_SYMBOL(find_task_by_vpid);

struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	return find_task_by_pid_type_ns(PIDTYPE_PID, nr, ns);
}
EXPORT_SYMBOL(find_task_by_pid_ns);

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}

struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}

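/*
 * pid_nr_ns() translates a struct pid into the number it carries in
 * @ns, or 0 when the pid is not visible there: a pid allocated at
 * level L only has numbers for levels 0..L, and only in the one
 * namespace recorded at each level. For instance,
 * task_pid_nr_ns(tsk, tsk->nsproxy->pid_ns) yields the PID as the
 * task's own namespace sees it.
 */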
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}

pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_pid(tsk), ns);
}
EXPORT_SYMBOL(task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_pgrp(tsk), ns);
}
EXPORT_SYMBOL(task_pgrp_nr_ns);

pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_session(tsk), ns);
}
EXPORT_SYMBOL(task_session_nr_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

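/*
 * The size of struct pid depends on nesting depth: a pid at level N
 * embeds N+1 struct upid entries. A separate kmem cache is therefore
 * kept per depth, shared by all namespaces of that depth and tracked
 * on pid_caches_lh.
 */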
struct pid_cache {
	int nr_ids;
	char name[16];
	struct kmem_cache *cachep;
	struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);

/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */
static struct kmem_cache *create_pid_cachep(int nr_ids)
{
	struct pid_cache *pcache;
	struct kmem_cache *cachep;

	mutex_lock(&pid_caches_mutex);
	list_for_each_entry(pcache, &pid_caches_lh, list)
		if (pcache->nr_ids == nr_ids)
			goto out;

	pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
	if (pcache == NULL)
		goto err_alloc;

	snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
	cachep = kmem_cache_create(pcache->name,
			sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (cachep == NULL)
		goto err_cachep;

	pcache->nr_ids = nr_ids;
	pcache->cachep = cachep;
	list_add(&pcache->list, &pid_caches_lh);
out:
	mutex_unlock(&pid_caches_mutex);
	return pcache->cachep;

err_cachep:
	kfree(pcache);
err_alloc:
	mutex_unlock(&pid_caches_mutex);
	return NULL;
}

#ifdef CONFIG_PID_NS
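/*
 * create_pid_namespace() sets up a child namespace at the given
 * nesting level: it pre-allocates the first bitmap page, reserves bit
 * 0 (PID 0 is never handed out), and picks the pid cache that matches
 * its depth.
 */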
static struct pid_namespace *create_pid_namespace(int level)
{
	struct pid_namespace *ns;
	int i;

	ns = kmem_cache_alloc(pid_ns_cachep, GFP_KERNEL);
	if (ns == NULL)
		goto out;

	ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ns->pidmap[0].page)
		goto out_free;

	ns->pid_cachep = create_pid_cachep(level + 1);
	if (ns->pid_cachep == NULL)
		goto out_free_map;

	kref_init(&ns->kref);
	ns->last_pid = 0;
	ns->child_reaper = NULL;
	ns->level = level;

	set_bit(0, ns->pidmap[0].page);
	atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

	for (i = 1; i < PIDMAP_ENTRIES; i++) {
		ns->pidmap[i].page = NULL;
		atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
	}

	return ns;

out_free_map:
	kfree(ns->pidmap[0].page);
out_free:
	kmem_cache_free(pid_ns_cachep, ns);
out:
	return ERR_PTR(-ENOMEM);
}

static void destroy_pid_namespace(struct pid_namespace *ns)
{
	int i;

	for (i = 0; i < PIDMAP_ENTRIES; i++)
		kfree(ns->pidmap[i].page);
	kmem_cache_free(pid_ns_cachep, ns);
}

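/*
 * copy_pid_ns() implements the clone()/unshare() semantics: without
 * CLONE_NEWPID the old namespace is reference-counted and reused;
 * with it, a child namespace one level deeper is created.
 * CLONE_NEWPID together with CLONE_THREAD is rejected, presumably
 * because all threads of a group must share one pid namespace.
 */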
struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
{
	struct pid_namespace *new_ns;

	BUG_ON(!old_ns);
	new_ns = get_pid_ns(old_ns);
	if (!(flags & CLONE_NEWPID))
		goto out;

	new_ns = ERR_PTR(-EINVAL);
	if (flags & CLONE_THREAD)
		goto out_put;

	new_ns = create_pid_namespace(old_ns->level + 1);
	if (!IS_ERR(new_ns))
		new_ns->parent = get_pid_ns(old_ns);

out_put:
	put_pid_ns(old_ns);
out:
	return new_ns;
}

void free_pid_ns(struct kref *kref)
{
	struct pid_namespace *ns, *parent;

	ns = container_of(kref, struct pid_namespace, kref);

	parent = ns->parent;
	destroy_pid_namespace(ns);

	if (parent != NULL)
		put_pid_ns(parent);
}
#endif /* CONFIG_PID_NS */

void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
	int nr;
	int rc;

	/*
	 * The last thread in the cgroup-init thread group is terminating.
	 * Find the remaining pids in the namespace, signal them and wait
	 * for them to exit.
	 *
	 * Note: this signals each thread in the namespace - even those
	 * that belong to the same thread group. To avoid that, we would
	 * have to walk the entire tasklist looking for processes in this
	 * namespace, which could be unnecessarily expensive if the pid
	 * namespace has just a few processes. Or we would need to
	 * maintain a tasklist for each pid namespace.
	 */
	read_lock(&tasklist_lock);
	nr = next_pidmap(pid_ns, 1);
	while (nr > 0) {
		kill_proc_info(SIGKILL, SEND_SIG_PRIV, nr);
		nr = next_pidmap(pid_ns, nr);
	}
	read_unlock(&tasklist_lock);

	do {
		clear_thread_flag(TIF_SIGPENDING);
		rc = sys_wait4(-1, NULL, __WALL, NULL);
	} while (rc != -ECHILD);

	/* Child reaper for the pid namespace is going away */
	pid_ns->child_reaper = NULL;
	return;
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	int i, pidhash_size;
	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct hlist_head));

	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = create_pid_cachep(1);
	if (init_pid_ns.pid_cachep == NULL)
		panic("Can't create pid_1 cachep\n");

	pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
}