kernel/pid.c
/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario when all but one out of 1 million PIDs possible are
 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)

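/*
 * Pidmap helpers: mk_pid() maps a (bitmap page, bit offset) pair back to a
 * PID number, and find_next_offset() scans a bitmap page for the next zero
 * (i.e. free) bit starting at the given offset.
 */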
static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales to up to 4 million PIDs, runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.level = 0,
	.child_reaper = &init_task,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

int is_container_init(struct task_struct *tsk)
{
	int ret = 0;
	struct pid *pid;

	rcu_read_lock();
	pid = task_pid(tsk);
	if (pid != NULL && pid->numbers[pid->level].nr == 1)
		ret = 1;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(is_container_init);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

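/*
 * free_pidmap() returns one namespace-local PID number to its bitmap:
 * clear the bit for that PID and bump the page's free count. This pairs
 * with the lockless test_and_set_bit() in alloc_pidmap(), so no lock is
 * needed here.
 */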
static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

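/*
 * alloc_pidmap() hands out the next free PID in this namespace. It starts
 * scanning just past last_pid, wraps back to RESERVED_PIDS when it reaches
 * pid_max, allocates bitmap pages lazily on first use, and claims a PID
 * with a single test_and_set_bit(). Returns -1 if the PID space is
 * exhausted.
 */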
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				kfree(page);
			else
				map->page = page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					pid_ns->last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			/*
			 * find_next_offset() found a bit, the pid from it
			 * is in-bounds, and if we fell back to the last
			 * bitmap block and the final block was the same
			 * as the starting point, pid is before last_pid.
			 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}

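/*
 * next_pidmap() finds the first allocated PID in this namespace that is
 * strictly greater than 'last', or -1 if there is none. Used by
 * find_ge_pid() when /proc walks the PID space.
 */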
int next_pidmap(struct pid_namespace *pid_ns, int last)
{
	int offset;
	struct pidmap *map, *end;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

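/*
 * put_pid() drops one reference to a struct pid. When the last reference
 * goes away the pid is returned to its namespace's slab cache and the
 * namespace reference taken in alloc_pid() is released.
 */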
void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

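/*
 * free_pid() unhashes every per-namespace upid, returns the PID numbers to
 * their bitmaps, and defers the final put_pid() through RCU so that
 * lockless hash walkers never see a freed struct pid.
 */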
void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++)
		hlist_del_rcu(&pid->numbers[i].pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}

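/*
 * alloc_pid() builds a struct pid that is visible in 'ns' and in every
 * ancestor namespace: one PID number is allocated per level, from the
 * innermost namespace outwards, and each resulting upid is hashed under
 * pidmap_lock. On bitmap exhaustion the numbers allocated so far are
 * released and NULL is returned.
 */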
struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	get_pid_ns(ns);
	pid->level = ns->level;
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	for (i = ns->level; i >= 0; i--) {
		upid = &pid->numbers[i];
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}

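/*
 * find_pid_ns() looks up a struct pid by its number in one namespace by
 * walking the global hash chain; the walk uses RCU-protected list
 * primitives, so callers must hold rcu_read_lock() or otherwise pin the
 * pid.
 */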
struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct hlist_node *elem;
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr, elem,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(find_vpid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);
}

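/*
 * __change_pid() unlinks the task from its current pid for this type and
 * points it at 'new' (NULL when detaching). If no task of any type still
 * hangs off the old pid, the old pid is freed.
 */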
static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type, pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

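/*
 * pid_task() returns the first task attached to 'pid' for the given type,
 * or NULL. It only dereferences RCU-protected pointers, so the caller must
 * be inside rcu_read_lock() or hold tasklist_lock.
 */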
struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference(pid->tasks[type].first);
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type_ns(int type, int nr,
		struct pid_namespace *ns)
{
	return pid_task(find_pid_ns(nr, ns), type);
}

EXPORT_SYMBOL(find_task_by_pid_type_ns);

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_type_ns(PIDTYPE_PID, vnr,
			current->nsproxy->pid_ns);
}
EXPORT_SYMBOL(find_task_by_vpid);

struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	return find_task_by_pid_type_ns(PIDTYPE_PID, nr, ns);
}
EXPORT_SYMBOL(find_task_by_pid_ns);

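/*
 * get_task_pid() and get_pid_task() are the reference-taking lookups:
 * under rcu_read_lock() they take a reference on the struct pid or
 * task_struct they return (which may be NULL), so the caller owns a
 * reference afterwards.
 */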
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

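/*
 * pid_nr_ns() translates a struct pid into the number it carries in 'ns',
 * or 0 if the pid is not visible in that namespace. pid_vnr() and
 * __task_pid_nr_ns() are convenience wrappers for the caller's own
 * namespace and for a task, respectively.
 */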
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = current->nsproxy->pid_ns;
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID)
			task = task->group_leader;
		nr = pid_nr_ns(task->pids[type].pid, ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	int i, pidhash_size;
	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct hlist_head));

	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

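/*
 * pidmap_init() sets up the init namespace's first bitmap page, marks
 * PID 0 as permanently in use (it is never freed), and creates the slab
 * cache used for struct pid allocations in the init namespace.
 */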
void __init pidmap_init(void)
{
	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}