/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario when all but one out of 1 million PIDs possible are
 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/kmemleak.h>
#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;
#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}
#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
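/*
 * Worked example of the pid <-> (bitmap page, offset) mapping (illustrative
 * numbers only, assuming 4K pages and a pid_max of about one million):
 * BITS_PER_PAGE is 32768, so the whole PID space fits in 32 pidmap entries,
 * and e.g. pid 40000 lives in pidmap[1] at offset 40000 - 32768 = 7232,
 * which is exactly the value mk_pid() reconstructs.
 */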
/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales to up to 4 million PIDs, runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount       = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.level = 0,
	.child_reaper = &init_task,
};
EXPORT_SYMBOL_GPL(init_pid_ns);
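/*
 * is_container_init() reports whether @tsk is the "init" task of its pid
 * namespace, i.e. whether the PID number at the task's own namespace level
 * is 1.
 */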
int is_container_init(struct task_struct *tsk)
{
	int ret = 0;
	struct pid *pid;

	rcu_read_lock();
	pid = task_pid(tsk);
	if (pid != NULL && pid->numbers[pid->level].nr == 1)
		ret = 1;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(is_container_init);
/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
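/*
 * Illustrative interleaving of the deadlock described above (a sketch, not
 * an observed trace):
 *
 *	CPU 0					CPU 1
 *	spin_lock(&pidmap_lock)			write_lock_irq(&tasklist_lock)
 *	<interrupt>				detach_pid() -> free_pid()
 *	  read_lock(&tasklist_lock)		  spin_lock(&pidmap_lock)
 *	  spins: CPU 1 holds the writer		  spins: CPU 0 holds the lock
 *
 * Disabling interrupts while pidmap_lock is held breaks this cycle.
 */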
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}
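/*
 * Allocate a free PID number in @pid_ns. The search starts just after the
 * namespace's last_pid, wraps around to RESERVED_PIDS, and returns -1 if
 * every PID is in use or a bitmap page cannot be allocated.
 */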
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				kfree(page);
			else
				map->page = page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					pid_ns->last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			/*
			 * find_next_offset() found a bit, the pid from it
			 * is in-bounds, and if we fell back to the last
			 * bitmap block and the final block was the same
			 * as the starting point, pid is before last_pid.
			 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}
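/*
 * Return the first allocated PID number greater than @last in @pid_ns, or
 * -1 if there is none. Used by find_ge_pid() below to skip unused ranges.
 */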
int next_pidmap(struct pid_namespace *pid_ns, int last)
{
	int offset;
	struct pidmap *map, *end;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}
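/*
 * Drop one reference on @pid; the struct pid is returned to its namespace's
 * cache once the last reference is gone.
 */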
void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);
static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}
void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++)
		hlist_del_rcu(&pid->numbers[i].pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}
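/*
 * Allocate a struct pid plus one PID number per namespace level from @ns up
 * to the initial namespace, and hash every struct upid. On failure the
 * bitmap entries already taken are released and NULL is returned.
 */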
struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	get_pid_ns(ns);
	pid->level = ns->level;
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	for (i = ns->level; i >= 0; i--) {
		upid = &pid->numbers[i];
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}
struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct hlist_node *elem;
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr, elem,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);
struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(find_vpid);
/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);
}
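/*
 * Unhook @task from the pid it currently uses for @type and point the link
 * at @new. The old pid is freed once no task uses it for any pid_type.
 */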
static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}
void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}
void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type, pid);
}
/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			   enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}
struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference(pid->tasks[type].first);
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);
/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}
struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
}
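/*
 * Illustrative caller (a sketch, not taken from this file): the task
 * returned by find_task_by_vpid() is only stable while the caller stays
 * inside the RCU read-side section, so a typical user does:
 *
 *	rcu_read_lock();
 *	tsk = find_task_by_vpid(nr);
 *	if (tsk)
 *		get_task_struct(tsk);
 *	rcu_read_unlock();
 */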
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	rcu_read_lock();
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();

	return pid;
}
struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;

	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();

	return result;
}
struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);
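/*
 * Illustrative pairing (a sketch, not taken from this file): a caller that
 * needs the pid beyond the current RCU section takes a counted reference
 * here and drops it later:
 *
 *	struct pid *pid = find_get_pid(nr);
 *	...
 *	put_pid(pid);
 */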
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}
pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(pid_vnr);
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns)
{
	pid_t nr = 0;

	rcu_read_lock();
	if (!ns)
		ns = current->nsproxy->pid_ns;
	if (likely(pid_alive(task))) {
		if (type != PIDTYPE_PID)
			task = task->group_leader;
		nr = pid_nr_ns(task->pids[type].pid, ns);
	}
	rcu_read_unlock();

	return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);
pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);
struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
	return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);
/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}
/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
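/*
 * For example (illustrative numbers, not a measurement): with roughly
 * 128 MB of kernel pages, megabytes = 128, fls(128 * 4) = 10, so
 * pidhash_shift becomes 10 and the table gets 1024 hlist heads.
 */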
void __init pidhash_init(void)
{
	int i, pidhash_size;
	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct hlist_head));

	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
	/*
	 * pid_hash contains references to allocated struct pid objects and it
	 * must be scanned by kmemleak to avoid false positives.
	 */
	kmemleak_alloc(pid_hash, pidhash_size * sizeof(*(pid_hash)), 0,
		       GFP_KERNEL);
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}
void __init pidmap_init(void)
{
	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}