/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002 William Irwin, IBM
 * (C) 2002 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bits represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one out of 1 million possible PIDs
 * are already allocated: the scanning of 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful setbit.
 * Freeing is O(1).
 */
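/*
 * Concretely (assuming 4 KB pages): each bitmap page covers
 * BITS_PER_PAGE == 32768 PIDs, so a 1-million-PID space is covered by
 * roughly 32 bitmap pages; hence the "32 list entries" worst case
 * above, plus a scan of at most one page (PAGE_SIZE bytes) of bits.
 */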
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)

static struct list_head *pid_hash[PIDTYPE_MAX];
static int pidhash_shift;

int pid_max = PID_MAX_DEFAULT;
int last_pid;
#define RESERVED_PIDS		300

#define PIDMAP_ENTRIES		(PID_MAX_LIMIT/PAGE_SIZE/8)
#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
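/*
 * Example of the split used throughout this file (hypothetical
 * numbers, assuming 4 KB pages, so BITS_PER_PAGE == 32768): PID 70000
 * lives in pidmap_array[70000 / 32768] == pidmap_array[2], at bit
 * offset 70000 & BITS_PER_PAGE_MASK == 4464.
 */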
/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
typedef struct pidmap {
	atomic_t nr_free;
	void *page;
} pidmap_t;

static pidmap_t pidmap_array[PIDMAP_ENTRIES] =
	 { [ 0 ... PIDMAP_ENTRIES-1 ] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } };
static pidmap_t *map_limit = pidmap_array + PIDMAP_ENTRIES;

static spinlock_t pidmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
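/*
 * Note: pidmap_lock only serializes the installation of freshly
 * allocated bitmap pages in next_free_map() below. Setting and
 * clearing individual PID bits goes through atomic bitops and needs
 * no lock at all.
 */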
inline void free_pidmap(int pid)
{
	pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
	int offset = pid & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}
/*
 * Here we search for the next map that has free bits left.
 * Normally the next map has free PIDs.
 */
static inline pidmap_t *next_free_map(pidmap_t *map, int *max_steps)
{
	while (--*max_steps) {
		if (++map == map_limit)
			map = pidmap_array;
		if (unlikely(!map->page)) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock(&pidmap_lock);
			if (map->page)
				free_page(page);
			else
				map->page = (void *)page;
			spin_unlock(&pidmap_lock);
			if (!map->page)
				break;
		}
		if (atomic_read(&map->nr_free))
			return map;
	}
	return NULL;
}
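/*
 * Note on next_free_map() above: the bitmap page is allocated outside
 * pidmap_lock, and the loser of an installation race simply frees its
 * copy, so the lock is never held across a page allocation. *max_steps
 * caps the walk at one full pass over pidmap_array, which is what
 * keeps allocation time-bounded even when the PID space is nearly
 * full.
 */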
int alloc_pidmap(void)
{
	int pid, offset, max_steps = PIDMAP_ENTRIES + 1;
	pidmap_t *map;

	pid = last_pid + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;

	offset = pid & BITS_PER_PAGE_MASK;
	map = pidmap_array + pid / BITS_PER_PAGE;

	if (likely(map->page && !test_and_set_bit(offset, map->page))) {
		/*
		 * There is a small window for last_pid updates to race,
		 * but in that case the next allocation will go into the
		 * slowpath and that fixes things up.
		 */
return_pid:
		atomic_dec(&map->nr_free);
		last_pid = pid;
		return pid;
	}

	if (!offset || !atomic_read(&map->nr_free)) {
next_map:
		map = next_free_map(map, &max_steps);
		if (!map)
			goto failure;
		offset = 0;
	}
	/*
	 * Find the next zero bit:
	 */
scan_more:
	offset = find_next_zero_bit(map->page, BITS_PER_PAGE, offset);
	if (offset >= BITS_PER_PAGE)
		goto next_map;
	if (test_and_set_bit(offset, map->page))
		goto scan_more;

	/* we got the PID: */
	pid = (map - pidmap_array) * BITS_PER_PAGE + offset;
	goto return_pid;

failure:
	return -1;
}
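/*
 * Worked example for alloc_pidmap() above (hypothetical numbers,
 * assuming 4 KB pages): if the bit at offset 4464 of pidmap_array[2]
 * was grabbed, the resulting PID is 2 * 32768 + 4464 == 70000. In the
 * common case last_pid + 1 is still free and the whole allocation is
 * the single test_and_set_bit() in the fastpath.
 */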
inline struct pid *find_pid(enum pid_type type, int nr)
{
	struct list_head *elem, *bucket = &pid_hash[type][pid_hashfn(nr)];
	struct pid *pid;

	__list_for_each(elem, bucket) {
		pid = list_entry(elem, struct pid, hash_chain);
		if (pid->nr == nr)
			return pid;
	}
	return NULL;
}
void link_pid(task_t *task, struct pid_link *link, struct pid *pid)
{
	atomic_inc(&pid->count);
	list_add_tail(&link->pid_chain, &pid->task_list);
	link->pidptr = pid;
}
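/*
 * Note: unlike attach_pid() below, link_pid() presumes the struct pid
 * already exists and is hashed; it only takes a reference and chains
 * the link onto the pid's task list.
 */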
int attach_pid(task_t *task, enum pid_type type, int nr)
{
	struct pid *pid = find_pid(type, nr);

	if (pid)
		atomic_inc(&pid->count);
	else {
		pid = &task->pids[type].pid;
		pid->nr = nr;
		atomic_set(&pid->count, 1);
		INIT_LIST_HEAD(&pid->task_list);
		pid->task = task;
		get_task_struct(task);
		list_add(&pid->hash_chain, &pid_hash[type][pid_hashfn(nr)]);
	}
	list_add_tail(&task->pids[type].pid_chain, &pid->task_list);
	task->pids[type].pidptr = pid;

	return 0;
}
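/*
 * Note on attach_pid() above: the first task to attach a given nr
 * donates the struct pid embedded in its own task_struct (and pins
 * itself with get_task_struct()); every later attacher of the same nr
 * merely bumps pid->count and joins pid->task_list.
 */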
static inline int __detach_pid(task_t *task, enum pid_type type)
{
	struct pid_link *link = task->pids + type;
	struct pid *pid = link->pidptr;
	int nr;

	list_del(&link->pid_chain);
	if (!atomic_dec_and_test(&pid->count))
		return 0;

	nr = pid->nr;
	list_del(&pid->hash_chain);
	put_task_struct(pid->task);

	return nr;
}
static void _detach_pid(task_t *task, enum pid_type type)
{
	__detach_pid(task, type);
}
void detach_pid(task_t *task, enum pid_type type)
{
	int nr = __detach_pid(task, type);

	if (!nr)
		return;

	for (type = 0; type < PIDTYPE_MAX; ++type)
		if (find_pid(type, nr))
			return;
	free_pidmap(nr);
}
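/*
 * Note on detach_pid() above: the bitmap slot for nr is only returned
 * via free_pidmap() once no PID type at all (PID, TGID, PGID or SID)
 * still hashes nr, since the same number can simultaneously serve as,
 * say, another task's process group or session ID.
 */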
task_t *find_task_by_pid(int nr)
{
	struct pid *pid = find_pid(PIDTYPE_PID, nr);

	if (!pid)
		return NULL;

	return pid_task(pid->task_list.next, PIDTYPE_PID);
}
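/*
 * All tasks sharing an ID are chained on pid->task_list, so
 * find_task_by_pid() above hands back the first entry; for
 * PIDTYPE_PID that list has exactly one member.
 */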
/*
 * This function switches the PIDs if a non-leader thread calls
 * sys_execve() - this must be done without releasing the PID
 * (which a detach_pid() would eventually do).
 */
void switch_exec_pids(task_t *leader, task_t *thread)
{
	_detach_pid(leader, PIDTYPE_PID);
	_detach_pid(leader, PIDTYPE_TGID);
	_detach_pid(leader, PIDTYPE_PGID);
	_detach_pid(leader, PIDTYPE_SID);

	_detach_pid(thread, PIDTYPE_PID);
	_detach_pid(thread, PIDTYPE_TGID);

	leader->pid = leader->tgid = thread->pid;
	thread->pid = thread->tgid;

	attach_pid(thread, PIDTYPE_PID, thread->pid);
	attach_pid(thread, PIDTYPE_TGID, thread->tgid);
	attach_pid(thread, PIDTYPE_PGID, leader->__pgrp);
	attach_pid(thread, PIDTYPE_SID, thread->session);
	list_add_tail(&thread->tasks, &init_task.tasks);

	attach_pid(leader, PIDTYPE_PID, leader->pid);
	attach_pid(leader, PIDTYPE_TGID, leader->tgid);
	attach_pid(leader, PIDTYPE_PGID, leader->__pgrp);
	attach_pid(leader, PIDTYPE_SID, leader->session);
}
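/*
 * Note on switch_exec_pids() above: the exec'ing thread takes over
 * the group leader's identity (thread->pid becomes thread->tgid, so
 * pid == tgid afterwards), while the old leader inherits the thread's
 * former PID; both halves use _detach_pid(), so neither number is
 * ever released back to the bitmap in between.
 */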
/*
 * The pid hash table is scaled according to the amount of memory in
 * the machine. From a minimum of 16 slots up to 4096 slots at one
 * gigabyte or more.
 */
void __init pidhash_init(void)
{
	int i, j, pidhash_size;
	unsigned long megabytes = max_pfn >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order %d: %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct list_head));

	for (i = 0; i < PIDTYPE_MAX; i++) {
		pid_hash[i] = alloc_bootmem(pidhash_size *
					sizeof(struct list_head));
		if (!pid_hash[i])
			panic("Could not alloc pidhash!\n");
		for (j = 0; j < pidhash_size; j++)
			INIT_LIST_HEAD(&pid_hash[i][j]);
	}
}
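/*
 * Sizing example for pidhash_init() above (hypothetical machine with
 * 256 MB of RAM): megabytes == 256, fls(256 * 4) == fls(1024) == 11,
 * clamped to min(12, 11) == 11, giving 2048 hash slots per PID type.
 * At 1 GB or more the shift clamps to 12 (4096 slots); on tiny
 * machines max(4, ...) guarantees at least 16 slots.
 */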
void __init pidmap_init(void)
{
	int i;

	pidmap_array->page = (void *)get_zeroed_page(GFP_KERNEL);
	set_bit(0, pidmap_array->page);
	atomic_dec(&pidmap_array->nr_free);

	/*
	 * Allocate PID 0, and hash it via all PID types:
	 */
	for (i = 0; i < PIDTYPE_MAX; i++)
		attach_pid(current, i, 0);
}