include/linux/pid.h

#ifndef _LINUX_PID_H
#define _LINUX_PID_H

#include <linux/rcupdate.h>

enum pid_type
{
	PIDTYPE_PID,
	PIDTYPE_PGID,
	PIDTYPE_SID,
	PIDTYPE_MAX
};

/*
 * What is struct pid?
 *
 * A struct pid is the kernel's internal notion of a process identifier.
 * It refers to individual tasks, process groups, and sessions. While
 * there are processes attached to it the struct pid lives in a hash
 * table, so it and then the processes that it refers to can be found
 * quickly from the numeric pid value. The attached processes may be
 * quickly accessed by following pointers from struct pid.
 *
 * Storing pid_t values in the kernel and referring to them later has a
 * problem. The process originally with that pid may have exited and the
 * pid allocator wrapped, and another process could have come along
 * and been assigned that pid.
 *
 * Referring to user space processes by holding a reference to struct
 * task_struct has a problem. When the user space process exits
 * the now useless task_struct is still kept. A task_struct plus a
 * stack consumes around 10K of low kernel memory. More precisely
 * this is THREAD_SIZE + sizeof(struct task_struct). By comparison
 * a struct pid is about 64 bytes.
 *
 * Holding a reference to struct pid solves both of these problems.
 * It is small so holding a reference does not consume a lot of
 * resources, and since a new struct pid is allocated when the numeric
 * pid value is reused we don't mistakenly refer to new processes.
 */

struct pid
{
	atomic_t count;
	/* Try to keep pid_chain in the same cacheline as nr for find_pid */
	int nr;
	struct hlist_node pid_chain;
	/* lists of tasks that use this pid */
	struct hlist_head tasks[PIDTYPE_MAX];
	struct rcu_head rcu;
};

struct pid_link
{
	struct hlist_node node;
	struct pid *pid;
};

static inline struct pid *get_pid(struct pid *pid)
{
	if (pid)
		atomic_inc(&pid->count);
	return pid;
}

extern void FASTCALL(put_pid(struct pid *pid));
extern struct task_struct *FASTCALL(pid_task(struct pid *pid, enum pid_type));
extern struct task_struct *FASTCALL(get_pid_task(struct pid *pid,
						enum pid_type));

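/*
 * Illustrative sketch, not part of the original header: the pattern the
 * comment above motivates.  Rather than caching a pid_t (which may be
 * reused after the allocator wraps) or pinning a task_struct (around 10K
 * until released), a subsystem can cache a counted struct pid and resolve
 * it on demand.  The helper names are hypothetical and assume a
 * <linux/sched.h> context for the task_struct layout.
 */
static inline struct pid *example_cache_pid(struct task_struct *task)
{
	/* pids[] is the struct pid_link array in task_struct; get_pid()
	 * keeps the struct pid alive even after the task exits. */
	return get_pid(task->pids[PIDTYPE_PID].pid);
}

static inline struct task_struct *example_resolve_pid(struct pid *cached)
{
	/* NULL once every task using this pid has detached; otherwise a
	 * task_struct returned with its usage count elevated. */
	return get_pid_task(cached, PIDTYPE_PID);
}
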
/*
 * attach_pid() and detach_pid() must be called with the tasklist_lock
 * write-held.
 */
extern int FASTCALL(attach_pid(struct task_struct *task,
				enum pid_type type, int nr));

extern void FASTCALL(detach_pid(struct task_struct *task, enum pid_type));

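/*
 * Illustrative sketch, not part of the original header: honouring the
 * locking rule stated above.  Moving a task between pid hash chains is
 * done with the tasklist_lock write-held.  "example_change_pgid" is a
 * hypothetical helper (the real callers live in kernel/sys.c and
 * kernel/exit.c) and assumes <linux/sched.h> for tasklist_lock.
 */
static inline void example_change_pgid(struct task_struct *task, int pgid)
{
	write_lock_irq(&tasklist_lock);
	detach_pid(task, PIDTYPE_PGID);
	attach_pid(task, PIDTYPE_PGID, pgid);
	write_unlock_irq(&tasklist_lock);
}
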
/*
 * look up a PID in the hash table. Must be called with the tasklist_lock
 * or rcu_read_lock() held.
 */
extern struct pid *FASTCALL(find_pid(int nr));

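/*
 * Illustrative sketch, not part of the original header: a lockless lookup
 * under RCU.  The struct pid returned by find_pid() is only guaranteed to
 * stay around while rcu_read_lock() (or the tasklist_lock) is held, so a
 * counted reference is taken before leaving the critical section, which is
 * roughly what find_get_pid() below packages up.  Hypothetical helper name.
 */
static inline struct pid *example_lookup_pid(int nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_pid(nr));	/* NULL if no such pid */
	rcu_read_unlock();
	return pid;			/* caller drops it with put_pid() */
}
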
/*
 * Lookup a PID in the hash table, and return with its count elevated.
 */
extern struct pid *find_get_pid(int nr);

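/*
 * Illustrative sketch, not part of the original header: the consumer side
 * of find_get_pid().  The reference it returns must be dropped with
 * put_pid() once the caller is done; any task reference obtained from the
 * pid is counted separately.  Hypothetical helper name.
 */
static inline struct task_struct *example_get_task_by_nr(int nr)
{
	struct pid *pid = find_get_pid(nr);
	struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);

	put_pid(pid);		/* drop the lookup reference */
	return task;		/* NULL if gone; else a counted task_struct */
}
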
extern struct pid *alloc_pid(void);
extern void FASTCALL(free_pid(struct pid *pid));

#define pid_next(task, type)					\
	((task)->pids[(type)].node.next)

#define pid_next_task(task, type)				\
	hlist_entry(pid_next(task, type), struct task_struct,	\
			pids[(type)].node)

/* We could use hlist_for_each_entry_rcu here but it takes more arguments
 * than the do_each_task_pid/while_each_task_pid. So we roll our own
 * to preserve the existing interface.
 */
#define do_each_task_pid(who, type, task)				\
	if ((task = find_task_by_pid_type(type, who))) {		\
		prefetch(pid_next(task, type));				\
		do {

#define while_each_task_pid(who, type, task)				\
		} while (pid_next(task, type) && ({			\
			task = pid_next_task(task, type);		\
			rcu_dereference(task);				\
			prefetch(pid_next(task, type));			\
			1; }) );					\
	}

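/*
 * Illustrative sketch, not part of the original header: walking every task
 * in a process group with the iterator pair above.  The caller must hold
 * rcu_read_lock() or the tasklist_lock for the walk to be safe.
 * "example_count_pgrp" is a hypothetical helper.
 */
static inline int example_count_pgrp(int pgrp)
{
	struct task_struct *p;
	int count = 0;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		count++;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);

	return count;
}
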
#endif /* _LINUX_PID_H */