[PATCH] avoid unaligned access when accessing poll stack
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / kernel / kthread.c
blobc5f3c6613b6d77dd21d6a0bfcedfbe44d624f802
1 /* Kernel thread helper functions.
2 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
4 * Creation is done via keventd, so that we get a clean environment
5 * even if we're invoked from userspace (think modprobe, hotplug cpu,
6 * etc.).
7 */
8 #include <linux/sched.h>
9 #include <linux/kthread.h>
10 #include <linux/completion.h>
11 #include <linux/err.h>
12 #include <linux/unistd.h>
13 #include <linux/file.h>
14 #include <linux/module.h>
15 #include <linux/mutex.h>
16 #include <asm/semaphore.h>
/*
 * We don't want to execute off keventd since it might
 * hold a semaphore our callers hold too:
 */
static struct workqueue_struct *helper_wq;
24 struct kthread_create_info
26 /* Information passed to kthread() from keventd. */
27 int (*threadfn)(void *data);
28 void *data;
29 struct completion started;
31 /* Result passed back to kthread_create() from keventd. */
32 struct task_struct *result;
33 struct completion done;
36 struct kthread_stop_info
38 struct task_struct *k;
39 int err;
40 struct completion done;
43 /* Thread stopping is done by setthing this var: lock serializes
44 * multiple kthread_stop calls. */
45 static DEFINE_MUTEX(kthread_stop_lock);
46 static struct kthread_stop_info kthread_stop_info;
48 int kthread_should_stop(void)
50 return (kthread_stop_info.k == current);
52 EXPORT_SYMBOL(kthread_should_stop);
54 static void kthread_exit_files(void)
56 struct fs_struct *fs;
57 struct task_struct *tsk = current;
59 exit_fs(tsk); /* current->fs->count--; */
60 fs = init_task.fs;
61 tsk->fs = fs;
62 atomic_inc(&fs->count);
63 exit_files(tsk);
64 current->files = init_task.files;
65 atomic_inc(&tsk->files->count);
68 static int kthread(void *_create)
70 struct kthread_create_info *create = _create;
71 int (*threadfn)(void *data);
72 void *data;
73 sigset_t blocked;
74 int ret = -EINTR;
76 kthread_exit_files();
78 /* Copy data: it's on keventd's stack */
79 threadfn = create->threadfn;
80 data = create->data;
82 /* Block and flush all signals (in case we're not from keventd). */
83 sigfillset(&blocked);
84 sigprocmask(SIG_BLOCK, &blocked, NULL);
85 flush_signals(current);
87 /* By default we can run anywhere, unlike keventd. */
88 set_cpus_allowed(current, CPU_MASK_ALL);
90 /* OK, tell user we're spawned, wait for stop or wakeup */
91 __set_current_state(TASK_INTERRUPTIBLE);
92 complete(&create->started);
93 schedule();
95 if (!kthread_should_stop())
96 ret = threadfn(data);
98 /* It might have exited on its own, w/o kthread_stop. Check. */
99 if (kthread_should_stop()) {
100 kthread_stop_info.err = ret;
101 complete(&kthread_stop_info.done);
103 return 0;
106 /* We are keventd: create a thread. */
107 static void keventd_create_kthread(void *_create)
109 struct kthread_create_info *create = _create;
110 int pid;
112 /* We want our own signal handler (we take no signals by default). */
113 pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
114 if (pid < 0) {
115 create->result = ERR_PTR(pid);
116 } else {
117 wait_for_completion(&create->started);
118 read_lock(&tasklist_lock);
119 create->result = find_task_by_pid(pid);
120 read_unlock(&tasklist_lock);
122 complete(&create->done);
125 struct task_struct *kthread_create(int (*threadfn)(void *data),
126 void *data,
127 const char namefmt[],
128 ...)
130 struct kthread_create_info create;
131 DECLARE_WORK(work, keventd_create_kthread, &create);
133 create.threadfn = threadfn;
134 create.data = data;
135 init_completion(&create.started);
136 init_completion(&create.done);
139 * The workqueue needs to start up first:
141 if (!helper_wq)
142 work.func(work.data);
143 else {
144 queue_work(helper_wq, &work);
145 wait_for_completion(&create.done);
147 if (!IS_ERR(create.result)) {
148 va_list args;
149 va_start(args, namefmt);
150 vsnprintf(create.result->comm, sizeof(create.result->comm),
151 namefmt, args);
152 va_end(args);
155 return create.result;
157 EXPORT_SYMBOL(kthread_create);
159 void kthread_bind(struct task_struct *k, unsigned int cpu)
161 BUG_ON(k->state != TASK_INTERRUPTIBLE);
162 /* Must have done schedule() in kthread() before we set_task_cpu */
163 wait_task_inactive(k);
164 set_task_cpu(k, cpu);
165 k->cpus_allowed = cpumask_of_cpu(cpu);
167 EXPORT_SYMBOL(kthread_bind);
169 int kthread_stop(struct task_struct *k)
171 return kthread_stop_sem(k, NULL);
173 EXPORT_SYMBOL(kthread_stop);
175 int kthread_stop_sem(struct task_struct *k, struct semaphore *s)
177 int ret;
179 mutex_lock(&kthread_stop_lock);
181 /* It could exit after stop_info.k set, but before wake_up_process. */
182 get_task_struct(k);
184 /* Must init completion *before* thread sees kthread_stop_info.k */
185 init_completion(&kthread_stop_info.done);
186 smp_wmb();
188 /* Now set kthread_should_stop() to true, and wake it up. */
189 kthread_stop_info.k = k;
190 if (s)
191 up(s);
192 else
193 wake_up_process(k);
194 put_task_struct(k);
196 /* Once it dies, reset stop ptr, gather result and we're done. */
197 wait_for_completion(&kthread_stop_info.done);
198 kthread_stop_info.k = NULL;
199 ret = kthread_stop_info.err;
200 mutex_unlock(&kthread_stop_lock);
202 return ret;
204 EXPORT_SYMBOL(kthread_stop_sem);
206 static __init int helper_init(void)
208 helper_wq = create_singlethread_workqueue("kthread");
209 BUG_ON(!helper_wq);
211 return 0;
213 core_initcall(helper_init);