/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via keventd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/semaphore.h>
/*
 * We don't want to execute off keventd since it might
 * hold a semaphore our callers hold too:
 */
static struct workqueue_struct *helper_wq;
struct kthread_create_info
{
        /* Information passed to kthread() from keventd. */
        int (*threadfn)(void *data);
        void *data;
        struct completion started;

        /* Result passed back to kthread_create() from keventd. */
        struct task_struct *result;
        struct completion done;
};
struct kthread_stop_info
{
        struct task_struct *k;
        int err;
        struct completion done;
};
/* Thread stopping is done by setting this var: lock serializes
 * multiple kthread_stop calls. */
static DECLARE_MUTEX(kthread_stop_lock);
static struct kthread_stop_info kthread_stop_info;
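/*
 * Note (editorial): kthread_stop_info is a single global, so only one
 * thread can be in the middle of being stopped at any moment; that is what
 * kthread_stop_lock serializes, and it is why kthread_should_stop() can get
 * away with a simple pointer comparison against current.
 */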
int kthread_should_stop(void)
{
        return (kthread_stop_info.k == current);
}
EXPORT_SYMBOL(kthread_should_stop);
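/*
 * Illustrative usage (not part of this file): a thread function handed to
 * kthread_create() typically loops until kthread_should_stop() returns
 * true; my_thread_fn(), my_work() and my_data are hypothetical names.
 *
 *      static int my_thread_fn(void *my_data)
 *      {
 *              while (!kthread_should_stop()) {
 *                      my_work(my_data);
 *                      set_current_state(TASK_INTERRUPTIBLE);
 *                      if (!kthread_should_stop())
 *                              schedule();
 *                      __set_current_state(TASK_RUNNING);
 *              }
 *              return 0;       /* passed back through kthread_stop() */
 *      }
 */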
static void kthread_exit_files(void)
{
        struct fs_struct *fs;
        struct task_struct *tsk = current;

        exit_fs(tsk);           /* current->fs->count--; */
        fs = init_task.fs;
        tsk->fs = fs;
        atomic_inc(&fs->count);
        exit_files(tsk);
        current->files = init_task.files;
        atomic_inc(&tsk->files->count);
}
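/*
 * Note on kthread_exit_files() above: it drops the fs_struct and
 * files_struct inherited from whoever spawned us (keventd, or a userspace
 * process such as modprobe) and pins init_task's copies instead, so the new
 * kernel thread does not keep the spawner's root, cwd or open files alive.
 */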
static int kthread(void *_create)
{
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data);
        void *data;
        sigset_t blocked;
        int ret = -EINTR;

        kthread_exit_files();

        /* Copy data: it's on keventd's stack */
        threadfn = create->threadfn;
        data = create->data;

        /* Block and flush all signals (in case we're not from keventd). */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /* By default we can run anywhere, unlike keventd. */
        set_cpus_allowed(current, CPU_MASK_ALL);

        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_INTERRUPTIBLE);
        complete(&create->started);
        schedule();

        if (!kthread_should_stop())
                ret = threadfn(data);

        /* It might have exited on its own, w/o kthread_stop.  Check. */
        if (kthread_should_stop()) {
                kthread_stop_info.err = ret;
                complete(&kthread_stop_info.done);
        }
        return 0;
}
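/*
 * Note on the startup handshake in kthread() above: the new thread marks
 * itself TASK_INTERRUPTIBLE, completes ->started so kthread_create() can
 * return, then sleeps in schedule().  It stays parked there until someone
 * wakes it (typically after an optional kthread_bind()), or until
 * kthread_stop() is called first, in which case threadfn() never runs and
 * the -EINTR default in ret is what gets reported back.
 */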
/* We are keventd: create a thread. */
static void keventd_create_kthread(void *_create)
{
        struct kthread_create_info *create = _create;
        int pid;

        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                create->result = ERR_PTR(pid);
        } else {
                wait_for_completion(&create->started);
                create->result = find_task_by_pid(pid);
        }
        complete(&create->done);
}
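/*
 * Note: CLONE_FS | CLONE_FILES means the new thread initially shares the
 * creator's fs_struct and files_struct; kthread() immediately calls
 * kthread_exit_files() to switch over to init_task's, so the sharing is
 * only transient.
 */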
struct task_struct *kthread_create(int (*threadfn)(void *data),
                                   void *data,
                                   const char namefmt[],
                                   ...)
{
        struct kthread_create_info create;
        DECLARE_WORK(work, keventd_create_kthread, &create);

        create.threadfn = threadfn;
        create.data = data;
        init_completion(&create.started);
        init_completion(&create.done);

        /*
         * The workqueue needs to start up first:
         */
        if (!helper_wq)
                work.func(work.data);
        else {
                queue_work(helper_wq, &work);
                wait_for_completion(&create.done);
        }
        if (!IS_ERR(create.result)) {
                va_list args;
                va_start(args, namefmt);
                vsnprintf(create.result->comm, sizeof(create.result->comm),
                          namefmt, args);
                va_end(args);
        }

        return create.result;
}
EXPORT_SYMBOL(kthread_create);
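/*
 * Illustrative caller (not part of this file): the freshly created thread
 * is left sleeping, so the caller normally wakes it explicitly;
 * my_thread_fn and my_data are hypothetical names.
 *
 *      struct task_struct *tsk;
 *
 *      tsk = kthread_create(my_thread_fn, my_data, "mythread");
 *      if (!IS_ERR(tsk))
 *              wake_up_process(tsk);
 *
 * The kthread_run() macro in include/linux/kthread.h combines these two
 * steps into one call.
 */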
void kthread_bind(struct task_struct *k, unsigned int cpu)
{
        BUG_ON(k->state != TASK_INTERRUPTIBLE);
        /* Must have done schedule() in kthread() before we set_task_cpu */
        wait_task_inactive(k);
        set_task_cpu(k, cpu);
        k->cpus_allowed = cpumask_of_cpu(cpu);
}
EXPORT_SYMBOL(kthread_bind);
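/*
 * Illustrative use of kthread_bind() (not part of this file): per-CPU
 * threads are bound while still parked in kthread() and only woken
 * afterwards, so threadfn() starts on the requested CPU; my_percpu_fn is a
 * hypothetical thread function.
 *
 *      tsk = kthread_create(my_percpu_fn, NULL, "mythread/%d", cpu);
 *      if (!IS_ERR(tsk)) {
 *              kthread_bind(tsk, cpu);
 *              wake_up_process(tsk);
 *      }
 */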
int kthread_stop(struct task_struct *k)
{
        return kthread_stop_sem(k, NULL);
}
EXPORT_SYMBOL(kthread_stop);
int kthread_stop_sem(struct task_struct *k, struct semaphore *s)
{
        int ret;

        down(&kthread_stop_lock);

        /* It could exit after stop_info.k set, but before wake_up_process. */
        get_task_struct(k);

        /* Must init completion *before* thread sees kthread_stop_info.k */
        init_completion(&kthread_stop_info.done);
        smp_wmb();

        /* Now set kthread_should_stop() to true, and wake it up. */
        kthread_stop_info.k = k;
        if (s)
                up(s);
        else
                wake_up_process(k);
        put_task_struct(k);

        /* Once it dies, reset stop ptr, gather result and we're done. */
        wait_for_completion(&kthread_stop_info.done);
        kthread_stop_info.k = NULL;
        ret = kthread_stop_info.err;
        up(&kthread_stop_lock);

        return ret;
}
EXPORT_SYMBOL(kthread_stop_sem);
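/*
 * Note on kthread_stop_sem(): the optional semaphore covers threads that
 * park themselves with down()/down_interruptible() on that semaphore rather
 * than in a bare schedule(); the up() above releases them so they can
 * notice kthread_should_stop() and exit.  With s == NULL this is exactly
 * kthread_stop().
 */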
static __init int helper_init(void)
{
        helper_wq = create_singlethread_workqueue("kthread");
        BUG_ON(!helper_wq);

        return 0;
}
core_initcall(helper_init);