/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion done;

	struct list_head list;
};

struct kthread {
	int should_stop;
	void *data;
	struct completion exited;
};

#define to_kthread(tsk)	\
	container_of((tsk)->vfork_done, struct kthread, exited)

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
int kthread_should_stop(void)
{
	return to_kthread(current)->should_stop;
}
EXPORT_SYMBOL(kthread_should_stop);
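
/*
 * Illustrative sketch (not part of the original source): a typical thread
 * function polls kthread_should_stop() in its main loop and simply returns
 * once kthread_stop() has been called.  The names example_threadfn and
 * example_do_work() are hypothetical.
 *
 *	static int example_threadfn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			example_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */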

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}
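
/*
 * Illustrative sketch (not part of the original source): the @data pointer
 * handed to kthread_create()/kthread_run() is exactly what kthread_data()
 * later reports for that task.  example_threadfn and example_dev are
 * hypothetical.
 *
 *	task = kthread_run(example_threadfn, &example_dev, "example");
 *	if (!IS_ERR(task))
 *		WARN_ON(kthread_data(task) != &example_dev);
 */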

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct kthread self;
	int ret;

	self.should_stop = 0;
	self.data = data;
	init_completion(&self.exited);
	current->vfork_done = &self.exited;

	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(&create->done);
	schedule();

	ret = -EINTR;
	if (!self.should_stop)
		ret = threadfn(data);

	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return numa_node_id();
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		create->result = ERR_PTR(pid);
		complete(&create->done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);
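
/*
 * Illustrative sketch (not part of the original source): creating a named,
 * initially stopped kthread and then starting it.  example_threadfn and
 * example_data are hypothetical; -1 means no NUMA preference.
 *
 *	task = kthread_create_on_node(example_threadfn, &example_data, -1,
 *				      "example/%d", 1);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	wake_up_process(task);
 *
 * kthread_create() is shorthand for the node == -1 case, and kthread_run()
 * additionally performs the wake_up_process() step.
 */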

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	p->cpus_allowed = cpumask_of_cpu(cpu);
	p->rt.nr_cpus_allowed = 1;
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
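
/*
 * Illustrative sketch (not part of the original source): binding must happen
 * while the new thread is still stopped, i.e. between kthread_create() and
 * wake_up_process().  example_threadfn is hypothetical.
 *
 *	task = kthread_create(example_threadfn, NULL, "example/%u", cpu);
 *	if (!IS_ERR(task)) {
 *		kthread_bind(task, cpu);
 *		wake_up_process(task);
 *	}
 */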

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);
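
/*
 * Illustrative sketch (not part of the original source): the creator keeps
 * the task_struct returned by kthread_run()/kthread_create() and later
 * tears the thread down; kthread_stop() returns example_threadfn()'s
 * return value.
 *
 *	err = kthread_stop(task);
 *	if (err && err != -EINTR)
 *		pr_err("example thread exited with %d\n", err);
 */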

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
		smp_wmb();	/* wmb worker-b0 paired with flush-b1 */
		work->done_seq = work->queue_seq;
		smp_mb();	/* mb worker-b1 paired with flush-b0 */
		if (atomic_read(&work->flushing))
			wake_up_all(&work->done);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
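
/*
 * Illustrative sketch (not part of the original source): attaching a kthread
 * to a worker by using kthread_worker_fn as the thread function.
 * example_worker is hypothetical.
 *
 *	static DEFINE_KTHREAD_WORKER(example_worker);
 *
 *	task = kthread_run(kthread_worker_fn, &example_worker, "example_worker");
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 */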

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @worker for async execution.  @worker
 * must have been initialized with init_kthread_worker().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		list_add_tail(&work->node, &worker->work_list);
		work->queue_seq++;
		if (likely(worker->task))
			wake_up_process(worker->task);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
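
/*
 * Illustrative sketch (not part of the original source): a kthread_work is
 * initialized with its callback and then queued on a worker; the callback
 * receives the kthread_work pointer and typically uses container_of() to
 * reach its enclosing object.  The example_* names are hypothetical.
 *
 *	static void example_work_fn(struct kthread_work *work)
 *	{
 *		struct example_dev *dev =
 *			container_of(work, struct example_dev, work);
 *
 *		example_process(dev);
 *	}
 *
 *	init_kthread_work(&dev->work, example_work_fn);
 *	queue_kthread_work(&example_worker, &dev->work);
 */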

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	int seq = work->queue_seq;

	atomic_inc(&work->flushing);

	/*
	 * mb flush-b0 paired with worker-b1, to make sure either
	 * worker sees the above increment or we see done_seq update.
	 */
	smp_mb__after_atomic_inc();

	/* A - B <= 0 tests whether B is in front of A regardless of overflow */
	wait_event(work->done, seq - work->done_seq <= 0);
	atomic_dec(&work->flushing);

	/*
	 * rmb flush-b1 paired with worker-b0, to make sure our caller
	 * sees every change made by work->func().
	 */
	smp_mb__after_atomic_dec();
}
EXPORT_SYMBOL_GPL(flush_kthread_work);
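
/*
 * Worked example of the overflow-tolerant comparison above (illustrative,
 * assuming the kernel's wrap-around semantics for signed int arithmetic):
 * with seq == INT_MAX and done_seq already wrapped to INT_MIN + 4 (five
 * queueings later), seq - done_seq wraps to -5, so "seq - done_seq <= 0"
 * correctly reports that done_seq has moved past seq even though
 * done_seq < seq numerically.
 */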

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
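
/*
 * Illustrative sketch (not part of the original source): a typical worker
 * shutdown drains all queued work first and then stops the attached
 * kthread.
 *
 *	flush_kthread_worker(&example_worker);
 *	kthread_stop(task);
 */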