/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
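/*
 * Worked example (illustrative numbers, not from the code): if
 * insert_sequence is 5 when a flush begins, the flusher sleeps until
 * remove_sequence reaches 5; works queued after the flush started
 * (bumping insert_sequence to 6, 7, ...) are not waited for.
 */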
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
};
/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}
/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	work->wq_data = cwq;
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}
/*
 * Queue work on a workqueue. Return non-zero if it was successfully
 * added.
 *
 * We queue the work to the CPU it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(0, &work->pending)) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
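/*
 * Usage sketch (hypothetical names, using the three-argument work API
 * this file implements; not part of this file):
 *
 *	static void my_func(void *data);
 *	static DECLARE_WORK(my_work, my_func, NULL);
 *	static struct workqueue_struct *my_wq;
 *
 *	my_wq = create_workqueue("mywq");
 *	if (my_wq)
 *		queue_work(my_wq, &my_work);
 */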
static void delayed_work_timer_fn(unsigned long __data)
{
	struct work_struct *work = (struct work_struct *)__data;
	struct workqueue_struct *wq = work->wq_data;
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct work_struct *work, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &work->timer;

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)work;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &work->timer;

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)work;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
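/*
 * Delayed-submission sketch (hypothetical my_wq/my_work): run my_work
 * roughly one second from now, either on the submitting CPU or pinned
 * to CPU 0:
 *
 *	queue_delayed_work(my_wq, &my_work, HZ);
 *	queue_delayed_work_on(0, my_wq, &my_work, HZ);
 */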
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		void (*f) (void *) = work->func;
		void *data = work->data;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(work->wq_data != cwq);
		clear_bit(0, &work->pending);
		f(data);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		lock_cpu_hotplug();
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
		unlock_cpu_hotplug();
	}
}
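/*
 * Teardown sketch (hypothetical driver): flush before freeing anything
 * the pending works might touch, then destroy the queue:
 *
 *	flush_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 */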
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}
struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	/* We don't need the distraction of CPUs appearing and vanishing. */
	lock_cpu_hotplug();
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	unlock_cpu_hotplug();

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	lock_cpu_hotplug();
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		spin_lock(&workqueue_lock);
		list_del(&wq->list);
		spin_unlock(&workqueue_lock);
	}
	unlock_cpu_hotplug();
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
static struct workqueue_struct *keventd_wq;
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
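/*
 * schedule_work() is simply queue_work() on the shared keventd queue,
 * e.g. (hypothetical work item):
 *
 *	schedule_work(&my_work);
 */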
int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, work, delay);
}

int schedule_delayed_work_on(int cpu,
			struct work_struct *work, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
}
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 * @info: a pointer to pass to func()
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(void (*func)(void *info), void *info)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	for_each_online_cpu(cpu) {
		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
				per_cpu_ptr(works, cpu));
	}
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
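/*
 * Usage sketch (hypothetical function): run my_percpu_func() once on
 * every online CPU and wait until all invocations have completed:
 *
 *	static void my_percpu_func(void *info);
 *
 *	int err = schedule_on_each_cpu(my_percpu_func, NULL);
 */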
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct work_struct *work)
{
	while (!cancel_delayed_work(work))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
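/*
 * A "rearming" handler is one that requeues itself, e.g. (hypothetical):
 *
 *	static void my_poll(void *data)
 *	{
 *		...
 *		schedule_delayed_work(&my_work, HZ);
 *	}
 *
 * A bare cancel_delayed_work() can race with such a handler: the timer
 * fires, the handler rearms, and the work survives.  Looping over
 * cancel_delayed_work() with a flush in between closes that window.
 */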
/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @work: the delayed work struct
 */
void cancel_rearming_delayed_work(struct work_struct *work)
{
	cancel_rearming_delayed_workqueue(keventd_wq, work);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @data:	data to pass to the function
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(void (*fn)(void *data), void *data,
			       struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(data);
		return 0;
	}

	INIT_WORK(&ew->work, fn, data);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
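/*
 * Caller sketch (hypothetical my_release/obj): @ew must stay valid
 * until the work runs, so it is typically embedded in the object being
 * released:
 *
 *	if (execute_in_process_context(my_release, obj, &obj->ew))
 *		pr_debug("release deferred to keventd\n");
 */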
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}
/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		break;
	}

	return NOTIFY_OK;
}
#endif
void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}
EXPORT_SYMBOL_GPL(__create_workqueue);
EXPORT_SYMBOL_GPL(queue_work);
EXPORT_SYMBOL_GPL(queue_delayed_work);
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
EXPORT_SYMBOL_GPL(flush_workqueue);
EXPORT_SYMBOL_GPL(destroy_workqueue);

EXPORT_SYMBOL(schedule_work);
EXPORT_SYMBOL(schedule_delayed_work);
EXPORT_SYMBOL(schedule_delayed_work_on);
EXPORT_SYMBOL(flush_scheduled_work);