/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

        spinlock_t lock;

        struct list_head worklist;
        wait_queue_head_t more_work;
        struct work_struct *current_work;

        struct workqueue_struct *wq;
        struct task_struct *thread;
        int should_stop;

        int run_depth;          /* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
        struct cpu_workqueue_struct *cpu_wq;
        struct list_head list;
        const char *name;
        int singlethread;
        int freezeable;         /* Freeze threads during suspend */
};

/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/* optimization, we could use cpu_possible_map */
static cpumask_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
        return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
        return is_single_threaded(wq)
                ? &cpu_singlethread_map : &cpu_populated_map;
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
                                struct cpu_workqueue_struct *cwq)
{
        unsigned long new;

        BUG_ON(!work_pending(work));

        new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
        new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
        atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
        return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work, int tail)
{
        set_wq_data(work, cwq);
        if (tail)
                list_add_tail(&work->entry, &cwq->worklist);
        else
                list_add(&work->entry, &cwq->worklist);
        wake_up(&cwq->more_work);
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
                         struct work_struct *work)
{
        unsigned long flags;

        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, 1);
        spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
        int ret = 0, cpu = get_cpu();

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                if (unlikely(is_single_threaded(wq)))
                        cpu = singlethread_cpu;
                BUG_ON(!list_empty(&work->entry));
                __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
                ret = 1;
        }
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

void delayed_work_timer_fn(unsigned long __data)
{
        struct delayed_work *dwork = (struct delayed_work *)__data;
        struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
        struct workqueue_struct *wq = cwq->wq;
        int cpu = smp_processor_id();

        if (unlikely(is_single_threaded(wq)))
                cpu = singlethread_cpu;

        __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        timer_stats_timer_set_start_info(timer);
        if (delay == 0)
                return queue_work(wq, work);

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores cwq for the moment, for the timer_fn */
                set_wq_data(work,
                        per_cpu_ptr(wq->cpu_wq, raw_smp_processor_id()));
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer(timer);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
{
        int ret = 0;
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;

        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));

                /* This stores cwq for the moment, for the timer_fn */
                set_wq_data(work,
                        per_cpu_ptr(wq->cpu_wq, raw_smp_processor_id()));
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
                add_timer_on(timer, cpu);
                ret = 1;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
        spin_lock_irq(&cwq->lock);
        cwq->run_depth++;
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
                        __FUNCTION__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
                struct work_struct *work = list_entry(cwq->worklist.next,
                                                struct work_struct, entry);
                work_func_t f = work->func;

                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);

                BUG_ON(get_wq_data(work) != cwq);
                if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
                        work_release(work);
                f(work);

                if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
                        printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                                        "%s/0x%08x/%d\n",
                                        current->comm, preempt_count(),
                                        current->pid);
                        printk(KERN_ERR "    last function: ");
                        print_symbol("%s\n", (unsigned long)f);
                        debug_show_held_locks(current);
                        dump_stack();
                }

                spin_lock_irq(&cwq->lock);
                cwq->current_work = NULL;
        }
        cwq->run_depth--;
        spin_unlock_irq(&cwq->lock);
}

/*
 * NOTE: the caller must not touch *cwq if this func returns true
 */
static int cwq_should_stop(struct cpu_workqueue_struct *cwq)
{
        int should_stop = cwq->should_stop;

        if (unlikely(should_stop)) {
                spin_lock_irq(&cwq->lock);
                should_stop = cwq->should_stop && list_empty(&cwq->worklist);
                if (should_stop)
                        cwq->thread = NULL;
                spin_unlock_irq(&cwq->lock);
        }

        return should_stop;
}

static int worker_thread(void *__cwq)
{
        struct cpu_workqueue_struct *cwq = __cwq;
        DEFINE_WAIT(wait);
        struct k_sigaction sa;
        sigset_t blocked;

        if (!cwq->wq->freezeable)
                current->flags |= PF_NOFREEZE;

        set_user_nice(current, -5);

        /* Block and flush all signals */
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        /*
         * We inherited MPOL_INTERLEAVE from the booting kernel.
         * Set MPOL_DEFAULT to insure node local allocations.
         */
        numa_default_policy();

        /* SIG_IGN makes children autoreap: see do_notify_parent(). */
        sa.sa.sa_handler = SIG_IGN;
        sa.sa.sa_flags = 0;
        siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
        do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

        for (;;) {
                if (cwq->wq->freezeable)
                        try_to_freeze();

                prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
                if (!cwq->should_stop && list_empty(&cwq->worklist))
                        schedule();
                finish_wait(&cwq->more_work, &wait);

                if (cwq_should_stop(cwq))
                        break;

                run_workqueue(cwq);
        }

        return 0;
}

struct wq_barrier {
        struct work_struct      work;
        struct completion       done;
};

static void wq_barrier_func(struct work_struct *work)
{
        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

        complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                                        struct wq_barrier *barr, int tail)
{
        INIT_WORK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

        init_completion(&barr->done);

        insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
        if (cwq->thread == current) {
                /*
                 * Probably keventd trying to flush its own queue. So simply run
                 * it by hand rather than deadlocking.
                 */
                run_workqueue(cwq);
        } else {
                struct wq_barrier barr;
                int active = 0;

                spin_lock_irq(&cwq->lock);
                if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
                        insert_wq_barrier(cwq, &barr, 1);
                        active = 1;
                }
                spin_unlock_irq(&cwq->lock);

                if (active)
                        wait_for_completion(&barr.done);
        }
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        int cpu;

        might_sleep();
        for_each_cpu_mask(cpu, *cpu_map)
                flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);

static void wait_on_work(struct cpu_workqueue_struct *cwq,
                                struct work_struct *work)
{
        struct wq_barrier barr;
        int running = 0;

        spin_lock_irq(&cwq->lock);
        if (unlikely(cwq->current_work == work)) {
                insert_wq_barrier(cwq, &barr, 0);
                running = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (unlikely(running))
                wait_for_completion(&barr.done);
}

/**
 * flush_work - block until a work_struct's callback has terminated
 * @wq: the workqueue on which the work is queued
 * @work: the work which is to be flushed
 *
 * flush_work() will attempt to cancel the work if it is queued.  If the work's
 * callback appears to be running, flush_work() will block until it has
 * completed.
 *
 * flush_work() is designed to be used when the caller is tearing down data
 * structures which the callback function operates upon.  It is expected that,
 * prior to calling flush_work(), the caller has arranged for the work to not
 * be re-queued.
 */
void flush_work(struct workqueue_struct *wq, struct work_struct *work)
{
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        struct cpu_workqueue_struct *cwq;
        int cpu;

        might_sleep();

        cwq = get_wq_data(work);
        /* Was it ever queued ? */
        if (!cwq)
                return;

        /*
         * This work can't be re-queued, no need to re-check that
         * get_wq_data() is still the same when we take cwq->lock.
         */
        spin_lock_irq(&cwq->lock);
        list_del_init(&work->entry);
        work_release(work);
        spin_unlock_irq(&cwq->lock);

        for_each_cpu_mask(cpu, *cpu_map)
                wait_on_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
EXPORT_SYMBOL_GPL(flush_work);

static struct workqueue_struct *keventd_wq;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
        return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
{
        timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
        int cpu;
        struct work_struct *works;

        works = alloc_percpu(struct work_struct);
        if (!works)
                return -ENOMEM;

        preempt_disable();              /* CPU hotplug */
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);

                INIT_WORK(work, func);
                set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
                __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
        }
        preempt_enable();
        flush_workqueue(keventd_wq);
        free_percpu(works);
        return 0;
}

void flush_scheduled_work(void)
{
        flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

void flush_work_keventd(struct work_struct *work)
{
        flush_work(keventd_wq, work);
}
EXPORT_SYMBOL(flush_work_keventd);

/**
 * cancel_rearming_delayed_workqueue - kill off a delayed work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 *
 * Note that the work callback function may still be running on return from
 * cancel_delayed_work(). Run flush_workqueue() or flush_work() to wait on it.
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct delayed_work *dwork)
{
        /* Was it ever queued ? */
        if (!get_wq_data(&dwork->work))
                return;

        while (!cancel_delayed_work(dwork))
                flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);

/**
 * cancel_rearming_delayed_work - kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
        cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:         the function to execute
 * @ew:         guaranteed storage for the execute work structure (must
 *              be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:     0 - function was executed
 *              1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
        if (!in_interrupt()) {
                fn(&ew->work);
                return 0;
        }

        INIT_WORK(&ew->work, fn);
        schedule_work(&ew->work);

        return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);

int keventd_up(void)
{
        return keventd_wq != NULL;
}

int current_is_keventd(void)
{
        struct cpu_workqueue_struct *cwq;
        int cpu = smp_processor_id();   /* preempt-safe: keventd is per-cpu */
        int ret = 0;

        BUG_ON(!keventd_wq);

        cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
        if (current == cwq->thread)
                ret = 1;

        return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
        struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

        cwq->wq = wq;
        spin_lock_init(&cwq->lock);
        INIT_LIST_HEAD(&cwq->worklist);
        init_waitqueue_head(&cwq->more_work);

        return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct workqueue_struct *wq = cwq->wq;
        const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
        struct task_struct *p;

        p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
        /*
         * Nobody can add the work_struct to this cwq,
         *      if (caller is __create_workqueue)
         *              nobody should see this wq
         *      else // caller is CPU_UP_PREPARE
         *              cpu is not on cpu_online_map
         * so we can abort safely.
         */
        if (IS_ERR(p))
                return PTR_ERR(p);

        cwq->thread = p;
        cwq->should_stop = 0;

        return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct task_struct *p = cwq->thread;

        if (p != NULL) {
                if (cpu >= 0)
                        kthread_bind(p, cpu);
                wake_up_process(p);
        }
}

struct workqueue_struct *__create_workqueue(const char *name,
                                            int singlethread, int freezeable)
{
        struct workqueue_struct *wq;
        struct cpu_workqueue_struct *cwq;
        int err = 0, cpu;

        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
        if (!wq)
                return NULL;

        wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
        if (!wq->cpu_wq) {
                kfree(wq);
                return NULL;
        }

        wq->name = name;
        wq->singlethread = singlethread;
        wq->freezeable = freezeable;
        INIT_LIST_HEAD(&wq->list);

        if (singlethread) {
                cwq = init_cpu_workqueue(wq, singlethread_cpu);
                err = create_workqueue_thread(cwq, singlethread_cpu);
                start_workqueue_thread(cwq, -1);
        } else {
                mutex_lock(&workqueue_mutex);
                list_add(&wq->list, &workqueues);

                for_each_possible_cpu(cpu) {
                        cwq = init_cpu_workqueue(wq, cpu);
                        if (err || !cpu_online(cpu))
                                continue;
                        err = create_workqueue_thread(cwq, cpu);
                        start_workqueue_thread(cwq, cpu);
                }
                mutex_unlock(&workqueue_mutex);
        }

        if (err) {
                destroy_workqueue(wq);
                wq = NULL;
        }
        return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
        struct wq_barrier barr;
        int alive = 0;

        spin_lock_irq(&cwq->lock);
        if (cwq->thread != NULL) {
                insert_wq_barrier(cwq, &barr, 1);
                cwq->should_stop = 1;
                alive = 1;
        }
        spin_unlock_irq(&cwq->lock);

        if (alive) {
                wait_for_completion(&barr.done);

                while (unlikely(cwq->thread != NULL))
                        cpu_relax();
                /*
                 * Wait until cwq->thread unlocks cwq->lock,
                 * it won't touch *cwq after that.
                 */
                smp_rmb();
                spin_unlock_wait(&cwq->lock);
        }
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
        const cpumask_t *cpu_map = wq_cpu_map(wq);
        struct cpu_workqueue_struct *cwq;
        int cpu;

        mutex_lock(&workqueue_mutex);
        list_del(&wq->list);
        mutex_unlock(&workqueue_mutex);

        for_each_cpu_mask(cpu, *cpu_map) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);
                cleanup_workqueue_thread(cwq, cpu);
        }

        free_percpu(wq->cpu_wq);
        kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                                                unsigned long action,
                                                void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_workqueue_struct *cwq;
        struct workqueue_struct *wq;

        switch (action) {
        case CPU_LOCK_ACQUIRE:
                mutex_lock(&workqueue_mutex);
                return NOTIFY_OK;

        case CPU_LOCK_RELEASE:
                mutex_unlock(&workqueue_mutex);
                return NOTIFY_OK;

        case CPU_UP_PREPARE:
                cpu_set(cpu, cpu_populated_map);
        }

        list_for_each_entry(wq, &workqueues, list) {
                cwq = per_cpu_ptr(wq->cpu_wq, cpu);

                switch (action) {
                case CPU_UP_PREPARE:
                        if (!create_workqueue_thread(cwq, cpu))
                                break;
                        printk(KERN_ERR "workqueue for %i failed\n", cpu);
                        return NOTIFY_BAD;

                case CPU_ONLINE:
                        start_workqueue_thread(cwq, cpu);
                        break;

                case CPU_UP_CANCELED:
                        start_workqueue_thread(cwq, -1);
                case CPU_DEAD:
                        cleanup_workqueue_thread(cwq, cpu);
                        break;
                }
        }

        return NOTIFY_OK;
}

void __init init_workqueues(void)
{
        cpu_populated_map = cpu_online_map;
        singlethread_cpu = first_cpu(cpu_possible_map);
        cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
        hotcpu_notifier(workqueue_cpu_callback, 0);
        keventd_wq = create_workqueue("events");
        BUG_ON(!keventd_wq);
}