/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter <clameter@sgi.com>.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */

	int freezeable;		/* Freeze the thread during suspend */
} ____cacheline_aligned;
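/*
 * Worked example of the sequence counters above (editorial sketch, not
 * part of the original file): if three works have been queued on a CPU
 * since boot, insert_sequence is 3.  A flush started at that moment
 * records sequence_needed = 3 and sleeps until remove_sequence reaches
 * 3, i.e. until those three works have run.  Works queued while the
 * flush sleeps only raise insert_sequence, so they are not waited for
 * and cannot livelock the flusher.
 */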
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	const char *name;
	struct list_head list;	/* Empty if single thread */
};
/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
   threads to each one as cpus come/go. */
static DEFINE_MUTEX(workqueue_mutex);
static LIST_HEAD(workqueues);

static int singlethread_cpu;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return list_empty(&wq->list);
}
static inline void set_wq_data(struct work_struct *work, void *wq)
{
	unsigned long new, old, res;

	/* assume the pending flag is already set and that the task has already
	 * been queued on this workqueue */
	new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING);
	res = work->management;
	if (res != new) {
		do {
			old = res;
			new = (unsigned long) wq;
			new |= (old & WORK_STRUCT_FLAG_MASK);
			res = cmpxchg(&work->management, old, new);
		} while (res != old);
	}
}
static inline void *get_wq_data(struct work_struct *work)
{
	return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
}
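/*
 * Layout illustration (editorial sketch, not part of the original file):
 * work->management packs the WORK_STRUCT_* flag bits into the low bits
 * of the cpu_workqueue_struct pointer, which works because the pointer
 * is at least word-aligned.  Assuming WORK_STRUCT_FLAG_MASK covers the
 * flag bits, the round trip is:
 *
 *	work->management = (unsigned long)cwq | (1UL << WORK_STRUCT_PENDING);
 *	cwq == (struct cpu_workqueue_struct *)
 *			(work->management & WORK_STRUCT_WQ_DATA_MASK);
 */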
/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	set_wq_data(work, cwq);
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
		if (unlikely(is_single_threaded(wq)))
			cpu = singlethread_cpu;
		BUG_ON(!list_empty(&work->entry));
		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
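/*
 * Usage sketch (editorial, not part of the original file; my_wq and
 * my_work_fn are made-up names).  The handler runs later, in process
 * context, in one of the workqueue's threads:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		printk(KERN_DEBUG "my_work ran\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	queue_work(my_wq, &my_work);	(my_wq from create_workqueue())
 */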
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct workqueue_struct *wq = get_wq_data(&dwork->work);
	int cpu = smp_processor_id();

	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;

	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work);
}
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (delay == 0)
		return queue_work(wq, work);

	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
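/*
 * Usage sketch (editorial, not part of the original file; names are made
 * up).  The work runs roughly @delay jiffies after queueing:
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		printk(KERN_DEBUG "poll tick\n");
 *	}
 *	static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);
 *
 *	queue_delayed_work(my_wq, &my_poll, HZ);	(about one second)
 */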
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @dwork was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		set_wq_data(work, wq);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;
		add_timer_on(timer, cpu);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__FUNCTION__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(get_wq_data(work) != cwq);
		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
			work_release(work);
		f(work);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					current->pid);
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	cwq->run_depth--;
	spin_unlock_irqrestore(&cwq->lock, flags);
}
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	if (!cwq->freezeable)
		current->flags |= PF_NOFREEZE;

	set_user_nice(current, -5);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/*
	 * We inherited MPOL_INTERLEAVE from the booting kernel.
	 * Set MPOL_DEFAULT to insure node local allocations.
	 */
	numa_default_policy();

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		if (cwq->freezeable)
			try_to_freeze();

		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			__set_current_state(TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
	} else {
		DEFINE_WAIT(wait);
		long sequence_needed;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence number and
 * will sleep until the head sequence is greater than or equal to that.  This
 * means that we sleep until all works which were queued on entry have been
 * handled, but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void fastcall flush_workqueue(struct workqueue_struct *wq)
{
	might_sleep();

	if (is_single_threaded(wq)) {
		/* Always use first cpu's area. */
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
		int cpu;

		mutex_lock(&workqueue_mutex);
		for_each_online_cpu(cpu)
			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
		mutex_unlock(&workqueue_mutex);
	}
}
EXPORT_SYMBOL_GPL(flush_workqueue);
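/*
 * Typical shutdown ordering (editorial sketch, not part of the original
 * file; dev is a made-up driver structure).  Stop new submissions, wait
 * for whatever is still queued, then tear down:
 *
 *	cancel_delayed_work(&dev->poll_work);
 *	flush_workqueue(dev->wq);
 *	destroy_workqueue(dev->wq);
 */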
static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
						   int cpu, int freezeable)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	cwq->freezeable = freezeable;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	if (is_single_threaded(wq))
		p = kthread_create(worker_thread, cwq, "%s", wq->name);
	else
		p = kthread_create(worker_thread, cwq, "%s/%d", wq->name, cpu);
	if (IS_ERR(p))
		return NULL;
	cwq->thread = p;
	return p;
}
struct workqueue_struct *__create_workqueue(const char *name,
					    int singlethread, int freezeable)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;
	struct task_struct *p;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	mutex_lock(&workqueue_mutex);
	if (singlethread) {
		INIT_LIST_HEAD(&wq->list);
		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
		if (!p)
			destroy = 1;
		else
			wake_up_process(p);
	} else {
		list_add(&wq->list, &workqueues);
		for_each_online_cpu(cpu) {
			p = create_workqueue_thread(wq, cpu, freezeable);
			if (p) {
				kthread_bind(p, cpu);
				wake_up_process(p);
			} else
				destroy = 1;
		}
	}
	mutex_unlock(&workqueue_mutex);

	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue);
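/*
 * Editorial note: callers normally reach this through the wrapper macros
 * in <linux/workqueue.h> rather than calling __create_workqueue()
 * directly:
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = create_workqueue("mydrv");			(one thread per CPU)
 *	wq = create_singlethread_workqueue("mydrv");	(one thread total)
 */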
static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;
	unsigned long flags;
	struct task_struct *p;

	cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	spin_lock_irqsave(&cwq->lock, flags);
	p = cwq->thread;
	cwq->thread = NULL;
	spin_unlock_irqrestore(&cwq->lock, flags);
	if (p)
		kthread_stop(p);
}
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	/* We don't need the distraction of CPUs appearing and vanishing. */
	mutex_lock(&workqueue_mutex);
	if (is_single_threaded(wq))
		cleanup_workqueue_thread(wq, singlethread_cpu);
	else {
		for_each_online_cpu(cpu)
			cleanup_workqueue_thread(wq, cpu);
		list_del(&wq->list);
	}
	mutex_unlock(&workqueue_mutex);
	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static struct workqueue_struct *keventd_wq;
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int fastcall schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * Appears to be racy against CPU hotplug.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	mutex_lock(&workqueue_mutex);
	for_each_online_cpu(cpu) {
		INIT_WORK(per_cpu_ptr(works, cpu), func);
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
				per_cpu_ptr(works, cpu));
	}
	mutex_unlock(&workqueue_mutex);
	flush_workqueue(keventd_wq);
	free_percpu(works);
	return 0;
}
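/*
 * Usage sketch (editorial, not part of the original file; the handler
 * name is made up).  Each online CPU runs the function once in its
 * keventd thread, so the handler may touch that CPU's per-cpu data:
 *
 *	static void report_cpu(struct work_struct *unused)
 *	{
 *		printk(KERN_DEBUG "running on cpu %d\n", smp_processor_id());
 *	}
 *
 *	schedule_on_each_cpu(report_cpu);
 */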
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * cancel_rearming_delayed_workqueue - reliably kill off a delayed
 *			work whose handler rearms the delayed work.
 * @wq:   the controlling workqueue structure
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *dwork)
{
	while (!cancel_delayed_work(dwork))
		flush_workqueue(wq);
}
EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd
 *			work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork)
{
	cancel_rearming_delayed_workqueue(keventd_wq, dwork);
}
EXPORT_SYMBOL(cancel_rearming_delayed_work);
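/*
 * The rearming pattern this is for (editorial sketch, not part of the
 * original file; names are made up):
 *
 *	static void poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *		schedule_delayed_work(dwork, HZ);	(handler rearms itself)
 *	}
 *
 * A bare cancel_delayed_work() can race with a handler that is just
 * about to requeue; looping until the cancel succeeds, flushing in
 * between as above, closes that window.
 */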
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
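/*
 * Usage sketch (editorial, not part of the original file): useful on
 * paths that may be entered from either interrupt or process context,
 * e.g. an object release.  The execute_work storage must stay valid
 * until the handler has run, so it is typically embedded in the object:
 *
 *	execute_in_process_context(my_release_fn, &obj->ew);
 */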
int keventd_up(void)
{
	return keventd_wq != NULL;
}
int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}
/* Take the work from this (downed) CPU. */
static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
	struct list_head list;
	struct work_struct *work;

	spin_lock_irq(&cwq->lock);
	list_replace_init(&cwq->worklist, &list);

	while (!list_empty(&list)) {
		printk("Taking work for %s\n", wq->name);
		work = list_entry(list.next, struct work_struct, entry);
		list_del(&work->entry);
		__queue_work(per_cpu_ptr(wq->cpu_wq, smp_processor_id()), work);
	}
	spin_unlock_irq(&cwq->lock);
}
/* We're holding the cpucontrol mutex here */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;
	struct workqueue_struct *wq;

	switch (action) {
	case CPU_UP_PREPARE:
		mutex_lock(&workqueue_mutex);
		/* Create a new workqueue thread for it. */
		list_for_each_entry(wq, &workqueues, list) {
			if (!create_workqueue_thread(wq, hotcpu, 0)) {
				printk("workqueue for %i failed\n", hotcpu);
				return NOTIFY_BAD;
			}
		}
		break;

	case CPU_ONLINE:
		/* Kick off worker threads. */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq;

			cwq = per_cpu_ptr(wq->cpu_wq, hotcpu);
			kthread_bind(cwq->thread, hotcpu);
			wake_up_process(cwq->thread);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_UP_CANCELED:
		list_for_each_entry(wq, &workqueues, list) {
			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
				continue;
			/* Unbind so it can run. */
			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
				     any_online_cpu(cpu_online_map));
			cleanup_workqueue_thread(wq, hotcpu);
		}
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DOWN_PREPARE:
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_DOWN_FAILED:
		mutex_unlock(&workqueue_mutex);
		break;

	case CPU_DEAD:
		list_for_each_entry(wq, &workqueues, list)
			cleanup_workqueue_thread(wq, hotcpu);
		list_for_each_entry(wq, &workqueues, list)
			take_over_work(wq, hotcpu);
		mutex_unlock(&workqueue_mutex);
		break;
	}

	return NOTIFY_OK;
}
void init_workqueues(void)
{
	singlethread_cpu = first_cpu(cpu_possible_map);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}