/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
} ____cacheline_aligned;
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
	int rt;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}
/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};
static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_wq_single_threaded(wq)
		? cpu_singlethread_map : cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_wq_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}
/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
	unsigned long flags = *work_data_bits(work) &
				(1UL << WORK_STRUCT_STATIC);

	atomic_long_set(&work->data, flags);
}
static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
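
/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): work->data packs the owning cwq pointer together with the flag
 * bits in a single word.  Because cpu_workqueue_struct is cache-line
 * aligned, the low bits of its address are zero and can carry flags
 * such as WORK_STRUCT_PENDING.  A hypothetical decoder:
 */
static inline int example_decode_work_data(struct work_struct *work,
					   struct cpu_workqueue_struct **cwqp)
{
	unsigned long data = atomic_long_read(&work->data);

	/* High bits: the cwq pointer.  Low bits: the flag bits. */
	*cwqp = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	return data & (1UL << WORK_STRUCT_PENDING);
}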
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	trace_workqueue_insertion(cwq->thread, work);

	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	debug_work_activate(work);
	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
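
/*
 * Illustrative sketch (added for exposition; the "my_" names are
 * hypothetical): a typical caller initializes a work item once and then
 * queues it on its own workqueue from an event path.
 */
static void my_event_fn(struct work_struct *work)
{
	/* Runs later, in process context, in a workqueue thread. */
}

static DECLARE_WORK(my_event_work, my_event_fn);

static void my_kick_event(struct workqueue_struct *my_wq)
{
	/* Returns 0 if the work item was already pending on a queue. */
	queue_work(my_wq, &my_event_work);
}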
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
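
/*
 * Illustrative sketch (added for exposition; the "my_" names are
 * hypothetical): a delayed work item suits polling or timeout-style
 * deferral.  INIT_DELAYED_WORK() wires up both dwork->work and
 * dwork->timer; the delay is given in jiffies.
 */
static struct delayed_work my_poll_work;

static void my_poll_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* ... poll the hardware ..., then re-arm one second from now. */
	schedule_delayed_work(dwork, HZ);
}

static void my_start_polling(void)
{
	INIT_DELAYED_WORK(&my_poll_work, my_poll_fn);
	schedule_delayed_work(&my_poll_work, HZ);
}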
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif
		trace_workqueue_execution(cwq->thread, work);
		debug_work_deactivate(work);
		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	spin_unlock_irq(&cwq->lock);
}
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	/*
	 * debugobject calls are safe here even with cwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head);
}
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active = 0;
	struct wq_barrier barr;

	WARN_ON(cwq->thread == current);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
		insert_wq_barrier(cwq, &barr, &cwq->worklist);
		active = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (active) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return active;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu(cpu, cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
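
/*
 * Illustrative sketch (added for exposition; names are hypothetical):
 * a driver's teardown path typically stops new submissions first, then
 * flushes its private workqueue before freeing the objects the work
 * items touch, and finally destroys the queue.
 */
static void my_driver_shutdown(struct workqueue_struct *my_wq)
{
	/* The driver guarantees no new work is queued past this point. */
	flush_workqueue(my_wq);		/* wait for everything queued so far */
	destroy_workqueue(my_wq);	/* flushes again and reaps the threads */
}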
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}
static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu(cpu, cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	clear_wq_data(work);
	return ret;
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
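
/*
 * Illustrative sketch (added for exposition; names are hypothetical):
 * because cancel_delayed_work_sync() handles both the pending timer and
 * a self-rearming handler, a module exit path can use it as the single
 * teardown call for a polling dwork like the one sketched earlier.
 */
static void my_stop_polling(struct delayed_work *dwork)
{
	/*
	 * Returns true if the work was pending; afterwards the handler
	 * is neither queued nor running and cannot re-arm itself.
	 */
	cancel_delayed_work_sync(dwork);
}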
static struct workqueue_struct *keventd_wq __read_mostly;
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
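
/*
 * Illustrative sketch (added for exposition; names are hypothetical and
 * it assumes <linux/interrupt.h> for the irq types): schedule_work() is
 * safe from atomic context, so an interrupt handler can use it to punt
 * the sleeping part of its processing to keventd.
 */
static void my_bh_fn(struct work_struct *work)
{
	/* Sleeping is allowed here, unlike in the hard irq handler. */
}

static DECLARE_WORK(my_irq_bottom_half, my_bh_fn);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/* Ack the hardware here (atomic context), defer the slow part. */
	schedule_work(&my_irq_bottom_half);
	return IRQ_HANDLED;
}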
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
/**
 * flush_delayed_work - block until a dwork_struct's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		struct cpu_workqueue_struct *cwq;
		cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu());
		__queue_work(cwq, &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}
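
/*
 * Illustrative sketch (added for exposition; names are hypothetical):
 * schedule_on_each_cpu() runs @func once on every online CPU and only
 * returns once all invocations have finished, e.g. to drain per-CPU
 * caches.
 */
static void my_drain_percpu_caches(struct work_struct *unused)
{
	/* Runs once per online CPU, in that CPU's keventd thread. */
}

static int my_drain_all(void)
{
	return schedule_on_each_cpu(my_drain_percpu_caches);	/* 0 or -errno */
}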
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
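
/*
 * Illustrative sketch (added for exposition; names are hypothetical):
 * execute_in_process_context() suits cleanup paths that may be entered
 * from either process or interrupt context; the caller only has to keep
 * @ew alive until the function has run.
 */
static struct execute_work my_release_ew;

static void my_release_fn(struct work_struct *work)
{
	/* Sleeping is fine here: process context is guaranteed. */
}

static void my_release(void)
{
	/* Runs my_release_fn() now, or schedules it if in_interrupt(). */
	execute_in_process_context(my_release_fn, &my_release_ew);
}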
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}
static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (cwq->wq->rt)
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	cwq->thread = p;

	trace_workqueue_creation(cwq->thread, cpu);

	return 0;
}
static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}
struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						int rt,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	wq->rt = rt;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
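
/*
 * Illustrative sketch (added for exposition; names are hypothetical):
 * callers normally reach __create_workqueue_key() through the wrapper
 * macros in linux/workqueue.h, which fill in the flags and the lockdep
 * key.
 */
static struct workqueue_struct *my_wq;

static int my_init(void)
{
	my_wq = create_singlethread_workqueue("my_wq");	/* one thread total */
	if (!my_wq)
		return -ENOMEM;
	return 0;
}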
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	trace_workqueue_destruction(cwq->thread);
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu(cpu, cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int err = 0;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpumask_set_cpu(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			err = create_workqueue_thread(cwq, cpu);
			if (!err)
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			err = -ENOMEM;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpumask_clear_cpu(cpu, cpu_populated_map);
	}

	return notifier_from_errno(err);
}
#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;

	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}
/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
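
/*
 * Illustrative sketch (added for exposition; names are hypothetical):
 * work_on_cpu() is handy when a function must run on one specific CPU,
 * e.g. to read CPU-local state, and the caller already keeps that CPU
 * online (such as under get_online_cpus()).
 */
static long my_read_local_state(void *arg)
{
	/* Executes on the CPU that was passed to work_on_cpu(). */
	return raw_smp_processor_id();
}

static long my_query_cpu(unsigned int cpu)
{
	return work_on_cpu(cpu, my_read_local_state, NULL);
}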
#endif /* CONFIG_SMP */

void __init init_workqueues(void)
{
	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

	cpumask_copy(cpu_populated_map, cpu_online_mask);
	singlethread_cpu = cpumask_first(cpu_possible_mask);
	cpu_singlethread_map = cpumask_of(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}