/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
} ____cacheline_aligned;
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_wq_single_threaded(wq)
		? cpu_singlethread_map : cpu_populated_map;
}
static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_wq_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}
static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	trace_workqueue_insertion(cwq->thread, work);

	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif
		trace_workqueue_execution(cwq->thread, work);
		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	spin_unlock_irq(&cwq->lock);
}
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}
struct wq_barrier {
	struct work_struct work;
	struct completion done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, head);
}
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active = 0;
	struct wq_barrier barr;

	WARN_ON(cwq->thread == current);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
		insert_wq_barrier(cwq, &barr, &cwq->worklist);
		active = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (active)
		wait_for_completion(&barr.done);

	return active;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself. Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu(cpu, cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}
static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu(cpu, cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();
	free_percpu(works);
	return 0;
}
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}
static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	cwq->thread = p;

	trace_workqueue_creation(cwq->thread, cpu);

	return 0;
}
static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}
struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	trace_workqueue_destruction(cwq->thread);
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu(cpu, cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int ret = NOTIFY_OK;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpumask_set_cpu(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			ret = NOTIFY_BAD;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpumask_clear_cpu(cpu, cpu_populated_map);
	}

	return ret;
}
#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}
/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);

	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */
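
/*
 * Sketch for work_on_cpu() (illustrative only; which_cpu is an invented
 * name):
 *
 *	static long which_cpu(void *arg)
 *	{
 *		return smp_processor_id();
 *	}
 *
 *	long ret = work_on_cpu(2, which_cpu, NULL);
 *
 * The helper thread is bound to CPU 2 before it runs, so which_cpu()
 * executes there; the caller sleeps in wait_for_completion() until it is
 * done and then receives its return value.
 */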
void __init init_workqueues(void)
{
	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

	cpumask_copy(cpu_populated_map, cpu_online_mask);
	singlethread_cpu = cpumask_first(cpu_possible_mask);
	cpu_singlethread_map = cpumask_of(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}