/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
	int rt;
	struct lockdep_map lockdep_map;
};
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static struct cpu_workqueue_struct *
wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
			       struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__func__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

#ifdef CONFIG_BOOST_PRIVILEGED_TASKS
	sched_privileged_task(current);
#endif

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, head);
}
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, &cwq->worklist);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself. Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu_mask_nr(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);

static struct workqueue_struct *keventd_wq __read_mostly;
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();
	free_percpu(works);
	return 0;
}
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}
static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (cwq->wq->rt)
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}
struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						int rt,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	wq->rt = rt;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int ret = NOTIFY_OK;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			ret = NOTIFY_BAD;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpu_clear(cpu, cpu_populated_map);
	}

	return ret;
}
#ifdef CONFIG_SMP
struct work_for_cpu {
	struct work_struct work;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static void do_work_for_cpu(struct work_struct *w)
{
	struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);

	wfc->ret = wfc->fn(wfc->arg);
}
/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return -EINVAL if the cpu is not online, or the return value
 * of @fn otherwise.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct work_for_cpu wfc;

	INIT_WORK(&wfc.work, do_work_for_cpu);
	wfc.fn = fn;
	wfc.arg = arg;

	get_online_cpus();
	if (unlikely(!cpu_online(cpu)))
		wfc.ret = -EINVAL;
	else {
		schedule_work_on(cpu, &wfc.work);
		flush_work(&wfc.work);
	}
	put_online_cpus();

	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */

void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}