/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
} ____cacheline_aligned;
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
	int rt;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}
static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_wq_single_threaded(wq)
		? cpu_singlethread_map : cpu_populated_map;
}
static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_wq_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}
static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
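/*
 * Illustrative sketch (not part of the original file): how a caller might use
 * queue_work().  The names example_wq, example_work and example_work_fn are
 * hypothetical; the workqueue itself would come from create_workqueue().
 */
#if 0
static struct workqueue_struct *example_wq;	/* created elsewhere */

static void example_work_fn(struct work_struct *work)
{
	/* runs later, in process context, in example_wq's worker thread */
}

static DECLARE_WORK(example_work, example_work_fn);

static void example_submit(void)
{
	/* returns 0 if example_work was still pending from an earlier call */
	if (!queue_work(example_wq, &example_work))
		pr_debug("example_work already queued\n");
}
#endif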
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
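/*
 * Illustrative sketch (not part of the original file): queueing delayed work.
 * example_dwork, example_timeout_fn and example_wq are hypothetical names.
 */
#if 0
static void example_timeout_fn(struct work_struct *work)
{
	/* runs in process context roughly HZ jiffies (one second) later */
}

static DECLARE_DELAYED_WORK(example_dwork, example_timeout_fn);

static void example_arm(struct workqueue_struct *example_wq)
{
	/* a delay of 0 falls through to queue_work(), i.e. immediate queueing */
	queue_delayed_work(example_wq, &example_dwork, HZ);
}
#endif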
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	spin_unlock_irq(&cwq->lock);
}
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

	complete(&barr->done);
}
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, head);
}
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active = 0;
	struct wq_barrier barr;

	WARN_ON(cwq->thread == current);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
		insert_wq_barrier(cwq, &barr, &cwq->worklist);
		active = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (active)
		wait_for_completion(&barr.done);

	return active;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself. Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu(cpu, cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
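/*
 * Illustrative sketch (not part of the original file): the typical driver
 * shutdown sequence described in the comment above.  example_wq and
 * example_shutdown are hypothetical.
 */
#if 0
static void example_shutdown(struct workqueue_struct *example_wq)
{
	/* wait for everything queued so far to finish ... */
	flush_workqueue(example_wq);
	/* ... then it is safe to tear the queue down */
	destroy_workqueue(example_wq);
}
#endif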
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}
static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu(cpu, cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
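/*
 * Illustrative sketch (not part of the original file): cancelling work on a
 * teardown path.  example_work and example_dwork are hypothetical and assumed
 * to have been initialised with INIT_WORK()/INIT_DELAYED_WORK().
 */
#if 0
static void example_teardown(struct work_struct *example_work,
			     struct delayed_work *example_dwork)
{
	/* returns true if example_work was still pending */
	if (cancel_work_sync(example_work))
		pr_debug("example_work cancelled before it ran\n");

	/* also handles ->timer, so this is safe even while the timer is pending */
	cancel_delayed_work_sync(example_dwork);
}
#endif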
static struct workqueue_struct *keventd_wq __read_mostly;
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
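/*
 * Illustrative sketch (not part of the original file): a delayed work that
 * rearms itself on the global keventd workqueue.  example_poll and
 * example_poll_fn are hypothetical names.
 */
#if 0
static void example_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_poll, example_poll_fn);

static void example_poll_fn(struct work_struct *work)
{
	/* do the periodic work, then rearm for one second from now */
	schedule_delayed_work(&example_poll, HZ);
}

/* cancel_delayed_work_sync(&example_poll) stops it again, see above */
#endif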
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();
	free_percpu(works);
	return 0;
}
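/*
 * Illustrative sketch (not part of the original file): running a function on
 * every online CPU via keventd.  example_drain_fn and example_drain_all are
 * hypothetical names.
 */
#if 0
static void example_drain_fn(struct work_struct *unused)
{
	/* runs once on each online CPU, in that CPU's events/N thread */
	pr_debug("draining on cpu %d\n", smp_processor_id());
}

static int example_drain_all(void)
{
	/* returns 0 on success, -ENOMEM if the per-cpu allocation failed */
	return schedule_on_each_cpu(example_drain_fn);
}
#endif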
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
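/*
 * Illustrative sketch (not part of the original file): calling a routine that
 * needs process context from a path that may run in interrupt context.
 * example_dev, example_release_fn and example_release are hypothetical; @ew
 * must stay valid until the work runs, so it is embedded in the object.
 */
#if 0
struct example_dev {
	struct execute_work ew;
	/* ... */
};

static void example_release_fn(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, ew.work);

	/* free resources that may only be released in process context */
	kfree(dev);
}

static void example_release(struct example_dev *dev)
{
	/* runs example_release_fn() now, or defers it via schedule_work() */
	execute_in_process_context(example_release_fn, &dev->ew);
}
#endif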
int keventd_up(void)
{
	return keventd_wq != NULL;
}
int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}
static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (cwq->wq->rt)
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	cwq->thread = p;

	return 0;
}
static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}
struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						int rt,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	wq->rt = rt;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu(cpu, cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int ret = NOTIFY_OK;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpumask_set_cpu(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			ret = NOTIFY_BAD;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpumask_clear_cpu(cpu, cpu_populated_map);
	}

	return ret;
}
#ifdef CONFIG_SMP
static struct workqueue_struct *work_on_cpu_wq __read_mostly;

struct work_for_cpu {
	struct work_struct work;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static void do_work_for_cpu(struct work_struct *w)
{
	struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);

	wfc->ret = wfc->fn(wfc->arg);
}
/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct work_for_cpu wfc;

	INIT_WORK(&wfc.work, do_work_for_cpu);
	wfc.fn = fn;
	wfc.arg = arg;
	queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
	flush_work(&wfc.work);

	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
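/*
 * Illustrative sketch (not part of the original file): running a function on
 * a chosen CPU and collecting its return value.  example_fn and
 * example_read_on are hypothetical; the caller keeps the CPU online here with
 * get_online_cpus()/put_online_cpus(), as the comment above requires.
 */
#if 0
static long example_fn(void *arg)
{
	/* executes in process context on the CPU passed to work_on_cpu() */
	return (long)smp_processor_id();
}

static long example_read_on(unsigned int cpu)
{
	long ret;

	get_online_cpus();
	ret = cpu_online(cpu) ? work_on_cpu(cpu, example_fn, NULL) : -ENODEV;
	put_online_cpus();
	return ret;
}
#endif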
#endif /* CONFIG_SMP */
void __init init_workqueues(void)
{
	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

	cpumask_copy(cpu_populated_map, cpu_online_mask);
	singlethread_cpu = cpumask_first(cpu_possible_mask);
	cpu_singlethread_map = cpumask_of(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
#ifdef CONFIG_SMP
	work_on_cpu_wq = create_workqueue("work_on_cpu");
	BUG_ON(!work_on_cpu_wq);
#endif
}