/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, get_cpu()), work);
		put_cpu();
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__func__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
		f(work);
		lock_release(&lockdep_map, 1, _THIS_IP_);
		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, head);
}
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, &cwq->worklist);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
	for_each_cpu_mask_nr(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}
static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&work->lockdep_map, 1, _THIS_IP_);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
static struct workqueue_struct *keventd_wq __read_mostly;
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		set_bit(WORK_STRUCT_PENDING, work_data_bits(work));
		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work);
	}
	flush_workqueue(keventd_wq);
	put_online_cpus();
	free_percpu(works);
	return 0;
}
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}
static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}
static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}
struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		get_online_cpus();
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		put_online_cpus();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_DEAD,
	 * get_online_cpus() protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	get_online_cpus();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	put_online_cpus();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}

	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			return NOTIFY_BAD;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		cpu_clear(cpu, cpu_populated_map);
	}

	return NOTIFY_OK;
}
void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}