/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static cpumask_t cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_t cpu_populated_map __read_mostly;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_single_threaded(wq)
		? &cpu_singlethread_map : &cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}
/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
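
/*
 * Usage sketch (not part of the original file): a hypothetical caller of
 * queue_work().  The names my_dev, my_dev_work_fn() and my_dev_kick() are
 * made up for illustration; they assume a driver that embeds a work_struct
 * in its own state, initializes it with INIT_WORK(), and owns a workqueue
 * created elsewhere with create_workqueue().
 */
#if 0
struct my_dev {
	struct workqueue_struct *wq;	/* from create_workqueue() */
	struct work_struct work;	/* set up with INIT_WORK() */
};

static void my_dev_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	/* Runs in process context on one of dev->wq's worker threads. */
	(void)dev;
}

static void my_dev_kick(struct my_dev *dev)
{
	/* queue_work() returns 0 if the work was already pending. */
	if (!queue_work(dev->wq, &dev->work))
		pr_debug("my_dev: work already queued\n");
}
#endif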
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
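
/*
 * Usage sketch (illustrative, not part of this file): deferring and
 * periodically re-arming work with queue_delayed_work().  my_dev,
 * my_poll_fn() and the 100ms interval are assumptions for the example only;
 * dev->poll would be initialized with INIT_DELAYED_WORK().
 */
#if 0
struct my_dev {
	struct workqueue_struct *wq;
	struct delayed_work poll;	/* set up with INIT_DELAYED_WORK() */
};

static void my_poll_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, poll.work);

	/* ... poll the hardware ..., then re-arm for 100ms later: */
	queue_delayed_work(dev->wq, &dev->poll, msecs_to_jiffies(100));
}
#endif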
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__func__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
		f(work);
		lock_release(&lockdep_map, 1, _THIS_IP_);
		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, head);
}
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active;

	if (cwq->thread == current) {
		/*
		 * Probably keventd trying to flush its own queue. So simply run
		 * it by hand rather than deadlocking.
		 */
		run_workqueue(cwq);
		active = 1;
	} else {
		struct wq_barrier barr;

		active = 0;
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, &cwq->worklist);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		if (active)
			wait_for_completion(&barr.done);
	}

	return active;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
	for_each_cpu_mask_nr(cpu, *cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
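
/*
 * Usage sketch (not from the original source): a typical shutdown sequence
 * that drains a private queue before freeing the objects its work items
 * touch.  my_dev_shutdown() and the my_dev fields are hypothetical.
 */
#if 0
static void my_dev_shutdown(struct my_dev *dev)
{
	/* Stop new submissions first (driver-specific), then drain. */
	flush_workqueue(dev->wq);
	/* All work queued before this point has now completed. */
}
#endif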
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);
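
/*
 * Usage sketch (illustrative only): flush_work() waits for one specific
 * work item rather than draining a whole queue, which is cheaper when the
 * queue is shared.  my_wait_for_one() is a made-up helper; it assumes the
 * caller has already arranged that the work will not be re-queued.
 */
#if 0
static void my_wait_for_one(struct work_struct *work)
{
	if (!flush_work(work))
		pr_debug("work had already terminated\n");
}
#endif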
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running))
		wait_for_completion(&barr.done);
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const cpumask_t *cpu_map;
	int cpu;

	might_sleep();

	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&work->lockdep_map, 1, _THIS_IP_);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
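
/*
 * Usage sketch (not part of the original file): typical teardown ordering.
 * The struct and function names are hypothetical; the point is that the
 * _sync cancel variants may sleep, so they must be called from process
 * context, and the workqueue must still exist while they run.
 */
#if 0
static void my_dev_teardown(struct my_dev *dev)
{
	cancel_delayed_work_sync(&dev->poll);	/* timer + work item */
	cancel_work_sync(&dev->work);		/* plain work item */
	destroy_workqueue(dev->wq);		/* safe only after the above */
}
#endif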
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();
	free_percpu(works);
	return 0;
}
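
/*
 * Usage sketch (illustrative): running a function once on every online CPU
 * through the global queue.  my_percpu_sync() and my_sync_all_cpus() are
 * made-up names; the call may sleep and, as the comment above notes, is slow.
 */
#if 0
static void my_percpu_sync(struct work_struct *unused)
{
	/* Executes once on each online CPU's events/N worker thread. */
}

static int my_sync_all_cpus(void)
{
	return schedule_on_each_cpu(my_percpu_sync);
}
#endif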
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
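
/*
 * Usage sketch (not from the original file): a release path that may be
 * reached from interrupt context.  The execute_work storage must outlive the
 * call, so it is embedded in the (hypothetical) object being released.
 */
#if 0
struct my_obj {
	struct execute_work ew;
	/* ... */
};

static void my_obj_free(struct work_struct *work)
{
	kfree(container_of(work, struct my_obj, ew.work));
}

static void my_obj_release(struct my_obj *obj)
{
	/* Runs my_obj_free() immediately if possible, otherwise via keventd. */
	execute_in_process_context(my_obj_free, &obj->ew);
}
#endif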
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}
static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);

	cwq->thread = p;

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}
struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);

		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
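
/*
 * Usage sketch (illustrative, not part of this file): callers normally reach
 * __create_workqueue_key() through the create_workqueue() and
 * create_singlethread_workqueue() wrappers in linux/workqueue.h.  The names
 * my_wq, my_module_init() and my_module_exit() are made up; the error check
 * on the NULL return is the part callers most often forget.
 */
#if 0
static struct workqueue_struct *my_wq;

static int __init my_module_init(void)
{
	my_wq = create_singlethread_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void __exit my_module_exit(void)
{
	destroy_workqueue(my_wq);	/* runs all pending work first */
}
#endif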
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const cpumask_t *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu_mask_nr(cpu, *cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int ret = NOTIFY_OK;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpu_set(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			ret = NOTIFY_BAD;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpu_clear(cpu, cpu_populated_map);
	}

	return ret;
}
void __init init_workqueues(void)
{
	cpu_populated_map = cpu_online_map;
	singlethread_cpu = first_cpu(cpu_possible_map);
	cpu_singlethread_map = cpumask_of_cpu(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}