/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There is one worker pool for each CPU and
 * one extra for works which are better served by workers which are
 * not bound to any specific CPU.
 *
 * Please read Documentation/workqueue.txt for details.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

#include "workqueue_sched.h"
enum {
	/* global_cwq flags */
	GCWQ_DISASSOCIATED	= 1 << 0,	/* cpu can't serve workers */
	GCWQ_FREEZING		= 1 << 1,	/* freeze in progress */

	/* pool flags */
	POOL_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
	POOL_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
	POOL_HIGHPRI_PENDING	= 1 << 2,	/* highpri works on queue */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	NR_WORKER_POOLS		= 1,		/* # worker pools per gcwq */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
						/* call for help after 10ms
						   (min two ticks) */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */

	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.  Give -20.
	 */
	RESCUER_NICE_LEVEL	= -20,
};
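/*
 * Illustrative arithmetic (not part of the original file): the ?: in
 * MAYDAY_INITIAL_TIMEOUT above enforces a two-tick floor.  With
 * HZ == 1000, HZ / 100 is 10 ticks (~10ms); with HZ == 100 it would be
 * a single tick, which the floor bumps to 2 so the initial timeout
 * never degenerates to one tick.
 */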
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * X: During normal operation, modification requires gcwq->lock and
 *    should be done only from local cpu.  Either disabling preemption
 *    on local cpu or grabbing gcwq->lock is enough for read access.
 *    If GCWQ_DISASSOCIATED is set, it's identical to L.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */
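/*
 * Illustrative sketch (not part of the original file): reading an "L:"
 * field such as pool->nr_idle requires gcwq->lock.  The helper name is
 * hypothetical; the fields are the real ones declared below.
 *
 *	static int example_nr_idle(struct worker_pool *pool)
 *	{
 *		int nr;
 *
 *		spin_lock_irq(&pool->gcwq->lock);
 *		nr = pool->nr_idle;		(an "L:" access)
 *		spin_unlock_irq(&pool->gcwq->lock);
 *		return nr;
 *	}
 */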
/*
 * The poor guys doing the actual heavy lifting.  All on-duty workers
 * are either serving the manager role, on idle list or on busy hash.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct worker_pool	*pool;		/* I: the associated pool */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */
	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
};
struct worker_pool {
	struct global_cwq	*gcwq;		/* I: the owning gcwq */
	unsigned int		flags;		/* X: flags */

	struct list_head	worklist;	/* L: list of pending works */
	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	struct list_head	idle_list;	/* X: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	struct ida		worker_ida;	/* L: for worker IDs */
	struct worker		*first_idle;	/* L: first idle worker */
};
/*
 * Global per-cpu workqueue.  There's one and only one for each cpu
 * and all works are queued and processed here regardless of their
 * target workqueues.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	/* workers are chained either in busy_hash or pool idle_list */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct worker_pool	pool;		/* the worker pools */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
} ____cacheline_aligned_in_smp;
/*
 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned at two's power of the number of flag bits.
 */
struct cpu_workqueue_struct {
	struct worker_pool	*pool;		/* I: the associated pool */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};
/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};
/*
 * All cpumasks are assumed to be always set on UP and thus can't be
 * used to determine whether there's something to be done.
 */
#ifdef CONFIG_SMP
typedef cpumask_var_t mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	\
	cpumask_test_and_set_cpu((cpu), (mask))
#define mayday_clear_cpu(cpu, mask)		cpumask_clear_cpu((cpu), (mask))
#define for_each_mayday_cpu(cpu, mask)		for_each_cpu((cpu), (mask))
#define alloc_mayday_mask(maskp, gfp)		zalloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask)			free_cpumask_var((mask))
#else
typedef unsigned long mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
#define mayday_clear_cpu(cpu, mask)		clear_bit(0, &(mask))
#define for_each_mayday_cpu(cpu, mask)		if ((cpu) = 0, (mask))
#define alloc_mayday_mask(maskp, gfp)		true
#define free_mayday_mask(mask)			do { } while (0)
#endif
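/*
 * Illustrative note (not part of the original file): on UP the mayday
 * mask degenerates to a single bit and the cpu argument is ignored:
 *
 *	mayday_mask_t mask = 0;
 *
 *	mayday_test_and_set_cpu(3, mask);	(sets bit 0)
 *	for_each_mayday_cpu(cpu, mask)		(body runs once, cpu == 0)
 *		handle_mayday(cpu);		(handle_mayday() hypothetical)
 */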
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* W: WQ_* flags */
	union {
		struct cpu_workqueue_struct __percpu	*pcpu;
		struct cpu_workqueue_struct		*single;
		unsigned long				v;
	} cpu_wq;				/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			nr_drainers;	/* W: drain in progress */
	int			saved_max_active; /* W: saved cwq max_active */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
	char			name[];		/* I: workqueue name */
};
struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
struct workqueue_struct *system_unbound_wq __read_mostly;
struct workqueue_struct *system_freezable_wq __read_mostly;
struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
EXPORT_SYMBOL_GPL(system_unbound_wq);
EXPORT_SYMBOL_GPL(system_freezable_wq);
EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define for_each_worker_pool(pool, gcwq)				\
	for ((pool) = &(gcwq)->pool; (pool); (pool) = NULL)

#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
				  unsigned int sw)
{
	if (cpu < nr_cpu_ids) {
		if (sw & 1) {
			cpu = cpumask_next(cpu, mask);
			if (cpu < nr_cpu_ids)
				return cpu;
		}
		if (sw & 2)
			return WORK_CPU_UNBOUND;
	}
	return WORK_CPU_NONE;
}

static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
				struct workqueue_struct *wq)
{
	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
}
/*
 * CPU iterators
 *
 * An extra gcwq is defined for an invalid cpu number
 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
 * specific CPU.  The following iterators are similar to the
 * for_each_*_cpu() iterators but also consider the unbound gcwq.
 *
 * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
 * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
 * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
 *				  WORK_CPU_UNBOUND for unbound workqueues
 */
#define for_each_gcwq_cpu(cpu)						\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))

#define for_each_online_gcwq_cpu(cpu)					\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))

#define for_each_cwq_cpu(cpu, wq)					\
	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
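/*
 * Illustrative sketch (not part of the original file): the gcwq
 * iterators visit every possible CPU and then the unbound gcwq:
 *
 *	unsigned int cpu;
 *
 *	for_each_gcwq_cpu(cpu) {
 *		struct global_cwq *gcwq = get_gcwq(cpu);
 *		...	(runs for each possible CPU, then once with
 *			 cpu == WORK_CPU_UNBOUND)
 *	}
 */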
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
	return ((struct work_struct *) addr)->func;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.debug_hint	= work_debug_hint,
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

/*
 * The almighty global cpu workqueues.  nr_running is the only field
 * which is expected to be used frequently by other cpus via
 * try_to_wake_up().  Put it in a separate cacheline.
 */
static DEFINE_PER_CPU(struct global_cwq, global_cwq);
static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]);

/*
 * Global cpu workqueue and nr_running counter for unbound gcwq.  The
 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
 * workers have WORKER_UNBOUND set.
 */
static struct global_cwq unbound_global_cwq;
static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
	[0 ... NR_WORKER_POOLS - 1]	= ATOMIC_INIT(0),	/* always 0 */
};
static int worker_thread(void *__worker);

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(global_cwq, cpu);
	else
		return &unbound_global_cwq;
}

static atomic_t *get_pool_nr_running(struct worker_pool *pool)
{
	int cpu = pool->gcwq->cpu;
	int idx = 0;

	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(pool_nr_running, cpu)[idx];
	else
		return &unbound_pool_nr_running[idx];
}
static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	if (!(wq->flags & WQ_UNBOUND)) {
		if (likely(cpu < nr_cpu_ids))
			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
	} else if (likely(cpu == WORK_CPU_UNBOUND))
		return wq->cpu_wq.single;
	return NULL;
}
static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
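/*
 * Illustrative arithmetic (not part of the original file): assuming
 * WORK_STRUCT_COLOR_BITS is 4, WORK_NR_COLORS is 15 with the last
 * value reserved as WORK_NO_COLOR, so colors simply wrap:
 *
 *	work_next_color(0)  == 1
 *	work_next_color(14) == 0
 *
 * Flushing relies on this: flush_workqueue() advances wq->work_color
 * to the next color and waits for the previous color's nr_in_flight
 * counters to drain.
 */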
/*
 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
 * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
 * cleared and the work data contains the cpu number it was last on.
 *
 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 * cwq, cpu or clear work->data.  These functions should only be
 * called while the work is owned - ie. while the PENDING bit is set.
 *
 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 * corresponding to a work.  gcwq is available once the work has been
 * queued anywhere after initialization.  cwq is available only from
 * queueing until execution starts.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	BUG_ON(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_cwq(struct work_struct *work,
			 struct cpu_workqueue_struct *cwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)cwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
}

static void set_work_cpu(struct work_struct *work, unsigned int cpu)
{
	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
}

static void clear_work_data(struct work_struct *work)
{
	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}

static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_CWQ)
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	else
		return NULL;
}

static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	unsigned int cpu;

	if (data & WORK_STRUCT_CWQ)
		return ((struct cpu_workqueue_struct *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;

	cpu = data >> WORK_STRUCT_FLAG_BITS;
	if (cpu == WORK_CPU_NONE)
		return NULL;

	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
	return get_gcwq(cpu);
}
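/*
 * Illustrative layout (not part of the original file): work->data is a
 * tagged word.  While queued, the high bits carry the cwq pointer
 * (hence the cwq alignment requirement); once execution starts they
 * carry the last cpu number instead:
 *
 *	queued:		[ cwq pointer			| flags, CWQ set   ]
 *	executing:	[ cpu << WORK_STRUCT_FLAG_BITS	| flags, CWQ clear ]
 */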
/*
 * Policy functions.  These define the policies on how the global
 * worker pool is managed.  Unless noted otherwise, these functions
 * assume that they're being called with gcwq->lock held.
 */
static bool __need_more_worker(struct worker_pool *pool)
{
	return !atomic_read(get_pool_nr_running(pool)) ||
		(pool->flags & POOL_HIGHPRI_PENDING);
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound gcwq as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	return !list_empty(&pool->worklist) && __need_more_worker(pool);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
	return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
	atomic_t *nr_running = get_pool_nr_running(pool);

	return !list_empty(&pool->worklist) &&
		(atomic_read(nr_running) <= 1 ||
		 (pool->flags & POOL_HIGHPRI_PENDING));
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
	return need_more_worker(pool) && !may_start_working(pool);
}

/* Do I need to be the manager? */
static bool need_to_manage_workers(struct worker_pool *pool)
{
	return need_to_create_worker(pool) ||
		(pool->flags & POOL_MANAGE_WORKERS);
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
	bool managing = pool->flags & POOL_MANAGING_WORKERS;
	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
	int nr_busy = pool->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
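/*
 * Illustrative arithmetic (not part of the original file): with
 * MAX_IDLE_WORKERS_RATIO == 4, a pool with 8 busy workers tolerates up
 * to 3 idle ones; a 4th idle worker makes (4 - 2) * 4 >= 8 true, so
 * too_many_workers() fires and the idle timer starts reaping.
 */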
/* Return the first worker.  Safe with preemption disabled */
static struct worker *first_worker(struct worker_pool *pool)
{
	if (unlikely(list_empty(&pool->idle_list)))
		return NULL;

	return list_first_entry(&pool->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @pool: worker pool to wake worker from
 *
 * Wake up the first idle worker of @pool.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void wake_up_worker(struct worker_pool *pool)
{
	struct worker *worker = first_worker(pool);

	if (likely(worker))
		wake_up_process(worker->task);
}
/**
 * wq_worker_waking_up - a worker is waking up
 * @task: task waking up
 * @cpu: CPU @task is waking up to
 *
 * This function is called during try_to_wake_up() when a worker is
 * being awoken.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 */
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
{
	struct worker *worker = kthread_data(task);

	if (!(worker->flags & WORKER_NOT_RUNNING))
		atomic_inc(get_pool_nr_running(worker->pool));
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 * @cpu: CPU in question, must be the current CPU number
 *
 * This function is called during schedule() when a busy worker is
 * going to sleep.  Worker on the same cpu can be woken up by
 * returning pointer to its task.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * RETURNS:
 * Worker task on @cpu to wake up, %NULL if none.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task,
				       unsigned int cpu)
{
	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
	struct worker_pool *pool = worker->pool;
	atomic_t *nr_running = get_pool_nr_running(pool);

	if (worker->flags & WORKER_NOT_RUNNING)
		return NULL;

	/* this can only happen on the local cpu */
	BUG_ON(cpu != raw_smp_processor_id());

	/*
	 * The counterpart of the following dec_and_test, implied mb,
	 * worklist not empty test sequence is in insert_work().
	 * Please read comment there.
	 *
	 * NOT_RUNNING is clear.  This means that trustee is not in
	 * charge and we're running on the local cpu w/ rq lock held
	 * and preemption disabled, which in turn means that none else
	 * could be manipulating idle_list, so dereferencing idle_list
	 * without gcwq lock is safe.
	 */
	if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist))
		to_wakeup = first_worker(pool);
	return to_wakeup ? to_wakeup->task : NULL;
}
/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 * @wakeup: wakeup an idle worker if necessary
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 * nr_running becomes zero and @wakeup is %true, an idle worker is
 * woken up.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
				    bool wakeup)
{
	struct worker_pool *pool = worker->pool;

	WARN_ON_ONCE(worker->task != current);

	/*
	 * If transitioning into NOT_RUNNING, adjust nr_running and
	 * wake up an idle worker as necessary if requested by
	 * @wakeup.
	 */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		atomic_t *nr_running = get_pool_nr_running(pool);

		if (wakeup) {
			if (atomic_dec_and_test(nr_running) &&
			    !list_empty(&pool->worklist))
				wake_up_worker(pool);
		} else
			atomic_dec(nr_running);
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct worker_pool *pool = worker->pool;
	unsigned int oflags = worker->flags;

	WARN_ON_ONCE(worker->task != current);

	worker->flags &= ~flags;

	/*
	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
	 * of multiple flags, not a single flag.
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			atomic_inc(get_pool_nr_running(pool));
}
/**
 * busy_worker_head - return the busy hash head for a work
 * @gcwq: gcwq of interest
 * @work: work to be hashed
 *
 * Return hash head of @gcwq for @work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to the hash head.
 */
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}
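/*
 * Illustrative arithmetic (not part of the original file): assuming
 * sizeof(struct work_struct) rounds to 32 on a 64bit build, base_shift
 * is 5, so for a work at address p the bucket is
 *
 *	v = p >> 5;
 *	v += v >> 6;
 *	v &= 63;
 *
 * i.e. the low pointer bits, folded once, select one of 64 buckets.
 */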
/**
 * __find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @bwh: hash head as returned by busy_worker_head()
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  @bwh should be
 * the hash head obtained by calling busy_worker_head() with the same
 * work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
						   struct hlist_head *bwh,
						   struct work_struct *work)
{
	struct worker *worker;
	struct hlist_node *tmp;

	hlist_for_each_entry(worker, tmp, bwh, hentry)
		if (worker->current_work == work)
			return worker;
	return NULL;
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  This function is
 * identical to __find_worker_executing_work() except that this
 * function calculates @bwh itself.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
						 struct work_struct *work)
{
	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
					    work);
}
/**
 * pool_determine_ins_pos - find insertion position
 * @pool: pool of interest
 * @cwq: cwq a work is being queued for
 *
 * A work for @cwq is about to be queued on @pool, determine insertion
 * position for the work.  If @cwq is for HIGHPRI wq, the work is
 * queued at the head of the queue but in FIFO order with respect to
 * other HIGHPRI works; otherwise, at the end of the queue.  This
 * function also sets POOL_HIGHPRI_PENDING flag to hint @pool that
 * there are HIGHPRI works pending.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to insertion position.
 */
static inline struct list_head *pool_determine_ins_pos(struct worker_pool *pool,
					       struct cpu_workqueue_struct *cwq)
{
	struct work_struct *twork;

	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
		return &pool->worklist;

	list_for_each_entry(twork, &pool->worklist, entry) {
		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);

		if (!(tcwq->wq->flags & WQ_HIGHPRI))
			break;
	}

	pool->flags |= POOL_HIGHPRI_PENDING;
	return &twork->entry;
}
/**
 * insert_work - insert a work into gcwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @cwq into @gcwq after @head.
 * @extra_flags is or'd to work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	struct worker_pool *pool = cwq->pool;

	/* we own @work, set data and link */
	set_work_cwq(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);

	/*
	 * Ensure either worker_sched_deactivated() sees the above
	 * list_add_tail() or we see zero nr_running to avoid workers
	 * lying around lazily while there are works to be processed.
	 */
	smp_mb();

	if (__need_more_worker(pool))
		wake_up_worker(pool);
}
/*
 * Test whether @work is being queued from another work executing on the
 * same workqueue.  This is rather expensive and should only be used from
 * cold paths.
 */
static bool is_chained_work(struct workqueue_struct *wq)
{
	unsigned long flags;
	unsigned int cpu;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker *worker;
		struct hlist_node *pos;
		int i;

		spin_lock_irqsave(&gcwq->lock, flags);
		for_each_busy_worker(worker, i, pos, gcwq) {
			if (worker->task != current)
				continue;
			spin_unlock_irqrestore(&gcwq->lock, flags);
			/*
			 * I'm @worker, no locking necessary.  See if @work
			 * is headed to the same workqueue.
			 */
			return worker->current_cwq->wq == wq;
		}
		spin_unlock_irqrestore(&gcwq->lock, flags);
	}
	return false;
}
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct list_head *worklist;
	unsigned int work_flags;
	unsigned long flags;

	debug_work_activate(work);

	/* if dying, only works from the same workqueue are allowed */
	if (unlikely(wq->flags & WQ_DRAINING) &&
	    WARN_ON_ONCE(!is_chained_work(wq)))
		return;

	/* determine gcwq to use */
	if (!(wq->flags & WQ_UNBOUND)) {
		struct global_cwq *last_gcwq;

		if (unlikely(cpu == WORK_CPU_UNBOUND))
			cpu = raw_smp_processor_id();

		/*
		 * It's multi cpu.  If @wq is non-reentrant and @work
		 * was previously on a different cpu, it might still
		 * be running there, in which case the work needs to
		 * be queued on that cpu to guarantee non-reentrance.
		 */
		gcwq = get_gcwq(cpu);
		if (wq->flags & WQ_NON_REENTRANT &&
		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
			struct worker *worker;

			spin_lock_irqsave(&last_gcwq->lock, flags);

			worker = find_worker_executing_work(last_gcwq, work);

			if (worker && worker->current_cwq->wq == wq)
				gcwq = last_gcwq;
			else {
				/* meh... not running there, queue here */
				spin_unlock_irqrestore(&last_gcwq->lock, flags);
				spin_lock_irqsave(&gcwq->lock, flags);
			}
		} else
			spin_lock_irqsave(&gcwq->lock, flags);
	} else {
		gcwq = get_gcwq(WORK_CPU_UNBOUND);
		spin_lock_irqsave(&gcwq->lock, flags);
	}

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);
	trace_workqueue_queue_work(cpu, cwq, work);

	if (WARN_ON(!list_empty(&work->entry))) {
		spin_unlock_irqrestore(&gcwq->lock, flags);
		return;
	}

	cwq->nr_in_flight[cwq->work_color]++;
	work_flags = work_color_to_flags(cwq->work_color);

	if (likely(cwq->nr_active < cwq->max_active)) {
		trace_workqueue_activate_work(work);
		cwq->nr_active++;
		worklist = pool_determine_ins_pos(cwq->pool, cwq);
	} else {
		work_flags |= WORK_STRUCT_DELAYED;
		worklist = &cwq->delayed_works;
	}

	insert_work(cwq, work, worklist, work_flags);

	spin_unlock_irqrestore(&gcwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
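/*
 * Illustrative usage sketch (not part of the original file; "example_"
 * names are hypothetical):
 *
 *	static void example_fn(struct work_struct *work)
 *	{
 *		pr_info("ran in process context\n");
 *	}
 *	static DECLARE_WORK(example_work, example_fn);
 *
 *	queue_work(system_wq, &example_work);	(0 if already pending)
 */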
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
		       struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			  struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		unsigned int lcpu;

		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/*
		 * This stores cwq for the moment, for the timer_fn.
		 * Note that the work's gcwq is preserved to allow
		 * reentrance detection for delayed works.
		 */
		if (!(wq->flags & WQ_UNBOUND)) {
			struct global_cwq *gcwq = get_work_gcwq(work);

			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
				lcpu = gcwq->cpu;
			else
				lcpu = raw_smp_processor_id();
		} else
			lcpu = WORK_CPU_UNBOUND;

		set_work_cwq(work, get_cwq(lcpu, wq), 0);

		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
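/*
 * Illustrative usage sketch (not part of the original file; "example_"
 * names are hypothetical):
 *
 *	static void example_poll(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(example_dwork, example_poll);
 *
 *	(run example_poll() on CPU 1 roughly one second from now)
 *	queue_delayed_work_on(1, system_wq, &example_dwork, HZ);
 */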
/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;
	struct global_cwq *gcwq = pool->gcwq;

	BUG_ON(worker->flags & WORKER_IDLE);
	BUG_ON(!list_empty(&worker->entry) &&
	       (worker->hentry.next || worker->hentry.pprev));

	/* can't use worker_set_flags(), also called from start_worker() */
	worker->flags |= WORKER_IDLE;
	pool->nr_idle++;
	worker->last_active = jiffies;

	/* idle_list is LIFO */
	list_add(&worker->entry, &pool->idle_list);

	if (likely(!(worker->flags & WORKER_ROGUE))) {
		if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
			mod_timer(&pool->idle_timer,
				  jiffies + IDLE_WORKER_TIMEOUT);
	} else
		wake_up_all(&gcwq->trustee_wait);

	/*
	 * Sanity check nr_running.  Because trustee releases gcwq->lock
	 * between setting %WORKER_ROGUE and zapping nr_running, the
	 * warning may trigger spuriously.  Check iff trustee is idle.
	 */
	WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
		     pool->nr_workers == pool->nr_idle &&
		     atomic_read(get_pool_nr_running(pool)));
}
/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	worker_clr_flags(worker, WORKER_IDLE);
	pool->nr_idle--;
	list_del_init(&worker->entry);
}
/**
 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
 * @worker: self
 *
 * Works which are scheduled while the cpu is online must at least be
 * scheduled to a worker which is bound to the cpu so that if they are
 * flushed from cpu callbacks while cpu is going down, they are
 * guaranteed to execute on the cpu.
 *
 * This function is to be used by rogue workers and rescuers to bind
 * themselves to the target cpu and may race with cpu going down or
 * coming online.  kthread_bind() can't be used because it may put the
 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
 * verbatim as it's best effort and blocking and gcwq may be
 * [dis]associated in the meantime.
 *
 * This function tries set_cpus_allowed() and locks gcwq and verifies
 * the binding against GCWQ_DISASSOCIATED which is set during
 * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
 * idle state or fetches works without dropping lock, it can guarantee
 * the scheduling requirement described in the first paragraph.
 *
 * CONTEXT:
 * Might sleep.  Called without any lock but returns with gcwq->lock
 * held.
 *
 * RETURNS:
 * %true if the associated gcwq is online (@worker is successfully
 * bound), %false if offline.
 */
static bool worker_maybe_bind_and_lock(struct worker *worker)
__acquires(&gcwq->lock)
{
	struct global_cwq *gcwq = worker->pool->gcwq;
	struct task_struct *task = worker->task;

	while (true) {
		/*
		 * The following call may fail, succeed or succeed
		 * without actually migrating the task to the cpu if
		 * it races with cpu hotunplug operation.  Verify
		 * against GCWQ_DISASSOCIATED.
		 */
		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));

		spin_lock_irq(&gcwq->lock);
		if (gcwq->flags & GCWQ_DISASSOCIATED)
			return false;
		if (task_cpu(task) == gcwq->cpu &&
		    cpumask_equal(&current->cpus_allowed,
				  get_cpu_mask(gcwq->cpu)))
			return true;
		spin_unlock_irq(&gcwq->lock);

		/*
		 * We've raced with CPU hot[un]plug.  Give it a breather
		 * and retry migration.  cond_resched() is required here;
		 * otherwise, we might deadlock against cpu_stop trying to
		 * bring down the CPU on non-preemptive kernel.
		 */
		cpu_relax();
		cond_resched();
	}
}
/*
 * Function for worker->rebind_work used to rebind rogue busy workers
 * to the associated cpu which is coming back online.  This is
 * scheduled by cpu up but can race with other cpu hotplug operations
 * and may be executed twice without intervening cpu down.
 */
static void worker_rebind_fn(struct work_struct *work)
{
	struct worker *worker = container_of(work, struct worker, rebind_work);
	struct global_cwq *gcwq = worker->pool->gcwq;

	if (worker_maybe_bind_and_lock(worker))
		worker_clr_flags(worker, WORKER_REBIND);

	spin_unlock_irq(&gcwq->lock);
}
static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
		/* on creation a worker is in !idle && prep state */
		worker->flags = WORKER_PREP;
	}
	return worker;
}
/**
 * create_worker - create a new workqueue worker
 * @pool: pool the new worker will belong to
 * @bind: whether to set affinity to @cpu or not
 *
 * Create a new worker which is bound to @pool.  The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct worker_pool *pool, bool bind)
{
	struct global_cwq *gcwq = pool->gcwq;
	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
	struct worker *worker = NULL;
	int id = -1;

	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&pool->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->pool = pool;
	worker->id = id;

	if (!on_unbound_cpu)
		worker->task = kthread_create_on_node(worker_thread,
						      worker,
						      cpu_to_node(gcwq->cpu),
						      "kworker/%u:%d",
						      gcwq->cpu, id);
	else
		worker->task = kthread_create(worker_thread, worker,
					      "kworker/u:%d", id);
	if (IS_ERR(worker->task))
		goto fail;

	/*
	 * A rogue worker will become a regular one if CPU comes
	 * online later on.  Make sure every worker has
	 * PF_THREAD_BOUND set.
	 */
	if (bind && !on_unbound_cpu)
		kthread_bind(worker->task, gcwq->cpu);
	else {
		worker->task->flags |= PF_THREAD_BOUND;
		if (on_unbound_cpu)
			worker->flags |= WORKER_UNBOUND;
	}

	return worker;
fail:
	if (id >= 0) {
		spin_lock_irq(&gcwq->lock);
		ida_remove(&pool->worker_ida, id);
		spin_unlock_irq(&gcwq->lock);
	}
	kfree(worker);
	return NULL;
}
/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Make the gcwq aware of @worker and start it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void start_worker(struct worker *worker)
{
	worker->flags |= WORKER_STARTED;
	worker->pool->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
}
/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker and adjust @gcwq stats accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void destroy_worker(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;
	struct global_cwq *gcwq = pool->gcwq;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	if (worker->flags & WORKER_STARTED)
		pool->nr_workers--;
	if (worker->flags & WORKER_IDLE)
		pool->nr_idle--;

	list_del_init(&worker->entry);
	worker->flags |= WORKER_DIE;

	spin_unlock_irq(&gcwq->lock);

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock_irq(&gcwq->lock);
	ida_remove(&pool->worker_ida, id);
}
static void idle_worker_timeout(unsigned long __pool)
{
	struct worker_pool *pool = (void *)__pool;
	struct global_cwq *gcwq = pool->gcwq;

	spin_lock_irq(&gcwq->lock);

	if (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

		/* idle_list is kept in LIFO order, check the last one */
		worker = list_entry(pool->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires))
			mod_timer(&pool->idle_timer, expires);
		else {
			/* it's been idle for too long, wake up manager */
			pool->flags |= POOL_MANAGE_WORKERS;
			wake_up_worker(pool);
		}
	}

	spin_unlock_irq(&gcwq->lock);
}
static bool send_mayday(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct workqueue_struct *wq = cwq->wq;
	unsigned int cpu;

	if (!(wq->flags & WQ_RESCUER))
		return false;

	/* mayday mayday mayday */
	cpu = cwq->pool->gcwq->cpu;
	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
	if (cpu == WORK_CPU_UNBOUND)
		cpu = 0;
	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
		wake_up_process(wq->rescuer->task);
	return true;
}
static void gcwq_mayday_timeout(unsigned long __pool)
{
	struct worker_pool *pool = (void *)__pool;
	struct global_cwq *gcwq = pool->gcwq;
	struct work_struct *work;

	spin_lock_irq(&gcwq->lock);

	if (need_to_create_worker(pool)) {
		/*
		 * We've been trying to create a new worker but
		 * haven't been successful.  We might be hitting an
		 * allocation deadlock.  Send distress signals to
		 * rescuers.
		 */
		list_for_each_entry(work, &pool->worklist, entry)
			send_mayday(work);
	}

	spin_unlock_irq(&gcwq->lock);

	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
/**
 * maybe_create_worker - create a new worker if necessary
 * @pool: pool to create a new worker for
 *
 * Create a new worker for @pool if necessary.  @pool is guaranteed to
 * have at least one idle worker on return from this function.  If
 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
 * sent to all rescuers with works scheduled on @pool to resolve
 * possible allocation deadlock.
 *
 * On return, need_to_create_worker() is guaranteed to be false and
 * may_start_working() true.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  Does GFP_KERNEL allocations.  Called only from
 * manager.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true
 * otherwise.
 */
static bool maybe_create_worker(struct worker_pool *pool)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	struct global_cwq *gcwq = pool->gcwq;

	if (!need_to_create_worker(pool))
		return false;
restart:
	spin_unlock_irq(&gcwq->lock);

	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);

	while (true) {
		struct worker *worker;

		worker = create_worker(pool, true);
		if (worker) {
			del_timer_sync(&pool->mayday_timer);
			spin_lock_irq(&gcwq->lock);
			start_worker(worker);
			BUG_ON(need_to_create_worker(pool));
			return true;
		}

		if (!need_to_create_worker(pool))
			break;

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(CREATE_COOLDOWN);

		if (!need_to_create_worker(pool))
			break;
	}

	del_timer_sync(&pool->mayday_timer);
	spin_lock_irq(&gcwq->lock);
	if (need_to_create_worker(pool))
		goto restart;
	return true;
}
/**
 * maybe_destroy_workers - destroy workers which have been idle for a while
 * @pool: pool to destroy workers for
 *
 * Destroy @pool workers which have been idle for longer than
 * IDLE_WORKER_TIMEOUT.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  Called only from manager.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true
 * otherwise.
 */
static bool maybe_destroy_workers(struct worker_pool *pool)
{
	bool ret = false;

	while (too_many_workers(pool)) {
		struct worker *worker;
		unsigned long expires;

		worker = list_entry(pool->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires)) {
			mod_timer(&pool->idle_timer, expires);
			break;
		}

		destroy_worker(worker);
		ret = true;
	}

	return ret;
}
/**
 * manage_workers - manage worker pool
 * @worker: self
 *
 * Assume the manager role and manage gcwq worker pool @worker belongs
 * to.  At any given time, there can be only zero or one manager per
 * gcwq.  The exclusion is handled automatically by this function.
 *
 * The caller can safely start processing works on false return.  On
 * true return, it's guaranteed that need_to_create_worker() is false
 * and may_start_working() is true.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true if
 * some action was taken.
 */
static bool manage_workers(struct worker *worker)
{
	struct worker_pool *pool = worker->pool;
	struct global_cwq *gcwq = pool->gcwq;
	bool ret = false;

	if (pool->flags & POOL_MANAGING_WORKERS)
		return ret;

	pool->flags &= ~POOL_MANAGE_WORKERS;
	pool->flags |= POOL_MANAGING_WORKERS;

	/*
	 * Destroy and then create so that may_start_working() is true
	 * on return.
	 */
	ret |= maybe_destroy_workers(pool);
	ret |= maybe_create_worker(pool);

	pool->flags &= ~POOL_MANAGING_WORKERS;

	/*
	 * The trustee might be waiting to take over the manager
	 * position, tell it we're done.
	 */
	if (unlikely(gcwq->trustee))
		wake_up_all(&gcwq->trustee_wait);

	return ret;
}
/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}
static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);
	struct list_head *pos = pool_determine_ins_pos(cwq->pool, cwq);

	trace_workqueue_activate_work(work);
	move_linked_works(work, pos, NULL);
	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
	cwq->nr_active++;
}
/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 * @delayed: for a delayed work
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
				 bool delayed)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;

	if (!delayed) {
		cwq->nr_active--;
		if (!list_empty(&cwq->delayed_works)) {
			/* one down, submit a delayed one */
			if (cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);
		}
	}

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}
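/*
 * Illustrative walk-through (not part of the original file): say a cwq
 * has three color-0 works in flight when a flush starts.  The flusher
 * sets cwq->flush_color to 0 and advances work_color to 1, so new
 * works are tagged with color 1.  Each color-0 completion calls
 * cwq_dec_nr_in_flight(cwq, 0, false); when nr_in_flight[0] hits zero
 * this cwq drops out of the flush, and the last cwq to do so
 * completes first_flusher->done.
 */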
/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logics necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct worker_pool *pool = worker->pool;
	struct global_cwq *gcwq = pool->gcwq;
	struct hlist_head *bwh = busy_worker_head(gcwq, work);
	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
	work_func_t f = work->func;
	int work_color;
	struct worker *collision;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
#endif
	/*
	 * A single work shouldn't be executed concurrently by
	 * multiple workers on a single cpu.  Check whether anyone is
	 * already processing the work.  If so, defer the work to the
	 * currently executing one.
	 */
	collision = __find_worker_executing_work(gcwq, bwh, work);
	if (unlikely(collision)) {
		move_linked_works(work, &collision->scheduled, NULL);
		return;
	}

	/* claim and process */
	debug_work_deactivate(work);
	hlist_add_head(&worker->hentry, bwh);
	worker->current_work = work;
	worker->current_cwq = cwq;
	work_color = get_work_color(work);

	/* record the current cpu number in the work data and dequeue */
	set_work_cpu(work, gcwq->cpu);
	list_del_init(&work->entry);

	/*
	 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
	 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
	 */
	if (unlikely(pool->flags & POOL_HIGHPRI_PENDING)) {
		struct work_struct *nwork = list_first_entry(&pool->worklist,
					struct work_struct, entry);

		if (!list_empty(&pool->worklist) &&
		    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
			wake_up_worker(pool);
		else
			pool->flags &= ~POOL_HIGHPRI_PENDING;
	}

	/*
	 * CPU intensive works don't participate in concurrency
	 * management.  They're the scheduler's responsibility.
	 */
	if (unlikely(cpu_intensive))
		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);

	/*
	 * Unbound gcwq isn't concurrency managed and work items should be
	 * executed ASAP.  Wake up another worker if necessary.
	 */
	if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
		wake_up_worker(pool);

	spin_unlock_irq(&gcwq->lock);

	work_clear_pending(work);
	lock_map_acquire_read(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	trace_workqueue_execute_start(work);
	f(work);
	/*
	 * While we must be careful to not use "work" after this, the trace
	 * point will only record its address.
	 */
	trace_workqueue_execute_end(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&gcwq->lock);

	/* clear cpu intensive status */
	if (unlikely(cpu_intensive))
		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);

	/* we're done with it, release */
	hlist_del_init(&worker->hentry);
	worker->current_work = NULL;
	worker->current_cwq = NULL;
	cwq_dec_nr_in_flight(cwq, work_color, false);
}
/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Process all scheduled works.  Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}
/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The gcwq worker thread function.  There's a single dynamic pool of
 * these per each cpu.  These workers process all works regardless of
 * their specific target workqueue.  The only exception is works which
 * belong to workqueues with a rescuer which will be explained in
 * rescuer_thread().
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct worker_pool *pool = worker->pool;
	struct global_cwq *gcwq = pool->gcwq;

	/* tell the scheduler that this is a workqueue worker */
	worker->task->flags |= PF_WQ_WORKER;
woke_up:
	spin_lock_irq(&gcwq->lock);

	/* DIE can be set only while we're idle, checking here is enough */
	if (worker->flags & WORKER_DIE) {
		spin_unlock_irq(&gcwq->lock);
		worker->task->flags &= ~PF_WQ_WORKER;
		return 0;
	}

	worker_leave_idle(worker);
recheck:
	/* no more worker necessary? */
	if (!need_more_worker(pool))
		goto sleep;

	/* do we need to manage? */
	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
		goto recheck;

	/*
	 * ->scheduled list can only be filled while a worker is
	 * preparing to process a work or actually processing it.
	 * Make sure nobody diddled with it while I was sleeping.
	 */
	BUG_ON(!list_empty(&worker->scheduled));

	/*
	 * When control reaches this point, we're guaranteed to have
	 * at least one idle worker or that someone else has already
	 * assumed the manager role.
	 */
	worker_clr_flags(worker, WORKER_PREP);

	do {
		struct work_struct *work =
			list_first_entry(&pool->worklist,
					 struct work_struct, entry);

		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
			/* optimization path, not strictly necessary */
			process_one_work(worker, work);
			if (unlikely(!list_empty(&worker->scheduled)))
				process_scheduled_works(worker);
		} else {
			move_linked_works(work, &worker->scheduled, NULL);
			process_scheduled_works(worker);
		}
	} while (keep_working(pool));

	worker_set_flags(worker, WORKER_PREP, false);
sleep:
	if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker))
		goto recheck;

	/*
	 * gcwq->lock is held and there's no work to process and no
	 * need to manage, sleep.  Workers are woken up only while
	 * holding gcwq->lock or from local cpu, so setting the
	 * current state before releasing gcwq->lock is enough to
	 * prevent losing any event.
	 */
	worker_enter_idle(worker);
	__set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irq(&gcwq->lock);
	schedule();
	goto woke_up;
}
/**
 * rescuer_thread - the rescuer thread function
 * @__wq: the associated workqueue
 *
 * Workqueue rescuer thread function.  There's one rescuer for each
 * workqueue which has WQ_RESCUER set.
 *
 * Regular work processing on a gcwq may block trying to create a new
 * worker which uses GFP_KERNEL allocation which has slight chance of
 * developing into deadlock if some works currently on the same queue
 * need to be processed to satisfy the GFP_KERNEL allocation.  This is
 * the problem rescuer solves.
 *
 * When such condition is possible, the gcwq summons rescuers of all
 * workqueues which have works queued on the gcwq and let them process
 * those works so that forward progress can be guaranteed.
 *
 * This should happen rarely.
 */
static int rescuer_thread(void *__wq)
{
	struct workqueue_struct *wq = __wq;
	struct worker *rescuer = wq->rescuer;
	struct list_head *scheduled = &rescuer->scheduled;
	bool is_unbound = wq->flags & WQ_UNBOUND;
	unsigned int cpu;

	set_user_nice(current, RESCUER_NICE_LEVEL);
repeat:
	set_current_state(TASK_INTERRUPTIBLE);

	if (kthread_should_stop())
		return 0;

	/*
	 * See whether any cpu is asking for help.  Unbounded
	 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
	 */
	for_each_mayday_cpu(cpu, wq->mayday_mask) {
		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
		struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
		struct worker_pool *pool = cwq->pool;
		struct global_cwq *gcwq = pool->gcwq;
		struct work_struct *work, *n;

		__set_current_state(TASK_RUNNING);
		mayday_clear_cpu(cpu, wq->mayday_mask);

		/* migrate to the target cpu if possible */
		rescuer->pool = pool;
		worker_maybe_bind_and_lock(rescuer);

		/*
		 * Slurp in all works issued via this workqueue and
		 * process'em.
		 */
		BUG_ON(!list_empty(&rescuer->scheduled));
		list_for_each_entry_safe(work, n, &pool->worklist, entry)
			if (get_work_cwq(work) == cwq)
				move_linked_works(work, scheduled, &n);

		process_scheduled_works(rescuer);

		/*
		 * Leave this gcwq.  If keep_working() is %true, notify a
		 * regular worker; otherwise, we end up with 0 concurrency
		 * and stalling the execution.
		 */
		if (keep_working(pool))
			wake_up_worker(pool);

		spin_unlock_irq(&gcwq->lock);
	}

	schedule();
	goto repeat;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution.  Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled.  This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine cwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with gcwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}
/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1.  If no cwq has in-flight commands at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
 * has in flight commands, its cwq->flush_color is set to
 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	unsigned int cpu;

	if (flush_color >= 0) {
		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
		atomic_set(&wq->nr_cwqs_to_flush, 1);
	}

	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = cwq->pool->gcwq;

		spin_lock_irq(&gcwq->lock);

		if (flush_color >= 0) {
			BUG_ON(cwq->flush_color != -1);

			if (cwq->nr_in_flight[flush_color]) {
				cwq->flush_color = flush_color;
				atomic_inc(&wq->nr_cwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			BUG_ON(work_color != work_next_color(cwq->work_color));
			cwq->work_color = work_color;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->flush_mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		BUG_ON(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			BUG_ON(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			BUG_ON(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->flush_mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->flush_mutex);

	/* we might have raced, check again with mutex held */
	if (wq->first_flusher != &this_flusher)
		goto out_unlock;

	wq->first_flusher = NULL;

	BUG_ON(!list_empty(&this_flusher.list));
	BUG_ON(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		BUG_ON(!list_empty(&wq->flusher_overflow) &&
		       wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			BUG_ON(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm cwqs.
		 */
		BUG_ON(wq->flush_color == wq->work_color);
		BUG_ON(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->flush_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
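/*
 * Example: typical use of flush_workqueue() from a driver shutdown
 * path.  Illustrative sketch only; "my_wq" is a hypothetical workqueue
 * created elsewhere with alloc_workqueue().
 */
#if 0 /* usage sketch */
static struct workqueue_struct *my_wq;

static void my_shutdown(void)
{
	/* every work item queued on my_wq before this call has finished */
	flush_workqueue(my_wq);
	destroy_workqueue(my_wq);
}
#endif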
/**
 * drain_workqueue - drain a workqueue
 * @wq: workqueue to drain
 *
 * Wait until the workqueue becomes empty.  While draining is in progress,
 * only chain queueing is allowed.  IOW, only currently pending or running
 * work items on @wq can queue further work items on it.  @wq is flushed
 * repeatedly until it becomes empty.  The number of flushing is determined
 * by the depth of chaining and should be relatively short.  Whine if it
 * takes too long.
 */
void drain_workqueue(struct workqueue_struct *wq)
{
	unsigned int flush_cnt = 0;
	unsigned int cpu;

	/*
	 * __queue_work() needs to test whether there are drainers, is much
	 * hotter than drain_workqueue() and already looks at @wq->flags.
	 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
	 */
	spin_lock(&workqueue_lock);
	if (!wq->nr_drainers++)
		wq->flags |= WQ_DRAINING;
	spin_unlock(&workqueue_lock);
reflush:
	flush_workqueue(wq);

	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		bool drained;

		spin_lock_irq(&cwq->pool->gcwq->lock);
		drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
		spin_unlock_irq(&cwq->pool->gcwq->lock);

		if (drained)
			continue;

		if (++flush_cnt == 10 ||
		    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
			pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
				   wq->name, flush_cnt);
		goto reflush;
	}

	spin_lock(&workqueue_lock);
	if (!--wq->nr_drainers)
		wq->flags &= ~WQ_DRAINING;
	spin_unlock(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(drain_workqueue);
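/*
 * Example: chain queueing under drain_workqueue().  Illustrative sketch
 * only; the my_* identifiers are hypothetical.  A work item running on
 * the draining workqueue may legally re-queue itself; drain_workqueue()
 * returns once the chain dies out.
 */
#if 0 /* usage sketch */
static struct workqueue_struct *my_wq;
static struct work_struct my_work;
static bool my_stop;

static void my_work_fn(struct work_struct *work)
{
	if (!my_stop)
		queue_work(my_wq, work);	/* allowed: chain queueing */
}

static void my_teardown(void)
{
	my_stop = true;
	drain_workqueue(my_wq);
}
#endif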
static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
			     bool wait_executing)
{
	struct worker *worker = NULL;
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;

	might_sleep();
	gcwq = get_work_gcwq(work);
	if (!gcwq)
		return false;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued to a different gcwq under us, we
		 * are not going to wait.
		 */
		smp_rmb();
		cwq = get_work_cwq(work);
		if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
			goto already_gone;
	} else if (wait_executing) {
		worker = find_worker_executing_work(gcwq, work);
		if (!worker)
			goto already_gone;
		cwq = worker->current_cwq;
	} else
		goto already_gone;

	insert_wq_barrier(cwq, barr, work, worker);
	spin_unlock_irq(&gcwq->lock);

	/*
	 * If @max_active is 1 or rescuer is in use, flushing another work
	 * item on the same workqueue may lead to deadlock.  Make sure the
	 * flusher is not running on the same workqueue by verifying write
	 * access.
	 */
	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
		lock_map_acquire(&cwq->wq->lockdep_map);
	else
		lock_map_acquire_read(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	return true;
already_gone:
	spin_unlock_irq(&gcwq->lock);
	return false;
}

/**
 * flush_work - wait for a work to finish executing the last queueing instance
 * @work: the work to flush
 *
 * Wait until @work has finished execution.  This function considers
 * only the last queueing instance of @work.  If @work has been
 * enqueued across different CPUs on a non-reentrant workqueue or on
 * multiple workqueues, @work might still be executing on return on
 * some of the CPUs from earlier queueing.
 *
 * If @work was queued only on a non-reentrant, ordered or unbound
 * workqueue, @work is guaranteed to be idle on return if it hasn't
 * been requeued since flush started.
 *
 * RETURNS:
 * %true if flush_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_work(struct work_struct *work)
{
	struct wq_barrier barr;

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	if (start_flush_work(work, &barr, true)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
		return true;
	} else
		return false;
}
EXPORT_SYMBOL_GPL(flush_work);
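/*
 * Example: waiting for one specific work item rather than flushing the
 * whole workqueue.  Illustrative sketch only; "my_work" is hypothetical
 * and assumed to have been queued elsewhere.
 */
#if 0 /* usage sketch */
static struct work_struct my_work;

static void my_sync_point(void)
{
	if (flush_work(&my_work))
		pr_debug("waited for the last queueing of my_work\n");
}
#endif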
static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
{
	struct wq_barrier barr;
	struct worker *worker;

	spin_lock_irq(&gcwq->lock);

	worker = find_worker_executing_work(gcwq, work);
	if (unlikely(worker))
		insert_wq_barrier(worker->current_cwq, &barr, work, worker);

	spin_unlock_irq(&gcwq->lock);

	if (unlikely(worker)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
		return true;
	} else
		return false;
}

static bool wait_on_work(struct work_struct *work)
{
	bool ret = false;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	for_each_gcwq_cpu(cpu)
		ret |= wait_on_cpu_work(get_gcwq(cpu), work);
	return ret;
}

/**
 * flush_work_sync - wait until a work has finished execution
 * @work: the work to flush
 *
 * Wait until @work has finished execution.  On return, it's
 * guaranteed that all queueing instances of @work which happened
 * before this function is called are finished.  In other words, if
 * @work hasn't been requeued since this function was called, @work is
 * guaranteed to be idle on return.
 *
 * RETURNS:
 * %true if flush_work_sync() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_work_sync(struct work_struct *work)
{
	struct wq_barrier barr;
	bool pending, waited;

	/* we'll wait for executions separately, queue barr only if pending */
	pending = start_flush_work(work, &barr, false);

	/* wait for executions to finish */
	waited = wait_on_work(work);

	/* wait for the pending one */
	if (pending) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return pending || waited;
}
EXPORT_SYMBOL_GPL(flush_work_sync);
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct global_cwq *gcwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	gcwq = get_work_gcwq(work);
	if (!gcwq)
		return ret;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong gcwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (gcwq == get_work_gcwq(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			cwq_dec_nr_in_flight(get_work_cwq(work),
				get_work_color(work),
				*work_data_bits(work) & WORK_STRUCT_DELAYED);
			ret = 1;
		}
	}
	spin_unlock_irq(&gcwq->lock);

	return ret;
}

static bool __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	clear_work_data(work);
	return ret;
}

/**
 * cancel_work_sync - cancel a work and wait for it to finish
 * @work: the work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself or migrates to
 * another workqueue.  On return from this function, @work is
 * guaranteed to be not pending or executing on any CPU.
 *
 * cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's.  Use cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the workqueue on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * RETURNS:
 * %true if @work was pending, %false otherwise.
 */
bool cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
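/*
 * Example: canceling a possibly pending, possibly running work item on
 * device removal.  Illustrative sketch only; struct my_dev is
 * hypothetical.
 */
#if 0 /* usage sketch */
struct my_dev {
	struct work_struct irq_work;
};

static void my_dev_remove(struct my_dev *dev)
{
	cancel_work_sync(&dev->irq_work);
	/* irq_work is neither pending nor executing from here on */
	kfree(dev);
}
#endif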
/**
 * flush_delayed_work - wait for a dwork to finish executing the last queueing
 * @dwork: the delayed work to flush
 *
 * Delayed timer is cancelled and the pending work is queued for
 * immediate execution.  Like flush_work(), this function only
 * considers the last queueing instance of @dwork.
 *
 * RETURNS:
 * %true if flush_work() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer))
		__queue_work(raw_smp_processor_id(),
			     get_work_cwq(&dwork->work)->wq, &dwork->work);
	return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * flush_delayed_work_sync - wait for a dwork to finish
 * @dwork: the delayed work to flush
 *
 * Delayed timer is cancelled and the pending work is queued for
 * execution immediately.  Other than timer handling, its behavior
 * is identical to flush_work_sync().
 *
 * RETURNS:
 * %true if flush_work_sync() waited for the work to finish execution,
 * %false if it was already idle.
 */
bool flush_delayed_work_sync(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer))
		__queue_work(raw_smp_processor_id(),
			     get_work_cwq(&dwork->work)->wq, &dwork->work);
	return flush_work_sync(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work_sync);

/**
 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
 * @dwork: the delayed work cancel
 *
 * This is cancel_work_sync() for delayed works.
 *
 * RETURNS:
 * %true if @dwork was pending, %false otherwise.
 */
bool cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
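/*
 * Example: a delayed_work life cycle - arm with a delay, force a
 * pending instance through with flush_delayed_work(), cancel on
 * teardown.  Illustrative sketch only; the my_* identifiers are
 * hypothetical.
 */
#if 0 /* usage sketch */
static struct delayed_work my_poll_work;

static void my_arm_poll(void)
{
	schedule_delayed_work(&my_poll_work, msecs_to_jiffies(500));
}

static void my_force_poll(void)
{
	flush_delayed_work(&my_poll_work);	/* runs it now if armed */
}

static void my_teardown(void)
{
	cancel_delayed_work_sync(&my_poll_work);
}
#endif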
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
EXPORT_SYMBOL(schedule_work);
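/*
 * Example: deferring non-urgent processing from hard irq context to the
 * kernel-global workqueue.  Illustrative sketch only; the my_*
 * identifiers are hypothetical and the irq types assume
 * <linux/interrupt.h>, which this file does not include itself.
 */
#if 0 /* usage sketch */
static void my_work_fn(struct work_struct *unused)
{
	/* runs later in process context and may sleep */
}
static DECLARE_WORK(my_work, my_work_fn);

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	schedule_work(&my_work);	/* no-op if already pending */
	return IRQ_HANDLED;
}
#endif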
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
			  unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			     struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - execute a function synchronously on each online CPU
 * @func: the function to call
 *
 * schedule_on_each_cpu() executes @func on each online CPU using the
 * system workqueue and blocks until all CPUs have completed.
 * schedule_on_each_cpu() is very slow.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct __percpu *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}
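/*
 * Example: running a function once on every online CPU and waiting for
 * all of them.  Illustrative sketch only; my_drain_local() is
 * hypothetical.
 */
#if 0 /* usage sketch */
static void my_drain_local(struct work_struct *unused)
{
	/* executes in process context on each online cpu */
}

static int my_drain_everywhere(void)
{
	return schedule_on_each_cpu(my_drain_local);	/* 0 or -errno */
}
#endif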
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
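/*
 * Example: a release path that can be reached from interrupt context
 * hands the final free to execute_in_process_context() with
 * caller-provided storage.  Illustrative sketch only; struct my_obj is
 * hypothetical.
 */
#if 0 /* usage sketch */
struct my_obj {
	struct execute_work ew;
};

static void my_obj_free(struct work_struct *work)
{
	kfree(container_of(work, struct my_obj, ew.work));
}

static void my_obj_release(struct my_obj *obj)
{
	/* runs my_obj_free() directly unless in_interrupt() */
	execute_in_process_context(my_obj_free, &obj->ew);
}
#endif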
int keventd_up(void)
{
	return system_wq != NULL;
}

static int alloc_cwqs(struct workqueue_struct *wq)
{
	/*
	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
	 * Make sure that the alignment isn't lower than that of
	 * unsigned long long.
	 */
	const size_t size = sizeof(struct cpu_workqueue_struct);
	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
				   __alignof__(unsigned long long));

	if (!(wq->flags & WQ_UNBOUND))
		wq->cpu_wq.pcpu = __alloc_percpu(size, align);
	else {
		void *ptr;

		/*
		 * Allocate enough room to align cwq and put an extra
		 * pointer at the end pointing back to the originally
		 * allocated pointer which will be used for free.
		 */
		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
		if (ptr) {
			wq->cpu_wq.single = PTR_ALIGN(ptr, align);
			*(void **)(wq->cpu_wq.single + 1) = ptr;
		}
	}

	/* just in case, make sure it's actually aligned */
	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
	return wq->cpu_wq.v ? 0 : -ENOMEM;
}

static void free_cwqs(struct workqueue_struct *wq)
{
	if (!(wq->flags & WQ_UNBOUND))
		free_percpu(wq->cpu_wq.pcpu);
	else if (wq->cpu_wq.single) {
		/* the pointer to free is stored right after the cwq */
		kfree(*(void **)(wq->cpu_wq.single + 1));
	}
}

static int wq_clamp_max_active(int max_active, unsigned int flags,
			       const char *name)
{
	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;

	if (max_active < 1 || max_active > lim)
		printk(KERN_WARNING "workqueue: max_active %d requested for %s "
		       "is out of range, clamping between %d and %d\n",
		       max_active, name, 1, lim);

	return clamp_val(max_active, 1, lim);
}
struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
					       unsigned int flags,
					       int max_active,
					       struct lock_class_key *key,
					       const char *lock_name, ...)
{
	va_list args, args1;
	struct workqueue_struct *wq;
	unsigned int cpu;
	size_t namelen;

	/* determine namelen, allocate wq and format name */
	va_start(args, lock_name);
	va_copy(args1, args);
	namelen = vsnprintf(NULL, 0, fmt, args) + 1;

	wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
	if (!wq)
		goto err;

	vsnprintf(wq->name, namelen, fmt, args1);
	va_end(args);
	va_end(args1);

	/*
	 * Workqueues which may be used during memory reclaim should
	 * have a rescuer to guarantee forward progress.
	 */
	if (flags & WQ_MEM_RECLAIM)
		flags |= WQ_RESCUER;

	max_active = max_active ?: WQ_DFL_ACTIVE;
	max_active = wq_clamp_max_active(max_active, flags, wq->name);

	/* init wq */
	wq->flags = flags;
	wq->saved_max_active = max_active;
	mutex_init(&wq->flush_mutex);
	atomic_set(&wq->nr_cwqs_to_flush, 0);
	INIT_LIST_HEAD(&wq->flusher_queue);
	INIT_LIST_HEAD(&wq->flusher_overflow);

	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	INIT_LIST_HEAD(&wq->list);

	if (alloc_cwqs(wq) < 0)
		goto err;

	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = get_gcwq(cpu);

		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
		cwq->pool = &gcwq->pool;
		cwq->wq = wq;
		cwq->flush_color = -1;
		cwq->max_active = max_active;
		INIT_LIST_HEAD(&cwq->delayed_works);
	}

	if (flags & WQ_RESCUER) {
		struct worker *rescuer;

		if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
			goto err;

		wq->rescuer = rescuer = alloc_worker();
		if (!rescuer)
			goto err;

		rescuer->task = kthread_create(rescuer_thread, wq, "%s",
					       wq->name);
		if (IS_ERR(rescuer->task))
			goto err;

		rescuer->task->flags |= PF_THREAD_BOUND;
		wake_up_process(rescuer->task);
	}

	/*
	 * workqueue_lock protects global freeze state and workqueues
	 * list.  Grab it, set max_active accordingly and add the new
	 * workqueue to workqueues list.
	 */
	spin_lock(&workqueue_lock);

	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
		for_each_cwq_cpu(cpu, wq)
			get_cwq(cpu, wq)->max_active = 0;

	list_add(&wq->list, &workqueues);

	spin_unlock(&workqueue_lock);

	return wq;
err:
	if (wq) {
		free_cwqs(wq);
		free_mayday_mask(wq->mayday_mask);
		kfree(wq->rescuer);
		kfree(wq);
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
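/*
 * Example: allocating a private workqueue via the alloc_workqueue()
 * wrapper around this function.  Illustrative sketch only; "my_block"
 * is a hypothetical driver.  WQ_MEM_RECLAIM requests a rescuer so the
 * queue can make forward progress during memory reclaim; max_active of
 * 1 additionally serializes the work items.
 */
#if 0 /* usage sketch */
static struct workqueue_struct *my_block_wq;

static int __init my_block_init(void)
{
	my_block_wq = alloc_workqueue("my_block", WQ_MEM_RECLAIM, 1);
	if (!my_block_wq)
		return -ENOMEM;
	return 0;
}
#endif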
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	unsigned int cpu;

	/* drain it before proceeding with destruction */
	drain_workqueue(wq);

	/*
	 * wq list is used to freeze wq, remove from list after
	 * flushing is complete in case freeze races us.
	 */
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	/* sanity check */
	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		int i;

		for (i = 0; i < WORK_NR_COLORS; i++)
			BUG_ON(cwq->nr_in_flight[i]);
		BUG_ON(cwq->nr_active);
		BUG_ON(!list_empty(&cwq->delayed_works));
	}

	if (wq->flags & WQ_RESCUER) {
		kthread_stop(wq->rescuer->task);
		free_mayday_mask(wq->mayday_mask);
		kfree(wq->rescuer);
	}

	free_cwqs(wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
/**
 * workqueue_set_max_active - adjust max_active of a workqueue
 * @wq: target workqueue
 * @max_active: new max_active value.
 *
 * Set max_active of @wq to @max_active.
 *
 * CONTEXT:
 * Don't call from IRQ context.
 */
void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
{
	unsigned int cpu;

	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);

	spin_lock(&workqueue_lock);

	wq->saved_max_active = max_active;

	for_each_cwq_cpu(cpu, wq) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		if (!(wq->flags & WQ_FREEZABLE) ||
		    !(gcwq->flags & GCWQ_FREEZING))
			get_cwq(gcwq->cpu, wq)->max_active = max_active;

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
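/*
 * Example: retuning concurrency after probing.  Illustrative sketch
 * only; my_wq and my_ndevices are hypothetical.  The value is clamped
 * by wq_clamp_max_active() internally.
 */
#if 0 /* usage sketch */
static void my_tune(struct workqueue_struct *my_wq, int my_ndevices)
{
	workqueue_set_max_active(my_wq, my_ndevices);
}
#endif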
/**
 * workqueue_congested - test whether a workqueue is congested
 * @cpu: CPU in question
 * @wq: target workqueue
 *
 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
 * no synchronization around this function and the test result is
 * unreliable and only useful as advisory hints or for debugging.
 *
 * RETURNS:
 * %true if congested, %false otherwise.
 */
bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
{
	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

	return !list_empty(&cwq->delayed_works);
}
EXPORT_SYMBOL_GPL(workqueue_congested);
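/*
 * Example: using the congestion hint to shed optional work.
 * Illustrative sketch only; my_wq is hypothetical and the result is
 * advisory - it may be stale by the time it is used.
 */
#if 0 /* usage sketch */
static bool my_should_skip_optional_work(struct workqueue_struct *my_wq)
{
	return workqueue_congested(raw_smp_processor_id(), my_wq);
}
#endif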
/**
 * work_cpu - return the last known associated cpu for @work
 * @work: the work of interest
 *
 * RETURNS:
 * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
 */
unsigned int work_cpu(struct work_struct *work)
{
	struct global_cwq *gcwq = get_work_gcwq(work);

	return gcwq ? gcwq->cpu : WORK_CPU_NONE;
}
EXPORT_SYMBOL_GPL(work_cpu);

/**
 * work_busy - test whether a work is currently pending or running
 * @work: the work to be tested
 *
 * Test whether @work is currently pending or running.  There is no
 * synchronization around this function and the test result is
 * unreliable and only useful as advisory hints or for debugging.
 * Especially for reentrant wqs, the pending state might hide the
 * running state.
 *
 * RETURNS:
 * OR'd bitmask of WORK_BUSY_* bits.
 */
unsigned int work_busy(struct work_struct *work)
{
	struct global_cwq *gcwq = get_work_gcwq(work);
	unsigned long flags;
	unsigned int ret = 0;

	if (!gcwq)
		return false;

	spin_lock_irqsave(&gcwq->lock, flags);

	if (work_pending(work))
		ret |= WORK_BUSY_PENDING;
	if (find_worker_executing_work(gcwq, work))
		ret |= WORK_BUSY_RUNNING;

	spin_unlock_irqrestore(&gcwq->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(work_busy);
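/*
 * Example: dumping the advisory busy state from a debug path.
 * Illustrative sketch only; the result is a hint and may change
 * immediately after the call.
 */
#if 0 /* usage sketch */
static void my_debug_work_state(struct work_struct *work)
{
	unsigned int busy = work_busy(work);

	pr_info("work %p:%s%s\n", work,
		busy & WORK_BUSY_PENDING ? " pending" : "",
		busy & WORK_BUSY_RUNNING ? " running" : "");
}
#endif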
/*
 * CPU hotplug.
 *
 * There are two challenges in supporting CPU hotplug.  Firstly, there
 * are a lot of assumptions on strong associations among work, cwq and
 * gcwq which make migrating pending and scheduled works very
 * difficult to implement without impacting hot paths.  Secondly,
 * gcwqs serve mix of short, long and very long running works making
 * blocked draining impractical.
 *
 * This is solved by allowing a gcwq to be detached from CPU, running
 * it with unbound (rogue) workers and allowing it to be reattached
 * later if the cpu comes back online.  A separate thread is created
 * to govern a gcwq in such state and is called the trustee of the
 * gcwq.
 *
 * Trustee states and their descriptions.
 *
 * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
 *		new trustee is started with this state.
 *
 * IN_CHARGE	Once started, trustee will enter this state after
 *		assuming the manager role and making all existing
 *		workers rogue.  DOWN_PREPARE waits for trustee to
 *		enter this state.  After reaching IN_CHARGE, trustee
 *		tries to execute the pending worklist until it's empty
 *		and the state is set to BUTCHER, or the state is set
 *		to RELEASE.
 *
 * BUTCHER	Command state which is set by the cpu callback after
 *		the cpu has gone down.  Once this state is set trustee
 *		knows that there will be no new works on the worklist
 *		and once the worklist is empty it can proceed to
 *		killing idle workers.
 *
 * RELEASE	Command state which is set by the cpu callback if the
 *		cpu down has been canceled or it has come online
 *		again.  After recognizing this state, trustee stops
 *		trying to drain or butcher and clears ROGUE, rebinds
 *		all remaining workers back to the cpu and releases
 *		manager role.
 *
 * DONE		Trustee will enter this state after BUTCHER or RELEASE
 *		is complete.
 *
 *          trustee                 CPU                draining
 *         took over                down               complete
 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
 *                        |                     |                  ^
 *                        | CPU is back online  v   return workers |
 *                         ----------------> RELEASE --------------
 */
/**
 * trustee_wait_event_timeout - timed event wait for trustee
 * @cond: condition to wait for
 * @timeout: timeout in jiffies
 *
 * wait_event_timeout() for trustee to use.  Handles locking and
 * checks for RELEASE request.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * Positive indicating left time if @cond is satisfied, 0 if timed
 * out, -1 if canceled.
 */
#define trustee_wait_event_timeout(cond, timeout) ({			\
	long __ret = (timeout);						\
	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
	       __ret) {							\
		spin_unlock_irq(&gcwq->lock);				\
		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
			__ret);						\
		spin_lock_irq(&gcwq->lock);				\
	}								\
	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
})
/**
 * trustee_wait_event - event wait for trustee
 * @cond: condition to wait for
 *
 * wait_event() for trustee to use.  Automatically handles locking and
 * checks for CANCEL request.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * 0 if @cond is satisfied, -1 if canceled.
 */
#define trustee_wait_event(cond) ({					\
	long __ret1;							\
	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
	__ret1 < 0 ? -1 : 0;						\
})
static bool gcwq_is_managing_workers(struct global_cwq *gcwq)
{
	struct worker_pool *pool;

	for_each_worker_pool(pool, gcwq)
		if (pool->flags & POOL_MANAGING_WORKERS)
			return true;
	return false;
}

static bool gcwq_has_idle_workers(struct global_cwq *gcwq)
{
	struct worker_pool *pool;

	for_each_worker_pool(pool, gcwq)
		if (!list_empty(&pool->idle_list))
			return true;
	return false;
}
static int __cpuinit trustee_thread(void *__gcwq)
{
	struct global_cwq *gcwq = __gcwq;
	struct worker_pool *pool;
	struct worker *worker;
	struct work_struct *work;
	struct hlist_node *pos;
	long rc;
	int i;

	BUG_ON(gcwq->cpu != smp_processor_id());

	spin_lock_irq(&gcwq->lock);
	/*
	 * Claim the manager position and make all workers rogue.
	 * Trustee must be bound to the target cpu and can't be
	 * cancelled.
	 */
	BUG_ON(gcwq->cpu != smp_processor_id());
	rc = trustee_wait_event(!gcwq_is_managing_workers(gcwq));
	BUG_ON(rc < 0);

	for_each_worker_pool(pool, gcwq) {
		pool->flags |= POOL_MANAGING_WORKERS;

		list_for_each_entry(worker, &pool->idle_list, entry)
			worker->flags |= WORKER_ROGUE;
	}

	for_each_busy_worker(worker, i, pos, gcwq)
		worker->flags |= WORKER_ROGUE;

	/*
	 * Call schedule() so that we cross rq->lock and thus can
	 * guarantee sched callbacks see the rogue flag.  This is
	 * necessary as scheduler callbacks may be invoked from other
	 * cpus.
	 */
	spin_unlock_irq(&gcwq->lock);
	schedule();
	spin_lock_irq(&gcwq->lock);

	/*
	 * Sched callbacks are disabled now.  Zap nr_running.  After
	 * this, nr_running stays zero and need_more_worker() and
	 * keep_working() are always true as long as the worklist is
	 * not empty.
	 */
	for_each_worker_pool(pool, gcwq)
		atomic_set(get_pool_nr_running(pool), 0);

	spin_unlock_irq(&gcwq->lock);
	for_each_worker_pool(pool, gcwq)
		del_timer_sync(&pool->idle_timer);
	spin_lock_irq(&gcwq->lock);

	/*
	 * We're now in charge.  Notify and proceed to drain.  We need
	 * to keep the gcwq running during the whole CPU down
	 * procedure as other cpu hotunplug callbacks may need to
	 * flush currently running tasks.
	 */
	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
	wake_up_all(&gcwq->trustee_wait);

	/*
	 * The original cpu is in the process of dying and may go away
	 * anytime now.  When that happens, we and all workers would
	 * be migrated to other cpus.  Try draining any left work.  We
	 * want to get it over with ASAP - spam rescuers, wake up as
	 * many idlers as necessary and create new ones till the
	 * worklist is empty.  Note that if the gcwq is frozen, there
	 * may be frozen works in freezable cwqs.  Don't declare
	 * completion while frozen.
	 */
	while (true) {
		bool busy = false;

		for_each_worker_pool(pool, gcwq)
			busy |= pool->nr_workers != pool->nr_idle;

		if (!busy && !(gcwq->flags & GCWQ_FREEZING) &&
		    gcwq->trustee_state != TRUSTEE_IN_CHARGE)
			break;

		for_each_worker_pool(pool, gcwq) {
			int nr_works = 0;

			list_for_each_entry(work, &pool->worklist, entry) {
				send_mayday(work);
				nr_works++;
			}

			list_for_each_entry(worker, &pool->idle_list, entry) {
				if (!nr_works--)
					break;
				wake_up_process(worker->task);
			}

			if (need_to_create_worker(pool)) {
				spin_unlock_irq(&gcwq->lock);
				worker = create_worker(pool, false);
				spin_lock_irq(&gcwq->lock);
				if (worker) {
					worker->flags |= WORKER_ROGUE;
					start_worker(worker);
				}
			}
		}

		/* give a breather */
		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
			break;
	}

	/*
	 * Either all works have been scheduled and cpu is down, or
	 * cpu down has already been canceled.  Wait for and butcher
	 * all workers till we're canceled.
	 */
	do {
		rc = trustee_wait_event(gcwq_has_idle_workers(gcwq));

		i = 0;
		for_each_worker_pool(pool, gcwq) {
			while (!list_empty(&pool->idle_list)) {
				worker = list_first_entry(&pool->idle_list,
							  struct worker, entry);
				destroy_worker(worker);
			}
			i |= pool->nr_workers;
		}
	} while (i && rc >= 0);

	/*
	 * At this point, either draining has completed and no worker
	 * is left, or cpu down has been canceled or the cpu is being
	 * brought back up.  There shouldn't be any idle one left.
	 * Tell the remaining busy ones to rebind once it finishes the
	 * currently scheduled works by scheduling the rebind_work.
	 */
	for_each_worker_pool(pool, gcwq)
		WARN_ON(!list_empty(&pool->idle_list));

	for_each_busy_worker(worker, i, pos, gcwq) {
		struct work_struct *rebind_work = &worker->rebind_work;

		/*
		 * Rebind_work may race with future cpu hotplug
		 * operations.  Use a separate flag to mark that
		 * rebinding is scheduled.
		 */
		worker->flags |= WORKER_REBIND;
		worker->flags &= ~WORKER_ROGUE;

		/* queue rebind_work, wq doesn't matter, use the default one */
		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
				     work_data_bits(rebind_work)))
			continue;

		debug_work_activate(rebind_work);
		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
			    worker->scheduled.next,
			    work_color_to_flags(WORK_NO_COLOR));
	}

	/* relinquish manager role */
	for_each_worker_pool(pool, gcwq)
		pool->flags &= ~POOL_MANAGING_WORKERS;

	/* notify completion */
	gcwq->trustee = NULL;
	gcwq->trustee_state = TRUSTEE_DONE;
	wake_up_all(&gcwq->trustee_wait);
	spin_unlock_irq(&gcwq->lock);
	return 0;
}
/**
 * wait_trustee_state - wait for trustee to enter the specified state
 * @gcwq: gcwq the trustee of interest belongs to
 * @state: target state to wait for
 *
 * Wait for the trustee to reach @state.  DONE is already matched.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by cpu_callback.
 */
static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	if (!(gcwq->trustee_state == state ||
	      gcwq->trustee_state == TRUSTEE_DONE)) {
		spin_unlock_irq(&gcwq->lock);
		__wait_event(gcwq->trustee_wait,
			     gcwq->trustee_state == state ||
			     gcwq->trustee_state == TRUSTEE_DONE);
		spin_lock_irq(&gcwq->lock);
	}
}
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action,
					    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct global_cwq *gcwq = get_gcwq(cpu);
	struct task_struct *new_trustee = NULL;
	struct worker *new_workers[NR_WORKER_POOLS] = { };
	struct worker_pool *pool;
	unsigned long flags;
	int i;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_DOWN_PREPARE:
		new_trustee = kthread_create(trustee_thread, gcwq,
					     "workqueue_trustee/%d\n", cpu);
		if (IS_ERR(new_trustee))
			return notifier_from_errno(PTR_ERR(new_trustee));
		kthread_bind(new_trustee, cpu);
		/* fall through */
	case CPU_UP_PREPARE:
		i = 0;
		for_each_worker_pool(pool, gcwq) {
			BUG_ON(pool->first_idle);
			new_workers[i] = create_worker(pool, false);
			if (!new_workers[i++])
				goto err_destroy;
		}
	}

	/* some are called w/ irq disabled, don't disturb irq status */
	spin_lock_irqsave(&gcwq->lock, flags);

	switch (action) {
	case CPU_DOWN_PREPARE:
		/* initialize trustee and tell it to acquire the gcwq */
		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
		gcwq->trustee = new_trustee;
		gcwq->trustee_state = TRUSTEE_START;
		wake_up_process(gcwq->trustee);
		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
		/* fall through */
	case CPU_UP_PREPARE:
		i = 0;
		for_each_worker_pool(pool, gcwq) {
			BUG_ON(pool->first_idle);
			pool->first_idle = new_workers[i++];
		}
		break;

	case CPU_DYING:
		/*
		 * Before this, the trustee and all workers except for
		 * the ones which are still executing works from
		 * before the last CPU down must be on the cpu.  After
		 * this, they'll all be diasporas.
		 */
		gcwq->flags |= GCWQ_DISASSOCIATED;
		break;

	case CPU_POST_DEAD:
		gcwq->trustee_state = TRUSTEE_BUTCHER;
		/* fall through */
	case CPU_UP_CANCELED:
		for_each_worker_pool(pool, gcwq) {
			destroy_worker(pool->first_idle);
			pool->first_idle = NULL;
		}
		break;

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		gcwq->flags &= ~GCWQ_DISASSOCIATED;
		if (gcwq->trustee_state != TRUSTEE_DONE) {
			gcwq->trustee_state = TRUSTEE_RELEASE;
			wake_up_process(gcwq->trustee);
			wait_trustee_state(gcwq, TRUSTEE_DONE);
		}

		/*
		 * Trustee is done and there might be no worker left.
		 * Put the first_idle in and request a real manager to
		 * take over.
		 */
		for_each_worker_pool(pool, gcwq) {
			spin_unlock_irq(&gcwq->lock);
			kthread_bind(pool->first_idle->task, cpu);
			spin_lock_irq(&gcwq->lock);
			pool->flags |= POOL_MANAGE_WORKERS;
			start_worker(pool->first_idle);
			pool->first_idle = NULL;
		}
		break;
	}

	spin_unlock_irqrestore(&gcwq->lock, flags);

	return notifier_from_errno(0);

err_destroy:
	if (new_trustee)
		kthread_stop(new_trustee);

	spin_lock_irqsave(&gcwq->lock, flags);
	for (i = 0; i < NR_WORKER_POOLS; i++)
		if (new_workers[i])
			destroy_worker(new_workers[i]);
	spin_unlock_irqrestore(&gcwq->lock, flags);

	return NOTIFY_BAD;
}
#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
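/*
 * Example: reading a CPU-local resource from the right CPU by running
 * the accessor there synchronously.  Illustrative sketch only; the
 * my_* identifiers are hypothetical and the caller must keep @cpu
 * online.
 */
#if 0 /* usage sketch */
static long my_read_local_counter(void *arg)
{
	/* runs in process context, bound to the requested cpu */
	return 0;
}

static long my_read_counter_on(unsigned int cpu)
{
	return work_on_cpu(cpu, my_read_local_counter, NULL);
}
#endif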
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER

/**
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues.  After this function returns, all freezable
 * workqueues will queue new works to their frozen_works list instead of
 * gcwq->worklist.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void freeze_workqueues_begin(void)
{
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	BUG_ON(workqueue_freezing);
	workqueue_freezing = true;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct workqueue_struct *wq;

		spin_lock_irq(&gcwq->lock);

		BUG_ON(gcwq->flags & GCWQ_FREEZING);
		gcwq->flags |= GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (cwq && wq->flags & WQ_FREEZABLE)
				cwq->max_active = 0;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}
/**
 * freeze_workqueues_busy - are freezable workqueues still busy?
 *
 * Check whether freezing is complete.  This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock.
 *
 * RETURNS:
 * %true if some freezable workqueues are still busy.  %false if freezing
 * is complete.
 */
bool freeze_workqueues_busy(void)
{
	unsigned int cpu;
	bool busy = false;

	spin_lock(&workqueue_lock);

	BUG_ON(!workqueue_freezing);

	for_each_gcwq_cpu(cpu) {
		struct workqueue_struct *wq;
		/*
		 * nr_active is monotonically decreasing.  It's safe
		 * to peek without lock.
		 */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!cwq || !(wq->flags & WQ_FREEZABLE))
				continue;

			BUG_ON(cwq->nr_active < 0);
			if (cwq->nr_active) {
				busy = true;
				goto out_unlock;
			}
		}
	}
out_unlock:
	spin_unlock(&workqueue_lock);
	return busy;
}
/**
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues.  Normal queueing is restored and all collected
 * frozen works are transferred to their respective gcwq worklists.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void thaw_workqueues(void)
{
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	if (!workqueue_freezing)
		goto out_unlock;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker_pool *pool;
		struct workqueue_struct *wq;

		spin_lock_irq(&gcwq->lock);

		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
		gcwq->flags &= ~GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!cwq || !(wq->flags & WQ_FREEZABLE))
				continue;

			/* restore max_active and repopulate worklist */
			cwq->max_active = wq->saved_max_active;

			while (!list_empty(&cwq->delayed_works) &&
			       cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);
		}

		for_each_worker_pool(pool, gcwq)
			wake_up_worker(pool);

		spin_unlock_irq(&gcwq->lock);
	}

	workqueue_freezing = false;
out_unlock:
	spin_unlock(&workqueue_lock);
}
#endif /* CONFIG_FREEZER */
static int __init init_workqueues(void)
{
	unsigned int cpu;
	int i;

	cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);

	/* initialize gcwqs */
	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker_pool *pool;

		spin_lock_init(&gcwq->lock);
		gcwq->cpu = cpu;
		gcwq->flags |= GCWQ_DISASSOCIATED;

		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);

		for_each_worker_pool(pool, gcwq) {
			pool->gcwq = gcwq;
			INIT_LIST_HEAD(&pool->worklist);
			INIT_LIST_HEAD(&pool->idle_list);

			init_timer_deferrable(&pool->idle_timer);
			pool->idle_timer.function = idle_worker_timeout;
			pool->idle_timer.data = (unsigned long)pool;

			setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
				    (unsigned long)pool);

			ida_init(&pool->worker_ida);
		}

		gcwq->trustee_state = TRUSTEE_DONE;
		init_waitqueue_head(&gcwq->trustee_wait);
	}

	/* create the initial worker */
	for_each_online_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker_pool *pool;

		if (cpu != WORK_CPU_UNBOUND)
			gcwq->flags &= ~GCWQ_DISASSOCIATED;

		for_each_worker_pool(pool, gcwq) {
			struct worker *worker;

			worker = create_worker(pool, true);
			BUG_ON(!worker);
			spin_lock_irq(&gcwq->lock);
			start_worker(worker);
			spin_unlock_irq(&gcwq->lock);
		}
	}

	system_wq = alloc_workqueue("events", 0, 0);
	system_long_wq = alloc_workqueue("events_long", 0, 0);
	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
					    WQ_UNBOUND_MAX_ACTIVE);
	system_freezable_wq = alloc_workqueue("events_freezable",
					      WQ_FREEZABLE, 0);
	system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
			WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
	       !system_unbound_wq || !system_freezable_wq ||
	       !system_nrt_freezable_wq);
	return 0;
}
early_initcall(init_workqueues);