/* Modified by Broadcom Corp. Portions Copyright (c) Broadcom Corp, 2012. */
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002		Ingo Molnar
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There is one worker pool for each CPU and
 * one extra for works which are better served by workers which are
 * not bound to any specific CPU.
 *
 * Please read Documentation/workqueue.txt for details.
 */
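/*
 * Illustrative usage sketch (not part of this file's logic): a minimal
 * user of the mechanism described above.  The function and work item
 * names are hypothetical.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("executed in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	// somewhere in init code or an interrupt handler:
 *	schedule_work(&my_work);	// queues on the shared pool
 */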
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#include "workqueue_sched.h"

#if defined(CONFIG_BUZZZ)
#include <asm/buzzz.h>
#endif /* CONFIG_BUZZZ */
enum {
	/* global_cwq flags */
	GCWQ_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
	GCWQ_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
	GCWQ_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */
	GCWQ_HIGHPRI_PENDING	= 1 << 4,	/* highpri works on queue */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_PREP		= 1 << 3,	/* preparing to run works */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
	WORKER_UNBOUND		= 1 << 7,	/* worker is unbound */

	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
				  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
	CREATE_COOLDOWN		= HZ,		/* time to breathe after fail */
	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */

	/*
	 * Rescue workers are used only on emergencies and shared by
	 * all cpus.
	 */
	RESCUER_NICE_LEVEL	= -20,
};
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * X: During normal operation, modification requires gcwq->lock and
 *    should be done only from local cpu.  Either disabling preemption
 *    on local cpu or grabbing gcwq->lock is enough for read access.
 *    If GCWQ_DISASSOCIATED is set, it's identical to L.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */
struct global_cwq;

/*
 * The poor guys doing the actual heavy lifting.  All on-duty workers
 * are either serving the manager role, on idle list or on busy hash.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	/* 64 bytes boundary on 64bit, 32 on 32bit */
	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */
	struct work_struct	rebind_work;	/* L: rebind worker to cpu */
};
/*
 * Global per-cpu workqueue.  There's one and only one for each cpu
 * and all works are queued and processed here regardless of their
 * target workqueues.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	struct list_head	worklist;	/* L: list of pending works */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* X: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for dworkers */

	struct ida		worker_ida;	/* L: for worker IDs */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
	struct worker		*first_idle;	/* L: first idle worker */
} ____cacheline_aligned_in_smp;
/*
 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned at two's power of the number of flag bits.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};
/*
 * All cpumasks are assumed to be always set on UP and thus can't be
 * used to determine whether there's something to be done.
 */
#ifdef CONFIG_SMP
typedef cpumask_var_t mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	\
	cpumask_test_and_set_cpu((cpu), (mask))
#define mayday_clear_cpu(cpu, mask)	cpumask_clear_cpu((cpu), (mask))
#define for_each_mayday_cpu(cpu, mask)	for_each_cpu((cpu), (mask))
#define alloc_mayday_mask(maskp, gfp)	zalloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask)		free_cpumask_var((mask))
#else
typedef unsigned long mayday_mask_t;
#define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
#define mayday_clear_cpu(cpu, mask)	clear_bit(0, &(mask))
#define for_each_mayday_cpu(cpu, mask)	if ((cpu) = 0, (mask))
#define alloc_mayday_mask(maskp, gfp)	true
#define free_mayday_mask(mask)		do { } while (0)
#endif
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	union {
		struct cpu_workqueue_struct __percpu	*pcpu;
		struct cpu_workqueue_struct		*single;
		unsigned long				v;
	} cpu_wq;				/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
	struct worker		*rescuer;	/* I: rescue worker */

	int			saved_max_active; /* W: saved cwq max_active */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};

struct workqueue_struct *system_wq __read_mostly;
struct workqueue_struct *system_long_wq __read_mostly;
struct workqueue_struct *system_nrt_wq __read_mostly;
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_wq);
EXPORT_SYMBOL_GPL(system_long_wq);
EXPORT_SYMBOL_GPL(system_nrt_wq);
EXPORT_SYMBOL_GPL(system_unbound_wq);
#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
				  unsigned int sw)
{
	if (cpu < nr_cpu_ids) {
		if (sw & 1) {
			cpu = cpumask_next(cpu, mask);
			if (cpu < nr_cpu_ids)
				return cpu;
		}
		if (sw & 2)
			return WORK_CPU_UNBOUND;
	}
	return WORK_CPU_NONE;
}

static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
				struct workqueue_struct *wq)
{
	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
}
/*
 * CPU iterators
 *
 * An extra gcwq is defined for an invalid cpu number
 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
 * specific CPU.  The following iterators are similar to
 * for_each_*_cpu() iterators but also consider the unbound gcwq.
 *
 * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
 * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
 * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
 *				  WORK_CPU_UNBOUND for unbound workqueues
 */
#define for_each_gcwq_cpu(cpu)						\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))

#define for_each_online_gcwq_cpu(cpu)					\
	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))

#define for_each_cwq_cpu(cpu, wq)					\
	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
	     (cpu) < WORK_CPU_NONE;					\
	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
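/*
 * Example (hypothetical caller): visiting every gcwq, including the
 * unbound one, with the iterator above.
 *
 *	unsigned int cpu;
 *
 *	for_each_gcwq_cpu(cpu) {
 *		struct global_cwq *gcwq = get_gcwq(cpu);
 *		// cpu is WORK_CPU_UNBOUND on the final iteration
 *	}
 */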
#ifdef CONFIG_LOCKDEP
/**
 * in_workqueue_context() - in context of specified workqueue?
 * @wq: the workqueue of interest
 *
 * Checks lockdep state to see if the current task is executing from
 * within a workqueue item.  This function exists only if lockdep is
 * enabled.
 */
int in_workqueue_context(struct workqueue_struct *wq)
{
	return lock_is_held(&wq->lockdep_map);
}
#endif
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

/*
 * The almighty global cpu workqueues.  nr_running is the only field
 * which is expected to be used frequently by other cpus via
 * try_to_wake_up().  Put it in a separate cacheline.
 */
static DEFINE_PER_CPU(struct global_cwq, global_cwq);
static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);

/*
 * Global cpu workqueue and nr_running counter for unbound gcwq.  The
 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
 * workers have WORKER_UNBOUND set.
 */
static struct global_cwq unbound_global_cwq;
static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);	/* always 0 */

static int worker_thread(void *__worker);
static struct global_cwq *get_gcwq(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(global_cwq, cpu);
	else
		return &unbound_global_cwq;
}

static atomic_t *get_gcwq_nr_running(unsigned int cpu)
{
	if (cpu != WORK_CPU_UNBOUND)
		return &per_cpu(gcwq_nr_running, cpu);
	else
		return &unbound_gcwq_nr_running;
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	if (!(wq->flags & WQ_UNBOUND)) {
		if (likely(cpu < nr_cpu_ids)) {
#ifdef CONFIG_SMP
			return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
#else
			return wq->cpu_wq.single;
#endif
		}
	} else if (likely(cpu == WORK_CPU_UNBOUND))
		return wq->cpu_wq.single;
	return NULL;
}
static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
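/*
 * For illustration: with WORK_STRUCT_COLOR_BITS == 4, WORK_NR_COLORS
 * is 15, so work_next_color() cycles 0, 1, ..., 14 and wraps back to
 * 0.  The remaining value, WORK_NO_COLOR, is reserved for barrier
 * works which don't participate in flush accounting.
 */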
/*
 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
 * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
 * cleared and the work data contains the cpu number it was last on.
 *
 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 * cwq, cpu or clear work->data.  These functions should only be
 * called while the work is owned - ie. while the PENDING bit is set.
 *
 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 * corresponding to a work.  gcwq is available once the work has been
 * queued anywhere after initialization.  cwq is available only from
 * queueing until execution starts.
 */
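/*
 * Rough sketch of the encoding described above (flag bits live at the
 * bottom of work->data; the cwq is sufficiently aligned that its low
 * WORK_STRUCT_FLAG_BITS are zero):
 *
 *	// while queued:
 *	data = (unsigned long)cwq | WORK_STRUCT_CWQ | WORK_STRUCT_PENDING;
 *	// once execution has started:
 *	data = (cpu << WORK_STRUCT_FLAG_BITS) | flags;
 */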
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	BUG_ON(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_cwq(struct work_struct *work,
			 struct cpu_workqueue_struct *cwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)cwq,
		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
}

static void set_work_cpu(struct work_struct *work, unsigned int cpu)
{
	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
}

static void clear_work_data(struct work_struct *work)
{
	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}
static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);

	if (data & WORK_STRUCT_CWQ)
		return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
	else
		return NULL;
}

static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = atomic_long_read(&work->data);
	unsigned int cpu;

	if (data & WORK_STRUCT_CWQ)
		return ((struct cpu_workqueue_struct *)
			(data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;

	cpu = data >> WORK_STRUCT_FLAG_BITS;
	if (cpu == WORK_CPU_NONE)
		return NULL;

	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
	return get_gcwq(cpu);
}
/*
 * Policy functions.  These define the policies on how the global
 * worker pool is managed.  Unless noted otherwise, these functions
 * assume that they're being called with gcwq->lock held.
 */

static bool __need_more_worker(struct global_cwq *gcwq)
{
	return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
		gcwq->flags & GCWQ_HIGHPRI_PENDING;
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 */
static bool need_more_worker(struct global_cwq *gcwq)
{
	return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct global_cwq *gcwq)
{
	return gcwq->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct global_cwq *gcwq)
{
	atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

	return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct global_cwq *gcwq)
{
	return need_more_worker(gcwq) && !may_start_working(gcwq);
}

/* Do I need to be the manager? */
static bool need_to_manage_workers(struct global_cwq *gcwq)
{
	return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct global_cwq *gcwq)
{
	bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
	int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
	int nr_busy = gcwq->nr_workers - nr_idle;

	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
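/*
 * Worked example of the policy above: with MAX_IDLE_WORKERS_RATIO of
 * 4 and nr_busy == 8, three idle workers are tolerated ((3 - 2) * 4 <
 * 8) but a fourth makes too_many_workers() true ((4 - 2) * 4 >= 8),
 * which arms the idle reaper.
 */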
/*
 * Wake up functions.
 */

/* Return the first worker.  Safe with preemption disabled */
static struct worker *first_worker(struct global_cwq *gcwq)
{
	if (unlikely(list_empty(&gcwq->idle_list)))
		return NULL;

	return list_first_entry(&gcwq->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @gcwq: gcwq to wake worker for
 *
 * Wake up the first idle worker of @gcwq.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void wake_up_worker(struct global_cwq *gcwq)
{
	struct worker *worker = first_worker(gcwq);

	if (likely(worker))
		wake_up_process(worker->task);
}
/**
 * wq_worker_waking_up - a worker is waking up
 * @task: task waking up
 * @cpu: CPU @task is waking up to
 *
 * This function is called during try_to_wake_up() when a worker is
 * being awoken.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 */
void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
{
	struct worker *worker = kthread_data(task);

	if (likely(!(worker->flags & WORKER_NOT_RUNNING)))
		atomic_inc(get_gcwq_nr_running(cpu));
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 * @cpu: CPU in question, must be the current CPU number
 *
 * This function is called during schedule() when a busy worker is
 * going to sleep.  Worker on the same cpu can be woken up by
 * returning pointer to its task.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * RETURNS:
 * Worker task on @cpu to wake up, %NULL if none.
 */
struct task_struct *wq_worker_sleeping(struct task_struct *task,
				       unsigned int cpu)
{
	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
	struct global_cwq *gcwq = get_gcwq(cpu);
	atomic_t *nr_running = get_gcwq_nr_running(cpu);

	if (unlikely(worker->flags & WORKER_NOT_RUNNING))
		return NULL;

	/* this can only happen on the local cpu */
	BUG_ON(cpu != raw_smp_processor_id());

	/*
	 * The counterpart of the following dec_and_test, implied mb,
	 * worklist not empty test sequence is in insert_work().
	 * Please read comment there.
	 *
	 * NOT_RUNNING is clear.  This means that trustee is not in
	 * charge and we're running on the local cpu w/ rq lock held
	 * and preemption disabled, which in turn means that none else
	 * could be manipulating idle_list, so dereferencing idle_list
	 * without gcwq lock is safe.
	 */
	if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
		to_wakeup = first_worker(gcwq);
	return to_wakeup ? to_wakeup->task : NULL;
}
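/*
 * For context, a simplified sketch of how the scheduler invokes the
 * two hooks above (see kernel/sched.c and workqueue_sched.h; the
 * exact code differs):
 *
 *	// in try_to_wake_up():
 *	if (p->flags & PF_WQ_WORKER)
 *		wq_worker_waking_up(p, cpu_of(rq));
 *
 *	// in schedule(), when a busy worker blocks:
 *	if (prev->flags & PF_WQ_WORKER) {
 *		to_wakeup = wq_worker_sleeping(prev, cpu);
 *		if (to_wakeup)
 *			try_to_wake_up_local(to_wakeup);
 *	}
 */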
/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 * @wakeup: wakeup an idle worker if necessary
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 * nr_running becomes zero and @wakeup is %true, an idle worker is
 * woken up.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
				    bool wakeup)
{
	struct global_cwq *gcwq = worker->gcwq;

	WARN_ON_ONCE(worker->task != current);

	/*
	 * If transitioning into NOT_RUNNING, adjust nr_running and
	 * wake up an idle worker as necessary if requested by
	 * @wakeup.
	 */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
		atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);

		if (wakeup) {
			if (atomic_dec_and_test(nr_running) &&
			    !list_empty(&gcwq->worklist))
				wake_up_worker(gcwq);
		} else
			atomic_dec(nr_running);
	}

	worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock)
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
	struct global_cwq *gcwq = worker->gcwq;
	unsigned int oflags = worker->flags;

	WARN_ON_ONCE(worker->task != current);

	worker->flags &= ~flags;

	/* if transitioning out of NOT_RUNNING, increment nr_running */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
}
/**
 * busy_worker_head - return the busy hash head for a work
 * @gcwq: gcwq of interest
 * @work: work to be hashed
 *
 * Return hash head of @gcwq for @work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to the hash head.
 */
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}
/**
 * __find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @bwh: hash head as returned by busy_worker_head()
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  @bwh should be
 * the hash head obtained by calling busy_worker_head() with the same
 * work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
						   struct hlist_head *bwh,
						   struct work_struct *work)
{
	struct worker *worker;
	struct hlist_node *tmp;

	hlist_for_each_entry(worker, tmp, bwh, hentry)
		if (worker->current_work == work)
			return worker;
	return NULL;
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  This function is
 * identical to __find_worker_executing_work() except that this
 * function calculates @bwh itself.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
						 struct work_struct *work)
{
	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
					    work);
}
/**
 * gcwq_determine_ins_pos - find insertion position
 * @gcwq: gcwq of interest
 * @cwq: cwq a work is being queued for
 *
 * A work for @cwq is about to be queued on @gcwq, determine insertion
 * position for the work.  If @cwq is for HIGHPRI wq, the work is
 * queued at the head of the queue but in FIFO order with respect to
 * other HIGHPRI works; otherwise, at the end of the queue.  This
 * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
 * there are HIGHPRI works pending.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to insertion position.
 */
static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
					       struct cpu_workqueue_struct *cwq)
{
	struct work_struct *twork;

	if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
		return &gcwq->worklist;

	list_for_each_entry(twork, &gcwq->worklist, entry) {
		struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);

		if (!(tcwq->wq->flags & WQ_HIGHPRI))
			break;
	}

	gcwq->flags |= GCWQ_HIGHPRI_PENDING;
	return &twork->entry;
}
/**
 * insert_work - insert a work into gcwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @cwq into @gcwq after @head.
 * @extra_flags is or'd to work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	struct global_cwq *gcwq = cwq->gcwq;

	/* we own @work, set data and link */
	set_work_cwq(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);

	/*
	 * Ensure either worker_sched_deactivated() sees the above
	 * list_add_tail() or we see zero nr_running to avoid workers
	 * lying around lazily while there are works to be processed.
	 */
	smp_mb();

	if (__need_more_worker(gcwq))
		wake_up_worker(gcwq);
}
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct list_head *worklist;
	unsigned int work_flags;
	unsigned long flags;

	debug_work_activate(work);

	if (WARN_ON_ONCE(wq->flags & WQ_DYING))
		return;

	/* determine gcwq to use */
	if (!(wq->flags & WQ_UNBOUND)) {
		struct global_cwq *last_gcwq;

		if (unlikely(cpu == WORK_CPU_UNBOUND))
			cpu = raw_smp_processor_id();

		/*
		 * It's multi cpu.  If @wq is non-reentrant and @work
		 * was previously on a different cpu, it might still
		 * be running there, in which case the work needs to
		 * be queued on that cpu to guarantee non-reentrance.
		 */
		gcwq = get_gcwq(cpu);
		if (wq->flags & WQ_NON_REENTRANT &&
		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
			struct worker *worker;

			spin_lock_irqsave(&last_gcwq->lock, flags);

			worker = find_worker_executing_work(last_gcwq, work);

			if (worker && worker->current_cwq->wq == wq)
				gcwq = last_gcwq;
			else {
				/* meh... not running there, queue here */
				spin_unlock_irqrestore(&last_gcwq->lock, flags);
				spin_lock_irqsave(&gcwq->lock, flags);
			}
		} else
			spin_lock_irqsave(&gcwq->lock, flags);
	} else {
		gcwq = get_gcwq(WORK_CPU_UNBOUND);
		spin_lock_irqsave(&gcwq->lock, flags);
	}

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);

	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;
	work_flags = work_color_to_flags(cwq->work_color);

	if (likely(cwq->nr_active < cwq->max_active)) {
		cwq->nr_active++;
		worklist = gcwq_determine_ins_pos(gcwq, cwq);
	} else {
		work_flags |= WORK_STRUCT_DELAYED;
		worklist = &cwq->delayed_works;
	}

	insert_work(cwq, work, worklist, work_flags);

	spin_unlock_irqrestore(&gcwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
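/*
 * Usage sketch (hypothetical caller): a zero return means the work
 * was already pending, so it will run once, not twice.
 *
 *	if (!queue_work(my_wq, &my_work))
 *		pr_debug("my_work was already pending\n");
 */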
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
		       struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			  struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		unsigned int lcpu;

		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/*
		 * This stores cwq for the moment, for the timer_fn.
		 * Note that the work's gcwq is preserved to allow
		 * reentrance detection for delayed works.
		 */
		if (!(wq->flags & WQ_UNBOUND)) {
			struct global_cwq *gcwq = get_work_gcwq(work);

			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
				lcpu = gcwq->cpu;
			else
				lcpu = raw_smp_processor_id();
		} else
			lcpu = WORK_CPU_UNBOUND;

		set_work_cwq(work, get_cwq(lcpu, wq), 0);

		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
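/*
 * Usage sketch (hypothetical): a self re-arming poll running roughly
 * once a second.
 *
 *	static void poll_fn(struct work_struct *work);
 *	static DECLARE_DELAYED_WORK(poll_work, poll_fn);
 *
 *	static void poll_fn(struct work_struct *work)
 *	{
 *		// ... sample hardware ...
 *		queue_delayed_work(system_wq, &poll_work, HZ);
 *	}
 */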
/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(worker->flags & WORKER_IDLE);
	BUG_ON(!list_empty(&worker->entry) &&
	       (worker->hentry.next || worker->hentry.pprev));

	/* can't use worker_set_flags(), also called from start_worker() */
	worker->flags |= WORKER_IDLE;
	gcwq->nr_idle++;
	worker->last_active = jiffies;

	/* idle_list is LIFO */
	list_add(&worker->entry, &gcwq->idle_list);

	if (likely(!(worker->flags & WORKER_ROGUE))) {
		if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
			mod_timer(&gcwq->idle_timer,
				  jiffies + IDLE_WORKER_TIMEOUT);
	} else
		wake_up_all(&gcwq->trustee_wait);

	/* sanity check nr_running */
	WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
		     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	worker_clr_flags(worker, WORKER_IDLE);
	gcwq->nr_idle--;
	list_del_init(&worker->entry);
}
/**
 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
 * @worker: self
 *
 * Works which are scheduled while the cpu is online must at least be
 * scheduled to a worker which is bound to the cpu so that if they are
 * flushed from cpu callbacks while cpu is going down, they are
 * guaranteed to execute on the cpu.
 *
 * This function is to be used by rogue workers and rescuers to bind
 * themselves to the target cpu and may race with cpu going down or
 * coming online.  kthread_bind() can't be used because it may put the
 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
 * verbatim as it's best effort and blocking and gcwq may be
 * [dis]associated in the meantime.
 *
 * This function tries set_cpus_allowed() and locks gcwq and verifies
 * the binding against GCWQ_DISASSOCIATED which is set during
 * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
 * idle state or fetches works without dropping lock, it can guarantee
 * the scheduling requirement described in the first paragraph.
 *
 * CONTEXT:
 * Might sleep.  Called without any lock but returns with gcwq->lock
 * held.
 *
 * RETURNS:
 * %true if the associated gcwq is online (@worker is successfully
 * bound), %false if offline.
 */
static bool worker_maybe_bind_and_lock(struct worker *worker)
__acquires(&gcwq->lock)
{
	struct global_cwq *gcwq = worker->gcwq;
	struct task_struct *task = worker->task;

	while (true) {
		/*
		 * The following call may fail, succeed or succeed
		 * without actually migrating the task to the cpu if
		 * it races with cpu hotunplug operation.  Verify
		 * against GCWQ_DISASSOCIATED.
		 */
		if (!(gcwq->flags & GCWQ_DISASSOCIATED))
			set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));

		spin_lock_irq(&gcwq->lock);
		if (gcwq->flags & GCWQ_DISASSOCIATED)
			return false;
		if (task_cpu(task) == gcwq->cpu &&
		    cpumask_equal(&current->cpus_allowed,
				  get_cpu_mask(gcwq->cpu)))
			return true;
		spin_unlock_irq(&gcwq->lock);

		/* CPU has come up in between, retry migration */
		cpu_relax();
	}
}
/*
 * Function for worker->rebind_work used to rebind rogue busy workers
 * to the associated cpu which is coming back online.  This is
 * scheduled by cpu up but can race with other cpu hotplug operations
 * and may be executed twice without intervening cpu down.
 */
static void worker_rebind_fn(struct work_struct *work)
{
	struct worker *worker = container_of(work, struct worker, rebind_work);
	struct global_cwq *gcwq = worker->gcwq;

	if (worker_maybe_bind_and_lock(worker))
		worker_clr_flags(worker, WORKER_REBIND);

	spin_unlock_irq(&gcwq->lock);
}
static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
		INIT_WORK(&worker->rebind_work, worker_rebind_fn);
		/* on creation a worker is in !idle && prep state */
		worker->flags = WORKER_PREP;
	}
	return worker;
}
/**
 * create_worker - create a new workqueue worker
 * @gcwq: gcwq the new worker will belong to
 * @bind: whether to set affinity to @cpu or not
 *
 * Create a new worker which is bound to @gcwq.  The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
{
	bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
	struct worker *worker = NULL;
	int id = -1;

	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&gcwq->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->gcwq = gcwq;
	worker->id = id;

	if (!on_unbound_cpu)
		worker->task = kthread_create(worker_thread, worker,
					      "kworker/%u:%d", gcwq->cpu, id);
	else
		worker->task = kthread_create(worker_thread, worker,
					      "kworker/u:%d", id);
	if (IS_ERR(worker->task))
		goto fail;

	/*
	 * A rogue worker will become a regular one if CPU comes
	 * online later on.  Make sure every worker has
	 * PF_THREAD_BOUND set.
	 */
	if (bind && !on_unbound_cpu)
		kthread_bind(worker->task, gcwq->cpu);
	else {
		worker->task->flags |= PF_THREAD_BOUND;
		if (on_unbound_cpu)
			worker->flags |= WORKER_UNBOUND;
	}

	return worker;
fail:
	if (id >= 0) {
		spin_lock_irq(&gcwq->lock);
		ida_remove(&gcwq->worker_ida, id);
		spin_unlock_irq(&gcwq->lock);
	}
	kfree(worker);
	return NULL;
}
/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Make the gcwq aware of @worker and start it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void start_worker(struct worker *worker)
{
	worker->flags |= WORKER_STARTED;
	worker->gcwq->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
}
/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker and adjust @gcwq stats accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void destroy_worker(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	if (worker->flags & WORKER_STARTED)
		gcwq->nr_workers--;
	if (worker->flags & WORKER_IDLE)
		gcwq->nr_idle--;

	list_del_init(&worker->entry);
	worker->flags |= WORKER_DIE;

	spin_unlock_irq(&gcwq->lock);

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock_irq(&gcwq->lock);
	ida_remove(&gcwq->worker_ida, id);
}
static void idle_worker_timeout(unsigned long __gcwq)
{
	struct global_cwq *gcwq = (void *)__gcwq;

	spin_lock_irq(&gcwq->lock);

	if (too_many_workers(gcwq)) {
		struct worker *worker;
		unsigned long expires;

		/* idle_list is kept in LIFO order, check the last one */
		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires))
			mod_timer(&gcwq->idle_timer, expires);
		else {
			/* it's been idle for too long, wake up manager */
			gcwq->flags |= GCWQ_MANAGE_WORKERS;
			wake_up_worker(gcwq);
		}
	}

	spin_unlock_irq(&gcwq->lock);
}
static bool send_mayday(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct workqueue_struct *wq = cwq->wq;
	unsigned int cpu;

	if (!(wq->flags & WQ_RESCUER))
		return false;

	/* mayday mayday mayday */
	cpu = cwq->gcwq->cpu;
	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
	if (cpu == WORK_CPU_UNBOUND)
		cpu = 0;
	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
		wake_up_process(wq->rescuer->task);
	return true;
}
static void gcwq_mayday_timeout(unsigned long __gcwq)
{
	struct global_cwq *gcwq = (void *)__gcwq;
	struct work_struct *work;

	spin_lock_irq(&gcwq->lock);

	if (need_to_create_worker(gcwq)) {
		/*
		 * We've been trying to create a new worker but
		 * haven't been successful.  We might be hitting an
		 * allocation deadlock.  Send distress signals to
		 * rescuers.
		 */
		list_for_each_entry(work, &gcwq->worklist, entry)
			send_mayday(work);
	}

	spin_unlock_irq(&gcwq->lock);

	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
/**
 * maybe_create_worker - create a new worker if necessary
 * @gcwq: gcwq to create a new worker for
 *
 * Create a new worker for @gcwq if necessary.  @gcwq is guaranteed to
 * have at least one idle worker on return from this function.  If
 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
 * sent to all rescuers with works scheduled on @gcwq to resolve
 * possible allocation deadlock.
 *
 * On return, need_to_create_worker() is guaranteed to be false and
 * may_start_working() true.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  Does GFP_KERNEL allocations.  Called only from
 * manager.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true
 * otherwise.
 */
static bool maybe_create_worker(struct global_cwq *gcwq)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	if (!need_to_create_worker(gcwq))
		return false;
restart:
	spin_unlock_irq(&gcwq->lock);

	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
	mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);

	while (true) {
		struct worker *worker;

		worker = create_worker(gcwq, true);
		if (worker) {
			del_timer_sync(&gcwq->mayday_timer);
			spin_lock_irq(&gcwq->lock);
			start_worker(worker);
			BUG_ON(need_to_create_worker(gcwq));
			return true;
		}

		if (!need_to_create_worker(gcwq))
			break;

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(CREATE_COOLDOWN);

		if (!need_to_create_worker(gcwq))
			break;
	}

	del_timer_sync(&gcwq->mayday_timer);
	spin_lock_irq(&gcwq->lock);
	if (need_to_create_worker(gcwq))
		goto restart;
	return true;
}
/**
 * maybe_destroy_worker - destroy workers which have been idle for a while
 * @gcwq: gcwq to destroy workers for
 *
 * Destroy @gcwq workers which have been idle for longer than
 * IDLE_WORKER_TIMEOUT.
 *
 * LOCKING:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  Called only from manager.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true
 * otherwise.
 */
static bool maybe_destroy_workers(struct global_cwq *gcwq)
{
	bool ret = false;

	while (too_many_workers(gcwq)) {
		struct worker *worker;
		unsigned long expires;

		worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
		expires = worker->last_active + IDLE_WORKER_TIMEOUT;

		if (time_before(jiffies, expires)) {
			mod_timer(&gcwq->idle_timer, expires);
			break;
		}

		destroy_worker(worker);
		ret = true;
	}

	return ret;
}
/**
 * manage_workers - manage worker pool
 * @worker: self
 *
 * Assume the manager role and manage gcwq worker pool @worker belongs
 * to.  At any given time, there can be only zero or one manager per
 * gcwq.  The exclusion is handled automatically by this function.
 *
 * The caller can safely start processing works on false return.  On
 * true return, it's guaranteed that need_to_create_worker() is false
 * and may_start_working() is true.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * false if no action was taken and gcwq->lock stayed locked, true if
 * some action was taken.
 */
static bool manage_workers(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	bool ret = false;

	if (gcwq->flags & GCWQ_MANAGING_WORKERS)
		return ret;

	gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
	gcwq->flags |= GCWQ_MANAGING_WORKERS;

	/*
	 * Destroy and then create so that may_start_working() is true
	 * on return.
	 */
	ret |= maybe_destroy_workers(gcwq);
	ret |= maybe_create_worker(gcwq);

	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;

	/*
	 * The trustee might be waiting to take over the manager
	 * position, tell it we're done.
	 */
	if (unlikely(gcwq->trustee))
		wake_up_all(&gcwq->trustee_wait);

	return ret;
}
/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}
static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);
	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);

	move_linked_works(work, pos, NULL);
	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
	cwq->nr_active++;
}
/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 * @delayed: for a delayed work
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
				 bool delayed)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;

	if (!delayed) {
		cwq->nr_active--;
		if (!list_empty(&cwq->delayed_works)) {
			/* one down, submit a delayed one */
			if (cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);
		}
	}

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}
/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct global_cwq *gcwq = cwq->gcwq;
	struct hlist_head *bwh = busy_worker_head(gcwq, work);
	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
	work_func_t f = work->func;
	int work_color;
	struct worker *collision;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/*
	 * A single work shouldn't be executed concurrently by
	 * multiple workers on a single cpu.  Check whether anyone is
	 * already processing the work.  If so, defer the work to the
	 * currently executing one.
	 */
	collision = __find_worker_executing_work(gcwq, bwh, work);
	if (unlikely(collision)) {
		move_linked_works(work, &collision->scheduled, NULL);
		return;
	}

	/* claim and process */
	debug_work_deactivate(work);
	hlist_add_head(&worker->hentry, bwh);
	worker->current_work = work;
	worker->current_cwq = cwq;
	work_color = get_work_color(work);

	/* record the current cpu number in the work data and dequeue */
	set_work_cpu(work, gcwq->cpu);
	list_del_init(&work->entry);

	/*
	 * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
	 * wake up another worker; otherwise, clear HIGHPRI_PENDING.
	 */
	if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
		struct work_struct *nwork = list_first_entry(&gcwq->worklist,
						struct work_struct, entry);

		if (!list_empty(&gcwq->worklist) &&
		    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
			wake_up_worker(gcwq);
		else
			gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
	}

	/*
	 * CPU intensive works don't participate in concurrency
	 * management.  They're the scheduler's responsibility.
	 */
	if (unlikely(cpu_intensive))
		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);

	spin_unlock_irq(&gcwq->lock);

	work_clear_pending(work);
	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	trace_workqueue_execute_start(work);

#if defined(BUZZZ_KEVT_LVL) && (BUZZZ_KEVT_LVL >= 1)
	buzzz_kevt_log1(BUZZZ_KEVT_ID_WORKQ_ENTRY, (int)f);
#endif /* BUZZZ_KEVT_LVL */

	f(work);

#if defined(BUZZZ_KEVT_LVL) && (BUZZZ_KEVT_LVL >= 1)
	buzzz_kevt_log1(BUZZZ_KEVT_ID_WORKQ_EXIT, (int)f);
#endif /* BUZZZ_KEVT_LVL */

	/*
	 * While we must be careful to not use "work" after this, the trace
	 * point will only record its address.
	 */
	trace_workqueue_execute_end(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&gcwq->lock);

	/* clear cpu intensive status */
	if (unlikely(cpu_intensive))
		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);

	/* we're done with it, release */
	hlist_del_init(&worker->hentry);
	worker->current_work = NULL;
	worker->current_cwq = NULL;
	cwq_dec_nr_in_flight(cwq, work_color, false);
}
/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Process all scheduled works.  Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}
/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The gcwq worker thread function.  There's a single dynamic pool of
 * these per each cpu.  These workers process all works regardless of
 * their specific target workqueue.  The only exception is works which
 * belong to workqueues with a rescuer which will be explained in
 * rescuer_thread().
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct global_cwq *gcwq = worker->gcwq;

	/* tell the scheduler that this is a workqueue worker */
	worker->task->flags |= PF_WQ_WORKER;
woke_up:
	spin_lock_irq(&gcwq->lock);

	/* DIE can be set only while we're idle, checking here is enough */
	if (worker->flags & WORKER_DIE) {
		spin_unlock_irq(&gcwq->lock);
		worker->task->flags &= ~PF_WQ_WORKER;
		return 0;
	}

	worker_leave_idle(worker);
recheck:
	/* no more worker necessary? */
	if (!need_more_worker(gcwq))
		goto sleep;

	/* do we need to manage? */
	if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
		goto recheck;

	/*
	 * ->scheduled list can only be filled while a worker is
	 * preparing to process a work or actually processing it.
	 * Make sure nobody diddled with it while I was sleeping.
	 */
	BUG_ON(!list_empty(&worker->scheduled));

	/*
	 * When control reaches this point, we're guaranteed to have
	 * at least one idle worker or that someone else has already
	 * assumed the manager role.
	 */
	worker_clr_flags(worker, WORKER_PREP);

	do {
		struct work_struct *work =
			list_first_entry(&gcwq->worklist,
					 struct work_struct, entry);

		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
			/* optimization path, not strictly necessary */
			process_one_work(worker, work);
			if (unlikely(!list_empty(&worker->scheduled)))
				process_scheduled_works(worker);
		} else {
			move_linked_works(work, &worker->scheduled, NULL);
			process_scheduled_works(worker);
		}
	} while (keep_working(gcwq));

	worker_set_flags(worker, WORKER_PREP, false);
sleep:
	if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
		goto recheck;

	/*
	 * gcwq->lock is held and there's no work to process and no
	 * need to manage, sleep.  Workers are woken up only while
	 * holding gcwq->lock or from local cpu, so setting the
	 * current state before releasing gcwq->lock is enough to
	 * prevent losing any event.
	 */
	worker_enter_idle(worker);
	__set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irq(&gcwq->lock);
	schedule();
	goto woke_up;
}
/**
 * rescuer_thread - the rescuer thread function
 * @__wq: the associated workqueue
 *
 * Workqueue rescuer thread function.  There's one rescuer for each
 * workqueue which has WQ_RESCUER set.
 *
 * Regular work processing on a gcwq may block trying to create a new
 * worker which uses GFP_KERNEL allocation which has slight chance of
 * developing into deadlock if some works currently on the same queue
 * need to be processed to satisfy the GFP_KERNEL allocation.  This is
 * the problem rescuer solves.
 *
 * When such condition is possible, the gcwq summons rescuers of all
 * workqueues which have works queued on the gcwq and let them process
 * those works so that forward progress can be guaranteed.
 *
 * This should happen rarely.
 */
static int rescuer_thread(void *__wq)
{
	struct workqueue_struct *wq = __wq;
	struct worker *rescuer = wq->rescuer;
	struct list_head *scheduled = &rescuer->scheduled;
	bool is_unbound = wq->flags & WQ_UNBOUND;
	unsigned int cpu;

	set_user_nice(current, RESCUER_NICE_LEVEL);
repeat:
	set_current_state(TASK_INTERRUPTIBLE);

	if (kthread_should_stop())
		return 0;

	/*
	 * See whether any cpu is asking for help.  Unbounded
	 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
	 */
	for_each_mayday_cpu(cpu, wq->mayday_mask) {
		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
		struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
		struct global_cwq *gcwq = cwq->gcwq;
		struct work_struct *work, *n;

		__set_current_state(TASK_RUNNING);
		mayday_clear_cpu(cpu, wq->mayday_mask);

		/* migrate to the target cpu if possible */
		rescuer->gcwq = gcwq;
		worker_maybe_bind_and_lock(rescuer);

		/*
		 * Slurp in all works issued via this workqueue and
		 * process'em.
		 */
		BUG_ON(!list_empty(&rescuer->scheduled));
		list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
			if (get_work_cwq(work) == cwq)
				move_linked_works(work, scheduled, &n);

		process_scheduled_works(rescuer);
		spin_unlock_irq(&gcwq->lock);
	}

	schedule();
	goto repeat;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}
/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution.  Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled.  This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine cwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with gcwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}
/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1.  If no cwq has in-flight commands at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
 * has in flight commands, its cwq->flush_color is set to
 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	unsigned int cpu;

	if (flush_color >= 0) {
		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
		atomic_set(&wq->nr_cwqs_to_flush, 1);
	}

	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = cwq->gcwq;

		spin_lock_irq(&gcwq->lock);

		if (flush_color >= 0) {
			BUG_ON(cwq->flush_color != -1);

			if (cwq->nr_in_flight[flush_color]) {
				cwq->flush_color = flush_color;
				atomic_inc(&wq->nr_cwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			BUG_ON(work_color != work_next_color(cwq->work_color));
			cwq->work_color = work_color;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->flush_mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		BUG_ON(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			BUG_ON(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			BUG_ON(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->flush_mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->flush_mutex);

	/* we might have raced, check again with mutex held */
	if (wq->first_flusher != &this_flusher)
		goto out_unlock;

	wq->first_flusher = NULL;

	BUG_ON(!list_empty(&this_flusher.list));
	BUG_ON(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		BUG_ON(!list_empty(&wq->flusher_overflow) &&
		       wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			BUG_ON(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm cwqs.
		 */
		BUG_ON(wq->flush_color == wq->work_color);
		BUG_ON(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->flush_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
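
/*
 * Example (illustrative sketch; the mydrv names are hypothetical): a
 * typical driver shutdown path stops requeueing, flushes whatever is
 * still in flight and only then frees the workqueue.
 *
 *	static void mydrv_shutdown(struct mydrv *drv)
 *	{
 *		drv->stopping = true;
 *		flush_workqueue(drv->wq);
 *		destroy_workqueue(drv->wq);
 *	}
 */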
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct worker *worker = NULL;
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct wq_barrier barr;

	might_sleep();
	gcwq = get_work_gcwq(work);
	if (!gcwq)
		return 0;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued to a different gcwq under us, we
		 * are not going to wait.
		 */
		smp_rmb();
		cwq = get_work_cwq(work);
		if (unlikely(!cwq || gcwq != cwq->gcwq))
			goto already_gone;
	} else {
		worker = find_worker_executing_work(gcwq, work);
		if (!worker)
			goto already_gone;
		cwq = worker->current_cwq;
	}

	insert_wq_barrier(cwq, &barr, work, worker);
	spin_unlock_irq(&gcwq->lock);

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
already_gone:
	spin_unlock_irq(&gcwq->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(flush_work);
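
/*
 * Example (illustrative sketch; the mydrv names are hypothetical):
 * wait for one specific work item instead of the whole workqueue.  As
 * the comment above notes, the caller stops requeueing first.
 *
 *	drv->stopping = true;
 *	flush_work(&drv->rx_work);
 */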
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct global_cwq *gcwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	gcwq = get_work_gcwq(work);
	if (!gcwq)
		return ret;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong gcwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (gcwq == get_work_gcwq(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			cwq_dec_nr_in_flight(get_work_cwq(work),
				get_work_color(work),
				*work_data_bits(work) & WORK_STRUCT_DELAYED);
			ret = 1;
		}
	}
	spin_unlock_irq(&gcwq->lock);

	return ret;
}
static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
{
	struct wq_barrier barr;
	struct worker *worker;

	spin_lock_irq(&gcwq->lock);

	worker = find_worker_executing_work(gcwq, work);
	if (unlikely(worker))
		insert_wq_barrier(worker->current_cwq, &barr, work, worker);

	spin_unlock_irq(&gcwq->lock);

	if (unlikely(worker)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}
static void wait_on_work(struct work_struct *work)
{
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	for_each_gcwq_cpu(cpu)
		wait_on_cpu_work(get_gcwq(cpu), work);
}
static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	clear_work_data(work);
	return ret;
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued.  If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself.  It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
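
/*
 * Example (illustrative sketch; the mydrv names are hypothetical):
 * tear down an object whose work item may be queued or running.  Once
 * the call returns the handler is neither queued nor executing, so
 * the object can be freed.
 *
 *	cancel_work_sync(&drv->reset_work);
 *	kfree(drv);
 */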
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work().  See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(system_wq, work);
}
EXPORT_SYMBOL(schedule_work);
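
/*
 * Example (illustrative sketch; the mydrv names are hypothetical):
 * the minimal way to defer a function to process context via the
 * kernel-global workqueue.
 *
 *	static void mydrv_event_fn(struct work_struct *work)
 *	{
 *		pr_debug("handling event in process context\n");
 *	}
 *	static DECLARE_WORK(mydrv_event_work, mydrv_event_fn);
 *
 *	schedule_work(&mydrv_event_work);
 */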
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, system_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
			  unsigned long delay)
{
	return queue_delayed_work(system_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
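
/*
 * Example (illustrative sketch; the mydrv names are hypothetical): a
 * poll handler that reschedules itself roughly once a second.
 *
 *	static void mydrv_poll_fn(struct work_struct *work)
 *	{
 *		mydrv_poll_hw();
 *		schedule_delayed_work(&mydrv_poll_work,
 *				      msecs_to_jiffies(1000));
 *	}
 *	static DECLARE_DELAYED_WORK(mydrv_poll_work, mydrv_poll_fn);
 */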
/**
 * flush_delayed_work - block until a dwork_struct's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		__queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
			     &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			     struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	struct work_struct __percpu *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		schedule_work_on(cpu, work);
	}

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}
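
/*
 * Example (illustrative sketch; the mydrv names are hypothetical):
 * run a drain function on every online CPU and wait for all of them.
 *
 *	static void mydrv_drain_fn(struct work_struct *work)
 *	{
 *		mydrv_drain_this_cpu();
 *	}
 *
 *	int err = schedule_on_each_cpu(mydrv_drain_fn);
 */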
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(system_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn: the function to execute
 * @ew: guaranteed storage for the execute work structure (must
 *	be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
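
/*
 * Example (illustrative sketch; the mydrv names are hypothetical):
 * release paths which may be entered from interrupt context can use
 * this to run the real cleanup in process context only when required.
 * @ew is embedded in the object so it stays valid until the callback
 * runs.
 *
 *	if (execute_in_process_context(mydrv_release_fn, &drv->ew))
 *		pr_debug("release deferred to the global workqueue\n");
 */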
int keventd_up(void)
{
	return system_wq != NULL;
}
static int alloc_cwqs(struct workqueue_struct *wq)
{
	/*
	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
	 * Make sure that the alignment isn't lower than that of
	 * unsigned long long.
	 */
	const size_t size = sizeof(struct cpu_workqueue_struct);
	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
				   __alignof__(unsigned long long));
#ifdef CONFIG_SMP
	bool percpu = !(wq->flags & WQ_UNBOUND);
#else
	bool percpu = false;
#endif

	if (percpu)
		wq->cpu_wq.pcpu = __alloc_percpu(size, align);
	else {
		void *ptr;

		/*
		 * Allocate enough room to align cwq and put an extra
		 * pointer at the end pointing back to the originally
		 * allocated pointer which will be used for free.
		 */
		ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
		if (ptr) {
			wq->cpu_wq.single = PTR_ALIGN(ptr, align);
			*(void **)(wq->cpu_wq.single + 1) = ptr;
		}
	}

	/* just in case, make sure it's actually aligned */
	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
	return wq->cpu_wq.v ? 0 : -ENOMEM;
}
static void free_cwqs(struct workqueue_struct *wq)
{
#ifdef CONFIG_SMP
	bool percpu = !(wq->flags & WQ_UNBOUND);
#else
	bool percpu = false;
#endif

	if (percpu)
		free_percpu(wq->cpu_wq.pcpu);
	else if (wq->cpu_wq.single) {
		/* the pointer to free is stored right after the cwq */
		kfree(*(void **)(wq->cpu_wq.single + 1));
	}
}
static int wq_clamp_max_active(int max_active, unsigned int flags,
			       const char *name)
{
	int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;

	if (max_active < 1 || max_active > lim)
		printk(KERN_WARNING "workqueue: max_active %d requested for %s "
		       "is out of range, clamping between %d and %d\n",
		       max_active, name, 1, lim);

	return clamp_val(max_active, 1, lim);
}
struct workqueue_struct *__alloc_workqueue_key(const char *name,
					       unsigned int flags,
					       int max_active,
					       struct lock_class_key *key,
					       const char *lock_name)
{
	struct workqueue_struct *wq;
	unsigned int cpu;

	/*
	 * Unbound workqueues aren't concurrency managed and should be
	 * dispatched to workers immediately.
	 */
	if (flags & WQ_UNBOUND)
		flags |= WQ_HIGHPRI;

	max_active = max_active ?: WQ_DFL_ACTIVE;
	max_active = wq_clamp_max_active(max_active, flags, name);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		goto err;

	wq->flags = flags;
	wq->saved_max_active = max_active;
	mutex_init(&wq->flush_mutex);
	atomic_set(&wq->nr_cwqs_to_flush, 0);
	INIT_LIST_HEAD(&wq->flusher_queue);
	INIT_LIST_HEAD(&wq->flusher_overflow);

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	INIT_LIST_HEAD(&wq->list);

	if (alloc_cwqs(wq) < 0)
		goto err;

	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = get_gcwq(cpu);

		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
		cwq->gcwq = gcwq;
		cwq->wq = wq;
		cwq->flush_color = -1;
		cwq->max_active = max_active;
		INIT_LIST_HEAD(&cwq->delayed_works);
	}

	if (flags & WQ_RESCUER) {
		struct worker *rescuer;

		if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
			goto err;

		wq->rescuer = rescuer = alloc_worker();
		if (!rescuer)
			goto err;

		rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
		if (IS_ERR(rescuer->task))
			goto err;

		rescuer->task->flags |= PF_THREAD_BOUND;
		wake_up_process(rescuer->task);
	}

	/*
	 * workqueue_lock protects global freeze state and workqueues
	 * list.  Grab it, set max_active accordingly and add the new
	 * workqueue to workqueues list.
	 */
	spin_lock(&workqueue_lock);

	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
		for_each_cwq_cpu(cpu, wq)
			get_cwq(cpu, wq)->max_active = 0;

	list_add(&wq->list, &workqueues);

	spin_unlock(&workqueue_lock);

	return wq;
err:
	if (wq) {
		free_cwqs(wq);
		free_mayday_mask(wq->mayday_mask);
		kfree(wq->rescuer);
		kfree(wq);
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
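
/*
 * Callers normally reach this through the alloc_workqueue() wrapper.
 * Example (illustrative sketch; "mydrv" is a hypothetical name): a
 * freezeable workqueue with a rescuer and at most 16 in-flight works
 * per cpu, destroyed again with destroy_workqueue() below.
 *
 *	wq = alloc_workqueue("mydrv", WQ_FREEZEABLE | WQ_RESCUER, 16);
 *	if (!wq)
 *		return -ENOMEM;
 */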
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue.  All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	unsigned int cpu;

	wq->flags |= WQ_DYING;
	flush_workqueue(wq);

	/*
	 * wq list is used to freeze wq, remove from list after
	 * flushing is complete in case freeze races us.
	 */
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	/* sanity checks */
	for_each_cwq_cpu(cpu, wq) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		int i;

		for (i = 0; i < WORK_NR_COLORS; i++)
			BUG_ON(cwq->nr_in_flight[i]);
		BUG_ON(cwq->nr_active);
		BUG_ON(!list_empty(&cwq->delayed_works));
	}

	if (wq->flags & WQ_RESCUER) {
		kthread_stop(wq->rescuer->task);
		free_mayday_mask(wq->mayday_mask);
		kfree(wq->rescuer);
	}

	free_cwqs(wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
/**
 * workqueue_set_max_active - adjust max_active of a workqueue
 * @wq: target workqueue
 * @max_active: new max_active value.
 *
 * Set max_active of @wq to @max_active.
 *
 * CONTEXT:
 * Don't call from IRQ context.
 */
void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
{
	unsigned int cpu;

	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);

	spin_lock(&workqueue_lock);

	wq->saved_max_active = max_active;

	for_each_cwq_cpu(cpu, wq) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		if (!(wq->flags & WQ_FREEZEABLE) ||
		    !(gcwq->flags & GCWQ_FREEZING))
			get_cwq(gcwq->cpu, wq)->max_active = max_active;

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
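
/*
 * Example (illustrative sketch; mydrv_recover() is hypothetical):
 * throttle a workqueue to one in-flight work per cpu while a device
 * is degraded, then restore the original setting.
 *
 *	workqueue_set_max_active(wq, 1);
 *	mydrv_recover();
 *	workqueue_set_max_active(wq, 16);
 */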
/**
 * workqueue_congested - test whether a workqueue is congested
 * @cpu: CPU in question
 * @wq: target workqueue
 *
 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
 * no synchronization around this function and the test result is
 * unreliable and only useful as advisory hints or for debugging.
 *
 * RETURNS:
 * %true if congested, %false otherwise.
 */
bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
{
	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

	return !list_empty(&cwq->delayed_works);
}
EXPORT_SYMBOL_GPL(workqueue_congested);
/**
 * work_cpu - return the last known associated cpu for @work
 * @work: the work of interest
 *
 * RETURNS:
 * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
 */
unsigned int work_cpu(struct work_struct *work)
{
	struct global_cwq *gcwq = get_work_gcwq(work);

	return gcwq ? gcwq->cpu : WORK_CPU_NONE;
}
EXPORT_SYMBOL_GPL(work_cpu);
/**
 * work_busy - test whether a work is currently pending or running
 * @work: the work to be tested
 *
 * Test whether @work is currently pending or running.  There is no
 * synchronization around this function and the test result is
 * unreliable and only useful as advisory hints or for debugging.
 * Especially for reentrant wqs, the pending state might hide the
 * running state.
 *
 * RETURNS:
 * OR'd bitmask of WORK_BUSY_* bits.
 */
unsigned int work_busy(struct work_struct *work)
{
	struct global_cwq *gcwq = get_work_gcwq(work);
	unsigned long flags;
	unsigned int ret = 0;

	if (!gcwq)
		return false;

	spin_lock_irqsave(&gcwq->lock, flags);

	if (work_pending(work))
		ret |= WORK_BUSY_PENDING;
	if (find_worker_executing_work(gcwq, work))
		ret |= WORK_BUSY_RUNNING;

	spin_unlock_irqrestore(&gcwq->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(work_busy);
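
/*
 * Example (illustrative sketch; drv->reset_work is hypothetical):
 * advisory debugging aid only; the state may change as soon as the
 * lock above is dropped.
 *
 *	if (work_busy(&drv->reset_work) & WORK_BUSY_RUNNING)
 *		pr_debug("reset work still running\n");
 */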
/*
 * CPU hotplug.
 *
 * There are two challenges in supporting CPU hotplug.  Firstly, there
 * are a lot of assumptions on strong associations among work, cwq and
 * gcwq which make migrating pending and scheduled works very
 * difficult to implement without impacting hot paths.  Secondly,
 * gcwqs serve mix of short, long and very long running works making
 * blocked draining impractical.
 *
 * This is solved by allowing a gcwq to be detached from CPU, running
 * it with unbound (rogue) workers and allowing it to be reattached
 * later if the cpu comes back online.  A separate thread is created
 * to govern a gcwq in such state and is called the trustee of the
 * gcwq.
 *
 * Trustee states and their descriptions.
 *
 * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
 *		new trustee is started with this state.
 *
 * IN_CHARGE	Once started, trustee will enter this state after
 *		assuming the manager role and making all existing
 *		workers rogue.  DOWN_PREPARE waits for trustee to
 *		enter this state.  After reaching IN_CHARGE, trustee
 *		tries to execute the pending worklist until it's empty
 *		and the state is set to BUTCHER, or the state is set
 *		to RELEASE.
 *
 * BUTCHER	Command state which is set by the cpu callback after
 *		the cpu has went down.  Once this state is set trustee
 *		knows that there will be no new works on the worklist
 *		and once the worklist is empty it can proceed to
 *		killing idle workers.
 *
 * RELEASE	Command state which is set by the cpu callback if the
 *		cpu down has been canceled or it has come online
 *		again.  After recognizing this state, trustee stops
 *		trying to drain or butcher and clears ROGUE, rebinds
 *		all remaining workers back to the cpu and releases
 *		manager role.
 *
 * DONE		Trustee will enter this state after BUTCHER or RELEASE
 *		is complete.
 *
 *          trustee                 CPU                draining
 *          took over               down               complete
 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
 *                        |                     |                  ^
 *                        | CPU is back online  v   return workers |
 *                         ----------------> RELEASE --------------
 */

/**
 * trustee_wait_event_timeout - timed event wait for trustee
 * @cond: condition to wait for
 * @timeout: timeout in jiffies
 *
 * wait_event_timeout() for trustee to use.  Handles locking and
 * checks for RELEASE request.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * Positive indicating left time if @cond is satisfied, 0 if timed
 * out, -1 if canceled.
 */
#define trustee_wait_event_timeout(cond, timeout) ({			\
	long __ret = (timeout);						\
	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
	       __ret) {							\
		spin_unlock_irq(&gcwq->lock);				\
		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
			__ret);						\
		spin_lock_irq(&gcwq->lock);				\
	}								\
	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
})

/**
 * trustee_wait_event - event wait for trustee
 * @cond: condition to wait for
 *
 * wait_event() for trustee to use.  Automatically handles locking and
 * checks for CANCEL request.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * 0 if @cond is satisfied, -1 if canceled.
 */
#define trustee_wait_event(cond) ({					\
	long __ret1;							\
	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
	__ret1 < 0 ? -1 : 0;						\
})
static int __cpuinit trustee_thread(void *__gcwq)
{
	struct global_cwq *gcwq = __gcwq;
	struct worker *worker;
	struct work_struct *work;
	struct hlist_node *pos;
	long rc;
	int i;

	BUG_ON(gcwq->cpu != smp_processor_id());

	spin_lock_irq(&gcwq->lock);
	/*
	 * Claim the manager position and make all workers rogue.
	 * Trustee must be bound to the target cpu and can't be
	 * cancelled.
	 */
	BUG_ON(gcwq->cpu != smp_processor_id());
	rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
	BUG_ON(rc < 0);

	gcwq->flags |= GCWQ_MANAGING_WORKERS;

	list_for_each_entry(worker, &gcwq->idle_list, entry)
		worker->flags |= WORKER_ROGUE;

	for_each_busy_worker(worker, i, pos, gcwq)
		worker->flags |= WORKER_ROGUE;

	/*
	 * Call schedule() so that we cross rq->lock and thus can
	 * guarantee sched callbacks see the rogue flag.  This is
	 * necessary as scheduler callbacks may be invoked from other
	 * cpus.
	 */
	spin_unlock_irq(&gcwq->lock);
	schedule();
	spin_lock_irq(&gcwq->lock);

	/*
	 * Sched callbacks are disabled now.  Zap nr_running.  After
	 * this, nr_running stays zero and need_more_worker() and
	 * keep_working() are always true as long as the worklist is
	 * not empty.
	 */
	atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);

	spin_unlock_irq(&gcwq->lock);
	del_timer_sync(&gcwq->idle_timer);
	spin_lock_irq(&gcwq->lock);

	/*
	 * We're now in charge.  Notify and proceed to drain.  We need
	 * to keep the gcwq running during the whole CPU down
	 * procedure as other cpu hotunplug callbacks may need to
	 * flush currently running tasks.
	 */
	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
	wake_up_all(&gcwq->trustee_wait);

	/*
	 * The original cpu is in the process of dying and may go away
	 * anytime now.  When that happens, we and all workers would
	 * be migrated to other cpus.  Try draining any left work.  We
	 * want to get it over with ASAP - spam rescuers, wake up as
	 * many idlers as necessary and create new ones till the
	 * worklist is empty.  Note that if the gcwq is frozen, there
	 * may be frozen works in freezeable cwqs.  Don't declare
	 * completion while frozen.
	 */
	while (gcwq->nr_workers != gcwq->nr_idle ||
	       gcwq->flags & GCWQ_FREEZING ||
	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
		int nr_works = 0;

		list_for_each_entry(work, &gcwq->worklist, entry) {
			send_mayday(work);
			nr_works++;
		}

		list_for_each_entry(worker, &gcwq->idle_list, entry) {
			if (!nr_works--)
				break;
			wake_up_process(worker->task);
		}

		if (need_to_create_worker(gcwq)) {
			spin_unlock_irq(&gcwq->lock);
			worker = create_worker(gcwq, false);
			spin_lock_irq(&gcwq->lock);
			if (worker) {
				worker->flags |= WORKER_ROGUE;
				start_worker(worker);
			}
		}

		/* give a breather */
		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
			break;
	}

	/*
	 * Either all works have been scheduled and cpu is down, or
	 * cpu down has already been canceled.  Wait for and butcher
	 * all workers till we're canceled.
	 */
	do {
		rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
		while (!list_empty(&gcwq->idle_list))
			destroy_worker(list_first_entry(&gcwq->idle_list,
							struct worker, entry));
	} while (gcwq->nr_workers && rc >= 0);

	/*
	 * At this point, either draining has completed and no worker
	 * is left, or cpu down has been canceled or the cpu is being
	 * brought back up.  There shouldn't be any idle one left.
	 * Tell the remaining busy ones to rebind once it finishes the
	 * currently scheduled works by scheduling the rebind_work.
	 */
	WARN_ON(!list_empty(&gcwq->idle_list));

	for_each_busy_worker(worker, i, pos, gcwq) {
		struct work_struct *rebind_work = &worker->rebind_work;

		/*
		 * Rebind_work may race with future cpu hotplug
		 * operations.  Use a separate flag to mark that
		 * rebinding is scheduled.
		 */
		worker->flags |= WORKER_REBIND;
		worker->flags &= ~WORKER_ROGUE;

		/* queue rebind_work, wq doesn't matter, use the default one */
		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
				     work_data_bits(rebind_work)))
			continue;

		debug_work_activate(rebind_work);
		insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
			    worker->scheduled.next,
			    work_color_to_flags(WORK_NO_COLOR));
	}

	/* relinquish manager role */
	gcwq->flags &= ~GCWQ_MANAGING_WORKERS;

	/* notify completion */
	gcwq->trustee = NULL;
	gcwq->trustee_state = TRUSTEE_DONE;
	wake_up_all(&gcwq->trustee_wait);
	spin_unlock_irq(&gcwq->lock);
	return 0;
}
/**
 * wait_trustee_state - wait for trustee to enter the specified state
 * @gcwq: gcwq the trustee of interest belongs to
 * @state: target state to wait for
 *
 * Wait for the trustee to reach @state.  DONE is already matched.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by cpu_callback.
 */
static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
	if (!(gcwq->trustee_state == state ||
	      gcwq->trustee_state == TRUSTEE_DONE)) {
		spin_unlock_irq(&gcwq->lock);
		__wait_event(gcwq->trustee_wait,
			     gcwq->trustee_state == state ||
			     gcwq->trustee_state == TRUSTEE_DONE);
		spin_lock_irq(&gcwq->lock);
	}
}
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action,
					    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct global_cwq *gcwq = get_gcwq(cpu);
	struct task_struct *new_trustee = NULL;
	struct worker *uninitialized_var(new_worker);
	unsigned long flags;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_DOWN_PREPARE:
		new_trustee = kthread_create(trustee_thread, gcwq,
					     "workqueue_trustee/%d\n", cpu);
		if (IS_ERR(new_trustee))
			return notifier_from_errno(PTR_ERR(new_trustee));
		kthread_bind(new_trustee, cpu);
		/* fall through */
	case CPU_UP_PREPARE:
		BUG_ON(gcwq->first_idle);
		new_worker = create_worker(gcwq, false);
		if (!new_worker) {
			if (new_trustee)
				kthread_stop(new_trustee);
			return NOTIFY_BAD;
		}
	}

	/* some are called w/ irq disabled, don't disturb irq status */
	spin_lock_irqsave(&gcwq->lock, flags);

	switch (action) {
	case CPU_DOWN_PREPARE:
		/* initialize trustee and tell it to acquire the gcwq */
		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
		gcwq->trustee = new_trustee;
		gcwq->trustee_state = TRUSTEE_START;
		wake_up_process(gcwq->trustee);
		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
		/* fall through */
	case CPU_UP_PREPARE:
		BUG_ON(gcwq->first_idle);
		gcwq->first_idle = new_worker;
		break;

	case CPU_DYING:
		/*
		 * Before this, the trustee and all workers except for
		 * the ones which are still executing works from
		 * before the last CPU down must be on the cpu.  After
		 * this, they'll all be diasporas.
		 */
		gcwq->flags |= GCWQ_DISASSOCIATED;
		break;

	case CPU_POST_DEAD:
		gcwq->trustee_state = TRUSTEE_BUTCHER;
		/* fall through */
	case CPU_UP_CANCELED:
		destroy_worker(gcwq->first_idle);
		gcwq->first_idle = NULL;
		break;

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		gcwq->flags &= ~GCWQ_DISASSOCIATED;
		if (gcwq->trustee_state != TRUSTEE_DONE) {
			gcwq->trustee_state = TRUSTEE_RELEASE;
			wake_up_process(gcwq->trustee);
			wait_trustee_state(gcwq, TRUSTEE_DONE);
		}

		/*
		 * Trustee is done and there might be no worker left.
		 * Put the first_idle in and request a real manager to
		 * take over as the first worker is starting.
		 */
		spin_unlock_irq(&gcwq->lock);
		kthread_bind(gcwq->first_idle->task, cpu);
		spin_lock_irq(&gcwq->lock);
		gcwq->flags |= GCWQ_MANAGE_WORKERS;
		start_worker(gcwq->first_idle);
		gcwq->first_idle = NULL;
		break;
	}

	spin_unlock_irqrestore(&gcwq->lock, flags);

	return notifier_from_errno(0);
}
#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}
/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
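
/*
 * Example (illustrative sketch; the mydrv names are hypothetical):
 * read a cpu-local resource from the context of a specific cpu
 * without migrating the caller.
 *
 *	static long mydrv_read_reg(void *arg)
 *	{
 *		return mydrv_read_this_cpu_reg();
 *	}
 *
 *	long val = work_on_cpu(2, mydrv_read_reg, NULL);
 */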
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER

/**
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues.  After this function returns, all
 * freezeable workqueues will queue new works to their frozen_works
 * list instead of gcwq->worklist.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void freeze_workqueues_begin(void)
{
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	BUG_ON(workqueue_freezing);
	workqueue_freezing = true;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct workqueue_struct *wq;

		spin_lock_irq(&gcwq->lock);

		BUG_ON(gcwq->flags & GCWQ_FREEZING);
		gcwq->flags |= GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (cwq && wq->flags & WQ_FREEZEABLE)
				cwq->max_active = 0;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}
/**
 * freeze_workqueues_busy - are freezeable workqueues still busy?
 *
 * Check whether freezing is complete.  This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock.
 *
 * RETURNS:
 * %true if some freezeable workqueues are still busy.  %false if
 * freezing is complete.
 */
bool freeze_workqueues_busy(void)
{
	unsigned int cpu;
	bool busy = false;

	spin_lock(&workqueue_lock);

	BUG_ON(!workqueue_freezing);

	for_each_gcwq_cpu(cpu) {
		struct workqueue_struct *wq;
		/*
		 * nr_active is monotonically decreasing.  It's safe
		 * to peek without lock.
		 */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
				continue;

			BUG_ON(cwq->nr_active < 0);
			if (cwq->nr_active) {
				busy = true;
				goto out_unlock;
			}
		}
	}
out_unlock:
	spin_unlock(&workqueue_lock);
	return busy;
}
/**
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues.  Normal queueing is restored and all collected
 * frozen works are transferred to their respective gcwq worklists.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void thaw_workqueues(void)
{
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	if (!workqueue_freezing)
		goto out_unlock;

	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct workqueue_struct *wq;

		spin_lock_irq(&gcwq->lock);

		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
		gcwq->flags &= ~GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
				continue;

			/* restore max_active and repopulate worklist */
			cwq->max_active = wq->saved_max_active;

			while (!list_empty(&cwq->delayed_works) &&
			       cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);
		}

		wake_up_worker(gcwq);

		spin_unlock_irq(&gcwq->lock);
	}

	workqueue_freezing = false;
out_unlock:
	spin_unlock(&workqueue_lock);
}
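
/*
 * The three functions above are called by the freezer core.  Rough
 * calling convention (illustrative sketch, not a quote of the actual
 * freezer code):
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *	...suspend...
 *	thaw_workqueues();
 */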
#endif /* CONFIG_FREEZER */
static int __init init_workqueues(void)
{
	unsigned int cpu;
	int i;

	cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);

	/* initialize gcwqs */
	for_each_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_init(&gcwq->lock);
		INIT_LIST_HEAD(&gcwq->worklist);
		gcwq->cpu = cpu;
		gcwq->flags |= GCWQ_DISASSOCIATED;

		INIT_LIST_HEAD(&gcwq->idle_list);
		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);

		init_timer_deferrable(&gcwq->idle_timer);
		gcwq->idle_timer.function = idle_worker_timeout;
		gcwq->idle_timer.data = (unsigned long)gcwq;

		setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
			    (unsigned long)gcwq);

		ida_init(&gcwq->worker_ida);

		gcwq->trustee_state = TRUSTEE_DONE;
		init_waitqueue_head(&gcwq->trustee_wait);
	}

	/* create the initial worker */
	for_each_online_gcwq_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);
		struct worker *worker;

		if (cpu != WORK_CPU_UNBOUND)
			gcwq->flags &= ~GCWQ_DISASSOCIATED;
		worker = create_worker(gcwq, true);
		BUG_ON(!worker);
		spin_lock_irq(&gcwq->lock);
		start_worker(worker);
		spin_unlock_irq(&gcwq->lock);
	}

	system_wq = alloc_workqueue("events", 0, 0);
	system_long_wq = alloc_workqueue("events_long", 0, 0);
	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
					    WQ_UNBOUND_MAX_ACTIVE);
	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
	       !system_unbound_wq);
	return 0;
}
early_initcall(init_workqueues);