/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
void delayed_work_timer_fn(unsigned long __data);
/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_PWQ_BIT	= 2,	/* data points to pwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_PWQ		= 1 << WORK_STRUCT_PWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif
	/*
	 * The last color is no color, used for work items which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,
	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_END		= NR_CPUS + 1,
	/*
	 * Reserve 8 bits off of pwq pointer w/ debugobjects turned off
	 * (COLOR_SHIFT + COLOR_BITS).  This makes pwqs aligned to 256
	 * bytes and allows 15 workqueue flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,
	/* data contains off-queue information when !WORK_STRUCT_PWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_FLAG_BITS,

	WORK_OFFQ_CANCELING	= (1 << WORK_OFFQ_FLAG_BASE),

	/*
	 * When a work item is off queue, its high bits point to the last
	 * pool it was on.  Cap at 31 bits and use the highest number to
	 * indicate that no pool is associated.
	 */
	WORK_OFFQ_FLAG_BITS	= 1,
	WORK_OFFQ_POOL_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
	WORK_OFFQ_LEFT		= BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
	WORK_OFFQ_POOL_BITS	= WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
	WORK_OFFQ_POOL_NONE	= (1LU << WORK_OFFQ_POOL_BITS) - 1,
	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_POOL	= (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
};
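/*
 * Illustrative sketch, not part of this header's API: when
 * WORK_STRUCT_PWQ is clear, the pool id a work item last ran on sits
 * above the flag bits, roughly as the workqueue core recovers it
 * internally:
 *
 *	static inline unsigned long example_offq_pool_id(unsigned long data)
 *	{
 *		return data >> WORK_OFFQ_POOL_SHIFT;
 *	}
 *
 * A result of WORK_OFFQ_POOL_NONE means no pool is associated.
 */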
struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)
struct delayed_work {
	struct work_struct work;
	struct timer_list timer;

	/* target workqueue and CPU ->timer uses to queue ->work */
	struct workqueue_struct *wq;
	int cpu;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}

struct execute_work {
	struct work_struct work;
};
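/*
 * Example (illustrative; my_work_fn is hypothetical): a handler shared
 * with a delayed work item can recover its container via
 * to_delayed_work():
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		(use container_of() on dwork to reach the enclosing object)
 *	}
 */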
#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif
#define __WORK_INITIALIZER(n, f) {					\
	.data = WORK_DATA_STATIC_INIT(),				\
	.entry	= { &(n).entry, &(n).entry },				\
	.func = (f),							\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))				\
	}

#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),			\
	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,		\
				     0, (unsigned long)&(n),		\
				     (tflags) | TIMER_IRQSAFE),		\
	}
#define DECLARE_WORK(n, f)						\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

#define DECLARE_DEFERRABLE_WORK(n, f)					\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
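/*
 * Example (illustrative; my_handler and my_work are hypothetical): a
 * work item fully initialized at compile time:
 *
 *	static void my_handler(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_handler);
 *
 * After this, schedule_work(&my_work) may be called from any context.
 */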
/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)					\
	do {								\
		(_work)->func = (_func);				\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)				\
	PREPARE_WORK(&(_work)->work, (_func))
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif
#define INIT_WORK(_work, _func)						\
	do {								\
		__INIT_WORK((_work), (_func), 0);			\
	} while (0)

#define INIT_WORK_ONSTACK(_work, _func)					\
	do {								\
		__INIT_WORK((_work), (_func), 1);			\
	} while (0)

#define __INIT_DELAYED_WORK(_work, _func, _tflags)			\
	do {								\
		INIT_WORK(&(_work)->work, (_func));			\
		__setup_timer(&(_work)->timer, delayed_work_timer_fn,	\
			      (unsigned long)(_work),			\
			      (_tflags) | TIMER_IRQSAFE);		\
	} while (0)

#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)		\
	do {								\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));		\
		__setup_timer_on_stack(&(_work)->timer,			\
				       delayed_work_timer_fn,		\
				       (unsigned long)(_work),		\
				       (_tflags) | TIMER_IRQSAFE);	\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)					\
	__INIT_DELAYED_WORK(_work, _func, 0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)				\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)

#define INIT_DEFERRABLE_WORK(_work, _func)				\
	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)

#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)			\
	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
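/*
 * Example (illustrative sketch; struct my_dev and my_dev_reset are
 * hypothetical): runtime initialization of an embedded work item, with
 * container_of() undoing the embedding in the handler:
 *
 *	struct my_dev {
 *		struct work_struct reset_work;
 *	};
 *
 *	static void my_dev_reset(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  reset_work);
 *	}
 *
 *	INIT_WORK(&dev->reset_work, my_dev_reset);
 */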
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work)						\
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w)						\
	work_pending(&(w)->work)
/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work)					\
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */

	WQ_DRAINING		= 1 << 6, /* internal: workqueue is draining */
	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};
/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
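/*
 * Example (illustrative; my_work is hypothetical): schedule_work() is
 * shorthand for queueing on system_wq, so the following are equivalent:
 *
 *	schedule_work(&my_work);
 *	queue_work(system_wq, &my_work);
 */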
static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
{
	return system_wq;
}

static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
{
	return system_freezable_wq;
}

/* equivalent to system_wq and system_freezable_wq, deprecated */
#define system_nrt_wq			__system_nrt_wq()
#define system_nrt_freezable_wq	__system_nrt_freezable_wq()
extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);

/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
 *
 * The __lock_name macro dance is to guarantee that a single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)		\
({									\
	static struct lock_class_key __key;				\
	const char *__lock_name;					\
									\
	if (__builtin_constant_p(fmt))					\
		__lock_name = (fmt);					\
	else								\
		__lock_name = #fmt;					\
									\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      &__key, __lock_name, ##args);		\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)		\
	__alloc_workqueue_key((fmt), (flags), (max_active),		\
			      NULL, NULL, ##args)
#endif
/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)

#define create_workqueue(name)						\
	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezable_workqueue(name)				\
	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)				\
	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
extern void destroy_workqueue(struct workqueue_struct *wq);
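/*
 * Example (illustrative sketch; "mydrv", my_wq and my_work are
 * hypothetical): typical workqueue lifecycle.  destroy_workqueue()
 * drains remaining work items before freeing the workqueue:
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("mydrv", WQ_MEM_RECLAIM, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *	queue_work(my_wq, &my_work);
 *	...
 *	destroy_workqueue(my_wq);
 */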
extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
extern bool mod_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay);
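/*
 * Example (illustrative; update_work is hypothetical): mod_delayed_work()
 * restarts the countdown if the item is already pending, which makes it
 * handy for debouncing bursts of events:
 *
 *	mod_delayed_work(system_wq, &update_work, msecs_to_jiffies(100));
 */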
extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern bool schedule_work_on(int cpu, struct work_struct *work);
extern bool schedule_work(struct work_struct *work);
extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
				     unsigned long delay);
extern bool schedule_delayed_work(struct delayed_work *work,
				  unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
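/*
 * Example (illustrative; dev->poll_work is hypothetical): typical
 * teardown.  cancel_delayed_work_sync() kills a pending timer and waits
 * for a running callback to finish, so the item is quiescent afterwards:
 *
 *	cancel_delayed_work_sync(&dev->poll_work);
 */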
extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_busy(struct work_struct *work);
/*
 * Like the cancel_delayed_work*() helpers above, but uses del_timer()
 * instead of del_timer_sync().  This means, if it returns 0 the timer
 * function may be running and the queueing is in progress.
 */
static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}
/* used to be different but now identical to flush_work(), deprecated */
static inline bool __deprecated flush_work_sync(struct work_struct *work)
{
	return flush_work(work);
}

/* used to be different but now identical to flush_delayed_work(), deprecated */
static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
{
	return flush_delayed_work(dwork);
}
#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
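/*
 * Example (illustrative; read_cpu_state and ctx are hypothetical): run
 * a function synchronously on a specific CPU and collect its return
 * value:
 *
 *	long ret = work_on_cpu(2, read_cpu_state, &ctx);
 */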
#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#endif	/* _LINUX_WORKQUEUE_H */