/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_CWQ_BIT	= 1,	/* data points to cwq */
	WORK_STRUCT_LINKED_BIT	= 2,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 3,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 3,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	/*
	 * The last color is "no color", used for works which don't
	 * participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,

	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_NONE		= NR_CPUS + 1,
	WORK_CPU_LAST		= WORK_CPU_NONE,

	/*
	 * Reserve 7 bits off of cwq pointer w/ debugobjects turned
	 * off.  This makes cwqs aligned to 128 bytes which isn't too
	 * excessive while allowing 15 workqueue flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,

	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_CPU	= WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
};
struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)
struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}
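
/*
 * Example (illustrative sketch, compiled out; not part of this header):
 * a work handler recovering its containing delayed_work via
 * to_delayed_work() so it can re-arm itself.  my_poll_fn is hypothetical.
 */
#if 0
static void my_poll_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* ... do the periodic work, then re-arm one second out ... */
	schedule_delayed_work(dwork, HZ);
}
#endif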
struct execute_work {
	struct work_struct work;
};
#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif
#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_STATIC_INIT(),			\
	.entry	= { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
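
/*
 * Example (illustrative sketch, compiled out): compile-time declaration
 * of work items.  DECLARE_WORK/DECLARE_DELAYED_WORK yield fully
 * initialised objects, so no runtime INIT_WORK() call is needed.
 * my_handler, my_work and my_dwork are hypothetical names.
 */
#if 0
static void my_handler(struct work_struct *work)
{
	pr_info("my_work ran\n");
}
static DECLARE_WORK(my_work, my_handler);
static DECLARE_DELAYED_WORK(my_dwork, my_handler);
#endif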
/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)

#define INIT_WORK_ON_STACK(_work, _func)			\
	do {							\
		__INIT_WORK((_work), (_func), 1);		\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)

#define INIT_DELAYED_WORK_ON_STACK(_work, _func)		\
	do {							\
		INIT_WORK_ON_STACK(&(_work)->work, (_func));	\
		init_timer_on_stack(&(_work)->timer);		\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)
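
/*
 * Example (illustrative sketch, compiled out): runtime initialisation of
 * work items embedded in a dynamically allocated object.  struct my_dev,
 * my_reset_fn and my_poll_fn are hypothetical.
 */
#if 0
struct my_dev {
	struct work_struct reset_work;
	struct delayed_work poll_work;
};

static void my_dev_setup(struct my_dev *dev)
{
	INIT_WORK(&dev->reset_work, my_reset_fn);
	INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);
}
#endif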
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is
 * currently pending
 * @work: The work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
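
/*
 * Example (illustrative sketch, compiled out): avoiding a redundant
 * re-queue when the delayed work is already pending.  Uses the
 * hypothetical struct my_dev from the sketch above.
 */
#if 0
static void my_kick_poll(struct my_dev *dev)
{
	if (!delayed_work_pending(&dev->poll_work))
		schedule_delayed_work(&dev->poll_work, HZ);
}
#endif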
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZEABLE		= 1 << 2, /* freeze during suspend */
	WQ_RESCUER		= 1 << 3, /* has a rescue worker */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};
/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE \
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_nrt_wq is non-reentrant and guarantees that any given work
 * item is never executed in parallel by multiple CPUs.  Queue
 * flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
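
/*
 * Example (illustrative sketch, compiled out): picking a system
 * workqueue to match the expected runtime of the item.  my_work and
 * my_slow_work are hypothetical.
 */
#if 0
static void my_submit(void)
{
	schedule_work(&my_work);			/* short-lived, system_wq */
	queue_work(system_long_wq, &my_slow_work);	/* may run for a while */
}
#endif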
extern struct workqueue_struct *
__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
		      struct lock_class_key *key, const char *lock_name);

#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(name, flags, max_active)		\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(name))				\
		__lock_name = (name);				\
	else							\
		__lock_name = #name;				\
								\
	__alloc_workqueue_key((name), (flags), (max_active),	\
			      &__key, __lock_name);		\
})
#else
#define alloc_workqueue(name, flags, max_active)		\
	__alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
#endif
#define create_workqueue(name)					\
	alloc_workqueue((name), WQ_RESCUER, 1)
#define create_freezeable_workqueue(name)			\
	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1)
#define create_singlethread_workqueue(name)			\
	alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1)
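
/*
 * Example (illustrative sketch, compiled out): creating and tearing down
 * a dedicated workqueue.  WQ_RESCUER gives the queue a rescuer thread so
 * it can make forward progress under memory pressure; max_active of 1
 * serialises the queued items.  my_wq and my_work are hypothetical.
 */
#if 0
static struct workqueue_struct *my_wq;

static int my_init(void)
{
	my_wq = alloc_workqueue("my_wq", WQ_RESCUER, 1);
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);
	return 0;
}

static void my_exit(void)
{
	flush_workqueue(my_wq);		/* wait for anything still queued */
	destroy_workqueue(my_wq);
}
#endif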
extern void destroy_workqueue(struct workqueue_struct *wq);

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
extern void flush_delayed_work(struct delayed_work *work);

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
					unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);
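
/*
 * Example (illustrative sketch, compiled out): the schedule_*() helpers
 * all target system_wq.  Delays are in jiffies; msecs_to_jiffies()
 * converts from milliseconds.  Uses the hypothetical struct my_dev from
 * the earlier sketch.
 */
#if 0
static void my_defer(struct my_dev *dev)
{
	schedule_delayed_work(&dev->poll_work, msecs_to_jiffies(100));
	schedule_work_on(0, &dev->reset_work);	/* pin to CPU 0 */
}
#endif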
int execute_in_process_context(work_func_t fn, struct execute_work *);

extern int flush_work(struct work_struct *work);
extern int cancel_work_sync(struct work_struct *work);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);
/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself.  Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline int cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}
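
/*
 * Example (illustrative sketch, compiled out): tearing down a self
 * re-arming delayed work.  cancel_delayed_work() alone may return while
 * the callback is still running, so teardown paths normally use the
 * _sync variant declared below.
 */
#if 0
static void my_stop(struct my_dev *dev)
{
	cancel_delayed_work_sync(&dev->poll_work);	/* waits, prevents re-arm */
}
#endif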
/*
 * Like above, but uses del_timer() instead of del_timer_sync().  This means,
 * if it returns 0 the timer function may be running and the queueing is in
 * progress.
 */
static inline int __cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}
extern int cancel_delayed_work_sync(struct delayed_work *work);

/* Obsolete.  Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
				       struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

/* Obsolete.  Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_work(struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}
#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
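
/*
 * Example (illustrative sketch, compiled out): running a function
 * synchronously on a specific CPU.  On !SMP the stub above simply calls
 * fn(arg) in place.  my_percpu_query is hypothetical.
 */
#if 0
static long my_percpu_query(void *arg)
{
	/* executes on the CPU passed to work_on_cpu() */
	return 0;
}

static long my_query_cpu1(void)
{
	return work_on_cpu(1, my_percpu_query, NULL);
}
#endif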
#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#ifdef CONFIG_LOCKDEP
int in_workqueue_context(struct workqueue_struct *wq);
#endif

#endif