workqueue: update cwq alignment
include/linux/workqueue.h
/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 1,	/* static initializer (debugobjects) */
	WORK_STRUCT_FLAG_BITS	= 2,
#else
	WORK_STRUCT_FLAG_BITS	= 1,
#endif

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif

	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
};
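
/*
 * Illustrative sketch (not part of this header): since the flag bits
 * occupy the low bits of @data, the owning workqueue pointer can be
 * recovered by masking them off; "cwq" below is just a local name.
 *
 *	unsigned long data = atomic_long_read(&work->data);
 *	void *cwq = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *	int pending = data & WORK_STRUCT_PENDING;
 */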
struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};
#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)
#define WORK_DATA_STATIC_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_STATIC)
struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}
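
/*
 * Example (an illustrative sketch, not part of this header): a work
 * function attached to a delayed_work receives the embedded
 * work_struct and can recover its container via to_delayed_work(),
 * e.g. to re-arm itself; my_work_fn is a hypothetical name.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		schedule_delayed_work(dwork, HZ);
 *	}
 */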
struct execute_work {
	struct work_struct work;
};
#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif
#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_STATIC_INIT(),			\
	.entry	= { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
	}
#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
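
/*
 * Example (an illustrative sketch, not part of this header): the
 * DECLARE_* macros define fully initialized items at file scope;
 * my_handler, my_work and my_dwork are hypothetical names.
 *
 *	static void my_handler(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_handler);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_handler);
 */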
/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)			\
	do {							\
		static struct lock_class_key __key;		\
								\
		__init_work((_work), _onstack);			\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
		INIT_LIST_HEAD(&(_work)->entry);		\
		PREPARE_WORK((_work), (_func));			\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)			\
	do {							\
		__init_work((_work), _onstack);			\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();\
		INIT_LIST_HEAD(&(_work)->entry);		\
		PREPARE_WORK((_work), (_func));			\
	} while (0)
#endif
#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)

#define INIT_WORK_ON_STACK(_work, _func)			\
	do {							\
		__INIT_WORK((_work), (_func), 1);		\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
	} while (0)

#define INIT_DELAYED_WORK_ON_STACK(_work, _func)		\
	do {							\
		INIT_WORK_ON_STACK(&(_work)->work, (_func));	\
		init_timer_on_stack(&(_work)->timer);		\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
	} while (0)
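
/*
 * Example (an illustrative sketch, not part of this header): runtime
 * initialization of a work item embedded in a driver structure, and
 * the on-stack pattern; dev and my_dev_work_fn are hypothetical names.
 *
 *	INIT_WORK(&dev->work, my_dev_work_fn);
 *	schedule_work(&dev->work);
 *
 * On-stack items must be torn down again once they are idle:
 *
 *	struct work_struct w;
 *
 *	INIT_WORK_ON_STACK(&w, my_dev_work_fn);
 *	schedule_work(&w);
 *	flush_work(&w);
 *	destroy_work_on_stack(&w);
 */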
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The delayable work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
enum {
	WQ_FREEZEABLE		= 1 << 0, /* freeze during suspend */
	WQ_SINGLE_THREAD	= 1 << 1, /* no per-cpu worker */
};
extern struct workqueue_struct *
__create_workqueue_key(const char *name, unsigned int flags,
		       struct lock_class_key *key, const char *lock_name);

#ifdef CONFIG_LOCKDEP
#define __create_workqueue(name, flags)				\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(name))				\
		__lock_name = (name);				\
	else							\
		__lock_name = #name;				\
								\
	__create_workqueue_key((name), (flags), &__key,		\
			       __lock_name);			\
})
#else
#define __create_workqueue(name, flags)				\
	__create_workqueue_key((name), (flags), NULL, NULL)
#endif
#define create_workqueue(name)					\
	__create_workqueue((name), 0)
#define create_freezeable_workqueue(name)			\
	__create_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_THREAD)
#define create_singlethread_workqueue(name)			\
	__create_workqueue((name), WQ_SINGLE_THREAD)
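
/*
 * Example (an illustrative sketch, not part of this header): the usual
 * lifecycle of a driver-private workqueue; wq, dev and "mydrv" are
 * hypothetical names.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = create_singlethread_workqueue("mydrv");
 *	if (!wq)
 *		return -ENOMEM;
 *	queue_work(wq, &dev->work);
 *	...
 *	flush_workqueue(wq);
 *	destroy_workqueue(wq);
 */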
extern void destroy_workqueue(struct workqueue_struct *wq);

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
extern void flush_delayed_work(struct delayed_work *work);

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
					unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int current_is_keventd(void);
extern int keventd_up(void);
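
/*
 * Example (an illustrative sketch, not part of this header): the
 * schedule_*() helpers queue onto the shared kernel-wide queue, so no
 * workqueue needs to be created; my_work and my_dwork are hypothetical.
 *
 *	schedule_work(&my_work);
 *	schedule_delayed_work(&my_dwork, msecs_to_jiffies(100));
 */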
extern void init_workqueues(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);

extern int flush_work(struct work_struct *work);

extern int cancel_work_sync(struct work_struct *work);
/*
 * Kill off a pending schedule_delayed_work(). Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline int cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}
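
/*
 * Example (an illustrative sketch, not part of this header): per the
 * comment above, wait explicitly when cancel_delayed_work() reports
 * that the callback may still be running; dev and wq are hypothetical.
 *
 *	if (!cancel_delayed_work(&dev->dwork))
 *		flush_workqueue(wq);
 */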
/*
 * Like above, but uses del_timer() instead of del_timer_sync(). This means,
 * if it returns 0 the timer function may be running and the queueing is in
 * progress.
 */
static inline int __cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}
extern int cancel_delayed_work_sync(struct delayed_work *work);

/* Obsolete. Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
					struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}

/* Obsolete. Use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_work(struct delayed_work *work)
{
	cancel_delayed_work_sync(work);
}
#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
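
/*
 * Example (an illustrative sketch, not part of this header): run a
 * function synchronously on one CPU and collect its return value;
 * read_cpu_state and state are hypothetical names.
 *
 *	static long read_cpu_state(void *arg)
 *	{
 *		struct cpu_state *state = arg;
 *		...
 *		return 0;
 *	}
 *
 *	long ret = work_on_cpu(1, read_cpu_state, state);
 */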
#endif