kernel/task_work.c — task_work API: queue, cancel, and run per-task callbacks
[linux-2.6.git] / kernel / task_work.c
blob91d4e1742a0c4ec1cd8805b6663f5a95b5d42b3b
1 #include <linux/spinlock.h>
2 #include <linux/task_work.h>
3 #include <linux/tracehook.h>
5 int
6 task_work_add(struct task_struct *task, struct callback_head *twork, bool notify)
8 struct callback_head *last, *first;
9 unsigned long flags;
12 * Not inserting the new work if the task has already passed
13 * exit_task_work() is the responisbility of callers.
15 raw_spin_lock_irqsave(&task->pi_lock, flags);
16 last = task->task_works;
17 first = last ? last->next : twork;
18 twork->next = first;
19 if (last)
20 last->next = twork;
21 task->task_works = twork;
22 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
24 /* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
25 if (notify)
26 set_notify_resume(task);
27 return 0;
30 struct callback_head *
31 task_work_cancel(struct task_struct *task, task_work_func_t func)
33 unsigned long flags;
34 struct callback_head *last, *res = NULL;
36 raw_spin_lock_irqsave(&task->pi_lock, flags);
37 last = task->task_works;
38 if (last) {
39 struct callback_head *q = last, *p = q->next;
40 while (1) {
41 if (p->func == func) {
42 q->next = p->next;
43 if (p == last)
44 task->task_works = q == p ? NULL : q;
45 res = p;
46 break;
48 if (p == last)
49 break;
50 q = p;
51 p = q->next;
54 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
55 return res;
58 void task_work_run(void)
60 struct task_struct *task = current;
61 struct callback_head *p, *q;
63 while (1) {
64 raw_spin_lock_irq(&task->pi_lock);
65 p = task->task_works;
66 task->task_works = NULL;
67 raw_spin_unlock_irq(&task->pi_lock);
69 if (unlikely(!p))
70 return;
72 q = p->next; /* head */
73 p->next = NULL; /* cut it */
74 while (q) {
75 p = q->next;
76 q->func(q);
77 q = p;