Ok. I didn't make 2.4.0 in 2000. Tough. I tried, but we had some
[davej-history.git] / kernel / context.c
blob864a70131c88dfadc27ed1468362d061fc973e83
/*
 * linux/kernel/context.c
 *
 * Mechanism for running arbitrary tasks in process context
 *
 * dwmw2@redhat.com: Genesis
 *
 * andrewm@uow.edu.au: 2.4.0-test12
 * - Child reaping
 * - Support for tasks which re-add themselves
 * - flush_scheduled_tasks.
 */
14 #define __KERNEL_SYSCALLS__
16 #include <linux/module.h>
17 #include <linux/kernel.h>
18 #include <linux/sched.h>
19 #include <linux/init.h>
20 #include <linux/unistd.h>
21 #include <linux/signal.h>
/*
 * Work pending for keventd, plus the two waitqueues used to signal it:
 * context_task_wq wakes keventd when new work arrives, context_task_done
 * wakes flushers after a pass over the queue has completed.
 */
static DECLARE_TASK_QUEUE(tq_context);
static DECLARE_WAIT_QUEUE_HEAD(context_task_wq);
static DECLARE_WAIT_QUEUE_HEAD(context_task_done);

/* Set once context_thread() has initialised itself. */
static int keventd_running;
static struct task_struct *keventd_task;
29 static int need_keventd(const char *who)
31 if (keventd_running == 0)
32 printk(KERN_ERR "%s(): keventd has not started\n", who);
33 return keventd_running;
36 int current_is_keventd(void)
38 int ret = 0;
39 if (need_keventd(__FUNCTION__))
40 ret = (current == keventd_task);
41 return ret;
44 /**
45 * schedule_task - schedule a function for subsequent execution in process context.
46 * @task: pointer to a &tq_struct which defines the function to be scheduled.
48 * May be called from interrupt context. The scheduled function is run at some
49 * time in the near future by the keventd kernel thread. If it can sleep, it
50 * should be designed to do so for the minimum possible time, as it will be
51 * stalling all other scheduled tasks.
53 * schedule_task() returns non-zero if the task was successfully scheduled.
54 * If @task is already residing on a task queue then schedule_task() fails
55 * to schedule your task and returns zero.
57 int schedule_task(struct tq_struct *task)
59 int ret;
60 need_keventd(__FUNCTION__);
61 ret = queue_task(task, &tq_context);
62 wake_up(&context_task_wq);
63 return ret;
66 static int context_thread(void *dummy)
68 struct task_struct *curtask = current;
69 DECLARE_WAITQUEUE(wait, curtask);
70 struct k_sigaction sa;
72 daemonize();
73 strcpy(curtask->comm, "keventd");
74 keventd_running = 1;
75 keventd_task = curtask;
77 spin_lock_irq(&curtask->sigmask_lock);
78 siginitsetinv(&curtask->blocked, sigmask(SIGCHLD));
79 recalc_sigpending(curtask);
80 spin_unlock_irq(&curtask->sigmask_lock);
82 /* Install a handler so SIGCLD is delivered */
83 sa.sa.sa_handler = SIG_IGN;
84 sa.sa.sa_flags = 0;
85 siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
86 do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);
89 * If one of the functions on a task queue re-adds itself
90 * to the task queue we call schedule() in state TASK_RUNNING
92 for (;;) {
93 set_task_state(curtask, TASK_INTERRUPTIBLE);
94 add_wait_queue(&context_task_wq, &wait);
95 if (TQ_ACTIVE(tq_context))
96 set_task_state(curtask, TASK_RUNNING);
97 schedule();
98 remove_wait_queue(&context_task_wq, &wait);
99 run_task_queue(&tq_context);
100 wake_up(&context_task_done);
101 if (signal_pending(curtask)) {
102 while (waitpid(-1, (unsigned int *)0, __WALL|WNOHANG) > 0)
104 flush_signals(curtask);
105 recalc_sigpending(curtask);
111 * flush_scheduled_tasks - ensure that any scheduled tasks have run to completion.
113 * Forces execution of the schedule_task() queue and blocks until its completion.
115 * If a kernel subsystem uses schedule_task() and wishes to flush any pending
116 * tasks, it should use this function. This is typically used in driver shutdown
117 * handlers.
119 * The caller should hold no spinlocks and should hold no semaphores which could
120 * cause the scheduled tasks to block.
122 static struct tq_struct dummy_task;
124 void flush_scheduled_tasks(void)
126 int count;
127 DECLARE_WAITQUEUE(wait, current);
130 * Do it twice. It's possible, albeit highly unlikely, that
131 * the caller queued a task immediately before calling us,
132 * and that the eventd thread was already past the run_task_queue()
133 * but not yet into wake_up(), so it woke us up before completing
134 * the caller's queued task or our new dummy task.
136 add_wait_queue(&context_task_done, &wait);
137 for (count = 0; count < 2; count++) {
138 set_current_state(TASK_UNINTERRUPTIBLE);
140 /* Queue a dummy task to make sure we get kicked */
141 schedule_task(&dummy_task);
143 /* Wait for it to complete */
144 schedule();
146 remove_wait_queue(&context_task_done, &wait);
149 int start_context_thread(void)
151 kernel_thread(context_thread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
152 return 0;
/* Interface exported to the rest of the kernel and to modules. */
EXPORT_SYMBOL(schedule_task);
EXPORT_SYMBOL(flush_scheduled_tasks);