[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / staging / dst / thread_pool.c
blob 29a82b2602f3296b6b44fc4ca4626be629201350
/*
 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/dst.h>
#include <linux/kthread.h>
#include <linux/slab.h>
/*
 * The thread pool abstraction allows work to be scheduled on behalf of a
 * kernel thread. Users do not operate on the threads themselves; instead,
 * they provide setup and cleanup callbacks for the pool itself, and action
 * and cleanup callbacks for each submitted work item.
 *
 * Each worker has private data initialized at creation time, plus data
 * provided by the user at scheduling time.
 *
 * While an action is being performed, the thread cannot be used by other
 * users; they sleep until a free thread is available to pick up their work.
 */
struct thread_pool_worker {
	struct list_head	worker_entry;

	struct task_struct	*thread;

	struct thread_pool	*pool;

	int			error;
	int			has_data;
	int			need_exit;
	unsigned int		id;

	wait_queue_head_t	wait;

	void			*private;
	void			*schedule_data;

	int			(*action)(void *private, void *schedule_data);
	void			(*cleanup)(void *private);
};
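
/*
 * Editor's note: the following usage sketch is not part of the original
 * file. It shows, under stated assumptions, how a caller might drive this
 * API end to end; all example_* names are hypothetical.
 */
#if 0
static void *example_init(void *private)
{
	/* Per-worker private data; this sketch simply reuses the cookie. */
	return private;
}

static void example_cleanup(void *private)
{
	/* Nothing was allocated in example_init(), so nothing to free. */
}

static int example_setup(void *private, void *data)
{
	/* Runs under pool->thread_lock before the worker is woken. */
	return 0;
}

static int example_action(void *private, void *data)
{
	/* Runs in the worker's kthread context. */
	return 0;
}

static int example_use_pool(void)
{
	struct thread_pool *pool;
	int err;

	pool = thread_pool_create(4, "example", example_init,
			example_cleanup, NULL);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	err = thread_pool_schedule(pool, example_setup, example_action,
			NULL, MAX_SCHEDULE_TIMEOUT);

	thread_pool_destroy(pool);
	return err;
}
#endif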
static void thread_pool_exit_worker(struct thread_pool_worker *w)
{
	kthread_stop(w->thread);

	w->cleanup(w->private);
	kfree(w);
}
/*
 * Called to mark a thread as ready and allow users to schedule new work.
 */
static void thread_pool_worker_make_ready(struct thread_pool_worker *w)
{
	struct thread_pool *p = w->pool;

	mutex_lock(&p->thread_lock);

	if (!w->need_exit) {
		list_move_tail(&w->worker_entry, &p->ready_list);
		w->has_data = 0;
		mutex_unlock(&p->thread_lock);

		wake_up(&p->wait);
	} else {
		p->thread_num--;
		list_del(&w->worker_entry);
		mutex_unlock(&p->thread_lock);

		thread_pool_exit_worker(w);
	}
}
/*
 * Thread action loop: waits until there is new work.
 */
static int thread_pool_worker_func(void *data)
{
	struct thread_pool_worker *w = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(w->wait,
			kthread_should_stop() || w->has_data);

		if (kthread_should_stop())
			break;

		if (!w->has_data)
			continue;

		w->action(w->private, w->schedule_data);
		thread_pool_worker_make_ready(w);
	}

	return 0;
}
/*
 * Remove a single worker without specifying which one.
 */
void thread_pool_del_worker(struct thread_pool *p)
{
	struct thread_pool_worker *w = NULL;

	while (!w && p->thread_num) {
		wait_event(p->wait, !list_empty(&p->ready_list) ||
				!p->thread_num);

		dprintk("%s: locking list_empty: %d, thread_num: %d.\n",
				__func__, list_empty(&p->ready_list),
				p->thread_num);

		mutex_lock(&p->thread_lock);
		if (!list_empty(&p->ready_list)) {
			w = list_first_entry(&p->ready_list,
					struct thread_pool_worker,
					worker_entry);

			dprintk("%s: deleting w: %p, thread_num: %d, "
					"list: %p [%p.%p].\n", __func__,
					w, p->thread_num, &p->ready_list,
					p->ready_list.prev, p->ready_list.next);

			p->thread_num--;
			list_del(&w->worker_entry);
		}
		mutex_unlock(&p->thread_lock);
	}

	if (w)
		thread_pool_exit_worker(w);
	dprintk("%s: deleted w: %p, thread_num: %d.\n",
			__func__, w, p->thread_num);
}
/*
 * Remove a worker with a given ID.
 */
void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id)
{
	struct thread_pool_worker *w;
	int found = 0;

	mutex_lock(&p->thread_lock);
	list_for_each_entry(w, &p->ready_list, worker_entry) {
		if (w->id == id) {
			found = 1;
			p->thread_num--;
			list_del(&w->worker_entry);
			break;
		}
	}

	if (!found) {
		list_for_each_entry(w, &p->active_list, worker_entry) {
			if (w->id == id) {
				w->need_exit = 1;
				break;
			}
		}
	}
	mutex_unlock(&p->thread_lock);

	if (found)
		thread_pool_exit_worker(w);
}
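
/*
 * Editor's note: an illustration, not part of the original file. Removing
 * an idle worker frees it immediately; removing a busy one only sets
 * need_exit, and the worker exits once its current action completes (see
 * thread_pool_worker_make_ready() above). 'pool' is hypothetical.
 */
#if 0
static void example_shrink(struct thread_pool *pool)
{
	/* Worker 3 exits now if idle, or after its current action if busy. */
	thread_pool_del_worker_id(pool, 3);
}
#endif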
/*
 * Add a new worker thread with the given parameters.
 * If the initialization callback fails, return an error.
 */
int thread_pool_add_worker(struct thread_pool *p,
		char *name,
		unsigned int id,
		void *(*init)(void *private),
		void (*cleanup)(void *private),
		void *private)
{
	struct thread_pool_worker *w;
	int err = -ENOMEM;

	w = kzalloc(sizeof(struct thread_pool_worker), GFP_KERNEL);
	if (!w)
		goto err_out_exit;

	w->pool = p;
	init_waitqueue_head(&w->wait);
	w->cleanup = cleanup;
	w->id = id;

	w->thread = kthread_run(thread_pool_worker_func, w, "%s", name);
	if (IS_ERR(w->thread)) {
		err = PTR_ERR(w->thread);
		goto err_out_free;
	}

	w->private = init(private);
	if (IS_ERR(w->private)) {
		err = PTR_ERR(w->private);
		goto err_out_stop_thread;
	}

	mutex_lock(&p->thread_lock);
	list_add_tail(&w->worker_entry, &p->ready_list);
	p->thread_num++;
	mutex_unlock(&p->thread_lock);

	return 0;

err_out_stop_thread:
	kthread_stop(w->thread);
err_out_free:
	kfree(w);
err_out_exit:
	return err;
}
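
/*
 * Editor's note: an illustration, not part of the original file. The pool
 * can also be grown after creation; the id is chosen by the caller, and
 * the names reuse the hypothetical example_* callbacks sketched earlier.
 */
#if 0
static int example_grow(struct thread_pool *pool)
{
	/* Add a fifth worker to a pool created with ids 0..3. */
	return thread_pool_add_worker(pool, "example", 4,
			example_init, example_cleanup, NULL);
}
#endif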
/*
 * Destroy the whole pool.
 */
void thread_pool_destroy(struct thread_pool *p)
{
	while (p->thread_num) {
		dprintk("%s: num: %d.\n", __func__, p->thread_num);
		thread_pool_del_worker(p);
	}

	kfree(p);
}
/*
 * Create a pool with the given number of threads.
 * Workers get sequential IDs starting from zero.
 */
struct thread_pool *thread_pool_create(int num, char *name,
		void *(*init)(void *private),
		void (*cleanup)(void *private),
		void *private)
{
	struct thread_pool_worker *w, *tmp;
	struct thread_pool *p;
	int err = -ENOMEM;
	int i;

	p = kzalloc(sizeof(struct thread_pool), GFP_KERNEL);
	if (!p)
		goto err_out_exit;

	init_waitqueue_head(&p->wait);
	mutex_init(&p->thread_lock);
	INIT_LIST_HEAD(&p->ready_list);
	INIT_LIST_HEAD(&p->active_list);
	p->thread_num = 0;

	for (i = 0; i < num; ++i) {
		err = thread_pool_add_worker(p, name, i, init,
				cleanup, private);
		if (err)
			goto err_out_free_all;
	}

	return p;

err_out_free_all:
	list_for_each_entry_safe(w, tmp, &p->ready_list, worker_entry) {
		list_del(&w->worker_entry);
		thread_pool_exit_worker(w);
	}
	kfree(p);
err_out_exit:
	return ERR_PTR(err);
}
/*
 * Schedule execution of the action on a given thread: the provided ID
 * pointer has to match the private data previously stored for that worker.
 */
int thread_pool_schedule_private(struct thread_pool *p,
		int (*setup)(void *private, void *data),
		int (*action)(void *private, void *data),
		void *data, long timeout, void *id)
{
	struct thread_pool_worker *w, *tmp, *worker = NULL;
	int err = 0;

	while (!worker && !err) {
		timeout = wait_event_interruptible_timeout(p->wait,
				!list_empty(&p->ready_list),
				timeout);

		if (!timeout) {
			err = -ETIMEDOUT;
			break;
		}

		worker = NULL;
		mutex_lock(&p->thread_lock);
		list_for_each_entry_safe(w, tmp, &p->ready_list, worker_entry) {
			if (id && id != w->private)
				continue;

			worker = w;

			list_move_tail(&w->worker_entry, &p->active_list);

			err = setup(w->private, data);
			if (!err) {
				w->schedule_data = data;
				w->action = action;
				w->has_data = 1;
				wake_up(&w->wait);
			} else {
				list_move_tail(&w->worker_entry,
						&p->ready_list);
			}

			break;
		}
		mutex_unlock(&p->thread_lock);
	}

	return err;
}
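
/*
 * Editor's note: an illustration, not part of the original file. To pin
 * work to one worker, pass the same pointer that worker's init() callback
 * returned; it is compared against w->private above. 'ctx' and the
 * example_* callbacks are hypothetical.
 */
#if 0
static int example_pinned(struct thread_pool *pool, void *ctx, void *data)
{
	/* Wait up to five seconds for that specific worker to become free. */
	return thread_pool_schedule_private(pool, example_setup,
			example_action, data, msecs_to_jiffies(5000), ctx);
}
#endif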
/*
 * Schedule execution on an arbitrary thread from the pool.
 */
int thread_pool_schedule(struct thread_pool *p,
		int (*setup)(void *private, void *data),
		int (*action)(void *private, void *data),
		void *data, long timeout)
{
	return thread_pool_schedule_private(p, setup,
			action, data, timeout, NULL);
}