/*
 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/dst.h>
#include <linux/kthread.h>
#include <linux/slab.h>
/*
 * Thread pool abstraction allows to schedule a work to be performed
 * on behalf of kernel thread. One does not operate with threads itself,
 * instead user provides setup and cleanup callbacks for thread pool itself,
 * and action and cleanup callbacks for each submitted work.
 *
 * Each worker has private data initialized at creation time and data,
 * provided by user at scheduling time.
 *
 * When action is being performed, thread can not be used by other users,
 * instead they will sleep until there is free thread to pick their work.
 */
33 struct thread_pool_worker
{
34 struct list_head worker_entry
;
36 struct task_struct
*thread
;
38 struct thread_pool
*pool
;
45 wait_queue_head_t wait
;
50 int (*action
)(void *private, void *schedule_data
);
51 void (*cleanup
)(void *private);
54 static void thread_pool_exit_worker(struct thread_pool_worker
*w
)
56 kthread_stop(w
->thread
);
58 w
->cleanup(w
->private);
63 * Called to mark thread as ready and allow users to schedule new work.
65 static void thread_pool_worker_make_ready(struct thread_pool_worker
*w
)
67 struct thread_pool
*p
= w
->pool
;
69 mutex_lock(&p
->thread_lock
);
72 list_move_tail(&w
->worker_entry
, &p
->ready_list
);
74 mutex_unlock(&p
->thread_lock
);
79 list_del(&w
->worker_entry
);
80 mutex_unlock(&p
->thread_lock
);
82 thread_pool_exit_worker(w
);
87 * Thread action loop: waits until there is new work.
89 static int thread_pool_worker_func(void *data
)
91 struct thread_pool_worker
*w
= data
;
93 while (!kthread_should_stop()) {
94 wait_event_interruptible(w
->wait
,
95 kthread_should_stop() || w
->has_data
);
97 if (kthread_should_stop())
103 w
->action(w
->private, w
->schedule_data
);
104 thread_pool_worker_make_ready(w
);
111 * Remove single worker without specifying which one.
113 void thread_pool_del_worker(struct thread_pool
*p
)
115 struct thread_pool_worker
*w
= NULL
;
117 while (!w
&& p
->thread_num
) {
118 wait_event(p
->wait
, !list_empty(&p
->ready_list
) ||
121 dprintk("%s: locking list_empty: %d, thread_num: %d.\n",
122 __func__
, list_empty(&p
->ready_list
),
125 mutex_lock(&p
->thread_lock
);
126 if (!list_empty(&p
->ready_list
)) {
127 w
= list_first_entry(&p
->ready_list
,
128 struct thread_pool_worker
,
131 dprintk("%s: deleting w: %p, thread_num: %d, "
132 "list: %p [%p.%p].\n", __func__
,
133 w
, p
->thread_num
, &p
->ready_list
,
134 p
->ready_list
.prev
, p
->ready_list
.next
);
137 list_del(&w
->worker_entry
);
139 mutex_unlock(&p
->thread_lock
);
143 thread_pool_exit_worker(w
);
144 dprintk("%s: deleted w: %p, thread_num: %d.\n",
145 __func__
, w
, p
->thread_num
);
149 * Remove a worker with given ID.
151 void thread_pool_del_worker_id(struct thread_pool
*p
, unsigned int id
)
153 struct thread_pool_worker
*w
;
156 mutex_lock(&p
->thread_lock
);
157 list_for_each_entry(w
, &p
->ready_list
, worker_entry
) {
161 list_del(&w
->worker_entry
);
167 list_for_each_entry(w
, &p
->active_list
, worker_entry
) {
174 mutex_unlock(&p
->thread_lock
);
177 thread_pool_exit_worker(w
);
181 * Add new worker thread with given parameters.
182 * If initialization callback fails, return error.
184 int thread_pool_add_worker(struct thread_pool
*p
,
187 void *(*init
)(void *private),
188 void (*cleanup
)(void *private),
191 struct thread_pool_worker
*w
;
194 w
= kzalloc(sizeof(struct thread_pool_worker
), GFP_KERNEL
);
199 init_waitqueue_head(&w
->wait
);
200 w
->cleanup
= cleanup
;
203 w
->thread
= kthread_run(thread_pool_worker_func
, w
, "%s", name
);
204 if (IS_ERR(w
->thread
)) {
205 err
= PTR_ERR(w
->thread
);
209 w
->private = init(private);
210 if (IS_ERR(w
->private)) {
211 err
= PTR_ERR(w
->private);
212 goto err_out_stop_thread
;
215 mutex_lock(&p
->thread_lock
);
216 list_add_tail(&w
->worker_entry
, &p
->ready_list
);
218 mutex_unlock(&p
->thread_lock
);
223 kthread_stop(w
->thread
);
231 * Destroy the whole pool.
233 void thread_pool_destroy(struct thread_pool
*p
)
235 while (p
->thread_num
) {
236 dprintk("%s: num: %d.\n", __func__
, p
->thread_num
);
237 thread_pool_del_worker(p
);
244 * Create a pool with given number of threads.
245 * They will have sequential IDs started from zero.
247 struct thread_pool
*thread_pool_create(int num
, char *name
,
248 void *(*init
)(void *private),
249 void (*cleanup
)(void *private),
252 struct thread_pool_worker
*w
, *tmp
;
253 struct thread_pool
*p
;
257 p
= kzalloc(sizeof(struct thread_pool
), GFP_KERNEL
);
261 init_waitqueue_head(&p
->wait
);
262 mutex_init(&p
->thread_lock
);
263 INIT_LIST_HEAD(&p
->ready_list
);
264 INIT_LIST_HEAD(&p
->active_list
);
267 for (i
= 0; i
< num
; ++i
) {
268 err
= thread_pool_add_worker(p
, name
, i
, init
,
271 goto err_out_free_all
;
277 list_for_each_entry_safe(w
, tmp
, &p
->ready_list
, worker_entry
) {
278 list_del(&w
->worker_entry
);
279 thread_pool_exit_worker(w
);
287 * Schedule execution of the action on a given thread,
288 * provided ID pointer has to match previously stored
291 int thread_pool_schedule_private(struct thread_pool
*p
,
292 int (*setup
)(void *private, void *data
),
293 int (*action
)(void *private, void *data
),
294 void *data
, long timeout
, void *id
)
296 struct thread_pool_worker
*w
, *tmp
, *worker
= NULL
;
299 while (!worker
&& !err
) {
300 timeout
= wait_event_interruptible_timeout(p
->wait
,
301 !list_empty(&p
->ready_list
),
310 mutex_lock(&p
->thread_lock
);
311 list_for_each_entry_safe(w
, tmp
, &p
->ready_list
, worker_entry
) {
312 if (id
&& id
!= w
->private)
317 list_move_tail(&w
->worker_entry
, &p
->active_list
);
319 err
= setup(w
->private, data
);
321 w
->schedule_data
= data
;
326 list_move_tail(&w
->worker_entry
,
332 mutex_unlock(&p
->thread_lock
);
339 * Schedule execution on arbitrary thread from the pool.
341 int thread_pool_schedule(struct thread_pool
*p
,
342 int (*setup
)(void *private, void *data
),
343 int (*action
)(void *private, void *data
),
344 void *data
, long timeout
)
346 return thread_pool_schedule_private(p
, setup
,
347 action
, data
, timeout
, NULL
);