/*
 * QEMU block layer thread pool
 *
 * Copyright IBM, Corp. 2008
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "qemu/osdep.h"
#include "block/coroutine.h"
#include "trace.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"

static void do_spawn_thread(ThreadPool *pool);

typedef struct ThreadPoolElement ThreadPoolElement;

enum ThreadState {
    THREAD_QUEUED,
    THREAD_ACTIVE,
    THREAD_DONE,
    THREAD_CANCELED,
};

struct ThreadPoolElement {
    BlockDriverAIOCB common;
    ThreadPool *pool;
    ThreadPoolFunc *func;
    void *arg;

    /* Moving state out of THREAD_QUEUED is protected by lock.  After
     * that, only the worker thread can write to it.  Reads and writes
     * of state and ret are ordered with memory barriers.
     */
    enum ThreadState state;
    int ret;

    /* Access to this list is protected by lock.  */
    QTAILQ_ENTRY(ThreadPoolElement) reqs;

    /* Access to this list is protected by the global mutex.  */
    QLIST_ENTRY(ThreadPoolElement) all;
};

struct ThreadPool {
    AioContext *ctx;
    QEMUBH *completion_bh;
    QemuMutex lock;
    QemuCond check_cancel;
    QemuCond worker_stopped;
    QemuSemaphore sem;
    int max_threads;
    QEMUBH *new_thread_bh;

    /* The following variables are only accessed from one AioContext. */
    QLIST_HEAD(, ThreadPoolElement) head;

    /* The following variables are protected by lock.  */
    QTAILQ_HEAD(, ThreadPoolElement) request_list;
    int cur_threads;
    int idle_threads;
    int new_threads;     /* backlog of threads we need to create */
    int pending_threads; /* threads created but not running yet */
    int pending_cancellations; /* whether we need a cond_broadcast */
    bool stopping;
};

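/* Worker thread body.  Each new worker first spawns any backlogged
 * threads, then loops: wait up to 10 seconds for work, run the request
 * function with the lock dropped, publish the result, and kick the
 * completion bottom half.  A worker that times out while idle exits.
 */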
static void *worker_thread(void *opaque)
{
    ThreadPool *pool = opaque;

    qemu_mutex_lock(&pool->lock);
    pool->pending_threads--;
    do_spawn_thread(pool);

    while (!pool->stopping) {
        ThreadPoolElement *req;
        int ret;

        do {
            pool->idle_threads++;
            qemu_mutex_unlock(&pool->lock);
            ret = qemu_sem_timedwait(&pool->sem, 10000);
            qemu_mutex_lock(&pool->lock);
            pool->idle_threads--;
        } while (ret == -1 && !QTAILQ_EMPTY(&pool->request_list));
        if (ret == -1 || pool->stopping) {
            break;
        }

        req = QTAILQ_FIRST(&pool->request_list);
        QTAILQ_REMOVE(&pool->request_list, req, reqs);
        req->state = THREAD_ACTIVE;
        qemu_mutex_unlock(&pool->lock);

        ret = req->func(req->arg);

        req->ret = ret;
        /* Write ret before state.  */
        smp_wmb();
        req->state = THREAD_DONE;

        qemu_mutex_lock(&pool->lock);
        if (pool->pending_cancellations) {
            qemu_cond_broadcast(&pool->check_cancel);
        }

        qemu_bh_schedule(pool->completion_bh);
    }

    pool->cur_threads--;
    qemu_cond_signal(&pool->worker_stopped);
    qemu_mutex_unlock(&pool->lock);
    return NULL;
}

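/* Create one backlogged worker thread.  Called with lock taken; each
 * new worker calls back into do_spawn_thread(), so a burst of requests
 * spawns threads one at a time instead of in a loop under the lock.
 */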
static void do_spawn_thread(ThreadPool *pool)
{
    QemuThread t;

    /* Runs with lock taken.  */
    if (!pool->new_threads) {
        return;
    }

    pool->new_threads--;
    pool->pending_threads++;

    qemu_thread_create(&t, "worker", worker_thread, pool,
                       QEMU_THREAD_DETACHED);
}

static void spawn_thread_bh_fn(void *opaque)
{
    ThreadPool *pool = opaque;

    qemu_mutex_lock(&pool->lock);
    do_spawn_thread(pool);
    qemu_mutex_unlock(&pool->lock);
}

static void spawn_thread(ThreadPool *pool)
{
    pool->cur_threads++;
    pool->new_threads++;
    /* If there are threads being created, they will spawn new workers, so
     * we don't spend time creating many threads in a loop holding a mutex or
     * starving the current vcpu.
     *
     * If there are no idle threads, ask the main thread to create one, so we
     * inherit the correct affinity instead of the vcpu affinity.
     */
    if (!pool->pending_threads) {
        qemu_bh_schedule(pool->new_thread_bh);
    }
}

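/* Completion bottom half.  Runs in the pool's AioContext and invokes the
 * completion callback of every finished or canceled request.  The scan
 * restarts after each callback, because the callback may itself run
 * aio_poll() and complete further requests.
 */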
static void thread_pool_completion_bh(void *opaque)
{
    ThreadPool *pool = opaque;
    ThreadPoolElement *elem, *next;

restart:
    QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
        if (elem->state != THREAD_CANCELED && elem->state != THREAD_DONE) {
            continue;
        }
        if (elem->state == THREAD_DONE) {
            trace_thread_pool_complete(pool, elem, elem->common.opaque,
                                       elem->ret);
        }
        if (elem->state == THREAD_DONE && elem->common.cb) {
            QLIST_REMOVE(elem, all);
            /* Read state before ret.  */
            smp_rmb();

            /* Schedule ourselves in case elem->common.cb() calls aio_poll() to
             * wait for another request that completed at the same time.
             */
            qemu_bh_schedule(pool->completion_bh);

            elem->common.cb(elem->common.opaque, elem->ret);
            qemu_aio_release(elem);
            goto restart;
        } else {
            /* remove the request */
            QLIST_REMOVE(elem, all);
            qemu_aio_release(elem);
        }
    }
}

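/* Cancel a request.  If no worker has picked it up yet, steal its
 * semaphore ticket and mark it canceled; otherwise wait on check_cancel
 * until the worker running it reports completion.
 */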
static void thread_pool_cancel(BlockDriverAIOCB *acb)
{
    ThreadPoolElement *elem = (ThreadPoolElement *)acb;
    ThreadPool *pool = elem->pool;

    trace_thread_pool_cancel(elem, elem->common.opaque);

    qemu_mutex_lock(&pool->lock);
    if (elem->state == THREAD_QUEUED &&
        /* No thread has yet started working on elem.  We can try to "steal"
         * the item from the worker if we can get a signal from the
         * semaphore.  Because this is non-blocking, we can do it with
         * the lock taken and ensure that elem will remain THREAD_QUEUED.
         */
        qemu_sem_timedwait(&pool->sem, 0) == 0) {
        QTAILQ_REMOVE(&pool->request_list, elem, reqs);
        elem->state = THREAD_CANCELED;
        qemu_bh_schedule(pool->completion_bh);
    } else {
        pool->pending_cancellations++;
        while (elem->state != THREAD_CANCELED && elem->state != THREAD_DONE) {
            qemu_cond_wait(&pool->check_cancel, &pool->lock);
        }
        pool->pending_cancellations--;
    }
    qemu_mutex_unlock(&pool->lock);
    thread_pool_completion_bh(pool);
}

static const AIOCBInfo thread_pool_aiocb_info = {
    .aiocb_size         = sizeof(ThreadPoolElement),
    .cancel             = thread_pool_cancel,
};

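/* Submit func to run in a worker thread and return an AIOCB.  The
 * completion callback cb runs in the pool's AioContext once func has
 * returned.  A new worker is spawned only if every current worker is
 * busy and the pool is below max_threads.
 */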
BlockDriverAIOCB *thread_pool_submit_aio(ThreadPool *pool,
        ThreadPoolFunc *func, void *arg,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    ThreadPoolElement *req;

    req = qemu_aio_get(&thread_pool_aiocb_info, NULL, cb, opaque);
    req->func = func;
    req->arg = arg;
    req->state = THREAD_QUEUED;
    req->pool = pool;

    QLIST_INSERT_HEAD(&pool->head, req, all);

    trace_thread_pool_submit(pool, req, arg);

    qemu_mutex_lock(&pool->lock);
    if (pool->idle_threads == 0 && pool->cur_threads < pool->max_threads) {
        spawn_thread(pool);
    }
    QTAILQ_INSERT_TAIL(&pool->request_list, req, reqs);
    qemu_mutex_unlock(&pool->lock);
    qemu_sem_post(&pool->sem);
    return &req->common;
}

typedef struct ThreadPoolCo {
    Coroutine *co;
    int ret;
} ThreadPoolCo;

static void thread_pool_co_cb(void *opaque, int ret)
{
    ThreadPoolCo *co = opaque;

    co->ret = ret;
    qemu_coroutine_enter(co->co, NULL);
}

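/* Coroutine wrapper around thread_pool_submit_aio(): submit func, yield
 * until it completes, then return its result.  Must be called from
 * coroutine context.
 */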
int coroutine_fn thread_pool_submit_co(ThreadPool *pool, ThreadPoolFunc *func,
                                       void *arg)
{
    ThreadPoolCo tpc = { .co = qemu_coroutine_self(), .ret = -EINPROGRESS };
    assert(qemu_in_coroutine());
    thread_pool_submit_aio(pool, func, arg, thread_pool_co_cb, &tpc);
    qemu_coroutine_yield();
    return tpc.ret;
}

void thread_pool_submit(ThreadPool *pool, ThreadPoolFunc *func, void *arg)
{
    thread_pool_submit_aio(pool, func, arg, NULL, NULL);
}

static void thread_pool_init_one(ThreadPool *pool, AioContext *ctx)
{
    if (!ctx) {
        ctx = qemu_get_aio_context();
    }

    memset(pool, 0, sizeof(*pool));
    pool->ctx = ctx;
    pool->completion_bh = aio_bh_new(ctx, thread_pool_completion_bh, pool);
    qemu_mutex_init(&pool->lock);
    qemu_cond_init(&pool->check_cancel);
    qemu_cond_init(&pool->worker_stopped);
    qemu_sem_init(&pool->sem, 0);
    pool->max_threads = 64;
    pool->new_thread_bh = aio_bh_new(ctx, spawn_thread_bh_fn, pool);

    QLIST_INIT(&pool->head);
    QTAILQ_INIT(&pool->request_list);
}

ThreadPool *thread_pool_new(AioContext *ctx)
{
    ThreadPool *pool = g_new(ThreadPool, 1);
    thread_pool_init_one(pool, ctx);
    return pool;
}

void thread_pool_free(ThreadPool *pool)
{
    if (!pool) {
        return;
    }

    assert(QLIST_EMPTY(&pool->head));

    qemu_mutex_lock(&pool->lock);

    /* Stop new threads from spawning */
    qemu_bh_delete(pool->new_thread_bh);
    pool->cur_threads -= pool->new_threads;
    pool->new_threads = 0;

    /* Wait for worker threads to terminate */
    pool->stopping = true;
    while (pool->cur_threads > 0) {
        qemu_sem_post(&pool->sem);
        qemu_cond_wait(&pool->worker_stopped, &pool->lock);
    }

    qemu_mutex_unlock(&pool->lock);

    qemu_bh_delete(pool->completion_bh);
    qemu_sem_destroy(&pool->sem);
    qemu_cond_destroy(&pool->check_cancel);
    qemu_cond_destroy(&pool->worker_stopped);
    qemu_mutex_destroy(&pool->lock);
    g_free(pool);
}
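
/* Usage sketch (not part of the original file): how a caller might run a
 * blocking operation on the pool.  my_flush_fn, my_flush_cb and
 * my_submit_flush are hypothetical names; only the thread-pool calls are
 * the real API defined above.  Kept under #if 0 so it is not compiled.
 */
#if 0
static int my_flush_fn(void *opaque)
{
    int fd = *(int *)opaque;

    /* Runs in a worker thread; blocking calls are fine here.  Return 0
     * on success or a negative errno, which becomes ret in the callback.
     */
    return fdatasync(fd) == 0 ? 0 : -errno;
}

static void my_flush_cb(void *opaque, int ret)
{
    /* Runs in the pool's AioContext after my_flush_fn returns. */
    if (ret < 0) {
        fprintf(stderr, "flush failed: %s\n", strerror(-ret));
    }
}

static void my_submit_flush(ThreadPool *pool, int *fd)
{
    thread_pool_submit_aio(pool, my_flush_fn, fd, my_flush_cb, fd);
}
#endif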