block/aio_task.c
/*
 * Aio task loops
 *
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "block/aio.h"
#include "block/aio_task.h"
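/*
 * An AioTaskPool runs up to max_busy_tasks AioTasks concurrently, each in
 * its own coroutine.  The pool is driven from a single "main" coroutine:
 * it blocks in the wait functions below and is woken by whichever task
 * finishes next.  The first negative return value of any task is latched
 * in the pool's status; later failures do not overwrite it.
 */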
struct AioTaskPool {
    Coroutine *main_co;     /* coroutine that created the pool and waits on it */
    int status;             /* first error returned by any task, or 0 */
    int max_busy_tasks;     /* concurrency limit */
    int busy_tasks;         /* number of tasks currently running */
    bool waiting;           /* main_co is yielded in aio_task_pool_wait_one() */
};
static void coroutine_fn aio_task_co(void *opaque)
{
    AioTask *task = opaque;
    AioTaskPool *pool = task->pool;

    assert(pool->busy_tasks < pool->max_busy_tasks);
    pool->busy_tasks++;

    task->ret = task->func(task);

    pool->busy_tasks--;

    /* Latch the first failure; later errors are dropped. */
    if (task->ret < 0 && pool->status == 0) {
        pool->status = task->ret;
    }

    g_free(task);

    if (pool->waiting) {
        pool->waiting = false;
        aio_co_wake(pool->main_co);
    }
}
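/*
 * Wait for some (i.e. any) task to finish.  May be called only from the
 * pool's main coroutine; a finishing task clears pool->waiting and wakes
 * us via aio_co_wake().
 */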
void coroutine_fn aio_task_pool_wait_one(AioTaskPool *pool)
{
    assert(pool->busy_tasks > 0);
    assert(qemu_coroutine_self() == pool->main_co);

    pool->waiting = true;
    qemu_coroutine_yield();

    assert(!pool->waiting);
    assert(pool->busy_tasks < pool->max_busy_tasks);
}
void coroutine_fn aio_task_pool_wait_slot(AioTaskPool *pool)
{
    if (pool->busy_tasks < pool->max_busy_tasks) {
        return;
    }

    aio_task_pool_wait_one(pool);
}

void coroutine_fn aio_task_pool_wait_all(AioTaskPool *pool)
{
    while (pool->busy_tasks > 0) {
        aio_task_pool_wait_one(pool);
    }
}
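/*
 * Ownership: aio_task_co() frees a completed task with g_free(), so a task
 * handed to aio_task_pool_start_task() must be allocated with
 * g_malloc()/g_new() and must not be used by the caller after this call
 * returns.
 */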
void coroutine_fn aio_task_pool_start_task(AioTaskPool *pool, AioTask *task)
{
    aio_task_pool_wait_slot(pool);

    task->pool = pool;
    qemu_coroutine_enter(qemu_coroutine_create(aio_task_co, task));
}
AioTaskPool *coroutine_fn aio_task_pool_new(int max_busy_tasks)
{
    AioTaskPool *pool = g_new0(AioTaskPool, 1);

    assert(max_busy_tasks > 0);

    pool->main_co = qemu_coroutine_self();
    pool->max_busy_tasks = max_busy_tasks;

    return pool;
}

void aio_task_pool_free(AioTaskPool *pool)
{
    g_free(pool);
}

int aio_task_pool_status(AioTaskPool *pool)
{
    if (!pool) {
        return 0; /* Sugar for lazy allocation of aio pool */
    }

    return pool->status;
}

bool aio_task_pool_empty(AioTaskPool *pool)
{
    return pool->busy_tasks == 0;
}
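
/*
 * Illustrative usage sketch, not part of the original file and kept under
 * "#if 0" so it is never compiled.  MyTask, my_task_func() and my_run_all()
 * are hypothetical names; the pattern of embedding an AioTask as the first
 * member of a larger, heap-allocated struct mirrors how callers such as the
 * qcow2 driver submit work, since the pool frees each task with g_free().
 */
#if 0
typedef struct MyTask {
    AioTask task;       /* must be the first field so AioTask * casts back */
    int64_t offset;     /* hypothetical per-task payload */
} MyTask;

static int coroutine_fn my_task_func(AioTask *opaque)
{
    MyTask *t = (MyTask *)opaque;

    /* ... perform one unit of work for t->offset ... */
    (void)t;
    return 0;           /* or a negative errno on failure */
}

static int coroutine_fn my_run_all(int64_t n)
{
    AioTaskPool *pool = aio_task_pool_new(16);  /* up to 16 tasks in flight */
    int64_t i;
    int ret;

    for (i = 0; i < n; i++) {
        MyTask *t = g_new(MyTask, 1);

        t->task.func = my_task_func;
        t->offset = i;

        /* Blocks while the pool is full; the pool frees t on completion. */
        aio_task_pool_start_task(pool, &t->task);

        if (aio_task_pool_status(pool) < 0) {
            break;      /* an earlier task failed; stop submitting */
        }
    }

    aio_task_pool_wait_all(pool);
    ret = aio_task_pool_status(pool);
    aio_task_pool_free(pool);
    return ret;
}
#endif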