[qemu/mini2440.git] / posix-aio-compat.c
blob 65c80ecc3e16521b5f0054621bb93971c8003e38
/*
 * QEMU posix-aio emulation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <sys/ioctl.h>
#include <pthread.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>
#include <signal.h>     /* sigfillset(), sigprocmask(), kill() used below */
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "osdep.h"

#include "posix-aio-compat.h"
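
/* Shared worker-pool state: everything below is protected by 'lock', and
 * 'cond' wakes idle workers when a request is queued.  The pool grows on
 * demand up to max_threads; a worker that stays idle for about 10 seconds
 * exits (see aio_thread()). */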
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static pthread_t thread_id;
static pthread_attr_t attr;
static int max_threads = 64;
static int cur_threads = 0;
static int idle_threads = 0;
static TAILQ_HEAD(, qemu_paiocb) request_list;
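
/* Fatal-error helpers: this emulation layer cannot recover from pthread or
 * signal failures, so report the cause and abort(). */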
static void die2(int err, const char *what)
{
    fprintf(stderr, "%s failed: %s\n", what, strerror(err));
    abort();
}

static void die(const char *what)
{
    die2(errno, what);
}
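
/* Thin pthread wrappers that die() on any failure, keeping the callers
 * below free of repetitive error checking.  cond_timedwait() treats
 * ETIMEDOUT as a normal return value rather than an error. */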
static void mutex_lock(pthread_mutex_t *mutex)
{
    int ret = pthread_mutex_lock(mutex);
    if (ret) die2(ret, "pthread_mutex_lock");
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
    int ret = pthread_mutex_unlock(mutex);
    if (ret) die2(ret, "pthread_mutex_unlock");
}

static int cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
                          struct timespec *ts)
{
    int ret = pthread_cond_timedwait(cond, mutex, ts);
    if (ret && ret != ETIMEDOUT) die2(ret, "pthread_cond_timedwait");
    return ret;
}

static void cond_signal(pthread_cond_t *cond)
{
    int ret = pthread_cond_signal(cond);
    if (ret) die2(ret, "pthread_cond_signal");
}

static void thread_create(pthread_t *thread, pthread_attr_t *attr,
                          void *(*start_routine)(void*), void *arg)
{
    int ret = pthread_create(thread, attr, start_routine, arg);
    if (ret) die2(ret, "pthread_create");
}
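
/* Carry out one queued read or write with pread()/pwrite().  EINTR is
 * retried and short transfers are resumed until the request completes or
 * EOF is reached.  Returns the byte count on success or the negated errno
 * on failure. */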
static size_t handle_aiocb_readwrite(struct qemu_paiocb *aiocb)
{
    size_t offset = 0;
    ssize_t len;

    while (offset < aiocb->aio_nbytes) {
        if (aiocb->aio_type == QEMU_PAIO_WRITE)
            len = pwrite(aiocb->aio_fildes,
                         (const char *)aiocb->aio_buf + offset,
                         aiocb->aio_nbytes - offset,
                         aiocb->aio_offset + offset);
        else
            len = pread(aiocb->aio_fildes,
                        (char *)aiocb->aio_buf + offset,
                        aiocb->aio_nbytes - offset,
                        aiocb->aio_offset + offset);

        if (len == -1 && errno == EINTR)
            continue;
        else if (len == -1) {
            offset = -errno;
            break;
        } else if (len == 0)
            break;

        offset += len;
    }

    return offset;
}
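
/* Forward an ioctl request to the host file descriptor.  Returns the
 * ioctl's result, or the negated errno on failure. */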
static size_t handle_aiocb_ioctl(struct qemu_paiocb *aiocb)
{
    int ret;

    ret = ioctl(aiocb->aio_fildes, aiocb->aio_ioctl_cmd, aiocb->aio_buf);
    if (ret == -1)
        return -errno;
    return ret;
}
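
/* Worker thread body.  Each worker blocks every signal, then loops:
 * wait (up to roughly 10 seconds) for a request, dequeue it, service it
 * with the lock dropped, publish the result in aiocb->ret and notify the
 * submitting process by raising aiocb->ev_signo with kill().  A worker
 * whose wait times out with an empty queue retires itself. */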
static void *aio_thread(void *unused)
{
    pid_t pid;
    sigset_t set;

    pid = getpid();

    /* block all signals */
    if (sigfillset(&set)) die("sigfillset");
    if (sigprocmask(SIG_BLOCK, &set, NULL)) die("sigprocmask");

    while (1) {
        struct qemu_paiocb *aiocb;
        size_t ret = 0;
        qemu_timeval tv;
        struct timespec ts;

        qemu_gettimeofday(&tv);
        ts.tv_sec = tv.tv_sec + 10;
        ts.tv_nsec = 0;

        mutex_lock(&lock);

        while (TAILQ_EMPTY(&request_list) &&
               !(ret == ETIMEDOUT)) {
            ret = cond_timedwait(&cond, &lock, &ts);
        }

        if (TAILQ_EMPTY(&request_list))
            break;

        aiocb = TAILQ_FIRST(&request_list);
        TAILQ_REMOVE(&request_list, aiocb, node);
        aiocb->active = 1;
        idle_threads--;
        mutex_unlock(&lock);

        switch (aiocb->aio_type) {
        case QEMU_PAIO_READ:
        case QEMU_PAIO_WRITE:
            ret = handle_aiocb_readwrite(aiocb);
            break;
        case QEMU_PAIO_IOCTL:
            ret = handle_aiocb_ioctl(aiocb);
            break;
        default:
            fprintf(stderr, "invalid aio request (0x%x)\n", aiocb->aio_type);
            ret = -EINVAL;
            break;
        }

        mutex_lock(&lock);
        aiocb->ret = ret;
        idle_threads++;
        mutex_unlock(&lock);

        if (kill(pid, aiocb->ev_signo)) die("kill failed");
    }

    idle_threads--;
    cur_threads--;
    mutex_unlock(&lock);

    return NULL;
}
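
/* Start one more detached worker.  Called with 'lock' held, so the
 * counters are updated before the new thread can run. */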
static void spawn_thread(void)
{
    cur_threads++;
    idle_threads++;
    thread_create(&thread_id, &attr, aio_thread, NULL);
}
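
/* One-time initialisation: workers are created detached (they are never
 * joined) and the request queue starts out empty.  The aioinit argument
 * is accepted for interface compatibility but is not used. */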
int qemu_paio_init(struct qemu_paioinit *aioinit)
{
    int ret;

    ret = pthread_attr_init(&attr);
    if (ret) die2(ret, "pthread_attr_init");

    ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    if (ret) die2(ret, "pthread_attr_setdetachstate");

    TAILQ_INIT(&request_list);

    return 0;
}
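
/* Queue a request for the worker pool and wake a worker.  A new worker
 * is spawned only when no thread is idle and the pool is still below
 * max_threads. */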
static int qemu_paio_submit(struct qemu_paiocb *aiocb, int type)
{
    aiocb->aio_type = type;
    aiocb->ret = -EINPROGRESS;
    aiocb->active = 0;
    mutex_lock(&lock);
    if (idle_threads == 0 && cur_threads < max_threads)
        spawn_thread();
    TAILQ_INSERT_TAIL(&request_list, aiocb, node);
    mutex_unlock(&lock);
    cond_signal(&cond);

    return 0;
}

int qemu_paio_read(struct qemu_paiocb *aiocb)
{
    return qemu_paio_submit(aiocb, QEMU_PAIO_READ);
}

int qemu_paio_write(struct qemu_paiocb *aiocb)
{
    return qemu_paio_submit(aiocb, QEMU_PAIO_WRITE);
}

int qemu_paio_ioctl(struct qemu_paiocb *aiocb)
{
    return qemu_paio_submit(aiocb, QEMU_PAIO_IOCTL);
}
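
/* Fetch the completion status under the lock: -EINPROGRESS while the
 * request is still queued or running, otherwise the byte count or a
 * negative errno value. */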
ssize_t qemu_paio_return(struct qemu_paiocb *aiocb)
{
    ssize_t ret;

    mutex_lock(&lock);
    ret = aiocb->ret;
    mutex_unlock(&lock);

    return ret;
}
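
/* aio_error()-style view of the same status: a positive errno value on
 * failure (including EINPROGRESS while the request is pending), 0 on
 * success. */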
int qemu_paio_error(struct qemu_paiocb *aiocb)
{
    ssize_t ret = qemu_paio_return(aiocb);

    if (ret < 0)
        ret = -ret;
    else
        ret = 0;

    return ret;
}
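
/* Best-effort cancellation, mirroring aio_cancel(): a request still
 * sitting in the queue is unlinked and marked -ECANCELED; one already
 * claimed by a worker cannot be stopped; anything else has finished.
 * The fd argument is unused here. */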
int qemu_paio_cancel(int fd, struct qemu_paiocb *aiocb)
{
    int ret;

    mutex_lock(&lock);
    if (!aiocb->active) {
        TAILQ_REMOVE(&request_list, aiocb, node);
        aiocb->ret = -ECANCELED;
        ret = QEMU_PAIO_CANCELED;
    } else if (aiocb->ret == -EINPROGRESS)
        ret = QEMU_PAIO_NOTCANCELED;
    else
        ret = QEMU_PAIO_ALLDONE;
    mutex_unlock(&lock);

    return ret;
}
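
/*
 * Typical caller flow, sketched from the interface above.  This is
 * illustrative only: the real wiring lives in the code that includes
 * posix-aio-compat.h, and SIGUSR2 plus the local variables here are
 * assumptions, not part of this file.
 *
 *     qemu_paio_init(&aioinit);
 *
 *     struct qemu_paiocb cb = { 0 };
 *     cb.aio_fildes = fd;
 *     cb.aio_buf    = buf;
 *     cb.aio_nbytes = nbytes;
 *     cb.aio_offset = offset;
 *     cb.ev_signo   = SIGUSR2;     // raised by the worker via kill()
 *
 *     qemu_paio_read(&cb);         // queues the request and returns 0
 *
 *     // later, e.g. after the completion signal is observed:
 *     if (qemu_paio_error(&cb) != EINPROGRESS) {
 *         ssize_t done = qemu_paio_return(&cb);   // bytes read, or -errno
 *     }
 */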