arch/um/os-Linux/sigio.c
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pty.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include "kern_constants.h"
#include "kern_util.h"
#include "init.h"
#include "os.h"
#include "sigio.h"
#include "um_malloc.h"
#include "user.h"
/*
 * Protected by sigio_lock(), also used by sigio_cleanup, which is an
 * exitcall.
 */
static int write_sigio_pid = -1;
static unsigned long write_sigio_stack;
/*
 * These arrays are initialized before the sigio thread is started, and
 * the descriptors closed after it is killed.  So, it can't see them change.
 * On the UML side, they are changed under the sigio_lock.
 */
#define SIGIO_FDS_INIT {-1, -1}

static int write_sigio_fds[2] = SIGIO_FDS_INIT;
static int sigio_private[2] = SIGIO_FDS_INIT;
struct pollfds {
	struct pollfd *poll;
	int size;
	int used;
};
/*
 * Protected by sigio_lock().  Used by the sigio thread, but the UML thread
 * synchronizes with it.
 */
static struct pollfds current_poll;
static struct pollfds next_poll;
static struct pollfds all_sigio_fds;
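
/*
 * Helper thread that poll()s the registered descriptors.  When a watched
 * descriptor becomes ready, it is dropped from the poll set and a byte is
 * written to write_sigio_fds[1]; the read end of that pipe is registered as
 * an IRQ source, so the UML side receives the SIGIO it would otherwise miss.
 * A byte arriving on sigio_private[1] tells the thread to switch to the
 * updated poll set in next_poll and acknowledge on the same descriptor.
 */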
static int write_sigio_thread(void *unused)
{
	struct pollfds *fds, tmp;
	struct pollfd *p;
	int i, n, respond_fd;
	char c;

	signal(SIGWINCH, SIG_IGN);
	fds = &current_poll;
	while (1) {
		n = poll(fds->poll, fds->used, -1);
		if (n < 0) {
			if (errno == EINTR)
				continue;
			printk(UM_KERN_ERR "write_sigio_thread : poll returned "
			       "%d, errno = %d\n", n, errno);
		}
		for (i = 0; i < fds->used; i++) {
			p = &fds->poll[i];
			if (p->revents == 0)
				continue;
			if (p->fd == sigio_private[1]) {
				CATCH_EINTR(n = read(sigio_private[1], &c,
						     sizeof(c)));
				if (n != sizeof(c))
					printk(UM_KERN_ERR
					       "write_sigio_thread : "
					       "read on socket failed, "
					       "err = %d\n", errno);
				tmp = current_poll;
				current_poll = next_poll;
				next_poll = tmp;
				respond_fd = sigio_private[1];
			}
			else {
				respond_fd = write_sigio_fds[1];
				fds->used--;
				memmove(&fds->poll[i], &fds->poll[i + 1],
					(fds->used - i) * sizeof(*fds->poll));
			}

			CATCH_EINTR(n = write(respond_fd, &c, sizeof(c)));
			if (n != sizeof(c))
				printk(UM_KERN_ERR "write_sigio_thread : "
				       "write on socket failed, err = %d\n",
				       errno);
		}
	}

	return 0;
}
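
/*
 * Grow a pollfds array so it can hold at least n entries, preserving the
 * entries already in use.  Returns 0 on success or -ENOMEM.
 */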
static int need_poll(struct pollfds *polls, int n)
{
	struct pollfd *new;

	if (n <= polls->size)
		return 0;

	new = kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC);
	if (new == NULL) {
		printk(UM_KERN_ERR "need_poll : failed to allocate new "
		       "pollfds\n");
		return -ENOMEM;
	}

	memcpy(new, polls->poll, polls->used * sizeof(struct pollfd));
	kfree(polls->poll);

	polls->poll = new;
	polls->size = n;
	return 0;
}
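
/*
 * Poke the sigio thread on its private pipe so it switches to the poll set
 * staged in next_poll, and wait for the acknowledgement.  If the handshake
 * fails, the thread is killed and the notification descriptors are torn down.
 */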
/*
 * Must be called with sigio_lock held, because it's needed by the marked
 * critical section.
 */
static void update_thread(void)
{
	unsigned long flags;
	int n;
	char c;

	flags = set_signals(0);
	CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c)));
	if (n != sizeof(c)) {
		printk(UM_KERN_ERR "update_thread : write failed, err = %d\n",
		       errno);
		goto fail;
	}

	CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c)));
	if (n != sizeof(c)) {
		printk(UM_KERN_ERR "update_thread : read failed, err = %d\n",
		       errno);
		goto fail;
	}

	set_signals(flags);
	return;
 fail:
	/* Critical section start */
	if (write_sigio_pid != -1) {
		os_kill_process(write_sigio_pid, 1);
		free_stack(write_sigio_stack, 0);
	}
	write_sigio_pid = -1;
	close(sigio_private[0]);
	close(sigio_private[1]);
	close(write_sigio_fds[0]);
	close(write_sigio_fds[1]);
	/* Critical section end */
	set_signals(flags);
}
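
/*
 * Add a descriptor to the set watched by the sigio thread.  A no-op if the
 * descriptor was never recorded in all_sigio_fds or is already being
 * watched.
 */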
int add_sigio_fd(int fd)
{
	struct pollfd *p;
	int err = 0, i, n;

	sigio_lock();
	for (i = 0; i < all_sigio_fds.used; i++) {
		if (all_sigio_fds.poll[i].fd == fd)
			break;
	}

	if (i == all_sigio_fds.used)
		goto out;

	p = &all_sigio_fds.poll[i];

	for (i = 0; i < current_poll.used; i++) {
		if (current_poll.poll[i].fd == fd)
			goto out;
	}

	n = current_poll.used;
	err = need_poll(&next_poll, n + 1);
	if (err)
		goto out;

	memcpy(next_poll.poll, current_poll.poll,
	       current_poll.used * sizeof(struct pollfd));
	next_poll.poll[n] = *p;
	next_poll.used = n + 1;
	update_thread();
 out:
	sigio_unlock();
	return err;
}
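
/*
 * Remove a descriptor from the set watched by the sigio thread.  The entry
 * in all_sigio_fds is left alone, so add_sigio_fd() can re-enable it later.
 */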
int ignore_sigio_fd(int fd)
{
	struct pollfd *p;
	int err = 0, i, n = 0;

	/*
	 * This is called from exitcalls elsewhere in UML - if
	 * sigio_cleanup has already run, then update_thread will hang
	 * or fail because the thread is no longer running.
	 */
	if (write_sigio_pid == -1)
		return -EIO;

	sigio_lock();
	for (i = 0; i < current_poll.used; i++) {
		if (current_poll.poll[i].fd == fd)
			break;
	}
	if (i == current_poll.used)
		goto out;

	err = need_poll(&next_poll, current_poll.used - 1);
	if (err)
		goto out;

	for (i = 0; i < current_poll.used; i++) {
		p = &current_poll.poll[i];
		if (p->fd != fd)
			next_poll.poll[n++] = *p;
	}
	next_poll.used = current_poll.used - 1;

	update_thread();
 out:
	sigio_unlock();
	return err;
}
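
/*
 * Allocate a single-entry pollfd array watching fd for input; used to seed
 * current_poll with the sigio thread's private update pipe.
 */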
static struct pollfd *setup_initial_poll(int fd)
{
	struct pollfd *p;

	p = kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
	if (p == NULL) {
		printk(UM_KERN_ERR "setup_initial_poll : failed to allocate "
		       "poll\n");
		return NULL;
	}
	*p = ((struct pollfd) { .fd		= fd,
				.events		= POLLIN,
				.revents	= 0 });
	return p;
}
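
/*
 * Start the write-SIGIO workaround, used when the boot-time checks below
 * show that the host's ptys do not deliver SIGIO for some case UML cares
 * about: set up the notification and private pipes, register the read end
 * of the notification pipe as an IRQ source via write_sigio_irq(), and
 * launch write_sigio_thread().  Called on demand from maybe_sigio_broken(),
 * so it must cope with being invoked repeatedly and with racing callers.
 */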
static void write_sigio_workaround(void)
{
	struct pollfd *p;
	int err;
	int l_write_sigio_fds[2];
	int l_sigio_private[2];
	int l_write_sigio_pid;

	/* We call this *tons* of times - and most of the time we must just
	 * return early. */
	sigio_lock();
	l_write_sigio_pid = write_sigio_pid;
	sigio_unlock();

	if (l_write_sigio_pid != -1)
		return;

	err = os_pipe(l_write_sigio_fds, 1, 1);
	if (err < 0) {
		printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 1 failed, "
		       "err = %d\n", -err);
		return;
	}
	err = os_pipe(l_sigio_private, 1, 1);
	if (err < 0) {
		printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 2 failed, "
		       "err = %d\n", -err);
		goto out_close1;
	}

	p = setup_initial_poll(l_sigio_private[1]);
	if (!p)
		goto out_close2;

	sigio_lock();

	/*
	 * Did we race? Don't try to optimize this, please, it's not so likely
	 * to happen, and no more than once at boot.
	 */
	if (write_sigio_pid != -1)
		goto out_free;

	current_poll = ((struct pollfds) { .poll	= p,
					   .used	= 1,
					   .size	= 1 });

	if (write_sigio_irq(l_write_sigio_fds[0]))
		goto out_clear_poll;

	memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
	memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));

	write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
					    CLONE_FILES | CLONE_VM,
					    &write_sigio_stack);

	if (write_sigio_pid < 0)
		goto out_clear;

	sigio_unlock();
	return;

 out_clear:
	write_sigio_pid = -1;
	write_sigio_fds[0] = -1;
	write_sigio_fds[1] = -1;
	sigio_private[0] = -1;
	sigio_private[1] = -1;
 out_clear_poll:
	current_poll = ((struct pollfds) { .poll	= NULL,
					   .size	= 0,
					   .used	= 0 });
 out_free:
	sigio_unlock();
	kfree(p);
 out_close2:
	close(l_sigio_private[0]);
	close(l_sigio_private[1]);
 out_close1:
	close(l_write_sigio_fds[0]);
	close(l_write_sigio_fds[1]);
}
/* Changed during early boot */
static int pty_output_sigio = 0;
static int pty_close_sigio = 0;
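
/*
 * Called for tty descriptors UML starts using.  If the boot-time checks
 * showed that the host delivers the relevant SIGIO (output SIGIO for
 * writers, SIGIO on close for readers), nothing needs to be done; otherwise
 * make sure the workaround thread is running and record the descriptor in
 * all_sigio_fds so add_sigio_fd() can arm it.
 */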
void maybe_sigio_broken(int fd, int read)
{
	int err;

	if (!isatty(fd))
		return;

	if ((read || pty_output_sigio) && (!read || pty_close_sigio))
		return;

	write_sigio_workaround();

	sigio_lock();
	err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
	if (err) {
		printk(UM_KERN_ERR "maybe_sigio_broken - failed to add pollfd "
		       "for descriptor %d\n", fd);
		goto out;
	}

	all_sigio_fds.poll[all_sigio_fds.used++] =
		((struct pollfd) { .fd		= fd,
				   .events	= read ? POLLIN : POLLOUT,
				   .revents	= 0 });
out:
	sigio_unlock();
}
static void sigio_cleanup(void)
{
	if (write_sigio_pid == -1)
		return;

	os_kill_process(write_sigio_pid, 1);
	free_stack(write_sigio_stack, 0);
	write_sigio_pid = -1;
}

__uml_exitcall(sigio_cleanup);
/* Used as a flag during SIGIO testing early in boot */
static volatile int got_sigio = 0;

static void __init handler(int sig)
{
	got_sigio = 1;
}
struct openpty_arg {
	int master;
	int slave;
	int err;
};
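
/*
 * Callback run via initial_thread_cb() - open a pty pair and report any
 * failure back as -errno in info->err.
 */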
static void openpty_cb(void *arg)
{
	struct openpty_arg *info = arg;

	info->err = 0;
	if (openpty(&info->master, &info->slave, NULL, NULL, NULL))
		info->err = -errno;
}
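
/*
 * Put the master in non-blocking, SIGIO-generating mode owned by this
 * process, and make the slave non-blocking, so the checks below can see
 * whether SIGIO is actually delivered.
 */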
static int async_pty(int master, int slave)
{
	int flags;

	flags = fcntl(master, F_GETFL);
	if (flags < 0)
		return -errno;

	if ((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) ||
	    (fcntl(master, F_SETOWN, os_getpid()) < 0))
		return -errno;

	if ((fcntl(slave, F_SETFL, flags | O_NONBLOCK) < 0))
		return -errno;

	return 0;
}
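
/*
 * Common harness for the boot-time SIGIO checks: open a pty pair, put it in
 * raw, async, non-blocking mode, install a temporary SIGIO handler that sets
 * got_sigio, run the given test on the pair, then restore the old handler.
 */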
static void __init check_one_sigio(void (*proc)(int, int))
{
	struct sigaction old, new;
	struct openpty_arg pty = { .master = -1, .slave = -1 };
	int master, slave, err;

	initial_thread_cb(openpty_cb, &pty);
	if (pty.err) {
		printk(UM_KERN_ERR "check_one_sigio failed, errno = %d\n",
		       -pty.err);
		return;
	}

	master = pty.master;
	slave = pty.slave;

	if ((master == -1) || (slave == -1)) {
		printk(UM_KERN_ERR "check_one_sigio failed to allocate a "
		       "pty\n");
		return;
	}

	/* Not now, but complain so we know where we failed. */
	err = raw(master);
	if (err < 0) {
		printk(UM_KERN_ERR "check_one_sigio : raw failed, errno = %d\n",
		       -err);
		return;
	}

	err = async_pty(master, slave);
	if (err < 0) {
		printk(UM_KERN_ERR "check_one_sigio : sigio_async failed, "
		       "err = %d\n", -err);
		return;
	}

	if (sigaction(SIGIO, NULL, &old) < 0) {
		printk(UM_KERN_ERR "check_one_sigio : sigaction 1 failed, "
		       "errno = %d\n", errno);
		return;
	}

	new = old;
	new.sa_handler = handler;
	if (sigaction(SIGIO, &new, NULL) < 0) {
		printk(UM_KERN_ERR "check_one_sigio : sigaction 2 failed, "
		       "errno = %d\n", errno);
		return;
	}

	got_sigio = 0;
	(*proc)(master, slave);

	close(master);
	close(slave);

	if (sigaction(SIGIO, &old, NULL) < 0)
		printk(UM_KERN_ERR "check_one_sigio : sigaction 3 failed, "
		       "errno = %d\n", errno);
}
static void tty_output(int master, int slave)
{
	int n;
	char buf[512];

	printk(UM_KERN_INFO "Checking that host ptys support output SIGIO...");

	memset(buf, 0, sizeof(buf));

	while (write(master, buf, sizeof(buf)) > 0) ;
	if (errno != EAGAIN)
		printk(UM_KERN_ERR "tty_output : write failed, errno = %d\n",
		       errno);
	while (((n = read(slave, buf, sizeof(buf))) > 0) && !got_sigio)
		;

	if (got_sigio) {
		printk(UM_KERN_CONT "Yes\n");
		pty_output_sigio = 1;
	} else if ((n < 0) && (errno == EAGAIN))
		printk(UM_KERN_CONT "No, enabling workaround\n");
	else
		printk(UM_KERN_CONT "tty_output : read failed, err = %d\n", n);
}
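
/*
 * Check whether closing the slave side raises SIGIO on the master; record
 * the result in pty_close_sigio.
 */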
static void tty_close(int master, int slave)
{
	printk(UM_KERN_INFO "Checking that host ptys support SIGIO on "
	       "close...");

	close(slave);
	if (got_sigio) {
		printk(UM_KERN_CONT "Yes\n");
		pty_close_sigio = 1;
	} else
		printk(UM_KERN_CONT "No, enabling workaround\n");
}
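
/*
 * Boot-time entry point: run both SIGIO checks, provided the host has some
 * kind of pseudo-terminal to test with.
 */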
void __init check_sigio(void)
{
	if ((access("/dev/ptmx", R_OK) < 0) &&
	    (access("/dev/ptyp0", R_OK) < 0)) {
		printk(UM_KERN_WARNING "No pseudo-terminals available - "
		       "skipping pty SIGIO check\n");
		return;
	}
	check_one_sigio(tty_output);
	check_one_sigio(tty_close);
}
/* Here because it only does the SIGIO testing for now */
void __init os_check_bugs(void)
{
	check_sigio();
}