[PATCH] uml: move outside spinlock call not needing it
[linux-2.6/kvm.git] arch/um/os-Linux/sigio.c
blob 00e9388e947af4fa59442a3fdad627a82b7384e2

/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include <unistd.h>
#include <stdlib.h>
#include <termios.h>
#include <pty.h>
#include <signal.h>
#include <errno.h>
#include <string.h>
#include <sched.h>
#include <sys/socket.h>
#include <sys/poll.h>
#include "init.h"
#include "user.h"
#include "kern_util.h"
#include "user_util.h"
#include "sigio.h"
#include "os.h"

/* Protected by sigio_lock(), also used by sigio_cleanup, which is an
 * exitcall.
 */
static int write_sigio_pid = -1;

/* These arrays are initialized before the sigio thread is started, and
 * the descriptors closed after it is killed. So, it can't see them change.
 * On the UML side, they are changed under the sigio_lock.
 */
#define SIGIO_FDS_INIT {-1, -1}

static int write_sigio_fds[2] = SIGIO_FDS_INIT;
static int sigio_private[2] = SIGIO_FDS_INIT;

struct pollfds {
        struct pollfd *poll;
        int size;
        int used;
};

/* Protected by sigio_lock(). Used by the sigio thread, but the UML thread
 * synchronizes with it.
 */
struct pollfds current_poll = {
        .poll = NULL,
        .size = 0,
        .used = 0
};

struct pollfds next_poll = {
        .poll = NULL,
        .size = 0,
        .used = 0
};

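/* The helper thread. It poll()s the current descriptor set; when one of the
 * watched descriptors fires, that descriptor is dropped from the set and a
 * byte is written to write_sigio_fds[1], the other end of which
 * (write_sigio_fds[0]) was handed to write_sigio_irq() in
 * write_sigio_workaround(). A byte arriving on sigio_private[1] instead
 * means update_thread() wants the sets swapped: current_poll and next_poll
 * are exchanged and the byte is echoed back as an acknowledgement.
 */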
static int write_sigio_thread(void *unused)
{
        struct pollfds *fds, tmp;
        struct pollfd *p;
        int i, n, respond_fd;
        char c;

        signal(SIGWINCH, SIG_IGN);
        fds = &current_poll;
        while(1){
                n = poll(fds->poll, fds->used, -1);
                if(n < 0){
                        if(errno == EINTR) continue;
                        printk("write_sigio_thread : poll returned %d, "
                               "errno = %d\n", n, errno);
                }
                for(i = 0; i < fds->used; i++){
                        p = &fds->poll[i];
                        if(p->revents == 0) continue;
                        if(p->fd == sigio_private[1]){
                                n = os_read_file(sigio_private[1], &c, sizeof(c));
                                if(n != sizeof(c))
                                        printk("write_sigio_thread : "
                                               "read failed, err = %d\n", -n);
                                tmp = current_poll;
                                current_poll = next_poll;
                                next_poll = tmp;
                                respond_fd = sigio_private[1];
                        }
                        else {
                                respond_fd = write_sigio_fds[1];
                                fds->used--;
                                memmove(&fds->poll[i], &fds->poll[i + 1],
                                        (fds->used - i) * sizeof(*fds->poll));
                        }

                        n = os_write_file(respond_fd, &c, sizeof(c));
                        if(n != sizeof(c))
                                printk("write_sigio_thread : write failed, "
                                       "err = %d\n", -n);
                }
        }

        return 0;
}

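/* Make sure next_poll can hold n entries. If the current array is already
 * big enough it is simply reused; otherwise it is freed and a larger one
 * allocated. On allocation failure next_poll is left empty and -1 is
 * returned. Callers hold sigio_lock(), hence the atomic allocation.
 */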
static int need_poll(int n)
{
        if(n <= next_poll.size){
                next_poll.used = n;
                return(0);
        }

        kfree(next_poll.poll);

        next_poll.poll = um_kmalloc_atomic(n * sizeof(struct pollfd));
        if(next_poll.poll == NULL){
                printk("need_poll : failed to allocate new pollfds\n");
                next_poll.size = 0;
                next_poll.used = 0;
                return(-1);
        }

        next_poll.size = n;
        next_poll.used = n;

        return(0);
}

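/* Ping-pong with the helper thread: a byte written to sigio_private[0] makes
 * it swap current_poll and next_poll, and the byte it writes back on the same
 * descriptor is the acknowledgement. Signals are turned off around the
 * exchange; if either half fails, the helper is killed and all four
 * descriptors are closed.
 */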
/* Must be called with sigio_lock held, because it's needed by the marked
 * critical section. */
static void update_thread(void)
{
        unsigned long flags;
        int n;
        char c;

        flags = set_signals(0);
        n = os_write_file(sigio_private[0], &c, sizeof(c));
        if(n != sizeof(c)){
                printk("update_thread : write failed, err = %d\n", -n);
                goto fail;
        }

        n = os_read_file(sigio_private[0], &c, sizeof(c));
        if(n != sizeof(c)){
                printk("update_thread : read failed, err = %d\n", -n);
                goto fail;
        }

        set_signals(flags);
        return;
fail:
        /* Critical section start */
        if(write_sigio_pid != -1)
                os_kill_process(write_sigio_pid, 1);
        write_sigio_pid = -1;
        close(sigio_private[0]);
        close(sigio_private[1]);
        close(write_sigio_fds[0]);
        close(write_sigio_fds[1]);
        /* Critical section end */
        set_signals(flags);
}

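/* Ask the helper thread to watch fd as well: the enlarged descriptor set is
 * built in next_poll and update_thread() makes the thread switch over to it.
 * read selects POLLIN rather than POLLOUT. If fd is already being watched,
 * this is a no-op.
 */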
int add_sigio_fd(int fd, int read)
{
        int err = 0, i, n, events;

        sigio_lock();
        for(i = 0; i < current_poll.used; i++){
                if(current_poll.poll[i].fd == fd)
                        goto out;
        }

        n = current_poll.used + 1;
        err = need_poll(n);
        if(err)
                goto out;

        for(i = 0; i < current_poll.used; i++)
                next_poll.poll[i] = current_poll.poll[i];

        if(read) events = POLLIN;
        else events = POLLOUT;

        next_poll.poll[n - 1] = ((struct pollfd) { .fd = fd,
                                                   .events = events,
                                                   .revents = 0 });
        update_thread();
out:
        sigio_unlock();
        return(err);
}

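/* Stop watching fd: every other descriptor is copied into next_poll and
 * update_thread() swaps the sets. Returns 0 if fd was not being watched in
 * the first place; a failed need_poll() or an inconsistent descriptor set
 * yields -1.
 */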
int ignore_sigio_fd(int fd)
{
        struct pollfd *p;
        int err = 0, i, n = 0;

        sigio_lock();
        for(i = 0; i < current_poll.used; i++){
                if(current_poll.poll[i].fd == fd) break;
        }
        if(i == current_poll.used)
                goto out;

        err = need_poll(current_poll.used - 1);
        if(err)
                goto out;

        for(i = 0; i < current_poll.used; i++){
                p = &current_poll.poll[i];
                if(p->fd != fd) next_poll.poll[n++] = current_poll.poll[i];
        }
        if(n == i){
                printk("ignore_sigio_fd : fd %d not found\n", fd);
                err = -1;
                goto out;
        }

        update_thread();
out:
        sigio_unlock();
        return(err);
}

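/* Allocate the single-entry pollfd array the helper thread starts out with:
 * initially it watches only its end of the private pipe, waiting for the
 * first poke from update_thread().
 */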
static struct pollfd *setup_initial_poll(int fd)
{
        struct pollfd *p;

        p = um_kmalloc(sizeof(struct pollfd));
        if (p == NULL) {
                printk("setup_initial_poll : failed to allocate poll\n");
                return NULL;
        }
        *p = ((struct pollfd) { .fd = fd,
                                .events = POLLIN,
                                .revents = 0 });
        return p;
}

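/* Set up the write-SIGIO workaround: create the two pipes, allocate the
 * initial poll set and start the helper thread. The pipes and the poll set
 * are prepared before sigio_lock() is taken; the lock covers only the recheck
 * for a racing caller and the publication of the descriptors, the poll set
 * and the thread pid.
 */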
void write_sigio_workaround(void)
{
        unsigned long stack;
        struct pollfd *p;
        int err;
        int l_write_sigio_fds[2];
        int l_sigio_private[2];
        int l_write_sigio_pid;

        /* We call this *tons* of times - and most ones we must just fail. */
        sigio_lock();
        l_write_sigio_pid = write_sigio_pid;
        sigio_unlock();

        if (l_write_sigio_pid != -1)
                return;

        err = os_pipe(l_write_sigio_fds, 1, 1);
        if(err < 0){
                printk("write_sigio_workaround - os_pipe 1 failed, "
                       "err = %d\n", -err);
                return;
        }
        err = os_pipe(l_sigio_private, 1, 1);
        if(err < 0){
                printk("write_sigio_workaround - os_pipe 2 failed, "
                       "err = %d\n", -err);
                goto out_close1;
        }

        p = setup_initial_poll(l_sigio_private[1]);
        if(!p)
                goto out_close2;

        sigio_lock();

        /* Did we race? Don't try to optimize this, please, it's not so likely
         * to happen, and no more than once at the boot. */
        if(write_sigio_pid != -1)
                goto out_free;

        current_poll = ((struct pollfds) { .poll = p,
                                           .used = 1,
                                           .size = 1 });

        if (write_sigio_irq(l_write_sigio_fds[0]))
                goto out_clear_poll;

        memcpy(write_sigio_fds, l_write_sigio_fds, sizeof(l_write_sigio_fds));
        memcpy(sigio_private, l_sigio_private, sizeof(l_sigio_private));

        write_sigio_pid = run_helper_thread(write_sigio_thread, NULL,
                                            CLONE_FILES | CLONE_VM, &stack, 0);

        if (write_sigio_pid < 0)
                goto out_clear;

        sigio_unlock();
        return;

out_clear:
        write_sigio_pid = -1;
        write_sigio_fds[0] = -1;
        write_sigio_fds[1] = -1;
        sigio_private[0] = -1;
        sigio_private[1] = -1;
out_clear_poll:
        current_poll = ((struct pollfds) { .poll = NULL,
                                           .size = 0,
                                           .used = 0 });
out_free:
        sigio_unlock();
        kfree(p);
out_close2:
        close(l_sigio_private[0]);
        close(l_sigio_private[1]);
out_close1:
        close(l_write_sigio_fds[0]);
        close(l_write_sigio_fds[1]);
}

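/* Exitcall (see the comment next to write_sigio_pid): make sure the helper
 * thread is killed when UML shuts down.
 */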
void sigio_cleanup(void)
{
        if(write_sigio_pid != -1){
                os_kill_process(write_sigio_pid, 1);
                write_sigio_pid = -1;
        }
}

__uml_exitcall(sigio_cleanup);