Linux-2.6.12-rc2
arch/um/kernel/irq_user.c

/*
 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <sys/poll.h>
#include <sys/types.h>
#include <sys/time.h>
#include "user_util.h"
#include "kern_util.h"
#include "user.h"
#include "process.h"
#include "signal_user.h"
#include "sigio.h"
#include "irq_user.h"
#include "os.h"
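
/* One entry per host file descriptor registered as an IRQ source.  Entries
 * live on the active_fds list below; "freed" marks an entry for deferred
 * removal (see sigio_handler and free_irq_later).
 */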
struct irq_fd {
	struct irq_fd *next;
	void *id;
	int fd;
	int type;
	int irq;
	int pid;
	int events;
	int current_events;
	int freed;
};
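
/* active_fds and pollfds are kept in lockstep: the i-th list entry describes
 * the i-th pollfd slot.  A slot with fd == -1 is not currently polled;
 * reactivate_fd() re-arms it.
 */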
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

static struct pollfd *pollfds = NULL;
static int pollfds_num = 0;
static int pollfds_size = 0;

extern int io_count, intr_count;
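
/* SIGIO handler - polls every registered descriptor (with a zero timeout, so
 * it never blocks) and hands each fd that reported an event to do_IRQ.
 * Entries marked by free_irq_later() are torn down here, after their handler
 * has run.
 */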
void sigio_handler(int sig, union uml_pt_regs *regs)
{
	struct irq_fd *irq_fd, *next;
	int i, n;

	if(smp_sigio_handler()) return;
	while(1){
		n = poll(pollfds, pollfds_num, 0);
		if(n < 0){
			if(errno == EINTR) continue;
			printk("sigio_handler : poll returned %d, "
			       "errno = %d\n", n, errno);
			break;
		}
		if(n == 0) break;

		irq_fd = active_fds;
		for(i = 0; i < pollfds_num; i++){
			if(pollfds[i].revents != 0){
				irq_fd->current_events = pollfds[i].revents;
				pollfds[i].fd = -1;
			}
			irq_fd = irq_fd->next;
		}

		for(irq_fd = active_fds; irq_fd != NULL; irq_fd = next){
			next = irq_fd->next;
			if(irq_fd->current_events != 0){
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);

				/* This is here because the next irq may be
				 * freed in the handler.  If a console goes
				 * away, both the read and write irqs will be
				 * freed.  After do_IRQ, ->next will point to
				 * a good IRQ.
				 * Irqs can't be freed inside their handlers,
				 * so the next best thing is to have them
				 * marked as needing freeing, so that they
				 * can be freed here.
				 */
				next = irq_fd->next;
				if(irq_fd->freed){
					free_irq(irq_fd->irq, irq_fd->id);
					free_irq_by_irq_and_dev(irq_fd->irq,
								irq_fd->id);
				}
			}
		}
	}
}
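
/* Make an IPI descriptor signal-driven so that it is noticed by the SIGIO
 * handler above.
 */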
int activate_ipi(int fd, int pid)
{
	return(os_set_fd_async(fd, pid));
}
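
/* ptys don't reliably raise SIGIO for every event (see the pty_output_sigio
 * and pty_close_sigio flags); if the workaround is needed for this tty, hand
 * the fd to the sigio helper so events are forwarded anyway.
 */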
static void maybe_sigio_broken(int fd, int type)
{
	if(isatty(fd)){
		if((type == IRQ_WRITE) && !pty_output_sigio){
			write_sigio_workaround();
			add_sigio_fd(fd, 0);
		}
		else if((type == IRQ_READ) && !pty_close_sigio){
			write_sigio_workaround();
			add_sigio_fd(fd, 1);
		}
	}
}
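
/* Register fd as the source of interrupt "irq".  The descriptor is switched
 * to signal-driven I/O, a new irq_fd is appended to active_fds, and a slot
 * is added to (growing, if necessary) the pollfds array.
 */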
int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int pid, events, err, n, size;

	pid = os_getpid();
	err = os_set_fd_async(fd, pid);
	if(err < 0)
		goto out;

	new_fd = um_kmalloc(sizeof(*new_fd));
	err = -ENOMEM;
	if(new_fd == NULL)
		goto out;

	if(type == IRQ_READ) events = POLLIN | POLLPRI;
	else events = POLLOUT;
	*new_fd = ((struct irq_fd) { .next		= NULL,
				     .id		= dev_id,
				     .fd		= fd,
				     .type		= type,
				     .irq		= irq,
				     .pid		= pid,
				     .events		= events,
				     .current_events	= 0,
				     .freed		= 0 } );

	/* Critical section - locked by a spinlock because this stuff can
	 * be changed from interrupt handlers.  The stuff above is done
	 * outside the lock because it allocates memory.
	 */

	/* Actually, it only looks like it can be called from interrupt
	 * context.  The culprit is reactivate_fd, which calls
	 * maybe_sigio_broken, which calls write_sigio_workaround,
	 * which calls activate_fd.  However, write_sigio_workaround should
	 * only be called once, at boot time.  That would make it clear that
	 * this is called only from process context, and can be locked with
	 * a semaphore.
	 */
	flags = irq_lock();
	for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
		if((irq_fd->fd == fd) && (irq_fd->type == type)){
			printk("Registering fd %d twice\n", fd);
			printk("Irqs : %d, %d\n", irq_fd->irq, irq);
			printk("Ids : 0x%p, 0x%p\n", irq_fd->id, dev_id);
			goto out_unlock;
		}
	}

	n = pollfds_num;
	if(n == pollfds_size){
		while(1){
			/* Here we have to drop the lock in order to call
			 * kmalloc, which might sleep.  If something else
			 * came in and changed the pollfds array, we free
			 * the buffer and try again.
			 */
			irq_unlock(flags);
			size = (pollfds_num + 1) * sizeof(pollfds[0]);
			tmp_pfd = um_kmalloc(size);
			flags = irq_lock();
			if(tmp_pfd == NULL)
				goto out_unlock;
			if(n == pollfds_size)
				break;
			kfree(tmp_pfd);
		}
		if(pollfds != NULL){
			memcpy(tmp_pfd, pollfds,
			       sizeof(pollfds[0]) * pollfds_size);
			kfree(pollfds);
		}
		pollfds = tmp_pfd;
		pollfds_size++;
	}

	if(type == IRQ_WRITE)
		fd = -1;

	pollfds[pollfds_num] = ((struct pollfd) { .fd		= fd,
						  .events	= events,
						  .revents	= 0 });
	pollfds_num++;

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	irq_unlock(flags);

	/* This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, type);

	return(0);

 out_unlock:
	irq_unlock(flags);
	kfree(new_fd);
 out:
	return(err);
}
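
/* Remove every irq_fd for which "test" returns true, walking active_fds and
 * pollfds in lockstep so the two stay consistent.
 */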
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	struct irq_fd **prev;
	unsigned long flags;
	int i = 0;

	flags = irq_lock();
	prev = &active_fds;
	while(*prev != NULL){
		if((*test)(*prev, arg)){
			struct irq_fd *old_fd = *prev;
			if((pollfds[i].fd != -1) &&
			   (pollfds[i].fd != (*prev)->fd)){
				printk("free_irq_by_cb - mismatch between "
				       "active_fds and pollfds, fd %d vs %d\n",
				       (*prev)->fd, pollfds[i].fd);
				goto out;
			}
			memmove(&pollfds[i], &pollfds[i + 1],
				(pollfds_num - i - 1) * sizeof(pollfds[0]));
			pollfds_num--;
			if(last_irq_ptr == &old_fd->next)
				last_irq_ptr = prev;
			*prev = (*prev)->next;
			if(old_fd->type == IRQ_WRITE)
				ignore_sigio_fd(old_fd->fd);
			kfree(old_fd);
			continue;
		}
		prev = &(*prev)->next;
		i++;
	}
 out:
	irq_unlock(flags);
}
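
/* Helpers for free_irq_by_cb(): match either by (irq, dev_id) or by fd. */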
struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;
	return((irq->irq == data->irq) && (irq->id == data->dev));
}

void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
							  .dev = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
	return(irq->fd == *((int *) fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}
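
/* Look up the irq_fd for (fd, irqnum) and return its pollfds index through
 * index_out; returns NULL (after complaining) if the entry is missing or the
 * two structures have gone out of sync.
 */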
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;

	for(irq = active_fds; irq != NULL; irq = irq->next){
		if((irq->fd == fd) && (irq->irq == irqnum)) break;
		i++;
	}
	if(irq == NULL){
		printk("find_irq_by_fd doesn't have descriptor %d\n", fd);
		goto out;
	}
	if((pollfds[i].fd != -1) && (pollfds[i].fd != fd)){
		printk("find_irq_by_fd - mismatch between active_fds and "
		       "pollfds, fd %d vs %d, need %d\n", irq->fd,
		       pollfds[i].fd, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return(irq);
}
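
/* Mark an irq as needing to be freed.  Handlers can't call free_irq() on
 * themselves, so sigio_handler() does the actual teardown once the handler
 * has returned (see the comment there).
 */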
void free_irq_later(int irq, void *dev_id)
{
	struct irq_fd *irq_fd;
	unsigned long flags;

	flags = irq_lock();
	for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
		if((irq_fd->irq == irq) && (irq_fd->id == dev_id))
			break;
	}
	if(irq_fd == NULL){
		printk("free_irq_later found no irq, irq = %d, "
		       "dev_id = 0x%p\n", irq, dev_id);
		goto out;
	}
	irq_fd->freed = 1;
 out:
	irq_unlock(flags);
}
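
/* Put an fd back into (reactivate_fd) or take it out of (deactivate_fd) the
 * set that poll() watches, by restoring or clearing its pollfds slot.
 */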
void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	flags = irq_lock();
	irq = find_irq_by_fd(fd, irqnum, &i);
	if(irq == NULL){
		irq_unlock(flags);
		return;
	}

	pollfds[i].fd = irq->fd;

	irq_unlock(flags);

	/* This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, irq->type);
}

void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	flags = irq_lock();
	irq = find_irq_by_fd(fd, irqnum, &i);
	if(irq == NULL)
		goto out;
	pollfds[i].fd = -1;
 out:
	irq_unlock(flags);
}
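
/* Turn off async I/O on every registered descriptor and ignore any SIGIO
 * that is already queued (e.g. when the UML kernel is going down).
 */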
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for(irq = active_fds; irq != NULL; irq = irq->next){
		err = os_clear_fd_async(irq->fd);
		if(err)
			return(err);
	}
	/* If there is a signal already queued, after unblocking ignore it */
	set_handler(SIGIO, SIG_IGN, 0, -1);

	return(0);
}
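
/* SIGIO is delivered to the process that owns a descriptor, so when a
 * different host process takes over servicing interrupts, ownership of the
 * fds has to follow it.
 */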
void forward_ipi(int fd, int pid)
{
	int err;

	err = os_set_owner(fd, pid);
	if(err < 0)
		printk("forward_ipi: set_owner failed, fd = %d, me = %d, "
		       "target = %d, err = %d\n", fd, os_getpid(), pid, -err);
}

void forward_interrupts(int pid)
{
	struct irq_fd *irq;
	unsigned long flags;
	int err;

	flags = irq_lock();
	for(irq = active_fds; irq != NULL; irq = irq->next){
		err = os_set_owner(irq->fd, pid);
		if(err < 0){
			/* XXX Just remove the irq rather than
			 * print out an infinite stream of these
			 */
			printk("Failed to forward %d to pid %d, err = %d\n",
			       irq->fd, pid, -err);
		}

		irq->pid = pid;
	}
	irq_unlock(flags);
}
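
/* Install the signal handlers that drive the IRQ system: SIGVTALRM for the
 * timer (using the boot-time handler until the timer irq is set up) and
 * SIGIO for everything registered above.  SIGWINCH is ignored here.
 */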
void init_irq_signals(int on_sigstack)
{
	__sighandler_t h;
	int flags;

	flags = on_sigstack ? SA_ONSTACK : 0;
	if(timer_irq_inited) h = (__sighandler_t) alarm_handler;
	else h = boot_timer_handler;

	set_handler(SIGVTALRM, h, flags | SA_RESTART,
		    SIGUSR1, SIGIO, SIGWINCH, SIGALRM, -1);
	set_handler(SIGIO, (__sighandler_t) sig_handler, flags | SA_RESTART,
		    SIGUSR1, SIGIO, SIGWINCH, SIGALRM, SIGVTALRM, -1);
	signal(SIGWINCH, SIG_IGN);
}

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */