/*
 * fs/select.c
 *
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 *  4 February 1994
 *     COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 *     flag set in its personality we do *not* modify the given timeout
 *     parameter to reflect time remaining.
 *
 *  24 January 2000
 *     Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 *     of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>

#include <asm/uaccess.h>

/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */
static long __estimate_accuracy(struct timespec *tv)
{
	long slack;
	int divfactor = 1000;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > 100 * NSEC_PER_MSEC)
		slack = 100 * NSEC_PER_MSEC;

	if (slack < 0)
		slack = 0;
	return slack;
}

static long estimate_accuracy(struct timespec *tv)
{
	unsigned long ret;
	struct timespec now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */

	if (rt_task(current))
		return 0;

	ktime_get_ts(&now);
	now = timespec_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}

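/*
 * Worked example (illustrative): for a normal task with a 2s timeout,
 * divfactor is 1000, so the slack is 2 * (NSEC_PER_SEC/1000) = 2ms,
 * i.e. 0.1% of the timeout.  A "nice" task uses divfactor 200 (0.5%),
 * giving 10ms.  Either way the 100ms cap above bounds the result.
 */
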
struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))

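/*
 * Note: ->entry points at the next free slot in ->entries[]; the page is
 * considered full when one more entry would run past the end of the page
 * the table itself lives in, which is what POLL_TABLE_FULL() checks.
 */
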
/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait() make all the
 * work.  poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);

void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);

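/*
 * Illustrative driver-side usage (a sketch, not part of this file): a
 * typical ->poll method calls poll_wait(), which lands in __pollwait()
 * below via the callback installed by poll_initwait() above:
 *
 *	static unsigned int foo_poll(struct file *file, poll_table *wait)
 *	{
 *		struct foo_dev *dev = file->private_data;
 *		unsigned int mask = 0;
 *
 *		poll_wait(file, &dev->read_wq, wait);
 *		if (data_ready(dev))
 *			mask |= POLLIN | POLLRDNORM;
 *		return mask;
 *	}
 *
 * foo_poll(), foo_dev and data_ready() are hypothetical names.
 */
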
static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}

void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);

static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}

static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with set_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	get_file(filp);
	entry->filp = filp;
	entry->wait_address = wait_address;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}

int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following set_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	set_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);

/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here; that
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
	struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts(to);
		*to = timespec_add_safe(*to, ts);
	}
	return 0;
}

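/*
 * Worked example (illustrative): sys_select() below feeds a denormalized
 * timeval through this helper, e.g. tv = {0, 1500000} usec becomes
 * sec = 0 + 1500000/USEC_PER_SEC = 1 and
 * nsec = (1500000 % USEC_PER_SEC) * NSEC_PER_USEC = 500000000.
 */
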
static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts(&rts);
	rts = timespec_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;

	if (timeval) {
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;

	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}

#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))

static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last incomplete long-word first */
	set = ~(~0UL << (n & (__NFDBITS-1)));
	n /= __NFDBITS;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds->fds_bits+n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * __NFDBITS;
	}

	return max;
}

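/*
 * Worked example (illustrative): on a 64-bit arch (__NFDBITS == 64) with
 * n = 100, the code above first masks the low 100 & 63 = 36 bits of
 * long-word 100/64 = 1, then walks the remaining words downwards.  Any
 * watched bit without a matching open file yields -EBADF; otherwise the
 * highest watched fd plus one is returned as the new n.
 */
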
#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

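/*
 * Note that POLLHUP and POLLERR are folded into the read and write sets:
 * a hung-up or erroring descriptor is reported as readable/writable so
 * that select() returns instead of blocking on it forever.
 */
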
int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	unsigned long slack = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			const struct file_operations *f_op = NULL;
			struct file *file = NULL;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += __NFDBITS;
				continue;
			}

			for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
				int fput_needed;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				file = fget_light(i, &fput_needed);
				if (file) {
					f_op = file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll)
						mask = (*f_op->poll)(file, retval ? NULL : wait);
					fput_light(file, fput_needed);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
					}
				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}

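/*
 * Illustrative userspace view (a sketch, not part of this file): the
 * count computed above becomes select()'s return value, with each
 * descriptor counted once per set it is ready in:
 *
 *	fd_set rfds;
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *
 *	FD_ZERO(&rfds);
 *	FD_SET(sock, &rfds);
 *	n = select(sock + 1, &rfds, NULL, NULL, &tv);
 *	// n > 0: sock is ready; n == 0: the 5s timeout expired
 *
 * sock is a hypothetical connected socket descriptor.
 */
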
/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
#define MAX_SELECT_SECONDS \
	((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)

int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing);
	 * since we use fd_sets we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}

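/*
 * Worked example (illustrative, assuming SELECT_STACK_ALLOC is 256 as in
 * contemporary kernels): for n = 1024 descriptors, FDS_BYTES(n) = 128,
 * so the six bitmaps need 768 bytes and fall back to kmalloc(); for
 * n <= 256, size = 32 bytes and everything fits in stack_fds.
 */
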
SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}

#ifdef HAVE_SET_RESTORE_SIGMASK
static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}

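/*
 * Illustrative userspace motivation (a sketch, not part of this file):
 * pselect() exists to close the race between unblocking a signal and
 * entering select():
 *
 *	sigset_t empty;
 *
 *	sigemptyset(&empty);
 *	// the signal stays blocked except while pselect() sleeps, so it
 *	// cannot fire unnoticed between unblocking and waiting
 *	ready = pselect(nfds, &rfds, NULL, NULL, NULL, &empty);
 */
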
/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timespec __user *, tsp,
		void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}
#endif /* HAVE_SET_RESTORE_SIGMASK */

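/*
 * Userspace-side shape of the @sig argument above (a sketch of what a
 * libc wrapper typically passes; the struct name is illustrative):
 *
 *	struct sigset_argpack {
 *		const sigset_t *sigmask;
 *		size_t sigsetsize;
 *	};
 */
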
struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))

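/*
 * Worked example (illustrative, assuming 4KiB pages on 64-bit, where
 * sizeof(struct poll_list) is 16 and sizeof(struct pollfd) is 8):
 * POLLFD_PER_PAGE = (4096 - 16) / 8 = 510 entries per chained page.
 */
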
/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		int fput_needed;
		struct file * file;

		file = fget_light(fd, &fput_needed);
		mask = POLLNVAL;
		if (file != NULL) {
			mask = DEFAULT_POLLMASK;
			if (file->f_op && file->f_op->poll)
				mask = file->f_op->poll(file, pwait);
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fput_light(file, fput_needed);
		}
	}
	pollfd->revents = mask;

	return mask;
}

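/*
 * Note: per poll(2) semantics, POLLERR and POLLHUP are let through the
 * mask above even when the caller did not ask for them in ->events.
 */
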
static int do_poll(unsigned int nfds,  struct poll_list *list,
		   struct poll_wqueues *wait, struct timespec *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	unsigned long slack = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill the poll_table, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt)) {
					count++;
					pt = NULL;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table to them on the next loop iteration.
		 */
		pt = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}

#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))

int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(nfds, head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}

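/*
 * Worked example (illustrative, assuming POLL_STACK_ALLOC is 256 as in
 * contemporary kernels, with the struct sizes assumed above):
 * N_STACK_PPS = (256 - 16) / 8 = 30, so the first 30 pollfds live in
 * stack_pps and anything beyond that is copied into kmalloc()ed
 * poll_list chunks of POLLFD_PER_PAGE entries each.
 */
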
static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		long, timeout_msecs)
{
	struct timespec end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current_thread_info()->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}

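/*
 * Worked example (illustrative): timeout_msecs = 2500 above splits into
 * 2500 / MSEC_PER_SEC = 2 seconds plus
 * NSEC_PER_MSEC * (2500 % MSEC_PER_SEC) = 500000000 ns.
 */
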
#ifdef HAVE_SET_RESTORE_SIGMASK
SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
				sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
#endif /* HAVE_SET_RESTORE_SIGMASK */