/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 * COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 * flag set in its personality we do *not* modify the given timeout
 * parameter to reflect time remaining.
 *
 * Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 * of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>

#include <asm/uaccess.h>
/*
 * Estimate expected accuracy in ns from a timespec.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions.
 */

static long __estimate_accuracy(struct timespec *tv)
{
	long slack;
	int divfactor = 1000;

	if (task_nice(current) > 0)
		divfactor = divfactor / 5;

	slack = tv->tv_nsec / divfactor;
	slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

	if (slack > 100 * NSEC_PER_MSEC)
		slack = 100 * NSEC_PER_MSEC;

	return slack;
}
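
/*
 * Worked example (illustrative): with the divfactor of 1000 above, a
 * 2 s timeout earns 2 * (NSEC_PER_SEC / 1000) = 2,000,000 ns = 2 ms
 * of slack (0.1%).  A 500 s timeout would compute 500 ms, which the
 * 100 msec cap above trims back.
 */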
static long estimate_accuracy(struct timespec *tv)
{
	unsigned long ret;
	struct timespec now;

	/*
	 * Realtime tasks get a slack of 0 for obvious reasons.
	 */
	if (rt_task(current))
		return 0;

	ktime_get_ts(&now);
	now = timespec_sub(*tv, now);
	ret = __estimate_accuracy(&now);
	if (ret < current->timer_slack_ns)
		return current->timer_slack_ns;
	return ret;
}
struct poll_table_page {
	struct poll_table_page * next;
	struct poll_table_entry * entry;
	struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
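
/*
 * Reading the macro (illustrative): a poll_table_page occupies one
 * page, so the table is full once the slot one past ->entry would
 * extend beyond PAGE_SIZE bytes from the page base, i.e. no whole
 * poll_table_entry fits in the page any more.
 */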
/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait() make all the
 * work. poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);
void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->polling_task = current;
	pwq->triggered = 0;
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);
static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
	fput(entry->filp);
}
void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page * p = pwq->table;
	int i;
	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry * entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}
EXPORT_SYMBOL(poll_freewait);
static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}
static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct poll_wqueues *pwq = wait->private;
	DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

	/*
	 * Although this function is called under waitqueue lock, LOCK
	 * doesn't imply write barrier and the users expect write
	 * barrier semantics on wakeup functions.  The following
	 * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
	 * and is paired with set_mb() in poll_schedule_timeout.
	 */
	smp_wmb();
	pwq->triggered = 1;

	/*
	 * Perform the default wake up operation using a dummy
	 * waitqueue.
	 *
	 * TODO: This is hacky but there currently is no interface to
	 * pass in @sync.  @sync is scheduled to be removed and once
	 * that happens, wake_up_process() can be used directly.
	 */
	return default_wake_function(&dummy_wait, mode, sync, key);
}
/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
				poll_table *p)
{
	struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
	struct poll_table_entry *entry = poll_get_entry(pwq);
	if (!entry)
		return;
	get_file(filp);
	entry->filp = filp;
	entry->wait_address = wait_address;
	init_waitqueue_func_entry(&entry->wait, pollwake);
	entry->wait.private = pwq;
	add_wait_queue(wait_address, &entry->wait);
}
int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
			  ktime_t *expires, unsigned long slack)
{
	int rc = -EINTR;

	set_current_state(state);
	if (!pwq->triggered)
		rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
	__set_current_state(TASK_RUNNING);

	/*
	 * Prepare for the next iteration.
	 *
	 * The following set_mb() serves two purposes.  First, it's
	 * the counterpart rmb of the wmb in pollwake() such that data
	 * written before wake up is always visible after wake up.
	 * Second, the full barrier guarantees that triggered clearing
	 * doesn't pass event check of the next iteration.  Note that
	 * this problem doesn't exist for the first iteration as
	 * add_wait_queue() has full barrier semantics.
	 */
	set_mb(pwq->triggered, 0);

	return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);
/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:		pointer to timespec variable for the final timeout
 * @sec:	seconds (from user space)
 * @nsec:	nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
	struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

	if (!timespec_valid(&ts))
		return -EINVAL;

	/* Optimize for the zero timeout value here */
	if (!sec && !nsec) {
		to->tv_sec = to->tv_nsec = 0;
	} else {
		ktime_get_ts(to);
		*to = timespec_add_safe(*to, ts);
	}
	return 0;
}
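
/*
 * Usage sketch (illustrative): a caller that received a 1.5 s
 * relative timeout from user space would do
 *
 *	struct timespec end_time;
 *	if (poll_select_set_timeout(&end_time, 1, 500000000))
 *		return -EINVAL;
 *
 * after which end_time holds the absolute expiry, now + 1.5 s.
 */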
static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
				      int timeval, int ret)
{
	struct timespec rts;
	struct timeval rtv;

	if (!p)
		return ret;

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;

	/* No update for zero timeout */
	if (!end_time->tv_sec && !end_time->tv_nsec)
		return ret;

	ktime_get_ts(&rts);
	rts = timespec_sub(*end_time, rts);
	if (rts.tv_sec < 0)
		rts.tv_sec = rts.tv_nsec = 0;

	if (timeval) {
		rtv.tv_sec = rts.tv_sec;
		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

		if (!copy_to_user(p, &rtv, sizeof(rtv)))
			return ret;
	} else if (!copy_to_user(p, &rts, sizeof(rts)))
		return ret;

	/*
	 * If an application puts its timeval in read-only memory, we
	 * don't want the Linux-specific update to the timeval to
	 * cause a fault after the select has completed
	 * successfully. However, because we're not updating the
	 * timeval, we can't restart the system call.
	 */

sticky:
	if (ret == -ERESTARTNOHAND)
		ret = -EINTR;
	return ret;
}
#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))
static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;
	unsigned long set;
	int max;
	struct fdtable *fdt;

	/* handle last in-complete long-word first */
	set = ~(~0UL << (n & (__NFDBITS-1)));
	n /= __NFDBITS;
	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds->fds_bits+n;
	max = 0;
	if (set) {
		set &= BITS(fds, n);
		if (set) {
			if (!(set & ~*open_fds))
				goto get_max;
			return -EBADF;
		}
	}
	while (n) {
		open_fds--;
		n--;
		set = BITS(fds, n);
		if (!set)
			continue;
		if (set & ~*open_fds)
			return -EBADF;
		if (max)
			continue;
get_max:
		do {
			max++;
			set >>= 1;
		} while (set);
		max += n * __NFDBITS;
	}

	return max;
}
#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)
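
/*
 * Example of the mapping (standard select() semantics): an fd whose
 * ->poll returns POLLERR counts as both readable and writable if it
 * was asked about in those sets, and POLLHUP alone still counts as
 * readable, so a blocked reader wakes up and can observe EOF.
 */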
int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
{
	ktime_t expire, *to = NULL;
	struct poll_wqueues table;
	poll_table *wait;
	int retval, i, timed_out = 0;
	unsigned long slack = 0;

	rcu_read_lock();
	retval = max_select_fd(n, fds);
	rcu_read_unlock();

	if (retval < 0)
		return retval;
	n = retval;

	poll_initwait(&table);
	wait = &table.pt;
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		wait = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = estimate_accuracy(end_time);

	retval = 0;
	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			const struct file_operations *f_op = NULL;
			struct file *file = NULL;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;
			if (all_bits == 0) {
				i += __NFDBITS;
				continue;
			}

			for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
				int fput_needed;
				if (i >= n)
					break;
				if (!(bit & all_bits))
					continue;
				file = fget_light(i, &fput_needed);
				if (file) {
					f_op = file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll)
						mask = (*f_op->poll)(file, retval ? NULL : wait);
					fput_light(file, fput_needed);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
					}
				}
			}
			if (res_in)
				*rinp = res_in;
			if (res_out)
				*routp = res_out;
			if (res_ex)
				*rexp = res_ex;
			cond_resched();
		}
		wait = NULL;
		if (retval || timed_out || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
					   to, slack))
			timed_out = 1;
	}

	poll_freewait(&table);

	return retval;
}
/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want it to.
 */
#define MAX_SELECT_SECONDS \
	((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
		    fd_set __user *exp, struct timespec *end_time)
{
	fd_set_bits fds;
	void *bits;
	int ret, max_fds;
	unsigned int size;
	struct fdtable *fdt;
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

	ret = -EINVAL;
	if (n < 0)
		goto out_nofds;

	/* max_fds can increase, so grab it once to avoid race */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;
	rcu_read_unlock();
	if (n > max_fds)
		n = max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * since we used fdset we need to allocate memory in units of
	 * long-words.
	 */
	size = FDS_BYTES(n);
	bits = stack_fds;
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		ret = -ENOMEM;
		bits = kmalloc(6 * size, GFP_KERNEL);
		if (!bits)
			goto out_nofds;
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;
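
	/*
	 * Size sketch (illustrative, assuming SELECT_STACK_ALLOC == 256):
	 * for n == 1024, size = FDS_BYTES(1024) = 128 bytes, so the six
	 * bitmaps need 768 bytes and the 256-byte stack_fds is too small;
	 * the kmalloc() path above is taken.  Roughly n <= 320 fits on
	 * the stack on 64-bit.
	 */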

	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, end_time);

	if (ret < 0)
		goto out;
	if (!ret) {
		ret = -ERESTARTNOHAND;
		if (signal_pending(current))
			goto out;
		ret = 0;
	}

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);
out_nofds:
	return ret;
}
SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
		fd_set __user *, exp, struct timeval __user *, tvp)
{
	struct timespec end_time, *to = NULL;
	struct timeval tv;
	int ret;

	if (tvp) {
		if (copy_from_user(&tv, tvp, sizeof(tv)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to,
				tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
				(tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
			return -EINVAL;
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

	return ret;
}
#ifdef HAVE_SET_RESTORE_SIGMASK
static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
		       fd_set __user *exp, struct timespec __user *tsp,
		       const sigset_t __user *sigmask, size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = core_sys_select(n, inp, outp, exp, to);
	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}
/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
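
/*
 * Sketch of the user-side layout the sixth argument points at (this is
 * how glibc's pselect() wrapper packs it; field names are illustrative):
 *
 *	struct {
 *		const sigset_t *ss;	// sigmask, may be NULL
 *		size_t ss_len;		// should equal sizeof(sigset_t)
 *	};
 */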
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
	fd_set __user *, exp, struct timespec __user *, tsp,
	void __user *, sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;

	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				(size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}

	return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}
#endif /* HAVE_SET_RESTORE_SIGMASK */
struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
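
/*
 * Illustrative arithmetic: with 4 KiB pages, an 8-byte struct pollfd
 * and a 16-byte struct poll_list header on 64-bit, POLLFD_PER_PAGE
 * comes to (4096 - 16) / 8 = 510 pollfds per chained page.
 */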
/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
{
	unsigned int mask;
	int fd;

	mask = 0;
	fd = pollfd->fd;
	if (fd >= 0) {
		int fput_needed;
		struct file * file;

		file = fget_light(fd, &fput_needed);
		mask = POLLNVAL;
		if (file != NULL) {
			mask = DEFAULT_POLLMASK;
			if (file->f_op && file->f_op->poll)
				mask = file->f_op->poll(file, pwait);
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fput_light(file, fput_needed);
		}
	}
	pollfd->revents = mask;

	return mask;
}
static int do_poll(unsigned int nfds,  struct poll_list *list,
		   struct poll_wqueues *wait, struct timespec *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	unsigned long slack = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		pt = NULL;
		timed_out = 1;
	}

	if (end_time && !timed_out)
		slack = estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;

		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill the poll_table, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt)) {
					count++;
					pt = NULL;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table to them on the next loop iteration.
		 */
		pt = NULL;
		if (!count) {
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec_to_ktime(*end_time);
			to = &expire;
		}

		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}
#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
			sizeof(struct pollfd))
int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
		struct timespec *end_time)
{
	struct poll_wqueues table;
	int err = -EFAULT, fdcount, len, size;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *const head = (struct poll_list *)stack_pps;
	struct poll_list *walk = head;
	unsigned long todo = nfds;

	if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		return -EINVAL;

	len = min_t(unsigned int, nfds, N_STACK_PPS);
	for (;;) {
		walk->next = NULL;
		walk->len = len;
		if (!len)
			break;

		if (copy_from_user(walk->entries, ufds + nfds-todo,
					sizeof(struct pollfd) * walk->len))
			goto out_fds;

		todo -= walk->len;
		if (!todo)
			break;

		len = min(todo, POLLFD_PER_PAGE);
		size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
		walk = walk->next = kmalloc(size, GFP_KERNEL);
		if (!walk) {
			err = -ENOMEM;
			goto out_fds;
		}
	}

	poll_initwait(&table);
	fdcount = do_poll(nfds, head, &table, end_time);
	poll_freewait(&table);

	for (walk = head; walk; walk = walk->next) {
		struct pollfd *fds = walk->entries;
		int j;

		for (j = 0; j < walk->len; j++, ufds++)
			if (__put_user(fds[j].revents, &ufds->revents))
				goto out_fds;
	}

	err = fdcount;
out_fds:
	walk = head->next;
	while (walk) {
		struct poll_list *pos = walk;
		walk = walk->next;
		kfree(pos);
	}

	return err;
}
static long do_restart_poll(struct restart_block *restart_block)
{
	struct pollfd __user *ufds = restart_block->poll.ufds;
	int nfds = restart_block->poll.nfds;
	struct timespec *to = NULL, end_time;
	int ret;

	if (restart_block->poll.has_timeout) {
		end_time.tv_sec = restart_block->poll.tv_sec;
		end_time.tv_nsec = restart_block->poll.tv_nsec;
		to = &end_time;
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		restart_block->fn = do_restart_poll;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
		long, timeout_msecs)
{
	struct timespec end_time, *to = NULL;
	int ret;

	if (timeout_msecs >= 0) {
		to = &end_time;
		poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
			NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
	}

	ret = do_sys_poll(ufds, nfds, to);

	if (ret == -EINTR) {
		struct restart_block *restart_block;

		restart_block = &current_thread_info()->restart_block;
		restart_block->fn = do_restart_poll;
		restart_block->poll.ufds = ufds;
		restart_block->poll.nfds = nfds;

		if (timeout_msecs >= 0) {
			restart_block->poll.tv_sec = end_time.tv_sec;
			restart_block->poll.tv_nsec = end_time.tv_nsec;
			restart_block->poll.has_timeout = 1;
		} else
			restart_block->poll.has_timeout = 0;

		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
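
/*
 * Userspace view of the syscall above (illustrative; "sock" is any
 * open descriptor and handle_readable() a stand-in):
 *
 *	struct pollfd pfd = { .fd = sock, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, 2500) > 0 && (pfd.revents & POLLIN))
 *		handle_readable(sock);
 *
 * A negative timeout means wait indefinitely, which is why the
 * has_timeout flag in the restart block above must record whether
 * end_time is meaningful.
 */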
#ifdef HAVE_SET_RESTORE_SIGMASK
SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
		struct timespec __user *, tsp, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	struct timespec ts, end_time, *to = NULL;
	int ret;

	if (tsp) {
		if (copy_from_user(&ts, tsp, sizeof(ts)))
			return -EFAULT;

		to = &end_time;
		if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
			return -EINVAL;
	}

	if (sigmask) {
		/* XXX: Don't preclude handling different sized sigset_t's. */
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;

		sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	ret = do_sys_poll(ufds, nfds, to);

	/* We can restart this syscall, usually */
	if (ret == -EINTR) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_restore_sigmask();
		}
		ret = -ERESTARTNOHAND;
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

	return ret;
}
#endif /* HAVE_SET_RESTORE_SIGMASK */