/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 * COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 * flag set in its personality we do *not* modify the given timeout
 * parameter to reflect time remaining.
 *
 * Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 * of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>
#include <net/busy_poll.h>

#include <asm/uaccess.h>
/*
 * Estimate expected accuracy in ns from a timeval.
 *
 * After quite a bit of churning around, we've settled on
 * a simple thing of taking 0.1% of the timeout as the
 * slack, with a cap of 100 msec.
 * "nice" tasks get a 0.5% slack instead.
 *
 * Consider this comment an open invitation to come up with even
 * better solutions..
 */

#define MAX_SLACK       (100 * NSEC_PER_MSEC)
static long __estimate_accuracy(struct timespec *tv)
{
        long slack;
        int divfactor = 1000;   /* 0.1% of the timeout */

        if (tv->tv_sec < 0)
                return 0;

        if (task_nice(current) > 0)
                divfactor = divfactor / 5;      /* "nice" tasks: 0.5% */

        if (tv->tv_sec > MAX_SLACK / (NSEC_PER_SEC/divfactor))
                return MAX_SLACK;

        slack = tv->tv_nsec / divfactor;
        slack += tv->tv_sec * (NSEC_PER_SEC/divfactor);

        if (slack > MAX_SLACK)
                return MAX_SLACK;

        return slack;
}
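/*
 * Worked example (editor's illustration): with divfactor == 1000 a 10 s
 * timeout gives 10 * (NSEC_PER_SEC / 1000) = 10,000,000 ns = 10 ms of
 * slack, i.e. 0.1%.  Any timeout longer than 100 s already exceeds
 * MAX_SLACK, which is why the tv_sec comparison above clamps early
 * (and also avoids overflowing the multiplication).
 */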
long select_estimate_accuracy(struct timespec *tv)
{
        unsigned long ret;
        struct timespec now;

        /*
         * Realtime tasks get a slack of 0 for obvious reasons.
         */
        if (rt_task(current))
                return 0;

        ktime_get_ts(&now);
        now = timespec_sub(*tv, now);
        ret = __estimate_accuracy(&now);
        if (ret < current->timer_slack_ns)
                return current->timer_slack_ns;
        return ret;
}
struct poll_table_page {
        struct poll_table_page * next;
        struct poll_table_entry * entry;
        struct poll_table_entry entries[0];
};

#define POLL_TABLE_FULL(table) \
        ((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
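/*
 * Editor's note: each poll_table_page occupies exactly one page, with
 * ->entry pointing at the next free slot in ->entries[].  The macro
 * declares the page full as soon as one more slot would end past
 * table + PAGE_SIZE, at which point poll_get_entry() chains a new page.
 */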
/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: This code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait() make all the
 * work. poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
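/*
 * Sketch of the other side of the contract (editor's illustration; my_dev,
 * my_queue and data_available() are hypothetical): a driver's ->poll method
 * hands its waitqueue to poll_wait() and returns the currently-ready mask.
 * For select/poll callers, poll_wait() lands in __pollwait() below.
 *
 *      static unsigned int my_poll(struct file *file, poll_table *wait)
 *      {
 *              struct my_dev *dev = file->private_data;
 *              unsigned int mask = 0;
 *
 *              poll_wait(file, &dev->my_queue, wait);
 *              if (data_available(dev))
 *                      mask |= POLLIN | POLLRDNORM;
 *              return mask;
 *      }
 */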
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
                       poll_table *p);
void poll_initwait(struct poll_wqueues *pwq)
{
        init_poll_funcptr(&pwq->pt, __pollwait);
        pwq->polling_task = current;
        pwq->triggered = 0;
        pwq->error = 0;
        pwq->table = NULL;
        pwq->inline_index = 0;
}
EXPORT_SYMBOL(poll_initwait);
static void free_poll_entry(struct poll_table_entry *entry)
{
        remove_wait_queue(entry->wait_address, &entry->wait);
        fput(entry->filp);
}
void poll_freewait(struct poll_wqueues *pwq)
{
        struct poll_table_page * p = pwq->table;
        int i;

        for (i = 0; i < pwq->inline_index; i++)
                free_poll_entry(pwq->inline_entries + i);
        while (p) {
                struct poll_table_entry * entry;
                struct poll_table_page *old;

                entry = p->entry;
                do {
                        entry--;
                        free_poll_entry(entry);
                } while (entry > p->entries);
                old = p;
                p = p->next;
                free_page((unsigned long) old);
        }
}
EXPORT_SYMBOL(poll_freewait);
static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
{
        struct poll_table_page *table = p->table;

        if (p->inline_index < N_INLINE_POLL_ENTRIES)
                return p->inline_entries + p->inline_index++;

        if (!table || POLL_TABLE_FULL(table)) {
                struct poll_table_page *new_table;

                new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
                if (!new_table) {
                        p->error = -ENOMEM;
                        return NULL;
                }
                new_table->entry = new_table->entries;
                new_table->next = table;
                p->table = new_table;
                table = new_table;
        }

        return table->entry++;
}
static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct poll_wqueues *pwq = wait->private;
        DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);

        /*
         * Although this function is called under waitqueue lock, LOCK
         * doesn't imply write barrier and the users expect write
         * barrier semantics on wakeup functions.  The following
         * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
         * and is paired with set_mb() in poll_schedule_timeout.
         */
        smp_wmb();
        pwq->triggered = 1;

        /*
         * Perform the default wake up operation using a dummy
         * waitqueue.
         *
         * TODO: This is hacky but there currently is no interface to
         * pass in @sync.  @sync is scheduled to be removed and once
         * that happens, wake_up_process() can be used directly.
         */
        return default_wake_function(&dummy_wait, mode, sync, key);
}
static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct poll_table_entry *entry;

        entry = container_of(wait, struct poll_table_entry, wait);
        /* Ignore wakeups for events the caller did not ask for. */
        if (key && !((unsigned long)key & entry->key))
                return 0;
        return __pollwake(wait, mode, sync, key);
}
/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
                       poll_table *p)
{
        struct poll_wqueues *pwq = container_of(p, struct poll_wqueues, pt);
        struct poll_table_entry *entry = poll_get_entry(pwq);
        if (!entry)
                return;
        entry->filp = get_file(filp);
        entry->wait_address = wait_address;
        entry->key = p->_key;
        init_waitqueue_func_entry(&entry->wait, pollwake);
        entry->wait.private = pwq;
        add_wait_queue(wait_address, &entry->wait);
}
int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
                          ktime_t *expires, unsigned long slack)
{
        int rc = -EINTR;

        set_current_state(state);
        if (!pwq->triggered)
                rc = freezable_schedule_hrtimeout_range(expires, slack,
                                                        HRTIMER_MODE_ABS);
        __set_current_state(TASK_RUNNING);

        /*
         * Prepare for the next iteration.
         *
         * The following set_mb() serves two purposes.  First, it's
         * the counterpart rmb of the wmb in pollwake() such that data
         * written before wake up is always visible after wake up.
         * Second, the full barrier guarantees that triggered clearing
         * doesn't pass event check of the next iteration.  Note that
         * this problem doesn't exist for the first iteration as
         * add_wait_queue() has full barrier semantics.
         */
        set_mb(pwq->triggered, 0);

        return rc;
}
EXPORT_SYMBOL(poll_schedule_timeout);
/**
 * poll_select_set_timeout - helper function to setup the timeout value
 * @to:         pointer to timespec variable for the final timeout
 * @sec:        seconds (from user space)
 * @nsec:       nanoseconds (from user space)
 *
 * Note, we do not use a timespec for the user space value here. That
 * way we can use the function for timeval and compat interfaces as well.
 *
 * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0.
 */
int poll_select_set_timeout(struct timespec *to, long sec, long nsec)
{
        struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec};

        if (!timespec_valid(&ts))
                return -EINVAL;

        /* Optimize for the zero timeout value here */
        if (!sec && !nsec) {
                to->tv_sec = to->tv_nsec = 0;
        } else {
                ktime_get_ts(to);
                *to = timespec_add_safe(*to, ts);
        }
        return 0;
}
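/*
 * Example use (editor's illustration): turning a relative 1.5 s timeout
 * from user space into the absolute expiry that do_select()/do_poll()
 * work against:
 *
 *      struct timespec end_time;
 *
 *      if (poll_select_set_timeout(&end_time, 1, 500000000))
 *              return -EINVAL;         (sec/nsec were not normalized)
 */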
static int poll_select_copy_remaining(struct timespec *end_time, void __user *p,
                                      int timeval, int ret)
{
        struct timespec rts;
        struct timeval rtv;

        if (!p)
                return ret;

        if (current->personality & STICKY_TIMEOUTS)
                goto sticky;

        /* No update for zero timeout */
        if (!end_time->tv_sec && !end_time->tv_nsec)
                return ret;

        ktime_get_ts(&rts);
        rts = timespec_sub(*end_time, rts);
        if (rts.tv_sec < 0)
                rts.tv_sec = rts.tv_nsec = 0;

        if (timeval) {
                if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
                        memset(&rtv, 0, sizeof(rtv));
                rtv.tv_sec = rts.tv_sec;
                rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;

                if (!copy_to_user(p, &rtv, sizeof(rtv)))
                        return ret;

        } else if (!copy_to_user(p, &rts, sizeof(rts)))
                return ret;

        /*
         * If an application puts its timeval in read-only memory, we
         * don't want the Linux-specific update to the timeval to
         * cause a fault after the select has completed
         * successfully. However, because we're not updating the
         * timeval, we can't restart the system call.
         */

sticky:
        if (ret == -ERESTARTNOHAND)
                ret = -EINTR;
        return ret;
}
#define FDS_IN(fds, n)          (fds->in + n)
#define FDS_OUT(fds, n)         (fds->out + n)
#define FDS_EX(fds, n)          (fds->ex + n)

#define BITS(fds, n)    (*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))
static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
        unsigned long *open_fds;
        unsigned long set;
        int max;
        struct fdtable *fdt;

        /* handle last in-complete long-word first */
        set = ~(~0UL << (n & (BITS_PER_LONG-1)));
        n /= BITS_PER_LONG;
        fdt = files_fdtable(current->files);
        open_fds = fdt->open_fds + n;
        max = 0;
        if (set) {
                set &= BITS(fds, n);
                if (set) {
                        if (!(set & ~*open_fds))
                                goto get_max;
                        return -EBADF;
                }
        }
        while (n) {
                open_fds--;
                n--;
                set = BITS(fds, n);
                if (!set)
                        continue;
                if (set & ~*open_fds)
                        return -EBADF;
                if (max)
                        continue;
get_max:
                do {
                        max++;
                        set >>= 1;
                } while (set);
                max += n * BITS_PER_LONG;
        }

        return max;
}
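/*
 * Worked example (editor's note): for n = 68 with BITS_PER_LONG = 64,
 * n & 63 == 4, so set = ~(~0UL << 4) = 0xf -- a mask selecting the four
 * valid bits of the final, partially-used word of each fd_set.  Any bit
 * requested in BITS() but clear in open_fds names a closed descriptor,
 * hence the -EBADF in max_select_fd() above.
 */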
#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)
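/*
 * Editor's note: POLLERR is part of both the read and write sets, and
 * POLLHUP of the read set, because select() must report errors and
 * hangups as "ready" even though an fd_set gives userspace no way to
 * request them explicitly.
 */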
static inline void wait_key_set(poll_table *wait, unsigned long in,
                                unsigned long out, unsigned long bit,
                                unsigned int ll_flag)
{
        wait->_key = POLLEX_SET | ll_flag;
        if (in & bit)
                wait->_key |= POLLIN_SET;
        if (out & bit)
                wait->_key |= POLLOUT_SET;
}
int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
{
        ktime_t expire, *to = NULL;
        struct poll_wqueues table;
        poll_table *wait;
        int retval, i, timed_out = 0;
        unsigned long slack = 0;
        unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
        unsigned long busy_end = 0;

        rcu_read_lock();
        retval = max_select_fd(n, fds);
        rcu_read_unlock();

        if (retval < 0)
                return retval;
        n = retval;

        poll_initwait(&table);
        wait = &table.pt;
        if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
                wait->_qproc = NULL;
                timed_out = 1;
        }

        if (end_time && !timed_out)
                slack = select_estimate_accuracy(end_time);

        retval = 0;
        for (;;) {
                unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;
                bool can_busy_loop = false;

                inp = fds->in; outp = fds->out; exp = fds->ex;
                rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

                for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
                        unsigned long in, out, ex, all_bits, bit = 1, mask, j;
                        unsigned long res_in = 0, res_out = 0, res_ex = 0;

                        in = *inp++; out = *outp++; ex = *exp++;
                        all_bits = in | out | ex;
                        if (all_bits == 0) {
                                i += BITS_PER_LONG;
                                continue;
                        }

                        for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
                                struct fd f;
                                if (i >= n)
                                        break;
                                if (!(bit & all_bits))
                                        continue;
                                f = fdget(i);
                                if (f.file) {
                                        const struct file_operations *f_op;
                                        f_op = f.file->f_op;
                                        mask = DEFAULT_POLLMASK;
                                        if (f_op && f_op->poll) {
                                                wait_key_set(wait, in, out,
                                                             bit, busy_flag);
                                                mask = (*f_op->poll)(f.file, wait);
                                        }
                                        fdput(f);
                                        if ((mask & POLLIN_SET) && (in & bit)) {
                                                res_in |= bit;
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
                                        if ((mask & POLLOUT_SET) && (out & bit)) {
                                                res_out |= bit;
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
                                        if ((mask & POLLEX_SET) && (ex & bit)) {
                                                res_ex |= bit;
                                                retval++;
                                                wait->_qproc = NULL;
                                        }
                                        /* got something, stop busy polling */
                                        if (retval) {
                                                can_busy_loop = false;
                                                busy_flag = 0;

                                        /*
                                         * only remember a returned
                                         * POLL_BUSY_LOOP if we asked for it
                                         */
                                        } else if (busy_flag & mask)
                                                can_busy_loop = true;

                                }
                        }
                        if (res_in)
                                *rinp = res_in;
                        if (res_out)
                                *routp = res_out;
                        if (res_ex)
                                *rexp = res_ex;
                        cond_resched();
                }
                wait->_qproc = NULL;
                if (retval || timed_out || signal_pending(current))
                        break;
                if (table.error) {
                        retval = table.error;
                        break;
                }

                /* only if found POLL_BUSY_LOOP sockets && not out of time */
                if (can_busy_loop && !need_resched()) {
                        if (!busy_end) {
                                busy_end = busy_loop_end_time();
                                continue;
                        }
                        if (!busy_loop_timeout(busy_end))
                                continue;
                }
                busy_flag = 0;

                /*
                 * If this is the first loop and we have a timeout
                 * given, then we convert to ktime_t and set the to
                 * pointer to the expiry value.
                 */
                if (end_time && !to) {
                        expire = timespec_to_ktime(*end_time);
                        to = &expire;
                }

                if (!poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
                                           to, slack))
                        timed_out = 1;
        }

        poll_freewait(&table);

        return retval;
}
/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */
int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
                    fd_set __user *exp, struct timespec *end_time)
{
        fd_set_bits fds;
        void *bits;
        int ret, max_fds;
        unsigned int size;
        struct fdtable *fdt;
        /* Allocate small arguments on the stack to save memory and be faster */
        long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];

        ret = -EINVAL;
        if (n < 0)
                goto out_nofds;

        /* max_fds can increase, so grab it once to avoid race */
        rcu_read_lock();
        fdt = files_fdtable(current->files);
        max_fds = fdt->max_fds;
        rcu_read_unlock();
        if (n > max_fds)
                n = max_fds;

        /*
         * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
         * since we used fdset we need to allocate memory in units of
         * long-words.
         */
        size = FDS_BYTES(n);
        bits = stack_fds;
        if (size > sizeof(stack_fds) / 6) {
                /* Not enough space in on-stack array; must use kmalloc */
                ret = -ENOMEM;
                bits = kmalloc(6 * size, GFP_KERNEL);
                if (!bits)
                        goto out_nofds;
        }
        fds.in      = bits;
        fds.out     = bits +   size;
        fds.ex      = bits + 2*size;
        fds.res_in  = bits + 3*size;
        fds.res_out = bits + 4*size;
        fds.res_ex  = bits + 5*size;

        if ((ret = get_fd_set(n, inp, fds.in)) ||
            (ret = get_fd_set(n, outp, fds.out)) ||
            (ret = get_fd_set(n, exp, fds.ex)))
                goto out;
        zero_fd_set(n, fds.res_in);
        zero_fd_set(n, fds.res_out);
        zero_fd_set(n, fds.res_ex);

        ret = do_select(n, &fds, end_time);

        if (ret < 0)
                goto out;
        if (!ret) {
                ret = -ERESTARTNOHAND;
                if (signal_pending(current))
                        goto out;
                ret = 0;
        }

        if (set_fd_set(n, inp, fds.res_in) ||
            set_fd_set(n, outp, fds.res_out) ||
            set_fd_set(n, exp, fds.res_ex))
                ret = -EFAULT;

out:
        if (bits != stack_fds)
                kfree(bits);
out_nofds:
        return ret;
}
SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
                fd_set __user *, exp, struct timeval __user *, tvp)
{
        struct timespec end_time, *to = NULL;
        struct timeval tv;
        int ret;

        if (tvp) {
                if (copy_from_user(&tv, tvp, sizeof(tv)))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to,
                                tv.tv_sec + (tv.tv_usec / USEC_PER_SEC),
                                (tv.tv_usec % USEC_PER_SEC) * NSEC_PER_USEC))
                        return -EINVAL;
        }

        ret = core_sys_select(n, inp, outp, exp, to);
        ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);

        return ret;
}
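/*
 * Editor's note on the conversion above: folding tv_usec / USEC_PER_SEC
 * into the seconds also normalizes oversized timevals.  E.g. a user
 * timeval of { .tv_sec = 1, .tv_usec = 2500000 } becomes 1 + 2 = 3 s
 * plus (2500000 % 1000000) * 1000 = 500,000,000 ns, i.e. 3.5 s.
 */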
static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
                       fd_set __user *exp, struct timespec __user *tsp,
                       const sigset_t __user *sigmask, size_t sigsetsize)
{
        sigset_t ksigmask, sigsaved;
        struct timespec ts, end_time, *to = NULL;
        int ret;

        if (tsp) {
                if (copy_from_user(&ts, tsp, sizeof(ts)))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
        }

        if (sigmask) {
                /* XXX: Don't preclude handling different sized sigset_t's. */
                if (sigsetsize != sizeof(sigset_t))
                        return -EINVAL;
                if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
                        return -EFAULT;

                sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
                sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
        }

        ret = core_sys_select(n, inp, outp, exp, to);
        ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

        if (ret == -ERESTARTNOHAND) {
                /*
                 * Don't restore the signal mask yet. Let do_signal() deliver
                 * the signal on the way back to userspace, before the signal
                 * mask is restored.
                 */
                if (sigmask) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                                        sizeof(sigsaved));
                        set_restore_sigmask();
                }
        } else if (sigmask)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return ret;
}
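/*
 * Classic use from userspace (editor's illustration): atomically unblocking
 * a signal only for the duration of the wait, which closes the race window
 * that a separate sigprocmask() + select() pair would leave open:
 *
 *      sigset_t empty;
 *
 *      sigemptyset(&empty);
 *      ready = pselect(nfds, &rfds, NULL, NULL, NULL, &empty);
 */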
/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
                fd_set __user *, exp, struct timespec __user *, tsp,
                void __user *, sig)
{
        size_t sigsetsize = 0;
        sigset_t __user *up = NULL;

        if (sig) {
                if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
                    || __get_user(up, (sigset_t __user * __user *)sig)
                    || __get_user(sigsetsize,
                                (size_t __user *)(sig+sizeof(void *))))
                        return -EFAULT;
        }

        return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
}
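/*
 * Userspace view (editor's illustration): the C library packs the sixth
 * argument roughly as
 *
 *      struct { const sigset_t *ss; size_t ss_len; } psig = { &mask, 8 };
 *      syscall(__NR_pselect6, n, &in, &out, &ex, &ts, &psig);
 *
 * which is what the two __get_user() calls above unpack.  Note that
 * ss_len must equal the *kernel's* sigset size (8 bytes on x86-64, as
 * checked in do_pselect()), not the much larger glibc sigset_t.
 */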
#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
        unsigned long n;
        fd_set __user *inp, *outp, *exp;
        struct timeval __user *tvp;
};

SYSCALL_DEFINE1(old_select, struct sel_arg_struct __user *, arg)
{
        struct sel_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
#endif
struct poll_list {
        struct poll_list *next;
        int len;
        struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
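/*
 * Editor's note, assuming 4 KiB pages and a 64-bit build (8-byte next
 * pointer + 4-byte len, padded to 16 bytes): POLLFD_PER_PAGE works out
 * to (4096 - 16) / 8 = 510 pollfds per chained page.
 */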
/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
                                     bool *can_busy_poll,
                                     unsigned int busy_flag)
{
        unsigned int mask;
        int fd;

        mask = 0;
        fd = pollfd->fd;
        if (fd >= 0) {
                struct fd f = fdget(fd);
                mask = POLLNVAL;
                if (f.file) {
                        mask = DEFAULT_POLLMASK;
                        if (f.file->f_op && f.file->f_op->poll) {
                                pwait->_key = pollfd->events|POLLERR|POLLHUP;
                                pwait->_key |= busy_flag;
                                mask = f.file->f_op->poll(f.file, pwait);
                                if (mask & busy_flag)
                                        *can_busy_poll = true;
                        }
                        /* Mask out unneeded events. */
                        mask &= pollfd->events | POLLERR | POLLHUP;
                        fdput(f);
                }
        }
        pollfd->revents = mask;

        return mask;
}
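/*
 * Editor's note: a pollfd with fd < 0 falls straight through with
 * revents == 0.  A common userspace idiom exploits this to temporarily
 * ignore one entry (e.g. by negating the fd) without rebuilding the
 * whole pollfd array.
 */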
static int do_poll(unsigned int nfds,  struct poll_list *list,
                   struct poll_wqueues *wait, struct timespec *end_time)
{
        poll_table* pt = &wait->pt;
        ktime_t expire, *to = NULL;
        int timed_out = 0, count = 0;
        unsigned long slack = 0;
        unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
        unsigned long busy_end = 0;

        /* Optimise the no-wait case */
        if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
                pt->_qproc = NULL;
                timed_out = 1;
        }

        if (end_time && !timed_out)
                slack = select_estimate_accuracy(end_time);

        for (;;) {
                struct poll_list *walk;
                bool can_busy_loop = false;

                for (walk = list; walk != NULL; walk = walk->next) {
                        struct pollfd * pfd, * pfd_end;

                        pfd = walk->entries;
                        pfd_end = pfd + walk->len;
                        for (; pfd != pfd_end; pfd++) {
                                /*
                                 * Fish for events. If we found one, record it
                                 * and kill poll_table->_qproc, so we don't
                                 * needlessly register any other waiters after
                                 * this. They'll get immediately deregistered
                                 * when we break out and return.
                                 */
                                if (do_pollfd(pfd, pt, &can_busy_loop,
                                              busy_flag)) {
                                        count++;
                                        pt->_qproc = NULL;
                                        /* found something, stop busy polling */
                                        busy_flag = 0;
                                        can_busy_loop = false;
                                }
                        }
                }
                /*
                 * All waiters have already been registered, so don't provide
                 * a poll_table->_qproc to them on the next loop iteration.
                 */
                pt->_qproc = NULL;
                if (!count) {
                        count = wait->error;
                        if (signal_pending(current))
                                count = -EINTR;
                }
                if (count || timed_out)
                        break;

                /* only if found POLL_BUSY_LOOP sockets && not out of time */
                if (can_busy_loop && !need_resched()) {
                        if (!busy_end) {
                                busy_end = busy_loop_end_time();
                                continue;
                        }
                        if (!busy_loop_timeout(busy_end))
                                continue;
                }
                busy_flag = 0;

                /*
                 * If this is the first loop and we have a timeout
                 * given, then we convert to ktime_t and set the to
                 * pointer to the expiry value.
                 */
                if (end_time && !to) {
                        expire = timespec_to_ktime(*end_time);
                        to = &expire;
                }

                if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
                        timed_out = 1;
        }
        return count;
}
#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / \
                        sizeof(struct pollfd))
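/*
 * Editor's note, assuming POLL_STACK_ALLOC is 256 and the 64-bit layout
 * above: stack_pps is 256 bytes, so N_STACK_PPS = (256 - 16) / 8 = 30
 * pollfds are handled without reaching for kmalloc() at all.
 */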
int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
                struct timespec *end_time)
{
        struct poll_wqueues table;
        int err = -EFAULT, fdcount, len, size;
        /* Allocate small arguments on the stack to save memory and be
           faster - use long to make sure the buffer is aligned properly
           on 64 bit archs to avoid unaligned access */
        long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
        struct poll_list *const head = (struct poll_list *)stack_pps;
        struct poll_list *walk = head;
        unsigned long todo = nfds;

        if (nfds > rlimit(RLIMIT_NOFILE))
                return -EINVAL;

        len = min_t(unsigned int, nfds, N_STACK_PPS);
        for (;;) {
                walk->next = NULL;
                walk->len = len;
                if (!len)
                        break;

                if (copy_from_user(walk->entries, ufds + nfds-todo,
                                        sizeof(struct pollfd) * walk->len))
                        goto out_fds;

                todo -= walk->len;
                if (!todo)
                        break;

                len = min(todo, POLLFD_PER_PAGE);
                size = sizeof(struct poll_list) + sizeof(struct pollfd) * len;
                walk = walk->next = kmalloc(size, GFP_KERNEL);
                if (!walk) {
                        err = -ENOMEM;
                        goto out_fds;
                }
        }

        poll_initwait(&table);
        fdcount = do_poll(nfds, head, &table, end_time);
        poll_freewait(&table);

        for (walk = head; walk; walk = walk->next) {
                struct pollfd *fds = walk->entries;
                int j;

                for (j = 0; j < walk->len; j++, ufds++)
                        if (__put_user(fds[j].revents, &ufds->revents))
                                goto out_fds;
        }

        err = fdcount;
out_fds:
        walk = head->next;
        while (walk) {
                struct poll_list *pos = walk;
                walk = walk->next;
                kfree(pos);
        }

        return err;
}
static long do_restart_poll(struct restart_block *restart_block)
{
        struct pollfd __user *ufds = restart_block->poll.ufds;
        int nfds = restart_block->poll.nfds;
        struct timespec *to = NULL, end_time;
        int ret;

        if (restart_block->poll.has_timeout) {
                end_time.tv_sec = restart_block->poll.tv_sec;
                end_time.tv_nsec = restart_block->poll.tv_nsec;
                to = &end_time;
        }

        ret = do_sys_poll(ufds, nfds, to);

        if (ret == -EINTR) {
                restart_block->fn = do_restart_poll;
                ret = -ERESTART_RESTARTBLOCK;
        }
        return ret;
}
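/*
 * Editor's note: poll() restarts via a restart block rather than plain
 * -ERESTARTNOHAND because its timeout is relative.  Naively re-issuing
 * the syscall after a handled signal would start the full timeout over;
 * the restart block instead carries the already-computed absolute
 * end_time, so do_restart_poll() resumes waiting only for the time that
 * remains.
 */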
SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
                int, timeout_msecs)
{
        struct timespec end_time, *to = NULL;
        int ret;

        if (timeout_msecs >= 0) {
                to = &end_time;
                poll_select_set_timeout(to, timeout_msecs / MSEC_PER_SEC,
                        NSEC_PER_MSEC * (timeout_msecs % MSEC_PER_SEC));
        }

        ret = do_sys_poll(ufds, nfds, to);

        if (ret == -EINTR) {
                struct restart_block *restart_block;

                restart_block = &current_thread_info()->restart_block;
                restart_block->fn = do_restart_poll;
                restart_block->poll.ufds = ufds;
                restart_block->poll.nfds = nfds;

                if (timeout_msecs >= 0) {
                        restart_block->poll.tv_sec = end_time.tv_sec;
                        restart_block->poll.tv_nsec = end_time.tv_nsec;
                        restart_block->poll.has_timeout = 1;
                } else
                        restart_block->poll.has_timeout = 0;

                ret = -ERESTART_RESTARTBLOCK;
        }
        return ret;
}

SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
                struct timespec __user *, tsp, const sigset_t __user *, sigmask,
                size_t, sigsetsize)
{
        sigset_t ksigmask, sigsaved;
        struct timespec ts, end_time, *to = NULL;
        int ret;

        if (tsp) {
                if (copy_from_user(&ts, tsp, sizeof(ts)))
                        return -EFAULT;

                to = &end_time;
                if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
                        return -EINVAL;
        }

        if (sigmask) {
                /* XXX: Don't preclude handling different sized sigset_t's. */
                if (sigsetsize != sizeof(sigset_t))
                        return -EINVAL;
                if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
                        return -EFAULT;

                sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
                sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
        }

        ret = do_sys_poll(ufds, nfds, to);

        /* We can restart this syscall, usually */
        if (ret == -EINTR) {
                /*
                 * Don't restore the signal mask yet. Let do_signal() deliver
                 * the signal on the way back to userspace, before the signal
                 * mask is restored.
                 */
                if (sigmask) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                                        sizeof(sigsaved));
                        set_restore_sigmask();
                }
                ret = -ERESTARTNOHAND;
        } else if (sigmask)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);

        return ret;
}