/*
 * This file contains the procedures for the handling of select and poll
 *
 * Created for Linux based loosely upon Mathius Lattner's minix
 * patches by Peter MacDonald. Heavily edited by Linus.
 *
 * COFF/ELF binary emulation. If the process has the STICKY_TIMEOUTS
 * flag set in its personality we do *not* modify the given timeout
 * parameter to reflect time remaining.
 *
 * Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
 * of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
 */
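/*
 * Illustrative userspace sketch (not part of this file, compiled out):
 * with the STICKY_TIMEOUTS personality flag set, select() leaves the
 * timeout argument untouched instead of updating it to the time
 * remaining.  The function name below is hypothetical.
 */
#if 0
#include <sys/personality.h>
#include <sys/select.h>

static void sticky_timeouts_demo(void)
{
	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };

	/* Keep the current persona, just add the STICKY_TIMEOUTS flag. */
	personality(personality(0xffffffff) | STICKY_TIMEOUTS);

	/* No fds to watch, so this simply sleeps; tv is *not* modified. */
	select(0, NULL, NULL, NULL, &tv);
}
#endif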
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/personality.h> /* for STICKY_TIMEOUTS */
#include <linux/file.h>
#include <linux/rcupdate.h>

#include <asm/uaccess.h>
#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
struct poll_table_page {
	struct poll_table_page *next;
	struct poll_table_entry *entry;
	struct poll_table_entry entries[0];
};
#define POLL_TABLE_FULL(table) \
	((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
/*
 * Ok, Peter made a complicated, but straightforward multiple_wait() function.
 * I have rewritten this, taking some shortcuts: this code may not be easy to
 * follow, but it should be free of race-conditions, and it's practical. If you
 * understand what I'm doing here, then you understand how the Linux
 * sleep/wakeup mechanism works.
 *
 * Two very simple procedures, poll_wait() and poll_freewait(), do all the
 * work. poll_wait() is an inline-function defined in <linux/poll.h>,
 * as all select/poll functions have to call it to add an entry to the
 * poll table.
 */
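/*
 * Illustrative sketch (not part of this file, compiled out): a typical
 * driver ->poll method calls poll_wait() to register itself on its wait
 * queue, then reports which events are ready right now.  struct
 * example_dev and its fields are hypothetical.
 */
#if 0
struct example_dev {
	wait_queue_head_t readq;	/* woken when new data arrives */
	int data_ready;
};

static unsigned int example_dev_poll(struct file *filp, poll_table *wait)
{
	struct example_dev *dev = filp->private_data;
	unsigned int mask = 0;

	/* Adds an entry via __pollwait() below; never sleeps here. */
	poll_wait(filp, &dev->readq, wait);

	if (dev->data_ready)
		mask |= POLLIN | POLLRDNORM;
	mask |= POLLOUT | POLLWRNORM;	/* always writable in this sketch */

	return mask;
}
#endif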
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p);
void poll_initwait(struct poll_wqueues *pwq)
{
	init_poll_funcptr(&pwq->pt, __pollwait);
	pwq->error = 0;
	pwq->table = NULL;
	pwq->inline_index = 0;
}

EXPORT_SYMBOL(poll_initwait);
static void free_poll_entry(struct poll_table_entry *entry)
{
	remove_wait_queue(entry->wait_address, &entry->wait);
}
void poll_freewait(struct poll_wqueues *pwq)
{
	struct poll_table_page *p = pwq->table;
	int i;

	for (i = 0; i < pwq->inline_index; i++)
		free_poll_entry(pwq->inline_entries + i);
	while (p) {
		struct poll_table_entry *entry;
		struct poll_table_page *old;

		entry = p->entry;
		do {
			entry--;
			free_poll_entry(entry);
		} while (entry > p->entries);
		old = p;
		p = p->next;
		free_page((unsigned long) old);
	}
}

EXPORT_SYMBOL(poll_freewait);
static struct poll_table_entry *poll_get_entry(poll_table *_p)
{
	struct poll_wqueues *p = container_of(_p, struct poll_wqueues, pt);
	struct poll_table_page *table = p->table;

	if (p->inline_index < N_INLINE_POLL_ENTRIES)
		return p->inline_entries + p->inline_index++;

	if (!table || POLL_TABLE_FULL(table)) {
		struct poll_table_page *new_table;

		new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
		if (!new_table) {
			p->error = -ENOMEM;
			__set_current_state(TASK_RUNNING);
			return NULL;
		}
		new_table->entry = new_table->entries;
		new_table->next = table;
		p->table = new_table;
		table = new_table;
	}

	return table->entry++;
}
/* Add a new entry */
static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
		       poll_table *p)
{
	struct poll_table_entry *entry = poll_get_entry(p);

	if (!entry)
		return;
	entry->wait_address = wait_address;
	init_waitqueue_entry(&entry->wait, current);
	add_wait_queue(wait_address, &entry->wait);
}
#define FDS_IN(fds, n)		(fds->in + n)
#define FDS_OUT(fds, n)		(fds->out + n)
#define FDS_EX(fds, n)		(fds->ex + n)

#define BITS(fds, n)	(*FDS_IN(fds, n)|*FDS_OUT(fds, n)|*FDS_EX(fds, n))
static int max_select_fd(unsigned long n, fd_set_bits *fds)
{
	unsigned long *open_fds;

	/* handle last in-complete long-word first */
	set = ~(~0UL << (n & (__NFDBITS-1)));

	fdt = files_fdtable(current->files);
	open_fds = fdt->open_fds->fds_bits+n;

	if (!(set & ~*open_fds))

	if (set & ~*open_fds)

	max += n * __NFDBITS;
#define MEM(i,m)	((m)+(unsigned)(i)/__NFDBITS)
#define ISSET(i,m)	(((i)&*(m)) != 0)
#define SET(i,m)	(*(m) |= (i))

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)
int do_select(int n, fd_set_bits *fds, s64 *timeout)
{
	struct poll_wqueues table;

	retval = max_select_fd(n, fds);

	poll_initwait(&table);

	for (;;) {
		unsigned long *rinp, *routp, *rexp, *inp, *outp, *exp;

		set_current_state(TASK_INTERRUPTIBLE);

		inp = fds->in; outp = fds->out; exp = fds->ex;
		rinp = fds->res_in; routp = fds->res_out; rexp = fds->res_ex;

		for (i = 0; i < n; ++rinp, ++routp, ++rexp) {
			unsigned long in, out, ex, all_bits, bit = 1, mask, j;
			unsigned long res_in = 0, res_out = 0, res_ex = 0;
			const struct file_operations *f_op = NULL;
			struct file *file = NULL;

			in = *inp++; out = *outp++; ex = *exp++;
			all_bits = in | out | ex;

			for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
				if (!(bit & all_bits))
					continue;
				file = fget_light(i, &fput_needed);
				if (file) {
					f_op = file->f_op;
					mask = DEFAULT_POLLMASK;
					if (f_op && f_op->poll)
						mask = (*f_op->poll)(file, retval ? NULL : wait);
					fput_light(file, fput_needed);
					if ((mask & POLLIN_SET) && (in & bit)) {
						res_in |= bit;
						retval++;
					}
					if ((mask & POLLOUT_SET) && (out & bit)) {
						res_out |= bit;
						retval++;
					}
					if ((mask & POLLEX_SET) && (ex & bit)) {
						res_ex |= bit;
						retval++;
					}
				}
			}
		}

		if (retval || !*timeout || signal_pending(current))
			break;
		if (table.error) {
			retval = table.error;
			break;
		}

		if (*timeout < 0) {
			/* Wait indefinitely */
			__timeout = MAX_SCHEDULE_TIMEOUT;
		} else if (unlikely(*timeout >= (s64)MAX_SCHEDULE_TIMEOUT - 1)) {
			/* Wait for longer than MAX_SCHEDULE_TIMEOUT. Do it in a loop */
			__timeout = MAX_SCHEDULE_TIMEOUT - 1;
			*timeout -= __timeout;
		} else {
			__timeout = *timeout;
		}

		__timeout = schedule_timeout(__timeout);
		if (*timeout >= 0)
			*timeout += __timeout;
	}
	__set_current_state(TASK_RUNNING);

	poll_freewait(&table);

	return retval;
}
/*
 * We can actually return ERESTARTSYS instead of EINTR, but I'd
 * like to be certain this leads to no problems. So I return
 * EINTR just for safety.
 *
 * Update: ERESTARTSYS breaks at least the xview clock binary, so
 * I'm trying ERESTARTNOHAND which restarts only when you want to.
 */

#define MAX_SELECT_SECONDS \
	((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
static int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, s64 *timeout)
{
	/* Allocate small arguments on the stack to save memory and be faster */
	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
	/* max_fds can increase, so grab it once to avoid race */
	fdt = files_fdtable(current->files);
	max_fds = fdt->max_fds;

	/*
	 * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
	 * since we use fd_sets we need to allocate memory in units of
	 * long-words.
	 */
	if (size > sizeof(stack_fds) / 6) {
		/* Not enough space in on-stack array; must use kmalloc */
		bits = kmalloc(6 * size, GFP_KERNEL);
	}
	fds.in      = bits;
	fds.out     = bits +   size;
	fds.ex      = bits + 2*size;
	fds.res_in  = bits + 3*size;
	fds.res_out = bits + 4*size;
	fds.res_ex  = bits + 5*size;
	if ((ret = get_fd_set(n, inp, fds.in)) ||
	    (ret = get_fd_set(n, outp, fds.out)) ||
	    (ret = get_fd_set(n, exp, fds.ex)))
		goto out;
	zero_fd_set(n, fds.res_in);
	zero_fd_set(n, fds.res_out);
	zero_fd_set(n, fds.res_ex);

	ret = do_select(n, &fds, timeout);

	ret = -ERESTARTNOHAND;
	if (signal_pending(current))
		goto out;

	if (set_fd_set(n, inp, fds.res_in) ||
	    set_fd_set(n, outp, fds.res_out) ||
	    set_fd_set(n, exp, fds.res_ex))
		ret = -EFAULT;

out:
	if (bits != stack_fds)
		kfree(bits);

	return ret;
}
asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timeval __user *tvp)
{
	if (copy_from_user(&tv, tvp, sizeof(tv)))
		return -EFAULT;

	if (tv.tv_sec < 0 || tv.tv_usec < 0)
		return -EINVAL;

	/* Cast to u64 to make GCC stop complaining */
	if ((u64)tv.tv_sec >= (u64)MAX_INT64_SECONDS)
		timeout = -1;	/* infinite */
	else {
		timeout = DIV_ROUND_UP(tv.tv_usec, USEC_PER_SEC/HZ);
		timeout += tv.tv_sec * HZ;
	}
	ret = core_sys_select(n, inp, outp, exp, &timeout);

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;
	rtv.tv_usec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ));
	rtv.tv_sec = timeout;
	if (timeval_compare(&rtv, &tv) >= 0)
		rtv = tv;
	if (copy_to_user(tvp, &rtv, sizeof(rtv))) {
sticky:
		/*
		 * If an application puts its timeval in read-only
		 * memory, we don't want the Linux-specific update to
		 * the timeval to cause a fault after the select has
		 * completed successfully. However, because we're not
		 * updating the timeval, we can't restart the system
		 * call.
		 */
		if (ret == -ERESTARTNOHAND)
			ret = -EINTR;
	}

	return ret;
}
#ifdef TIF_RESTORE_SIGMASK
asmlinkage long sys_pselect7(int n, fd_set __user *inp, fd_set __user *outp,
			     fd_set __user *exp, struct timespec __user *tsp,
			     const sigset_t __user *sigmask, size_t sigsetsize)
{
	s64 timeout = MAX_SCHEDULE_TIMEOUT;
	sigset_t ksigmask, sigsaved;
	if (copy_from_user(&ts, tsp, sizeof(ts)))
		return -EFAULT;

	if (ts.tv_sec < 0 || ts.tv_nsec < 0)
		return -EINVAL;

	/* Cast to u64 to make GCC stop complaining */
	if ((u64)ts.tv_sec >= (u64)MAX_INT64_SECONDS)
		timeout = -1;	/* infinite */
	else {
		timeout = DIV_ROUND_UP(ts.tv_nsec, NSEC_PER_SEC/HZ);
		timeout += ts.tv_sec * HZ;
	}
	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;
	if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
		return -EFAULT;

	sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
	sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	ret = core_sys_select(n, inp, outp, exp, &timeout);

	if (current->personality & STICKY_TIMEOUTS)
		goto sticky;
	rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) *
			1000;
	rts.tv_sec = timeout;
	if (timespec_compare(&rts, &ts) >= 0)
		rts = ts;
	if (copy_to_user(tsp, &rts, sizeof(rts))) {
sticky:
		/*
		 * If an application puts its timeval in read-only
		 * memory, we don't want the Linux-specific update to
		 * the timeval to cause a fault after the select has
		 * completed successfully. However, because we're not
		 * updating the timeval, we can't restart the system
		 * call.
		 */
		if (ret == -ERESTARTNOHAND)
			ret = -EINTR;
	}
	if (ret == -ERESTARTNOHAND) {
		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		if (sigmask) {
			memcpy(&current->saved_sigmask, &sigsaved,
					sizeof(sigsaved));
			set_thread_flag(TIF_RESTORE_SIGMASK);
		}
	} else if (sigmask)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return ret;
}
/*
 * Most architectures can't handle 7-argument syscalls. So we provide a
 * 6-argument version where the sixth argument is a pointer to a structure
 * which has a pointer to the sigset_t itself followed by a size_t containing
 * the sigset size.
 */
asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
			     fd_set __user *exp, struct timespec __user *tsp,
			     void __user *sig)
{
	size_t sigsetsize = 0;
	sigset_t __user *up = NULL;
	if (sig) {
		if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
		    || __get_user(up, (sigset_t __user * __user *)sig)
		    || __get_user(sigsetsize,
				  (size_t __user *)(sig+sizeof(void *))))
			return -EFAULT;
	}
	return sys_pselect7(n, inp, outp, exp, tsp, up, sigsetsize);
}
#endif /* TIF_RESTORE_SIGMASK */
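/*
 * Illustrative only (compiled out): the sixth argument of sys_pselect6()
 * above points at a pair laid out like the hypothetical structure below.
 * The real code reads the two fields with __get_user() at offsets 0 and
 * sizeof(void *) rather than defining a named type.
 */
#if 0
struct pselect6_sigarg {
	const sigset_t __user *sigmask;	/* forwarded to sys_pselect7() */
	size_t sigsetsize;		/* must equal sizeof(sigset_t) */
};
#endif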
struct poll_list {
	struct poll_list *next;
	int len;
	struct pollfd entries[0];
};

#define POLLFD_PER_PAGE  ((PAGE_SIZE-sizeof(struct poll_list)) / sizeof(struct pollfd))
/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait)
{
	file = fget_light(fd, &fput_needed);
	if (file != NULL) {
		mask = DEFAULT_POLLMASK;
		if (file->f_op && file->f_op->poll)
			mask = file->f_op->poll(file, pwait);
		/* Mask out unneeded events. */
		mask &= pollfd->events | POLLERR | POLLHUP;
		fput_light(file, fput_needed);
	}
	pollfd->revents = mask;

	return mask;
}
static int do_poll(unsigned int nfds, struct poll_list *list,
		   struct poll_wqueues *wait, s64 *timeout)
{
	poll_table *pt = &wait->pt;

	/* Optimise the no-wait case */

	for (;;) {
		struct poll_list *walk;

		set_current_state(TASK_INTERRUPTIBLE);
		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd *pfd, *pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill the poll_table, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt)) {
					count++;
					pt = NULL;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table to them on the next loop iteration.
		 */
		pt = NULL;
		if (count || !*timeout || signal_pending(current))
			break;

		if (*timeout < 0) {
			/* Wait indefinitely */
			__timeout = MAX_SCHEDULE_TIMEOUT;
		} else if (unlikely(*timeout >= (s64)MAX_SCHEDULE_TIMEOUT-1)) {
			/*
			 * Wait for longer than MAX_SCHEDULE_TIMEOUT. Do it in
			 * a loop.
			 */
			__timeout = MAX_SCHEDULE_TIMEOUT - 1;
			*timeout -= __timeout;
		} else {
			__timeout = *timeout;
		}

		__timeout = schedule_timeout(__timeout);
		if (*timeout >= 0)
			*timeout += __timeout;
	}
	__set_current_state(TASK_RUNNING);
	return count;
}
#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / \
			sizeof(struct pollfd))
int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, s64 *timeout)
{
	struct poll_wqueues table;
	struct poll_list *head;
	struct poll_list *walk;
	/* Allocate small arguments on the stack to save memory and be
	   faster - use long to make sure the buffer is aligned properly
	   on 64 bit archs to avoid unaligned access */
	long stack_pps[POLL_STACK_ALLOC/sizeof(long)];
	struct poll_list *stack_pp = NULL;

	/* Do a sanity check on nfds ... */
	if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		return -EINVAL;

	poll_initwait(&table);

	struct poll_list *pp;

	if (stack_pp == NULL)
		num = POLLFD_PER_PAGE;

	size = sizeof(struct poll_list) + sizeof(struct pollfd)*num;
	stack_pp = pp = (struct poll_list *)stack_pps;

	pp = kmalloc(size, GFP_KERNEL);

	if (copy_from_user(pp->entries, ufds + nfds-i,
			sizeof(struct pollfd)*num)) {

	fdcount = do_poll(nfds, head, &table, timeout);

	/* OK, now copy the revents fields back to user space. */
	while (walk != NULL) {
		struct pollfd *fds = walk->entries;

		for (j = 0; j < walk->len; j++, ufds++) {
			if (__put_user(fds[j].revents, &ufds->revents))

	if (!fdcount && signal_pending(current))

	struct poll_list *pp = walk->next;
	if (walk != stack_pp)

	poll_freewait(&table);
}
asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
			 long timeout_msecs)
{
	if (timeout_msecs > 0) {
		/* We can only overflow if HZ > 1000 */
		if (timeout_msecs / 1000 > (s64)0x7fffffffffffffffULL / (s64)HZ)
			timeout_jiffies = -1;
		else
			timeout_jiffies = msecs_to_jiffies(timeout_msecs) + 1;
	} else {
		/* Infinite (< 0) or no (0) timeout */
		timeout_jiffies = timeout_msecs;
	}

	return do_sys_poll(ufds, nfds, &timeout_jiffies);
}
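/*
 * Illustrative sketch (not part of this file, compiled out) of the
 * millisecond-to-jiffies policy implemented by sys_poll() above: zero and
 * negative values pass through (no wait / infinite wait), and a positive
 * value that cannot be represented in jiffies is clamped to infinite.
 * The helper name is hypothetical.
 */
#if 0
static s64 example_poll_msecs_to_jiffies(long timeout_msecs)
{
	if (timeout_msecs <= 0)
		return timeout_msecs;	/* 0: don't wait, < 0: wait forever */
	/* Overflow is only possible when HZ > 1000. */
	if (timeout_msecs / 1000 > (s64)0x7fffffffffffffffULL / (s64)HZ)
		return -1;		/* treat as infinite */
	return msecs_to_jiffies(timeout_msecs) + 1;
}
#endif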
#ifdef TIF_RESTORE_SIGMASK
asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
			  struct timespec __user *tsp, const sigset_t __user *sigmask,
			  size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	if (copy_from_user(&ts, tsp, sizeof(ts)))
		return -EFAULT;

	/* Cast to u64 to make GCC stop complaining */
	if ((u64)ts.tv_sec >= (u64)MAX_INT64_SECONDS)
		timeout = -1;	/* infinite */
	else {
		timeout = DIV_ROUND_UP(ts.tv_nsec, NSEC_PER_SEC/HZ);
		timeout += ts.tv_sec * HZ;
	}
	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;
	if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
		return -EFAULT;

	sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
	sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	ret = do_sys_poll(ufds, nfds, &timeout);

	/* We can restart this syscall, usually */

		/*
		 * Don't restore the signal mask yet. Let do_signal() deliver
		 * the signal on the way back to userspace, before the signal
		 * mask is restored.
		 */
		memcpy(&current->saved_sigmask, &sigsaved,
				sizeof(sigsaved));
		set_thread_flag(TIF_RESTORE_SIGMASK);

		ret = -ERESTARTNOHAND;

	sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	if (tsp && timeout >= 0) {
		if (current->personality & STICKY_TIMEOUTS)
			goto sticky;
		/* Yes, we know it's actually an s64, but it's also positive. */
		rts.tv_nsec = jiffies_to_usecs(do_div((*(u64*)&timeout), HZ)) *
				1000;
		rts.tv_sec = timeout;
		if (timespec_compare(&rts, &ts) >= 0)
			rts = ts;
		if (copy_to_user(tsp, &rts, sizeof(rts))) {
sticky:
			/*
			 * If an application puts its timeval in read-only
			 * memory, we don't want the Linux-specific update to
			 * the timeval to cause a fault after the select has
			 * completed successfully. However, because we're not
			 * updating the timeval, we can't restart the system
			 * call.
			 */
			if (ret == -ERESTARTNOHAND && timeout >= 0)
				ret = -EINTR;
		}
	}

	return ret;
}
#endif /* TIF_RESTORE_SIGMASK */