/*
 * fs/eventpoll.c ( Efficient event polling implementation )
 * Copyright (C) 2001,...,2006 Davide Libenzi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Davide Libenzi <davidel@xmailserver.org>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rwsem.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>

/*
 * There are three levels of locking required by epoll :
 *
 * 1) epmutex (mutex)
 * 2) ep->sem (rw_semaphore)
 * 3) ep->lock (rw_lock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a spinlock (ep->lock) because we manipulate objects
 * from inside the poll callback, which might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * read-write semaphore (ep->sem). It is acquired on read during
 * the event transfer loop and on write during epoll_ctl(EPOLL_CTL_DEL)
 * and during eventpoll_release_file(). Then we also need a global
 * semaphore to serialize eventpoll_release_file() and ep_free().
 * This semaphore is acquired by ep_free() during the epoll file
 * cleanup path and it is also acquired by eventpoll_release_file()
 * if a file has been pushed inside an epoll set and is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 * It is possible to drop the "ep->sem" and to use the global
 * semaphore "epmutex" (together with "ep->lock") to have it working,
 * but having "ep->sem" will make the interface more scalable.
 * Events that require holding "epmutex" are very rare, while for
 * normal operations the epoll private "ep->sem" will guarantee
 * better scalability.
 */
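
/*
 * Illustrative sketch of the nesting described above (not a verbatim code
 * path from this file; eventpoll_release_file() below comes closest, taking
 * 1) and 2) itself while 3) is taken inside ep_remove()). A path needing
 * every level would acquire them strictly in this order:
 *
 *	mutex_lock(&epmutex);				// 1) global mutex
 *	down_write(&ep->sem);				// 2) per-ep, sleepable
 *	write_lock_irqsave(&ep->lock, flags);		// 3) per-ep, IRQ safe
 *	... manipulate ep->rbr / ep->rdllist ...
 *	write_unlock_irqrestore(&ep->lock, flags);
 *	up_write(&ep->sem);
 *	mutex_unlock(&epmutex);
 */
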
#define EVENTPOLLFS_MAGIC 0x03111965 /* My birthday should work for this :) */

#define DEBUG_EPOLL 0

#if DEBUG_EPOLL > 0
#define DPRINTK(x) printk x
#define DNPRINTK(n, x) do { if ((n) <= DEBUG_EPOLL) printk x; } while (0)
#else /* #if DEBUG_EPOLL > 0 */
#define DPRINTK(x) (void) 0
#define DNPRINTK(n, x) (void) 0
#endif /* #if DEBUG_EPOLL > 0 */

#define DEBUG_EPI 0

#if DEBUG_EPI != 0
#define EPI_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else /* #if DEBUG_EPI != 0 */
#define EPI_SLAB_DEBUG 0
#endif /* #if DEBUG_EPI != 0 */

/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET)

/* Maximum number of poll wake up nests we are allowing */
#define EP_MAX_POLLWAKE_NESTS 4

/* Maximum msec timeout value storable in a long int */
#define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ)

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
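
/*
 * Worked example (assuming the common ABI where sizeof(struct epoll_event)
 * is 12 bytes): EP_MAX_EVENTS = INT_MAX / 12 = 178956970, which keeps
 * maxevents * sizeof(struct epoll_event) within an int and bounds the
 * access_ok() range checked in sys_epoll_wait().
 */
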
/* Structure used as the rb-tree key for every monitored file descriptor */
struct epoll_filefd {
	struct file *file;
	int fd;
};

/*
 * Node that is linked into the "wake_task_list" member of the "struct poll_safewake".
 * It is used to keep track of all tasks that are currently inside the wake_up() code
 * to 1) short-circuit the one coming from the same task and same wait queue head
 * ( loop ) 2) allow a maximum number of epoll descriptors inclusion nesting
 * 3) let go the ones coming from other tasks.
 */
struct wake_task_node {
	struct list_head llink;
	struct task_struct *task;
	wait_queue_head_t *wq;
};

/*
 * This is used to implement the safe poll wake up, avoiding reentry of
 * the poll callback from inside wake_up().
 */
struct poll_safewake {
	struct list_head wake_task_list;
	spinlock_t lock;
};

/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
	/* Protect the access to this structure */
	rwlock_t lock;

	/*
	 * This semaphore is used to ensure that files are not removed
	 * while epoll is using them. This is read-held during the event
	 * collection loop and it is write-held during the file cleanup
	 * path, the epoll file exit code and the ctl operations.
	 */
	struct rw_semaphore sem;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* RB-Tree root used to store monitored fd structs */
	struct rb_root rbr;
};

/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct list_head llink;

	/* The "base" pointer is set to the container "struct epitem" */
	void *base;

	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_t wait;

	/* The wait queue head that linked the "wait" wait queue item */
	wait_queue_head_t *whead;
};

/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the tree.
 */
struct epitem {
	/* RB-Tree node used to link this structure to the eventpoll rb-tree */
	struct rb_node rbn;

	/* List header used to link this structure to the eventpoll ready list */
	struct list_head rdllink;

	/* The file descriptor information this item refers to */
	struct epoll_filefd ffd;

	/* Number of active wait queues attached to poll operations */
	int nwait;

	/* List containing poll wait queues */
	struct list_head pwqlist;

	/* The "container" of this item */
	struct eventpoll *ep;

	/* The structure that describes the interested events and the source fd */
	struct epoll_event event;

	/*
	 * Used to keep track of the usage count of the structure. This avoids
	 * that the structure will disappear from underneath our processing.
	 */
	atomic_t usecnt;

	/* List header used to link this item to the "struct file" items list */
	struct list_head fllink;

	/* List header used to link the item to the transfer list */
	struct list_head txlink;

	/*
	 * This is used during the collection/transfer of events to userspace
	 * to pin items with empty event sets.
	 */
	unsigned int revents;
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};

static void ep_poll_safewake_init(struct poll_safewake *psw);
static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
		    struct eventpoll *ep);
static int ep_alloc(struct eventpoll **pep);
static void ep_free(struct eventpoll *ep);
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
static void ep_use_epitem(struct epitem *epi);
static void ep_release_epitem(struct epitem *epi);
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt);
static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi);
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd);
static int ep_modify(struct eventpoll *ep, struct epitem *epi,
		     struct epoll_event *event);
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi);
static int ep_unlink(struct eventpoll *ep, struct epitem *epi);
static int ep_remove(struct eventpoll *ep, struct epitem *epi);
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key);
static int ep_eventpoll_close(struct inode *inode, struct file *file);
static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait);
static int ep_collect_ready_items(struct eventpoll *ep,
				  struct list_head *txlist, int maxevents);
static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
			  struct epoll_event __user *events);
static void ep_reinject_items(struct eventpoll *ep, struct list_head *txlist);
static int ep_events_transfer(struct eventpoll *ep,
			      struct epoll_event __user *events,
			      int maxevents);
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout);
static int eventpollfs_delete_dentry(struct dentry *dentry);
static struct inode *ep_eventpoll_inode(void);
static int eventpollfs_get_sb(struct file_system_type *fs_type,
			      int flags, const char *dev_name,
			      void *data, struct vfsmount *mnt);

/*
 * This semaphore is used to serialize ep_free() and eventpoll_release_file().
 */
static struct mutex epmutex;

/* Safe wake up implementation */
static struct poll_safewake psw;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;

/* Virtual fs used to allocate inodes for eventpoll files */
static struct vfsmount *eventpoll_mnt __read_mostly;

/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
	.release	= ep_eventpoll_close,
	.poll		= ep_eventpoll_poll
};

/*
 * This is used to register the virtual file system from where
 * eventpoll inodes are allocated.
 */
static struct file_system_type eventpoll_fs_type = {
	.name		= "eventpollfs",
	.get_sb		= eventpollfs_get_sb,
	.kill_sb	= kill_anon_super,
};

/* Very basic directory entry operations for the eventpoll virtual file system */
static struct dentry_operations eventpollfs_dentry_operations = {
	.d_delete	= eventpollfs_delete_dentry,
};

/* Fast test to see if the file is an eventpoll file */
static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}

/* Setup the structure that is used as key for the rb-tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
			      struct file *file, int fd)
{
	ffd->file = file;
	ffd->fd = fd;
}

/* Compare rb-tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1:
	        (p1->file < p2->file ? -1 : p1->fd - p2->fd));
}
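
/*
 * Worked example: if the same open file is registered through two dup()ed
 * descriptors (say fds 4 and 7, hypothetical numbers), the ->file members
 * compare equal and the tie is broken by fd: ep_cmp_ffd() returns 4 - 7 = -3,
 * so fd 4 sorts to the left of fd 7. Distinct files order by pointer value
 * before fd is ever consulted.
 */
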
/* Special initialization for the rb-tree node to detect linkage */
static inline void ep_rb_initnode(struct rb_node *n)
{
	rb_set_parent(n, n);
}

/* Removes a node from the rb-tree and marks it for a fast is-linked check */
static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r)
{
	rb_erase(n, r);
	rb_set_parent(n, n);
}

/* Fast check to verify that the item is linked to the main rb-tree */
static inline int ep_rb_linked(struct rb_node *n)
{
	return rb_parent(n) != n;
}

/*
 * Remove the item from the list and perform its initialization.
 * This is useful for us because we can test if the item is linked
 * using "ep_is_linked(p)".
 */
static inline void ep_list_del(struct list_head *p)
{
	list_del_init(p);
}

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct list_head *p)
{
	return !list_empty(p);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem * ep_item_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}

/* Get the "struct epitem" from an epoll queue wrapper */
static inline struct epitem * ep_item_from_epqueue(poll_table *p)
{
	return container_of(p, struct ep_pqueue, pt)->epi;
}

/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_hash_event(int op)
{
	return op != EPOLL_CTL_DEL;
}

/* Initialize the poll safe wake up structure */
static void ep_poll_safewake_init(struct poll_safewake *psw)
{
	INIT_LIST_HEAD(&psw->wake_task_list);
	spin_lock_init(&psw->lock);
}

/*
 * Perform a safe wake up of the poll wait list. The problem is that
 * with the new callback'd wake up system, it is possible that the
 * poll callback is reentered from inside the call to wake_up() done
 * on the poll wait queue head. The rule is that we cannot reenter the
 * wake up code from the same task more than EP_MAX_POLLWAKE_NESTS times,
 * and we cannot reenter the same wait queue head at all. This enables
 * a hierarchy of epoll file descriptors no more than
 * EP_MAX_POLLWAKE_NESTS deep. We need the irq version of the spin lock
 * because this one gets called by the poll callback, which in turn is called
 * from inside a wake_up(), that might be called from irq context.
 */
static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
{
	int wake_nests = 0;
	unsigned long flags;
	struct task_struct *this_task = current;
	struct list_head *lsthead = &psw->wake_task_list, *lnk;
	struct wake_task_node *tncur;
	struct wake_task_node tnode;

	spin_lock_irqsave(&psw->lock, flags);

	/* Try to see if the current task is already inside this wakeup call */
	list_for_each(lnk, lsthead) {
		tncur = list_entry(lnk, struct wake_task_node, llink);

		if (tncur->wq == wq ||
		    (tncur->task == this_task && ++wake_nests > EP_MAX_POLLWAKE_NESTS)) {
			/*
			 * Oops ... loop detected or maximum nest level reached.
			 * We abort this wake by breaking the cycle itself.
			 */
			spin_unlock_irqrestore(&psw->lock, flags);
			return;
		}
	}

	/* Add the current task to the list */
	tnode.task = this_task;
	tnode.wq = wq;
	list_add(&tnode.llink, lsthead);

	spin_unlock_irqrestore(&psw->lock, flags);

	/* Do really wake up now */
	wake_up(wq);

	/* Remove the current task from the list */
	spin_lock_irqsave(&psw->lock, flags);
	list_del(&tnode.llink);
	spin_unlock_irqrestore(&psw->lock, flags);
}
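
/*
 * Illustrative userspace scenario (hypothetical fds, not part of this file)
 * showing why the guard above exists; nesting one epoll fd inside another
 * makes a wakeup on the inner one reenter the wakeup code for the outer one:
 *
 *	int ep_outer = epoll_create(1), ep_inner = epoll_create(1);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *	epoll_ctl(ep_outer, EPOLL_CTL_ADD, ep_inner, &ev);
 *	// An event on a file watched by ep_inner now wakes ep_inner's
 *	// poll_wait queue, whose callback wakes ep_outer in turn;
 *	// EP_MAX_POLLWAKE_NESTS bounds how deep such chains may go.
 */
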
/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need this facility to correctly clean up files that are
 * closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
	struct list_head *lsthead = &file->f_ep_links;
	struct eventpoll *ep;
	struct epitem *epi;

	/*
	 * We don't want to get "file->f_ep_lock" because it is not
	 * necessary. It is not necessary because we're in the "struct file"
	 * cleanup path, and this means that no one is using this file anymore.
	 * The only hit might come from ep_free() but holding the semaphore
	 * will correctly serialize the operation. We do need to acquire
	 * "ep->sem" after "epmutex" because ep_remove() requires it when called
	 * from anywhere but ep_free().
	 */
	mutex_lock(&epmutex);

	while (!list_empty(lsthead)) {
		epi = list_entry(lsthead->next, struct epitem, fllink);

		ep = epi->ep;
		ep_list_del(&epi->fllink);
		down_write(&ep->sem);
		ep_remove(ep, epi);
		up_write(&ep->sem);
	}

	mutex_unlock(&epmutex);
}

/*
 * It opens an eventpoll file descriptor by suggesting a storage of "size"
 * file descriptors. The size parameter is just a hint about how to size
 * data structures. It won't prevent the user from storing more than "size"
 * file descriptors inside the epoll interface. It is the kernel part of
 * the userspace epoll_create(2).
 */
asmlinkage long sys_epoll_create(int size)
{
	int error, fd;
	struct eventpoll *ep;
	struct inode *inode;
	struct file *file;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n",
		     current, size));

	/*
	 * Sanity check on the size parameter, and create the internal data
	 * structure ( "struct eventpoll" ).
	 */
	error = -EINVAL;
	if (size <= 0 || (error = ep_alloc(&ep)) != 0)
		goto eexit_1;

	/*
	 * Creates all the items needed to setup an eventpoll file. That is,
	 * a file structure, an inode and a free file descriptor.
	 */
	error = ep_getfd(&fd, &inode, &file, ep);
	if (error)
		goto eexit_2;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
		     current, size, fd));

	return fd;

eexit_2:
	ep_free(ep);
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
		     current, size, error));
	return error;
}
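
/*
 * Minimal userspace sketch of this syscall in use (illustrative only,
 * error handling trimmed):
 *
 *	#include <sys/epoll.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		// The size argument is only a hint, as noted above.
 *		int epfd = epoll_create(10);
 *		if (epfd < 0) {
 *			perror("epoll_create");
 *			exit(1);
 *		}
 *		// ... epoll_ctl() / epoll_wait() calls would follow ...
 *		return 0;
 *	}
 */
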
/*
 * The following function implements the controller interface for
 * the eventpoll file that enables the insertion/removal/change of
 * file descriptors inside the interest set. It represents
 * the kernel part of the user space epoll_ctl(2).
 */
asmlinkage long
sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event __user *event)
{
	int error;
	struct file *file, *tfile;
	struct eventpoll *ep;
	struct epitem *epi;
	struct epoll_event epds;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p)\n",
		     current, epfd, op, fd, event));

	error = -EFAULT;
	if (ep_op_hash_event(op) &&
	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
		goto eexit_1;

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto eexit_1;

	/* Get the "struct file *" for the target file */
	tfile = fget(fd);
	if (!tfile)
		goto eexit_2;

	/* The target file descriptor must support poll */
	error = -EPERM;
	if (!tfile->f_op || !tfile->f_op->poll)
		goto eexit_3;

	/*
	 * We have to check that the file structure underneath the file descriptor
	 * the user passed to us _is_ an eventpoll file. And also we do not permit
	 * adding an epoll file descriptor inside itself.
	 */
	error = -EINVAL;
	if (file == tfile || !is_file_epoll(file))
		goto eexit_3;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	down_write(&ep->sem);

	/* Try to look up the file inside our rb-tree */
	epi = ep_find(ep, tfile, fd);

	error = -EINVAL;
	switch (op) {
	case EPOLL_CTL_ADD:
		if (!epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_insert(ep, &epds, tfile, fd);
		} else
			error = -EEXIST;
		break;
	case EPOLL_CTL_DEL:
		if (epi)
			error = ep_remove(ep, epi);
		else
			error = -ENOENT;
		break;
	case EPOLL_CTL_MOD:
		if (epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_modify(ep, epi, &epds);
		} else
			error = -ENOENT;
		break;
	}

	/*
	 * The function ep_find() increments the usage count of the structure
	 * so, if this is not NULL, we need to release it.
	 */
	if (epi)
		ep_release_epitem(epi);

	up_write(&ep->sem);

eexit_3:
	fput(tfile);
eexit_2:
	fput(file);
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p) = %d\n",
		     current, epfd, op, fd, event, error));

	return error;
}
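
/*
 * Illustrative userspace counterpart (not part of this file): the three ops
 * handled by the switch above. "epfd" and "sock_fd" are hypothetical
 * descriptors:
 *
 *	struct epoll_event ev;
 *
 *	ev.events = EPOLLIN;	// POLLERR/POLLHUP are or-ed in by the kernel
 *	ev.data.fd = sock_fd;
 *	if (epoll_ctl(epfd, EPOLL_CTL_ADD, sock_fd, &ev) < 0)
 *		perror("EPOLL_CTL_ADD");	// EEXIST if already present
 *
 *	ev.events = EPOLLIN | EPOLLOUT;
 *	epoll_ctl(epfd, EPOLL_CTL_MOD, sock_fd, &ev);	// ENOENT if absent
 *
 *	epoll_ctl(epfd, EPOLL_CTL_DEL, sock_fd, NULL);	// event is ignored,
 *							// see ep_op_hash_event()
 */
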
/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_wait(2).
 */
asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
			       int maxevents, int timeout)
{
	int error;
	struct file *file;
	struct eventpoll *ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d)\n",
		     current, epfd, events, maxevents, timeout));

	/* The maximum number of events must be greater than zero */
	if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
		return -EINVAL;

	/* Verify that the area passed by the user is writeable */
	if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) {
		error = -EFAULT;
		goto eexit_1;
	}

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto eexit_1;

	/*
	 * We have to check that the file structure underneath the fd
	 * the user passed to us _is_ an eventpoll file.
	 */
	error = -EINVAL;
	if (!is_file_epoll(file))
		goto eexit_2;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	/* Time to fish for events ... */
	error = ep_poll(ep, events, maxevents, timeout);

eexit_2:
	fput(file);
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d) = %d\n",
		     current, epfd, events, maxevents, timeout, error));

	return error;
}
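
/*
 * Illustrative userspace wait loop (not part of this file). The maxevents
 * bound validated above is the array size passed from here; "handle_fd" is
 * a hypothetical helper:
 *
 *	struct epoll_event evs[64];
 *	int i, n;
 *
 *	for (;;) {
 *		n = epoll_wait(epfd, evs, 64, -1);	// -1 == wait forever
 *		if (n < 0) {
 *			if (errno == EINTR)
 *				continue;
 *			perror("epoll_wait");
 *			break;
 *		}
 *		for (i = 0; i < n; i++)
 *			handle_fd(evs[i].data.fd, evs[i].events);
 *	}
 */
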
#ifdef TIF_RESTORE_SIGMASK

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_pwait(2).
 */
asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
		int maxevents, int timeout, const sigset_t __user *sigmask,
		size_t sigsetsize)
{
	int error;
	sigset_t ksigmask, sigsaved;

	/*
	 * If the caller wants a certain signal mask to be set during the wait,
	 * we apply it here.
	 */
	if (sigmask) {
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;
		sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	error = sys_epoll_wait(epfd, events, maxevents, timeout);

	/*
	 * If we changed the signal mask, we need to restore the original one.
	 * In case we've got a signal while waiting, we do not restore the
	 * signal mask yet, and we allow do_signal() to deliver the signal on
	 * the way back to userspace, before the signal mask is restored.
	 */
	if (sigmask) {
		if (error == -EINTR) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_thread_flag(TIF_RESTORE_SIGMASK);
		} else
			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	}

	return error;
}

#endif /* #ifdef TIF_RESTORE_SIGMASK */
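
/*
 * Illustrative userspace sketch of epoll_pwait(2) (not part of this file):
 * the mask is applied and restored atomically around the wait, closing the
 * race a separate sigprocmask() + epoll_wait() pair would leave open:
 *
 *	sigset_t mask;
 *
 *	sigfillset(&mask);
 *	sigdelset(&mask, SIGUSR1);	// allow only SIGUSR1 during the wait
 *	n = epoll_pwait(epfd, evs, 64, -1, &mask);
 *	// On return the previous mask is back in place, exactly as the
 *	// sigsaved/TIF_RESTORE_SIGMASK handling above guarantees.
 */
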
/*
 * Creates the file descriptor to be used by the epoll interface.
 */
static int ep_getfd(int *efd, struct inode **einode, struct file **efile,
		    struct eventpoll *ep)
{
	struct qstr this;
	char name[32];
	struct dentry *dentry;
	struct inode *inode;
	struct file *file;
	int error, fd;

	/* Get a ready to use file */
	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto eexit_1;

	/* Allocates an inode from the eventpoll file system */
	inode = ep_eventpoll_inode();
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		goto eexit_2;
	}

	/* Allocates a free descriptor to plug the file onto */
	error = get_unused_fd();
	if (error < 0)
		goto eexit_3;
	fd = error;

	/*
	 * Link the inode to a directory entry by creating a unique name
	 * using the inode number.
	 */
	error = -ENOMEM;
	sprintf(name, "[%lu]", inode->i_ino);
	this.name = name;
	this.len = strlen(name);
	this.hash = inode->i_ino;
	dentry = d_alloc(eventpoll_mnt->mnt_sb->s_root, &this);
	if (!dentry)
		goto eexit_4;
	dentry->d_op = &eventpollfs_dentry_operations;
	d_add(dentry, inode);
	file->f_path.mnt = mntget(eventpoll_mnt);
	file->f_path.dentry = dentry;
	file->f_mapping = inode->i_mapping;

	file->f_pos = 0;
	file->f_flags = O_RDONLY;
	file->f_op = &eventpoll_fops;
	file->f_mode = FMODE_READ;
	file->f_version = 0;
	file->private_data = ep;

	/* Install the new setup file into the allocated fd. */
	fd_install(fd, file);

	/* And return the fd, the inode and the file */
	*efd = fd;
	*einode = inode;
	*efile = file;
	return 0;

eexit_4:
	put_unused_fd(fd);
eexit_3:
	iput(inode);
eexit_2:
	put_filp(file);
eexit_1:
	return error;
}

static int ep_alloc(struct eventpoll **pep)
{
	struct eventpoll *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

	if (!ep)
		return -ENOMEM;

	rwlock_init(&ep->lock);
	init_rwsem(&ep->sem);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT;

	*pep = ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_alloc() ep=%p\n",
		     current, ep));
	return 0;
}

static void ep_free(struct eventpoll *ep)
{
	struct rb_node *rbp;
	struct epitem *epi;

	/* We need to release all tasks waiting on this file */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(&psw, &ep->poll_wait);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() while we're freeing the "struct eventpoll".
	 * We do not need to hold "ep->sem" here because the epoll file
	 * is on the way to be removed and no one has references to it
	 * anymore. The only hit might come from eventpoll_release_file() but
	 * holding "epmutex" is sufficient here.
	 */
	mutex_lock(&epmutex);

	/*
	 * Walks through the whole tree by unregistering poll callbacks.
	 */
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
	}

	/*
	 * Walks through the whole tree by freeing each "struct epitem". At this
	 * point we are sure no poll callbacks will be lingering around, and also by
	 * write-holding "sem" we can be sure that no file cleanup code will hit
	 * us during this operation. So we can avoid the lock on "ep->lock".
	 */
	while ((rbp = rb_first(&ep->rbr)) != 0) {
		epi = rb_entry(rbp, struct epitem, rbn);
		ep_remove(ep, epi);
	}

	mutex_unlock(&epmutex);
}

/*
 * Search the file inside the eventpoll tree. It adds a usage count to
 * the returned item, so the caller must call ep_release_epitem()
 * after finishing with the "struct epitem".
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
	int kcmp;
	unsigned long flags;
	struct rb_node *rbp;
	struct epitem *epi, *epir = NULL;
	struct epoll_filefd ffd;

	ep_set_ffd(&ffd, file, fd);
	read_lock_irqsave(&ep->lock, flags);
	for (rbp = ep->rbr.rb_node; rbp; ) {
		epi = rb_entry(rbp, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
		if (kcmp > 0)
			rbp = rbp->rb_right;
		else if (kcmp < 0)
			rbp = rbp->rb_left;
		else {
			ep_use_epitem(epi);
			epir = epi;
			break;
		}
	}
	read_unlock_irqrestore(&ep->lock, flags);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n",
		     current, file, epir));

	return epir;
}

/*
 * Increment the usage count of the "struct epitem", making sure
 * that the user will have a valid pointer to reference.
 */
static void ep_use_epitem(struct epitem *epi)
{
	atomic_inc(&epi->usecnt);
}

/*
 * Decrement ( release ) the usage count by signaling that the user
 * has finished using the structure. It might lead to freeing the
 * structure itself if the count goes to zero.
 */
static void ep_release_epitem(struct epitem *epi)
{
	if (atomic_dec_and_test(&epi->usecnt))
		kmem_cache_free(epi_cache, epi);
}

/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct epitem *epi = ep_item_from_epqueue(pt);
	struct eppoll_entry *pwq;

	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
		pwq->whead = whead;
		pwq->base = epi;
		add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		epi->nwait++;
	} else {
		/* We have to signal that an error occurred */
		epi->nwait = -1;
	}
}

static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
	int kcmp;
	struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
	struct epitem *epic;

	while (*p) {
		parent = *p;
		epic = rb_entry(parent, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
		if (kcmp > 0)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&epi->rbn, parent, p);
	rb_insert_color(&epi->rbn, &ep->rbr);
}

static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd)
{
	int error, revents, pwake = 0;
	unsigned long flags;
	struct epitem *epi;
	struct ep_pqueue epq;

	error = -ENOMEM;
	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
		goto eexit_1;

	/* Item initialization follows here ... */
	ep_rb_initnode(&epi->rbn);
	INIT_LIST_HEAD(&epi->rdllink);
	INIT_LIST_HEAD(&epi->fllink);
	INIT_LIST_HEAD(&epi->txlink);
	INIT_LIST_HEAD(&epi->pwqlist);
	epi->ep = ep;
	ep_set_ffd(&epi->ffd, tfile, fd);
	epi->event = *event;
	atomic_set(&epi->usecnt, 1);
	epi->nwait = 0;

	/* Initialize the poll table using the queue callback */
	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);

	/*
	 * Attach the item to the poll hooks and get current event bits.
	 * We can safely use the file* here because its usage count has
	 * been increased by the caller of this function.
	 */
	revents = tfile->f_op->poll(tfile, &epq.pt);

	/*
	 * We have to check if something went wrong during the poll wait queue
	 * install process. Namely an allocation for a wait queue failed due
	 * to high memory pressure.
	 */
	if (epi->nwait < 0)
		goto eexit_2;

	/* Add the current item to the list of active epoll hooks for this file */
	spin_lock(&tfile->f_ep_lock);
	list_add_tail(&epi->fllink, &tfile->f_ep_links);
	spin_unlock(&tfile->f_ep_lock);

	/* We have to drop the new item inside our item list to keep track of it */
	write_lock_irqsave(&ep->lock, flags);

	/* Add the current item to the rb-tree */
	ep_rbtree_insert(ep, epi);

	/* If the file is already "ready" we drop it inside the ready list */
	if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);

		/* Notify waiting tasks that events are available */
		if (waitqueue_active(&ep->wq))
			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_insert(%p, %p, %d)\n",
		     current, ep, tfile, fd));

	return 0;

eexit_2:
	ep_unregister_pollwait(ep, epi);

	/*
	 * We need to do this because an event could have arrived on some
	 * allocated wait queue.
	 */
	write_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		ep_list_del(&epi->rdllink);
	write_unlock_irqrestore(&ep->lock, flags);

	kmem_cache_free(epi_cache, epi);
eexit_1:
	return error;
}

/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status.
 */
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
	int pwake = 0;
	unsigned int revents;
	unsigned long flags;

	/*
	 * Set the new event interest mask before calling f_op->poll(), otherwise
	 * a potential race might occur. In fact if we do this operation inside
	 * the lock, an event might happen between the f_op->poll() call and the
	 * new event set registering.
	 */
	epi->event.events = event->events;

	/*
	 * Get current event bits. We can safely use the file* here because
	 * its usage count has been increased by the caller of this function.
	 */
	revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);

	write_lock_irqsave(&ep->lock, flags);

	/* Copy the data member from inside the lock */
	epi->event.data = event->data;

	/*
	 * If the item is not linked to the tree it means that it's on its
	 * way toward removal. Do nothing in this case.
	 */
	if (ep_rb_linked(&epi->rbn)) {
		/*
		 * If the item is "hot" and it is not registered inside the ready
		 * list, push it inside. If the item is not "hot" and it is currently
		 * registered inside the ready list, unlink it.
		 */
		if (revents & event->events) {
			if (!ep_is_linked(&epi->rdllink)) {
				list_add_tail(&epi->rdllink, &ep->rdllist);

				/* Notify waiting tasks that events are available */
				if (waitqueue_active(&ep->wq))
					__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
							 TASK_INTERRUPTIBLE);
				if (waitqueue_active(&ep->poll_wait))
					pwake++;
			}
		}
	}

	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	return 0;
}

/*
 * This function unregisters poll callbacks from the associated file descriptor.
 * Since this must be called without holding "ep->lock" the atomic exchange trick
 * will protect us from multiple unregistrations.
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	int nwait;
	struct list_head *lsthead = &epi->pwqlist;
	struct eppoll_entry *pwq;

	/* This is called without locks, so we need the atomic exchange */
	nwait = xchg(&epi->nwait, 0);

	if (nwait) {
		while (!list_empty(lsthead)) {
			pwq = list_entry(lsthead->next, struct eppoll_entry, llink);

			ep_list_del(&pwq->llink);
			remove_wait_queue(pwq->whead, &pwq->wait);
			kmem_cache_free(pwq_cache, pwq);
		}
	}
}

/*
 * Unlink the "struct epitem" from all places it might have been hooked up.
 * This function must be called with write IRQ lock on "ep->lock".
 */
static int ep_unlink(struct eventpoll *ep, struct epitem *epi)
{
	int error;

	/*
	 * It can happen that this one is called for an item already unlinked.
	 * The check protects us from doing a double unlink ( crash ).
	 */
	error = -ENOENT;
	if (!ep_rb_linked(&epi->rbn))
		goto eexit_1;

	/*
	 * Clear the event mask for the unlinked item. This will avoid item
	 * notifications to be sent after the unlink operation from inside
	 * the kernel->userspace event transfer loop.
	 */
	epi->event.events = 0;

	/*
	 * At this point it is safe to do the job: unlink the item from our rb-tree.
	 * This operation together with the above check closes the door to
	 * double unlinks.
	 */
	ep_rb_erase(&epi->rbn, &ep->rbr);

	/*
	 * If the item we are going to remove is inside the ready file descriptors
	 * we want to remove it from this list to avoid stale events.
	 */
	if (ep_is_linked(&epi->rdllink))
		ep_list_del(&epi->rdllink);

	error = 0;
eexit_1:

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n",
		     current, ep, epi->ffd.file, error));

	return error;
}

/*
 * Removes a "struct epitem" from the eventpoll tree and deallocates
 * all the associated resources.
 */
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
	int error;
	unsigned long flags;
	struct file *file = epi->ffd.file;

	/*
	 * Removes poll wait queue hooks. We _have_ to do this without holding
	 * the "ep->lock" otherwise a deadlock might occur. This because of the
	 * sequence of the lock acquisition. Here we do "ep->lock" then the wait
	 * queue head lock when unregistering the wait queue. The wakeup callback
	 * will run by holding the wait queue head lock and will call our callback
	 * that will try to get "ep->lock".
	 */
	ep_unregister_pollwait(ep, epi);

	/* Remove the current item from the list of epoll hooks */
	spin_lock(&file->f_ep_lock);
	if (ep_is_linked(&epi->fllink))
		ep_list_del(&epi->fllink);
	spin_unlock(&file->f_ep_lock);

	/* We need to acquire the write IRQ lock before calling ep_unlink() */
	write_lock_irqsave(&ep->lock, flags);

	/* Really unlink the item from the tree */
	error = ep_unlink(ep, epi);

	write_unlock_irqrestore(&ep->lock, flags);

	if (error)
		goto eexit_1;

	/* At this point it is safe to free the eventpoll item */
	ep_release_epitem(epi);

	error = 0;
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p) = %d\n",
		     current, ep, file, error));

	return error;
}

/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	unsigned long flags;
	struct epitem *epi = ep_item_from_wait(wait);
	struct eventpoll *ep = epi->ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
		     current, epi->ffd.file, epi, ep));

	write_lock_irqsave(&ep->lock, flags);

	/*
	 * If the event mask does not contain any poll(2) event, we consider the
	 * descriptor to be disabled. This condition is likely the effect of the
	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
	 * until the next EPOLL_CTL_MOD will be issued.
	 */
	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto is_disabled;

	/* If this file is already in the ready list we exit soon */
	if (ep_is_linked(&epi->rdllink))
		goto is_linked;

	list_add_tail(&epi->rdllink, &ep->rdllist);

is_linked:
	/*
	 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq))
		__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
				 TASK_INTERRUPTIBLE);
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

is_disabled:
	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	return 1;
}

static int ep_eventpoll_close(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	if (ep) {
		ep_free(ep);
		kfree(ep);
	}

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep));
	return 0;
}

static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	unsigned int pollflags = 0;
	unsigned long flags;
	struct eventpoll *ep = file->private_data;

	/* Insert inside our poll wait queue */
	poll_wait(file, &ep->poll_wait, wait);

	/* Check our condition */
	read_lock_irqsave(&ep->lock, flags);
	if (!list_empty(&ep->rdllist))
		pollflags = POLLIN | POLLRDNORM;
	read_unlock_irqrestore(&ep->lock, flags);

	return pollflags;
}

/*
 * Since we have to release the lock during the __copy_to_user() operation and
 * during the f_op->poll() call, we try to collect the maximum number of items
 * by reducing the irqlock/irqunlock switching rate.
 */
static int ep_collect_ready_items(struct eventpoll *ep, struct list_head *txlist, int maxevents)
{
	int nepi;
	unsigned long flags;
	struct list_head *lsthead = &ep->rdllist, *lnk;
	struct epitem *epi;

	write_lock_irqsave(&ep->lock, flags);

	for (nepi = 0, lnk = lsthead->next; lnk != lsthead && nepi < maxevents;) {
		epi = list_entry(lnk, struct epitem, rdllink);

		lnk = lnk->next;

		/* If this item is not yet inside the transfer list, collect it */
		if (!ep_is_linked(&epi->txlink)) {
			/*
			 * This is initialized in this way so that the default
			 * behaviour of the reinjecting code will be to push back
			 * the item inside the ready list.
			 */
			epi->revents = epi->event.events;

			/* Link the ready item into the transfer list */
			list_add(&epi->txlink, txlist);
			nepi++;

			/*
			 * Unlink the item from the ready list.
			 */
			ep_list_del(&epi->rdllink);
		}
	}

	write_unlock_irqrestore(&ep->lock, flags);

	return nepi;
}

/*
 * This function is called without holding the "ep->lock" since the call to
 * __copy_to_user() might sleep, and also f_op->poll() might reenable the IRQs
 * because of the way poll() is traditionally implemented in Linux.
 */
static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
			  struct epoll_event __user *events)
{
	int eventcnt = 0;
	unsigned int revents;
	struct list_head *lnk;
	struct epitem *epi;

	/*
	 * We can loop without lock because this is a task private list.
	 * The test done during the collection loop will guarantee us that
	 * another task will not try to collect this file. Also, items
	 * cannot vanish during the loop because we are holding "sem".
	 */
	list_for_each(lnk, txlist) {
		epi = list_entry(lnk, struct epitem, txlink);

		/*
		 * Get the ready file event set. We can safely use the file
		 * because we are holding the "sem" in read and this will
		 * guarantee that both the file and the item will not vanish.
		 */
		revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);

		/*
		 * Set the return event set for the current file descriptor.
		 * Note that only the task that was successfully able to link
		 * the item to its "txlist" will write this field.
		 */
		epi->revents = revents & epi->event.events;

		if (epi->revents) {
			if (__put_user(epi->revents,
				       &events[eventcnt].events) ||
			    __put_user(epi->event.data,
				       &events[eventcnt].data))
				return -EFAULT;
			if (epi->event.events & EPOLLONESHOT)
				epi->event.events &= EP_PRIVATE_BITS;
			eventcnt++;
		}
	}
	return eventcnt;
}

/*
 * Walk through the transfer list we collected with ep_collect_ready_items()
 * and, if 1) the item is still "alive" 2) its event set is not empty 3) it's
 * not already linked, link it to the ready list. Same as above, we are holding
 * "sem" so items cannot vanish underneath our nose.
 */
static void ep_reinject_items(struct eventpoll *ep, struct list_head *txlist)
{
	int ricnt = 0, pwake = 0;
	unsigned long flags;
	struct epitem *epi;

	write_lock_irqsave(&ep->lock, flags);

	while (!list_empty(txlist)) {
		epi = list_entry(txlist->next, struct epitem, txlink);

		/* Unlink the current item from the transfer list */
		ep_list_del(&epi->txlink);

		/*
		 * If the item is no longer linked to the interest set, we don't
		 * have to push it inside the ready list because the following
		 * ep_release_epitem() is going to drop it. Also, if the current
		 * item is set to have an Edge Triggered behaviour, we don't have
		 * to push it back either.
		 */
		if (ep_rb_linked(&epi->rbn) && !(epi->event.events & EPOLLET) &&
		    (epi->revents & epi->event.events) && !ep_is_linked(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);
			ricnt++;
		}
	}

	if (ricnt) {
		/*
		 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
		 * wait list.
		 */
		if (waitqueue_active(&ep->wq))
			__wake_up_locked(&ep->wq, TASK_UNINTERRUPTIBLE |
					 TASK_INTERRUPTIBLE);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);
}

/*
 * Perform the transfer of events to user space.
 */
static int ep_events_transfer(struct eventpoll *ep,
			      struct epoll_event __user *events, int maxevents)
{
	int eventcnt = 0;
	struct list_head txlist;

	INIT_LIST_HEAD(&txlist);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() and epoll_ctl(EPOLL_CTL_DEL).
	 */
	down_read(&ep->sem);

	/* Collect/extract ready items */
	if (ep_collect_ready_items(ep, &txlist, maxevents) > 0) {
		/* Build result set in userspace */
		eventcnt = ep_send_events(ep, &txlist, events);

		/* Reinject ready items into the ready list */
		ep_reinject_items(ep, &txlist);
	}

	up_read(&ep->sem);

	return eventcnt;
}

static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout)
{
	int res, eavail;
	unsigned long flags;
	long jtimeout;
	wait_queue_t wait;

	/*
	 * Calculate the timeout by checking for the "infinite" value ( -1 )
	 * and the overflow condition. The passed timeout is in milliseconds,
	 * that's why (t * HZ) / 1000.
	 */
	jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ?
		MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;
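
	/*
	 * Worked example (assuming HZ == 250): a 10 ms timeout becomes
	 * (10 * 250 + 999) / 1000 = 3 jiffies; the +999 rounds up, so a
	 * short positive timeout can never truncate to a zero-jiffy wait.
	 */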

retry:
	write_lock_irqsave(&ep->lock, flags);

	res = 0;
	if (list_empty(&ep->rdllist)) {
		/*
		 * We don't have any available event to return to the caller.
		 * We need to sleep here, and we will be woken up by
		 * ep_poll_callback() when events become available.
		 */
		init_waitqueue_entry(&wait, current);
		__add_wait_queue(&ep->wq, &wait);

		for (;;) {
			/*
			 * We don't want to sleep if the ep_poll_callback() sends us
			 * a wakeup in between. That's why we set the task state
			 * to TASK_INTERRUPTIBLE before doing the checks.
			 */
			set_current_state(TASK_INTERRUPTIBLE);
			if (!list_empty(&ep->rdllist) || !jtimeout)
				break;
			if (signal_pending(current)) {
				res = -EINTR;
				break;
			}

			write_unlock_irqrestore(&ep->lock, flags);
			jtimeout = schedule_timeout(jtimeout);
			write_lock_irqsave(&ep->lock, flags);
		}
		__remove_wait_queue(&ep->wq, &wait);

		set_current_state(TASK_RUNNING);
	}

	/* Is it worth trying to dig for events ? */
	eavail = !list_empty(&ep->rdllist);

	write_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Try to transfer events to user space. In case we get 0 events and
	 * there's still timeout left over, we go trying again in search of
	 * more luck.
	 */
	if (!res && eavail &&
	    !(res = ep_events_transfer(ep, events, maxevents)) && jtimeout)
		goto retry;

	return res;
}

static int eventpollfs_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct inode *ep_eventpoll_inode(void)
{
	int error = -ENOMEM;
	struct inode *inode = new_inode(eventpoll_mnt->mnt_sb);

	if (!inode)
		goto eexit_1;

	inode->i_fop = &eventpoll_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because mark_inode_dirty() will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IRUSR | S_IWUSR;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	return inode;

eexit_1:
	return ERR_PTR(error);
}

static int
eventpollfs_get_sb(struct file_system_type *fs_type, int flags,
		   const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "eventpoll:", NULL, EVENTPOLLFS_MAGIC,
			     mnt);
}

static int __init eventpoll_init(void)
{
	int error;

	mutex_init(&epmutex);

	/* Initialize the structure used to perform safe poll wait head wake ups */
	ep_poll_safewake_init(&psw);

	/* Allocates slab cache used to allocate "struct epitem" items */
	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
				      0, SLAB_HWCACHE_ALIGN|EPI_SLAB_DEBUG|SLAB_PANIC,
				      NULL, NULL);

	/* Allocates slab cache used to allocate "struct eppoll_entry" */
	pwq_cache = kmem_cache_create("eventpoll_pwq",
				      sizeof(struct eppoll_entry), 0,
				      EPI_SLAB_DEBUG|SLAB_PANIC, NULL, NULL);

	/*
	 * Register the virtual file system that will be the source of inodes
	 * for the eventpoll files
	 */
	error = register_filesystem(&eventpoll_fs_type);
	if (error)
		goto epanic;

	/* Mount the above commented virtual file system */
	eventpoll_mnt = kern_mount(&eventpoll_fs_type);
	error = PTR_ERR(eventpoll_mnt);
	if (IS_ERR(eventpoll_mnt))
		goto epanic;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: successfully initialized.\n",
		     current));
	return 0;

epanic:
	panic("eventpoll_init() failed\n");
}

static void __exit eventpoll_exit(void)
{
	/* Undo all operations done inside eventpoll_init() */
	unregister_filesystem(&eventpoll_fs_type);
	mntput(eventpoll_mnt);
	kmem_cache_destroy(pwq_cache);
	kmem_cache_destroy(epi_cache);
}

module_init(eventpoll_init);
module_exit(eventpoll_exit);

MODULE_LICENSE("GPL");