/*
 *  fs/eventpoll.c ( Efficient event polling implementation )
 *  Copyright (C) 2001,...,2003  Davide Libenzi
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  Davide Libenzi <davidel@xmailserver.org>
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/smp_lock.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
/*
 * There are three levels of locking required by epoll :
 *
 * 1) epsem (semaphore)
 * 2) ep->sem (rw_semaphore)
 * 3) ep->lock (rw_lock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a spinlock (ep->lock) because we manipulate objects
 * from inside the poll callback, which might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * read-write semaphore (ep->sem). It is acquired on read during
 * the event transfer loop and on write during epoll_ctl(EPOLL_CTL_DEL)
 * and during eventpoll_release(). Then we also need a global
 * semaphore to serialize eventpoll_release() and ep_free().
 * This semaphore is acquired by ep_free() during the epoll file
 * cleanup path and it is also acquired by eventpoll_release()
 * if a file has been pushed inside an epoll set and is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 * It is possible to drop the "ep->sem" and use the global
 * semaphore "epsem" (together with "ep->lock") to make it work,
 * but having "ep->sem" makes the interface more scalable.
 * Events that require holding "epsem" are very rare, while for
 * normal operations the epoll private "ep->sem" will guarantee
 * greater scalability.
 */
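/*
 * Illustrative sketch only ( not from the original file ): the three
 * locks nested in the 1 -> 2 -> 3 order described above. This is the
 * nesting the eventpoll_release_file() path below actually performs,
 * with ep->lock taken inside ep_remove(); the function name here is
 * hypothetical.
 */
#if 0
static void ep_lock_order_sketch(struct eventpoll *ep)
{
	unsigned long flags;

	down(&epsem);				/* 1) global semaphore */
	down_write(&ep->sem);			/* 2) per-epoll rw_semaphore, may sleep */
	write_lock_irqsave(&ep->lock, flags);	/* 3) per-epoll rw_lock, no sleeping */

	/* ... manipulate the epoll lists here ... */

	write_unlock_irqrestore(&ep->lock, flags);
	up_write(&ep->sem);
	up(&epsem);
}
#endif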
#define EVENTPOLLFS_MAGIC 0x03111965 /* My birthday should work for this :) */

#define DEBUG_EPOLL 0

#if DEBUG_EPOLL > 0
#define DPRINTK(x) printk x
#define DNPRINTK(n, x) do { if ((n) <= DEBUG_EPOLL) printk x; } while (0)
#else /* #if DEBUG_EPOLL > 0 */
#define DPRINTK(x) (void) 0
#define DNPRINTK(n, x) (void) 0
#endif /* #if DEBUG_EPOLL > 0 */

#define DEBUG_EPI 0

#if DEBUG_EPI != 0
#define EPI_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
#else /* #if DEBUG_EPI != 0 */
#define EPI_SLAB_DEBUG 0
#endif /* #if DEBUG_EPI != 0 */
/* Maximum number of poll wake up nests we are allowing */
#define EP_MAX_POLLWAKE_NESTS 4

/* Maximum size of the hash in bits ( 2^N ) */
#define EP_MAX_HASH_BITS 17

/* Minimum size of the hash in bits ( 2^N ) */
#define EP_MIN_HASH_BITS 9

/* Number of hash entries ( "struct list_head" ) inside a page */
#define EP_HENTRY_X_PAGE (PAGE_SIZE / sizeof(struct list_head))

/* Maximum size of the hash in pages */
#define EP_MAX_HPAGES ((1 << EP_MAX_HASH_BITS) / EP_HENTRY_X_PAGE + 1)

/* Number of pages allocated for an "hbits" sized hash table */
#define EP_HASH_PAGES(hbits) ((int) ((1 << (hbits)) / EP_HENTRY_X_PAGE + \
				     ((1 << (hbits)) % EP_HENTRY_X_PAGE ? 1 : 0)))
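/*
 * Worked example ( illustration only, assuming a 32-bit build with
 * 4096-byte pages and 8-byte "struct list_head" entries ):
 * EP_HENTRY_X_PAGE is 4096 / 8 = 512, so a minimum 9-bit hash needs
 * EP_HASH_PAGES(9) = 512 / 512 + 0 = 1 page, a maximum 17-bit hash
 * needs 131072 / 512 = 256 pages, and EP_MAX_HPAGES is 257.
 */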
/* Macro to allocate a "struct epitem" from the slab cache */
#define EPI_MEM_ALLOC() (struct epitem *) kmem_cache_alloc(epi_cache, SLAB_KERNEL)

/* Macro to free a "struct epitem" to the slab cache */
#define EPI_MEM_FREE(p) kmem_cache_free(epi_cache, p)

/* Macro to allocate a "struct eppoll_entry" from the slab cache */
#define PWQ_MEM_ALLOC() (struct eppoll_entry *) kmem_cache_alloc(pwq_cache, SLAB_KERNEL)

/* Macro to free a "struct eppoll_entry" to the slab cache */
#define PWQ_MEM_FREE(p) kmem_cache_free(pwq_cache, p)

/* Fast test to see if the file is an eventpoll file */
#define IS_FILE_EPOLL(f) ((f)->f_op == &eventpoll_fops)
/*
 * Remove the item from the list and perform its initialization.
 * This is useful for us because we can test if the item is linked
 * using "EP_IS_LINKED(p)".
 */
#define EP_LIST_DEL(p) do { list_del(p); INIT_LIST_HEAD(p); } while (0)

/* Tells us if the item is currently linked */
#define EP_IS_LINKED(p) (!list_empty(p))
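/*
 * Illustration only ( not from the original file ): after EP_LIST_DEL()
 * the node is re-initialized to point back to itself, so EP_IS_LINKED()
 * becomes false again and a second unlink can be detected cheaply
 * instead of corrupting the list. "some_list" is a made-up name.
 */
#if 0
	struct list_head node;

	INIT_LIST_HEAD(&node);		/* EP_IS_LINKED(&node) is false */
	list_add(&node, &some_list);	/* EP_IS_LINKED(&node) is true */
	EP_LIST_DEL(&node);		/* false again; safe to re-test later */
#endif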
/* Get the "struct epitem" from a wait queue pointer */
#define EP_ITEM_FROM_WAIT(p) ((struct epitem *) container_of(p, struct eppoll_entry, wait)->base)

/* Get the "struct epitem" from an epoll queue wrapper */
#define EP_ITEM_FROM_EPQUEUE(p) (container_of(p, struct ep_pqueue, pt)->epi)

/*
 * This is used to optimize the event transfer to userspace. Since this
 * is kept on stack, it should be pretty small.
 */
#define EP_MAX_BUF_EVENTS 32
/*
 * Node that is linked into the "wake_task_list" member of the "struct poll_safewake".
 * It is used to keep track of all tasks that are currently inside the wake_up() code
 * to 1) short-circuit the one coming from the same task and same wait queue head
 * ( loop ) 2) allow a maximum number of epoll descriptor inclusion nestings
 * 3) let go the ones coming from other tasks.
 */
struct wake_task_node {
	struct list_head llink;
	task_t *task;
	wait_queue_head_t *wq;
};
/*
 * This is used to implement the safe poll wake up, avoiding reentering
 * the poll callback from inside wake_up().
 */
struct poll_safewake {
	struct list_head wake_task_list;
	spinlock_t lock;
};
/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
	/* Protect access to this structure */
	rwlock_t lock;

	/*
	 * This semaphore is used to ensure that files are not removed
	 * while epoll is using them. This is read-held during the event
	 * collection loop and it is write-held during the file cleanup
	 * path, the epoll file exit code and the ctl operations.
	 */
	struct rw_semaphore sem;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* Size of the hash */
	unsigned int hashbits;

	/* Pages for the "struct epitem" hash */
	char *hpages[EP_MAX_HPAGES];
};
/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct list_head llink;

	/* The "base" pointer is set to the container "struct epitem" */
	void *base;

	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_t wait;

	/* The wait queue head that linked the "wait" wait queue item */
	wait_queue_head_t *whead;
};
/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the hash.
 */
struct epitem {
	/* List header used to link this structure to the eventpoll hash */
	struct list_head llink;

	/* List header used to link this structure to the eventpoll ready list */
	struct list_head rdllink;

	/* Number of active wait queues attached to poll operations */
	int nwait;

	/* List containing poll wait queues */
	struct list_head pwqlist;

	/* The "container" of this item */
	struct eventpoll *ep;

	/* The file descriptor this item refers to */
	int fd;

	/* The file this item refers to */
	struct file *file;

	/* The structure that describes the interested events and the source fd */
	struct epoll_event event;

	/*
	 * Used to keep track of the usage count of the structure. This avoids
	 * that the structure will disappear from underneath our processing.
	 */
	atomic_t usecnt;

	/* List header used to link this item to the "struct file" items list */
	struct list_head fllink;

	/* List header used to link the item to the transfer list */
	struct list_head txlink;

	/*
	 * This is used during the collection/transfer of events to userspace
	 * to pin the item's event set while it sits on the transfer list.
	 */
	unsigned int revents;
};
/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};
static void ep_poll_safewake_init(struct poll_safewake *psw);
static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq);
static unsigned int ep_get_hash_bits(unsigned int hintsize);
static int ep_getfd(int *efd, struct inode **einode, struct file **efile);
static int ep_alloc_pages(char **pages, int numpages);
static int ep_free_pages(char **pages, int numpages);
static int ep_file_init(struct file *file, unsigned int hashbits);
static unsigned int ep_hash_index(struct eventpoll *ep, struct file *file,
				  int fd);
static struct list_head *ep_hash_entry(struct eventpoll *ep,
				       unsigned int index);
static int ep_init(struct eventpoll *ep, unsigned int hashbits);
static void ep_free(struct eventpoll *ep);
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd);
static void ep_use_epitem(struct epitem *epi);
static void ep_release_epitem(struct epitem *epi);
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt);
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd);
static int ep_modify(struct eventpoll *ep, struct epitem *epi,
		     struct epoll_event *event);
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi);
static int ep_unlink(struct eventpoll *ep, struct epitem *epi);
static int ep_remove(struct eventpoll *ep, struct epitem *epi);
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync);
static int ep_eventpoll_close(struct inode *inode, struct file *file);
static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait);
static int ep_collect_ready_items(struct eventpoll *ep,
				  struct list_head *txlist, int maxevents);
static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
			  struct epoll_event __user *events);
static void ep_reinject_items(struct eventpoll *ep, struct list_head *txlist);
static int ep_events_transfer(struct eventpoll *ep,
			      struct epoll_event __user *events,
			      int maxevents);
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout);
static int eventpollfs_delete_dentry(struct dentry *dentry);
static struct inode *ep_eventpoll_inode(void);
static struct super_block *eventpollfs_get_sb(struct file_system_type *fs_type,
					      int flags, const char *dev_name,
					      void *data);
/*
 * This semaphore is used to serialize ep_free() and eventpoll_release().
 */
struct semaphore epsem;

/* Safe wake up implementation */
static struct poll_safewake psw;

/* Slab cache used to allocate "struct epitem" */
static kmem_cache_t *epi_cache;

/* Slab cache used to allocate "struct eppoll_entry" */
static kmem_cache_t *pwq_cache;

/* Virtual fs used to allocate inodes for eventpoll files */
static struct vfsmount *eventpoll_mnt;

/* File callbacks that implement the eventpoll file behaviour */
static struct file_operations eventpoll_fops = {
	.release	= ep_eventpoll_close,
	.poll		= ep_eventpoll_poll
};

/*
 * This is used to register the virtual file system from where
 * eventpoll inodes are allocated.
 */
static struct file_system_type eventpoll_fs_type = {
	.name		= "eventpollfs",
	.get_sb		= eventpollfs_get_sb,
	.kill_sb	= kill_anon_super,
};

/* Very basic directory entry operations for the eventpoll virtual file system */
static struct dentry_operations eventpollfs_dentry_operations = {
	.d_delete	= eventpollfs_delete_dentry,
};
/* Initialize the poll safe wake up structure */
static void ep_poll_safewake_init(struct poll_safewake *psw)
{
	INIT_LIST_HEAD(&psw->wake_task_list);
	spin_lock_init(&psw->lock);
}
/*
 * Perform a safe wake up of the poll wait list. The problem is that
 * with the new callback'd wake up system, it is possible that the
 * poll callback is reentered from inside the call to wake_up() done
 * on the poll wait queue head. The rule is that we cannot reenter the
 * wake up code from the same task more than EP_MAX_POLLWAKE_NESTS times,
 * and we cannot reenter the same wait queue head at all. This enables
 * a hierarchy of epoll file descriptors no more than
 * EP_MAX_POLLWAKE_NESTS deep. We need the irq version of the spin lock
 * because this one gets called by the poll callback, that in turn is called
 * from inside a wake_up(), that might be called from irq context.
 */
static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
{
	int wake_nests = 0;
	unsigned long flags;
	task_t *this_task = current;
	struct list_head *lsthead = &psw->wake_task_list, *lnk;
	struct wake_task_node *tncur;
	struct wake_task_node tnode;

	spin_lock_irqsave(&psw->lock, flags);

	/* Try to see if the current task is already inside this wakeup call */
	list_for_each(lnk, lsthead) {
		tncur = list_entry(lnk, struct wake_task_node, llink);

		if (tncur->wq == wq ||
		    (tncur->task == this_task && ++wake_nests > EP_MAX_POLLWAKE_NESTS)) {
			/*
			 * Oops ... loop detected or maximum nest level reached.
			 * We abort this wake by breaking the cycle itself.
			 */
			spin_unlock_irqrestore(&psw->lock, flags);
			return;
		}
	}

	/* Add the current task to the list */
	tnode.task = this_task;
	tnode.wq = wq;
	list_add(&tnode.llink, lsthead);

	spin_unlock_irqrestore(&psw->lock, flags);

	/* Do really wake up now */
	wake_up(wq);

	/* Remove the current task from the list */
	spin_lock_irqsave(&psw->lock, flags);
	list_del(&tnode.llink);
	spin_unlock_irqrestore(&psw->lock, flags);
}
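/*
 * Illustrative userspace view ( not from the original file ) of the
 * nesting this function guards against: one epoll fd registered inside
 * another, e.g.
 *
 *	int ep1 = epoll_create(10);
 *	int ep2 = epoll_create(10);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *	epoll_ctl(ep1, EPOLL_CTL_ADD, ep2, &ev);
 *
 * A wakeup on ep2 then recurses into the wait queue of ep1; chains
 * deeper than EP_MAX_POLLWAKE_NESTS, or true cycles, are cut above by
 * simply dropping the wake up.
 */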
/*
 * Calculate the size of the hash in bits. The returned size will be
 * bounded between EP_MIN_HASH_BITS and EP_MAX_HASH_BITS.
 */
static unsigned int ep_get_hash_bits(unsigned int hintsize)
{
	unsigned int i, val;

	for (i = 0, val = 1; val < hintsize && i < EP_MAX_HASH_BITS; i++, val <<= 1);
	return i < EP_MIN_HASH_BITS ? EP_MIN_HASH_BITS : i;
}
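/*
 * Worked example ( illustration only ): a hint of 1000 yields the
 * smallest i with 2^i >= 1000, i.e. i = 10; a hint of 100 computes
 * i = 7 but is clamped up to EP_MIN_HASH_BITS ( 9 ); any huge hint is
 * capped by the loop at EP_MAX_HASH_BITS ( 17 ).
 */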
/* Used to initialize the epoll bits inside the "struct file" */
void eventpoll_init_file(struct file *file)
{
	INIT_LIST_HEAD(&file->f_ep_links);
	spin_lock_init(&file->f_ep_lock);
}
/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need this facility to cleanup correctly files that are
 * closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
	struct list_head *lsthead = &file->f_ep_links;
	struct eventpoll *ep;
	struct epitem *epi;

	/*
	 * We don't want to get "file->f_ep_lock" because it is not
	 * necessary. It is not necessary because we're in the "struct file"
	 * cleanup path, and this means that no one is using this file anymore.
	 * The only hit might come from ep_free() but holding the semaphore
	 * will correctly serialize the operation. We do need to acquire
	 * "ep->sem" after "epsem" because ep_remove() requires it when called
	 * from anywhere but ep_free().
	 */
	down(&epsem);

	while (!list_empty(lsthead)) {
		epi = list_entry(lsthead->next, struct epitem, fllink);

		ep = epi->ep;
		EP_LIST_DEL(&epi->fllink);
		down_write(&ep->sem);
		ep_remove(ep, epi);
		up_write(&ep->sem);
	}

	up(&epsem);
}
/*
 * It opens an eventpoll file descriptor by suggesting a storage of "size"
 * file descriptors. The size parameter is just a hint about how to size
 * data structures. It won't prevent the user from storing more than "size"
 * file descriptors inside the epoll interface. It is the kernel part of
 * the userspace epoll_create(2).
 */
asmlinkage long sys_epoll_create(int size)
{
	int error, fd;
	unsigned int hashbits;
	struct inode *inode;
	struct file *file;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d)\n",
		     current, size));

	/* Correctly size the hash */
	hashbits = ep_get_hash_bits((unsigned int) size);

	/*
	 * Creates all the items needed to setup an eventpoll file. That is,
	 * a file structure, an inode and a free file descriptor.
	 */
	error = ep_getfd(&fd, &inode, &file);
	if (error)
		goto eexit_1;

	/* Setup the file internal data structure ( "struct eventpoll" ) */
	error = ep_file_init(file, hashbits);
	if (error)
		goto eexit_2;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
		     current, size, fd));

	return fd;

eexit_2:
	sys_close(fd);
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
		     current, size, error));
	return error;
}
/*
 * The following function implements the controller interface for
 * the eventpoll file that enables the insertion/removal/change of
 * file descriptors inside the interest set. It represents
 * the kernel part of the user space epoll_ctl(2).
 */
asmlinkage long
sys_epoll_ctl(int epfd, int op, int fd, struct epoll_event __user *event)
{
	int error;
	struct file *file, *tfile;
	struct eventpoll *ep;
	struct epitem *epi;
	struct epoll_event epds;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p)\n",
		     current, epfd, op, fd, event));

	error = -EFAULT;
	if (copy_from_user(&epds, event, sizeof(struct epoll_event)))
		goto eexit_1;

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto eexit_1;

	/* Get the "struct file *" for the target file */
	tfile = fget(fd);
	if (!tfile)
		goto eexit_2;

	/* The target file descriptor must support poll */
	error = -EPERM;
	if (!tfile->f_op || !tfile->f_op->poll)
		goto eexit_3;

	/*
	 * We have to check that the file structure underneath the file descriptor
	 * the user passed to us _is_ an eventpoll file. And also we do not permit
	 * adding an epoll file descriptor inside itself.
	 */
	error = -EINVAL;
	if (file == tfile || !IS_FILE_EPOLL(file))
		goto eexit_3;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	down_write(&ep->sem);

	/* Try to lookup the file inside our hash table */
	epi = ep_find(ep, tfile, fd);

	error = -EINVAL;
	switch (op) {
	case EPOLL_CTL_ADD:
		if (!epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_insert(ep, &epds, tfile, fd);
		} else
			error = -EEXIST;
		break;
	case EPOLL_CTL_DEL:
		if (epi)
			error = ep_remove(ep, epi);
		else
			error = -ENOENT;
		break;
	case EPOLL_CTL_MOD:
		if (epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_modify(ep, epi, &epds);
		} else
			error = -ENOENT;
		break;
	}

	/*
	 * The function ep_find() increments the usage count of the structure
	 * so, if this is not NULL, we need to release it.
	 */
	if (epi)
		ep_release_epitem(epi);

	up_write(&ep->sem);

eexit_3:
	fput(tfile);
eexit_2:
	fput(file);
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_ctl(%d, %d, %d, %p) = %d\n",
		     current, epfd, op, fd, event, error));

	return error;
}
/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_wait(2).
 */
asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
			       int maxevents, int timeout)
{
	int error;
	struct file *file;
	struct eventpoll *ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d)\n",
		     current, epfd, events, maxevents, timeout));

	/* The maximum number of events must be greater than zero */
	if (maxevents <= 0)
		return -EINVAL;

	/* Verify that the area passed by the user is writeable */
	if ((error = verify_area(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))))
		goto eexit_1;

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto eexit_1;

	/*
	 * We have to check that the file structure underneath the fd
	 * the user passed to us _is_ an eventpoll file.
	 */
	error = -EINVAL;
	if (!IS_FILE_EPOLL(file))
		goto eexit_2;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	/* Time to fish for events ... */
	error = ep_poll(ep, events, maxevents, timeout);

eexit_2:
	fput(file);
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_wait(%d, %p, %d, %d) = %d\n",
		     current, epfd, events, maxevents, timeout, error));

	return error;
}
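/*
 * Illustrative userspace sketch only ( not from the original file ):
 * how the three syscalls above fit together. "sock_fd" and
 * "handle_io()" are made-up names.
 */
#if 0
	struct epoll_event ev, evbuf[16];
	int i, n, epfd = epoll_create(16);

	ev.events = EPOLLIN;
	ev.data.fd = sock_fd;
	epoll_ctl(epfd, EPOLL_CTL_ADD, sock_fd, &ev);

	n = epoll_wait(epfd, evbuf, 16, 1000 /* ms */);
	for (i = 0; i < n; i++)
		handle_io(evbuf[i].data.fd, evbuf[i].events);
#endif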
/*
 * Creates the file descriptor to be used by the epoll interface.
 */
static int ep_getfd(int *efd, struct inode **einode, struct file **efile)
{
	struct qstr this;
	char name[32];
	struct dentry *dentry;
	struct inode *inode;
	struct file *file;
	int error, fd;

	/* Get a ready to use file */
	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto eexit_1;

	/* Allocates an inode from the eventpoll file system */
	inode = ep_eventpoll_inode();
	error = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto eexit_2;

	/* Allocates a free descriptor to plug the file onto */
	error = get_unused_fd();
	if (error < 0)
		goto eexit_3;
	fd = error;

	/*
	 * Link the inode to a directory entry by creating a unique name
	 * using the inode number.
	 */
	error = -ENOMEM;
	sprintf(name, "[%lu]", inode->i_ino);
	this.name = name;
	this.len = strlen(name);
	this.hash = inode->i_ino;
	dentry = d_alloc(eventpoll_mnt->mnt_sb->s_root, &this);
	if (!dentry)
		goto eexit_4;
	dentry->d_op = &eventpollfs_dentry_operations;
	d_add(dentry, inode);
	file->f_vfsmnt = mntget(eventpoll_mnt);
	file->f_dentry = dget(dentry);

	file->f_flags = O_RDONLY;
	file->f_op = &eventpoll_fops;
	file->f_mode = FMODE_READ;
	file->private_data = NULL;

	/* Install the new setup file into the allocated fd. */
	fd_install(fd, file);

	*efd = fd;
	*einode = inode;
	*efile = file;
	return 0;

eexit_4:
	put_unused_fd(fd);
eexit_3:
	iput(inode);
eexit_2:
	put_filp(file);
eexit_1:
	return error;
}
static int ep_alloc_pages(char **pages, int numpages)
{
	int i;

	for (i = 0; i < numpages; i++) {
		pages[i] = (char *) __get_free_pages(GFP_KERNEL, 0);
		if (!pages[i]) {
			for (--i; i >= 0; i--) {
				ClearPageReserved(virt_to_page(pages[i]));
				free_pages((unsigned long) pages[i], 0);
			}
			return -ENOMEM;
		}
		SetPageReserved(virt_to_page(pages[i]));
	}
	return 0;
}
static int ep_free_pages(char **pages, int numpages)
{
	int i;

	for (i = 0; i < numpages; i++) {
		ClearPageReserved(virt_to_page(pages[i]));
		free_pages((unsigned long) pages[i], 0);
	}
	return 0;
}
static int ep_file_init(struct file *file, unsigned int hashbits)
{
	int error;
	struct eventpoll *ep;

	if (!(ep = kmalloc(sizeof(struct eventpoll), GFP_KERNEL)))
		return -ENOMEM;

	memset(ep, 0, sizeof(*ep));

	error = ep_init(ep, hashbits);
	if (error) {
		kfree(ep);
		return error;
	}

	file->private_data = ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_file_init() ep=%p\n",
		     current, ep));
	return 0;
}
/*
 * Calculate the index of the hash relative to "file".
 */
static unsigned int ep_hash_index(struct eventpoll *ep, struct file *file, int fd)
{
	unsigned long ptr = (unsigned long) file ^ (fd << ep->hashbits);

	return (unsigned int) hash_ptr((void *) ptr, ep->hashbits);
}
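/*
 * Note ( illustration only ): the fd is folded into the hashed pointer
 * above, so the same "struct file" added under two descriptors ( e.g.
 * after dup() ) hashes as two distinct items; ep_find() below matches
 * on the (file, fd) pair accordingly.
 */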
/*
 * Returns the hash entry ( struct list_head * ) of the passed index.
 */
static struct list_head *ep_hash_entry(struct eventpoll *ep, unsigned int index)
{
	return (struct list_head *) (ep->hpages[index / EP_HENTRY_X_PAGE] +
				     (index % EP_HENTRY_X_PAGE) * sizeof(struct list_head));
}
static int ep_init(struct eventpoll *ep, unsigned int hashbits)
{
	int error;
	unsigned int i, hsize;

	rwlock_init(&ep->lock);
	init_rwsem(&ep->sem);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);

	/* Hash allocation and setup */
	ep->hashbits = hashbits;
	error = ep_alloc_pages(ep->hpages, EP_HASH_PAGES(ep->hashbits));
	if (error)
		goto eexit_1;

	/* Initialize hash buckets */
	for (i = 0, hsize = 1 << hashbits; i < hsize; i++)
		INIT_LIST_HEAD(ep_hash_entry(ep, i));

	return 0;

eexit_1:
	return error;
}
static void ep_free(struct eventpoll *ep)
{
	unsigned int i, hsize;
	struct list_head *lsthead, *lnk;
	struct epitem *epi;

	/* We need to release all tasks waiting for this file */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(&psw, &ep->poll_wait);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release() while we're freeing the "struct eventpoll".
	 * We do not need to hold "ep->sem" here because the epoll file
	 * is on the way to be removed and no one has references to it
	 * anymore. The only hit might come from eventpoll_release() but
	 * holding "epsem" is sufficient here.
	 */
	down(&epsem);

	/*
	 * Walks through the whole hash by unregistering poll callbacks.
	 */
	for (i = 0, hsize = 1 << ep->hashbits; i < hsize; i++) {
		lsthead = ep_hash_entry(ep, i);

		list_for_each(lnk, lsthead) {
			epi = list_entry(lnk, struct epitem, llink);

			ep_unregister_pollwait(ep, epi);
		}
	}

	/*
	 * Walks through the whole hash by freeing each "struct epitem". At this
	 * point we are sure no poll callbacks will be lingering around, and also by
	 * write-holding "sem" we can be sure that no file cleanup code will hit
	 * us during this operation. So we can avoid the lock on "ep->lock".
	 */
	for (i = 0, hsize = 1 << ep->hashbits; i < hsize; i++) {
		lsthead = ep_hash_entry(ep, i);

		while (!list_empty(lsthead)) {
			epi = list_entry(lsthead->next, struct epitem, llink);

			ep_remove(ep, epi);
		}
	}

	up(&epsem);

	/* Free hash pages */
	ep_free_pages(ep->hpages, EP_HASH_PAGES(ep->hashbits));
}
/*
 * Search the file inside the eventpoll hash. It adds a usage count to
 * the returned item, so the caller must call ep_release_epitem()
 * after it has finished using the "struct epitem".
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
	unsigned long flags;
	struct list_head *lsthead, *lnk;
	struct epitem *epi = NULL;

	read_lock_irqsave(&ep->lock, flags);

	lsthead = ep_hash_entry(ep, ep_hash_index(ep, file, fd));
	list_for_each(lnk, lsthead) {
		epi = list_entry(lnk, struct epitem, llink);

		if (epi->file == file && epi->fd == fd) {
			ep_use_epitem(epi);
			break;
		}
		epi = NULL;
	}

	read_unlock_irqrestore(&ep->lock, flags);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_find(%p) -> %p\n",
		     current, file, epi));

	return epi;
}
/*
 * Increment the usage count of the "struct epitem" making sure
 * that the user will have a valid pointer to reference.
 */
static void ep_use_epitem(struct epitem *epi)
{
	atomic_inc(&epi->usecnt);
}
/*
 * Decrement ( release ) the usage count by signaling that the user
 * has finished using the structure. It might lead to freeing the
 * structure itself if the count goes to zero.
 */
static void ep_release_epitem(struct epitem *epi)
{
	if (atomic_dec_and_test(&epi->usecnt))
		EPI_MEM_FREE(epi);
}
/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct epitem *epi = EP_ITEM_FROM_EPQUEUE(pt);
	struct eppoll_entry *pwq;

	if (epi->nwait >= 0 && (pwq = PWQ_MEM_ALLOC())) {
		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
		pwq->whead = whead;
		pwq->base = epi;
		add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		epi->nwait++;
	} else {
		/* We have to signal that an error occurred */
		epi->nwait = -1;
	}
}
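/*
 * Illustration only ( not from the original file ): the callback above
 * runs when ep_insert() calls tfile->f_op->poll(tfile, &epq.pt) and the
 * target driver's poll method hands our poll_table to poll_wait(). A
 * typical driver looks roughly like this; names are made up:
 */
#if 0
static unsigned int some_driver_poll(struct file *file, poll_table *wait)
{
	/* routes into ep_ptable_queue_proc() via the poll_table callback */
	poll_wait(file, &some_driver_waitq, wait);

	return some_driver_ready_mask(file);
}
#endif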
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd)
{
	int error, revents, pwake = 0;
	unsigned long flags;
	struct epitem *epi;
	struct ep_pqueue epq;

	error = -ENOMEM;
	if (!(epi = EPI_MEM_ALLOC()))
		goto eexit_1;

	/* Item initialization follows here ... */
	INIT_LIST_HEAD(&epi->llink);
	INIT_LIST_HEAD(&epi->rdllink);
	INIT_LIST_HEAD(&epi->fllink);
	INIT_LIST_HEAD(&epi->txlink);
	INIT_LIST_HEAD(&epi->pwqlist);
	epi->ep = ep;
	epi->file = tfile;
	epi->fd = fd;
	epi->event = *event;
	atomic_set(&epi->usecnt, 1);
	epi->nwait = 0;

	/* Initialize the poll table using the queue callback */
	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);

	/*
	 * Attach the item to the poll hooks and get current event bits.
	 * We can safely use the file* here because its usage count has
	 * been increased by the caller of this function.
	 */
	revents = tfile->f_op->poll(tfile, &epq.pt);

	/*
	 * We have to check if something went wrong during the poll wait queue
	 * install process. Namely an allocation for a wait queue failed due
	 * to high memory pressure.
	 */
	if (epi->nwait < 0)
		goto eexit_2;

	/* Add the current item to the list of active epoll hooks for this file */
	spin_lock(&tfile->f_ep_lock);
	list_add_tail(&epi->fllink, &tfile->f_ep_links);
	spin_unlock(&tfile->f_ep_lock);

	/* We have to drop the new item inside our item list to keep track of it */
	write_lock_irqsave(&ep->lock, flags);

	/* Add the current item to the hash table */
	list_add(&epi->llink, ep_hash_entry(ep, ep_hash_index(ep, tfile, fd)));

	/* If the file is already "ready" we drop it inside the ready list */
	if ((revents & event->events) && !EP_IS_LINKED(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);

		/* Notify waiting tasks that events are available */
		if (waitqueue_active(&ep->wq))
			wake_up(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_insert(%p, %p, %d)\n",
		     current, ep, tfile, fd));

	return 0;

eexit_2:
	ep_unregister_pollwait(ep, epi);

	/*
	 * We need to do this because an event could have arrived on some
	 * allocated wait queue.
	 */
	write_lock_irqsave(&ep->lock, flags);
	if (EP_IS_LINKED(&epi->rdllink))
		EP_LIST_DEL(&epi->rdllink);
	write_unlock_irqrestore(&ep->lock, flags);

	EPI_MEM_FREE(epi);
eexit_1:
	return error;
}
/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status.
 */
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
	int pwake = 0;
	unsigned int revents;
	unsigned long flags;

	/*
	 * Set the new event interest mask before calling f_op->poll(), otherwise
	 * a potential race might occur. In fact if we do this operation inside
	 * the lock, an event might happen between the f_op->poll() call and the
	 * new event set registering.
	 */
	epi->event.events = event->events;

	/*
	 * Get current event bits. We can safely use the file* here because
	 * its usage count has been increased by the caller of this function.
	 */
	revents = epi->file->f_op->poll(epi->file, NULL);

	write_lock_irqsave(&ep->lock, flags);

	/* Copy the data member from inside the lock */
	epi->event.data = event->data;

	/*
	 * If the item is not linked to the hash it means that it's on its
	 * way toward removal. Do nothing in this case.
	 */
	if (EP_IS_LINKED(&epi->llink)) {
		/*
		 * If the item is "hot" and it is not registered inside the ready
		 * list, push it inside. If the item is not "hot" and it is currently
		 * registered inside the ready list, unlink it.
		 */
		if (revents & event->events) {
			if (!EP_IS_LINKED(&epi->rdllink)) {
				list_add_tail(&epi->rdllink, &ep->rdllist);

				/* Notify waiting tasks that events are available */
				if (waitqueue_active(&ep->wq))
					wake_up(&ep->wq);
				if (waitqueue_active(&ep->poll_wait))
					pwake++;
			}
		} else if (EP_IS_LINKED(&epi->rdllink))
			EP_LIST_DEL(&epi->rdllink);
	}

	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	return 0;
}
/*
 * This function unregisters poll callbacks from the associated file descriptor.
 * Since this must be called without holding "ep->lock" the atomic exchange trick
 * will protect us from multiple unregistrations.
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	int nwait;
	struct list_head *lsthead = &epi->pwqlist;
	struct eppoll_entry *pwq;

	/* This is called without locks, so we need the atomic exchange */
	nwait = xchg(&epi->nwait, 0);

	if (nwait) {
		while (!list_empty(lsthead)) {
			pwq = list_entry(lsthead->next, struct eppoll_entry, llink);

			EP_LIST_DEL(&pwq->llink);
			remove_wait_queue(pwq->whead, &pwq->wait);
			PWQ_MEM_FREE(pwq);
		}
	}
}
/*
 * Unlink the "struct epitem" from all places it might have been hooked up.
 * This function must be called with the write IRQ lock on "ep->lock" held.
 */
static int ep_unlink(struct eventpoll *ep, struct epitem *epi)
{
	int error;

	/*
	 * It can happen that this one is called for an item already unlinked.
	 * The check protects us from doing a double unlink ( crash ).
	 */
	error = -ENOENT;
	if (!EP_IS_LINKED(&epi->llink))
		goto eexit_1;

	/*
	 * Clear the event mask for the unlinked item. This will avoid item
	 * notifications being sent after the unlink operation from inside
	 * the kernel->userspace event transfer loop.
	 */
	epi->event.events = 0;

	/*
	 * At this point it is safe to do the job, unlink the item from our list.
	 * This operation together with the above check closes the door to
	 * double unlinks.
	 */
	EP_LIST_DEL(&epi->llink);

	/*
	 * If the item we are going to remove is inside the ready file descriptors
	 * we want to remove it from this list to avoid stale events.
	 */
	if (EP_IS_LINKED(&epi->rdllink))
		EP_LIST_DEL(&epi->rdllink);

	error = 0;
eexit_1:

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_unlink(%p, %p) = %d\n",
		     current, ep, epi->file, error));

	return error;
}
/*
 * Removes a "struct epitem" from the eventpoll hash and deallocates
 * all the associated resources.
 */
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
	int error;
	unsigned long flags;
	struct file *file = epi->file;

	/*
	 * Removes poll wait queue hooks. We _have_ to do this without holding
	 * the "ep->lock" otherwise a deadlock might occur. This because of the
	 * sequence of the lock acquisition. Here we do "ep->lock" then the wait
	 * queue head lock when unregistering the wait queue. The wakeup callback
	 * will run by holding the wait queue head lock and will call our callback
	 * that will try to get "ep->lock".
	 */
	ep_unregister_pollwait(ep, epi);

	/* Remove the current item from the list of epoll hooks */
	spin_lock(&file->f_ep_lock);
	if (EP_IS_LINKED(&epi->fllink))
		EP_LIST_DEL(&epi->fllink);
	spin_unlock(&file->f_ep_lock);

	/* We need to acquire the write IRQ lock before calling ep_unlink() */
	write_lock_irqsave(&ep->lock, flags);

	/* Really unlink the item from the hash */
	error = ep_unlink(ep, epi);

	write_unlock_irqrestore(&ep->lock, flags);

	if (error)
		goto eexit_1;

	/* At this point it is safe to free the eventpoll item */
	ep_release_epitem(epi);

	error = 0;
eexit_1:
	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p) = %d\n",
		     current, ep, file, error));

	return error;
}
/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync)
{
	int pwake = 0;
	unsigned long flags;
	struct epitem *epi = EP_ITEM_FROM_WAIT(wait);
	struct eventpoll *ep = epi->ep;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: poll_callback(%p) epi=%p ep=%p\n",
		     current, epi->file, epi, ep));

	write_lock_irqsave(&ep->lock, flags);

	/* If this file is already in the ready list we exit soon */
	if (EP_IS_LINKED(&epi->rdllink))
		goto is_linked;

	list_add_tail(&epi->rdllink, &ep->rdllist);

is_linked:
	/*
	 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq))
		wake_up(&ep->wq);
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);

	return 1;
}
static int ep_eventpoll_close(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	/* If "ep" is NULL we have nothing to do */
	if (ep) {
		ep_free(ep);
		kfree(ep);
	}

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: close() ep=%p\n", current, ep));
	return 0;
}
static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	unsigned int pollflags = 0;
	unsigned long flags;
	struct eventpoll *ep = file->private_data;

	/* Insert inside our poll wait queue */
	poll_wait(file, &ep->poll_wait, wait);

	/* Check our condition */
	read_lock_irqsave(&ep->lock, flags);
	if (!list_empty(&ep->rdllist))
		pollflags = POLLIN | POLLRDNORM;
	read_unlock_irqrestore(&ep->lock, flags);

	return pollflags;
}
/*
 * Since we have to release the lock during the __copy_to_user() operation and
 * during the f_op->poll() call, we try to collect the maximum number of items
 * by reducing the irqlock/irqunlock switching rate.
 */
static int ep_collect_ready_items(struct eventpoll *ep, struct list_head *txlist, int maxevents)
{
	int nepi;
	unsigned long flags;
	struct list_head *lsthead = &ep->rdllist, *lnk;
	struct epitem *epi;

	write_lock_irqsave(&ep->lock, flags);

	for (nepi = 0, lnk = lsthead->next; lnk != lsthead && nepi < maxevents;) {
		epi = list_entry(lnk, struct epitem, rdllink);

		lnk = lnk->next;

		/* If this file is already in the transfer list we skip it */
		if (!EP_IS_LINKED(&epi->txlink)) {
			/*
			 * This is initialized in this way so that the default
			 * behaviour of the reinjecting code will be to push back
			 * the item inside the ready list.
			 */
			epi->revents = epi->event.events;

			/* Link the ready item into the transfer list */
			list_add(&epi->txlink, txlist);
			nepi++;

			/*
			 * Unlink the item from the ready list.
			 */
			EP_LIST_DEL(&epi->rdllink);
		}
	}

	write_unlock_irqrestore(&ep->lock, flags);

	return nepi;
}
/*
 * This function is called without holding the "ep->lock" since the call to
 * __copy_to_user() might sleep, and also f_op->poll() might reenable the IRQs
 * because of the way poll() is traditionally implemented in Linux.
 */
static int ep_send_events(struct eventpoll *ep, struct list_head *txlist,
			  struct epoll_event __user *events)
{
	int eventcnt = 0, eventbuf = 0;
	unsigned int revents;
	struct list_head *lnk;
	struct epitem *epi;
	struct epoll_event event[EP_MAX_BUF_EVENTS];

	/*
	 * We can loop without lock because this is a task private list.
	 * The test done during the collection loop will guarantee us that
	 * another task will not try to collect this file. Also, items
	 * cannot vanish during the loop because we are holding "sem".
	 */
	list_for_each(lnk, txlist) {
		epi = list_entry(lnk, struct epitem, txlink);

		/*
		 * Get the ready file event set. We can safely use the file
		 * because we are holding the "sem" in read and this will
		 * guarantee that both the file and the item will not vanish.
		 */
		revents = epi->file->f_op->poll(epi->file, NULL);

		/*
		 * Set the return event set for the current file descriptor.
		 * Note that only the task that was successfully able to link
		 * the item to its "txlist" will write this field.
		 */
		epi->revents = revents & epi->event.events;

		if (epi->revents) {
			event[eventbuf] = epi->event;
			event[eventbuf].events &= revents;
			eventbuf++;
			if (eventbuf == EP_MAX_BUF_EVENTS) {
				if (__copy_to_user(&events[eventcnt], event,
						   eventbuf * sizeof(struct epoll_event)))
					return -EFAULT;
				eventcnt += eventbuf;
				eventbuf = 0;
			}
		}
	}

	if (eventbuf) {
		if (__copy_to_user(&events[eventcnt], event,
				   eventbuf * sizeof(struct epoll_event)))
			return -EFAULT;
		eventcnt += eventbuf;
	}

	return eventcnt;
}
/*
 * Walk through the transfer list we collected with ep_collect_ready_items()
 * and, if 1) the item is still "alive" 2) its event set is not empty 3) it's
 * not already linked, link it to the ready list. Same as above, we are holding
 * "sem" so items cannot vanish underneath our nose.
 */
static void ep_reinject_items(struct eventpoll *ep, struct list_head *txlist)
{
	int ricnt = 0, pwake = 0;
	unsigned long flags;
	struct epitem *epi;

	write_lock_irqsave(&ep->lock, flags);

	while (!list_empty(txlist)) {
		epi = list_entry(txlist->next, struct epitem, txlink);

		/* Unlink the current item from the transfer list */
		EP_LIST_DEL(&epi->txlink);

		/*
		 * If the item is no longer linked to the interest set, we don't
		 * have to push it inside the ready list because the following
		 * ep_release_epitem() is going to drop it. Also, if the current
		 * item is set to have an Edge Triggered behaviour, we don't have
		 * to push it back either.
		 */
		if (EP_IS_LINKED(&epi->llink) && !(epi->event.events & EPOLLET) &&
		    (epi->revents & epi->event.events) && !EP_IS_LINKED(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);
			ricnt++;
		}
	}

	if (ricnt) {
		/*
		 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
		 * wait list.
		 */
		if (waitqueue_active(&ep->wq))
			wake_up(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	write_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&psw, &ep->poll_wait);
}
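/*
 * Level vs edge triggered behaviour in the reinject step above,
 * illustration only: in the default level-triggered mode an item whose
 * events are still pending goes straight back on "rdllist", so the next
 * epoll_wait() reports it again. With EPOLLET the item is not
 * reinjected and only reappears once ep_poll_callback() fires for a new
 * event, which is why edge-triggered users must drain the fd ( e.g.
 * read() until EAGAIN ) before waiting again.
 */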
/*
 * Perform the transfer of events to user space.
 */
static int ep_events_transfer(struct eventpoll *ep,
			      struct epoll_event __user *events, int maxevents)
{
	int eventcnt = 0;
	struct list_head txlist;

	INIT_LIST_HEAD(&txlist);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release() and epoll_ctl(EPOLL_CTL_DEL).
	 */
	down_read(&ep->sem);

	/* Collect/extract ready items */
	if (ep_collect_ready_items(ep, &txlist, maxevents) > 0) {
		/* Build result set in userspace */
		eventcnt = ep_send_events(ep, &txlist, events);

		/* Reinject ready items into the ready list */
		ep_reinject_items(ep, &txlist);
	}

	up_read(&ep->sem);

	return eventcnt;
}
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
		   int maxevents, long timeout)
{
	int res, eavail;
	unsigned long flags;
	long jtimeout;
	wait_queue_t wait;

	/*
	 * Calculate the timeout by checking for the "infinite" value ( -1 )
	 * and the overflow condition. The passed timeout is in milliseconds,
	 * that's why (t * HZ) / 1000.
	 */
	jtimeout = timeout == -1 || timeout > (MAX_SCHEDULE_TIMEOUT - 1000) / HZ ?
		MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000;
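	/*
	 * Worked example ( illustration only, assuming HZ = 100 ): a 1000 ms
	 * timeout becomes (1000 * 100 + 999) / 1000 = 100 jiffies, and the
	 * "+ 999" rounds up so that a 1 ms timeout maps to 1 jiffy, not 0.
	 */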
retry:
	write_lock_irqsave(&ep->lock, flags);

	res = 0;
	if (list_empty(&ep->rdllist)) {
		/*
		 * We don't have any available event to return to the caller.
		 * We need to sleep here, and we will be woken up by
		 * ep_poll_callback() when events become available.
		 */
		init_waitqueue_entry(&wait, current);
		add_wait_queue(&ep->wq, &wait);

		for (;;) {
			/*
			 * We don't want to sleep if the ep_poll_callback() sends us
			 * a wakeup in between. That's why we set the task state
			 * to TASK_INTERRUPTIBLE before doing the checks.
			 */
			set_current_state(TASK_INTERRUPTIBLE);
			if (!list_empty(&ep->rdllist) || !jtimeout)
				break;
			if (signal_pending(current)) {
				res = -EINTR;
				break;
			}

			write_unlock_irqrestore(&ep->lock, flags);
			jtimeout = schedule_timeout(jtimeout);
			write_lock_irqsave(&ep->lock, flags);
		}
		remove_wait_queue(&ep->wq, &wait);

		set_current_state(TASK_RUNNING);
	}

	/* Is it worth to try to dig for events ? */
	eavail = !list_empty(&ep->rdllist);

	write_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Try to transfer events to user space. In case we get 0 events and
	 * there's still timeout left over, we go trying again in search of
	 * more luck.
	 */
	if (!res && eavail &&
	    !(res = ep_events_transfer(ep, events, maxevents)) && jtimeout)
		goto retry;

	return res;
}
static int eventpollfs_delete_dentry(struct dentry *dentry)
{
	return 1;
}
static struct inode *ep_eventpoll_inode(void)
{
	int error = -ENOMEM;
	struct inode *inode = new_inode(eventpoll_mnt->mnt_sb);

	if (!inode)
		goto eexit_1;

	inode->i_fop = &eventpoll_fops;

	/*
	 * Mark the inode dirty from the very beginning,
	 * that way it will never be moved to the dirty
	 * list because mark_inode_dirty() will think
	 * that it already _is_ on the dirty list.
	 */
	inode->i_state = I_DIRTY;
	inode->i_mode = S_IRUSR | S_IWUSR;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	inode->i_blksize = PAGE_SIZE;
	return inode;

eexit_1:
	return ERR_PTR(error);
}
static struct super_block *
eventpollfs_get_sb(struct file_system_type *fs_type, int flags,
		   const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "eventpoll:", NULL, EVENTPOLLFS_MAGIC);
}
static int __init eventpoll_init(void)
{
	int error;

	init_MUTEX(&epsem);

	/* Initialize the structure used to perform safe poll wait head wake ups */
	ep_poll_safewake_init(&psw);

	/* Allocates slab cache used to allocate "struct epitem" items */
	error = -ENOMEM;
	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
				      0, SLAB_HWCACHE_ALIGN | EPI_SLAB_DEBUG,
				      NULL, NULL);
	if (!epi_cache)
		goto eexit_1;

	/* Allocates slab cache used to allocate "struct eppoll_entry" */
	pwq_cache = kmem_cache_create("eventpoll_pwq",
				      sizeof(struct eppoll_entry), 0,
				      EPI_SLAB_DEBUG, NULL, NULL);
	if (!pwq_cache)
		goto eexit_2;

	/*
	 * Register the virtual file system that will be the source of inodes
	 * for the eventpoll files
	 */
	error = register_filesystem(&eventpoll_fs_type);
	if (error)
		goto eexit_3;

	/* Mount the above commented virtual file system */
	eventpoll_mnt = kern_mount(&eventpoll_fs_type);
	error = PTR_ERR(eventpoll_mnt);
	if (IS_ERR(eventpoll_mnt))
		goto eexit_4;

	DNPRINTK(3, (KERN_INFO "[%p] eventpoll: successfully initialized.\n", current));

	return 0;

eexit_4:
	unregister_filesystem(&eventpoll_fs_type);
eexit_3:
	kmem_cache_destroy(pwq_cache);
eexit_2:
	kmem_cache_destroy(epi_cache);
eexit_1:
	return error;
}
static void __exit eventpoll_exit(void)
{
	/* Undo all operations done inside eventpoll_init() */
	unregister_filesystem(&eventpoll_fs_type);
	mntput(eventpoll_mnt);
	kmem_cache_destroy(pwq_cache);
	kmem_cache_destroy(epi_cache);
}

module_init(eventpoll_init);
module_exit(eventpoll_exit);

MODULE_LICENSE("GPL");