/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
};

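/*
 * Illustrative sketch (not part of this file): the userspace view of the
 * "count" semantics described above, assuming a Linux host with
 * <sys/eventfd.h> available.
 *
 *	int efd = eventfd(0, 0);
 *	uint64_t v = 3;
 *
 *	write(efd, &v, sizeof(v));	// count += 3, wakes up "wqh" waiters
 *	v = 4;
 *	write(efd, &v, sizeof(v));	// count += 4 -> count == 7
 *	read(efd, &v, sizeof(v));	// v == 7, count reset to 0
 */
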
/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value of the counter to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the
 * ULLONG_MAX value, and we signal this as an overflow condition by returning
 * a POLLERR to poll(2).
 *
 * Returns @n in case of success, a non-negative number lower than @n in case
 * of overflow, or the following error codes:
 *
 * -EINVAL	: The value of @n is negative.
 */
int eventfd_signal(struct eventfd_ctx *ctx, int n)
{
	unsigned long flags;

	if (n < 0)
		return -EINVAL;
	spin_lock_irqsave(&ctx->wqh.lock, flags);
	if (ULLONG_MAX - ctx->count < n)
		n = (int) (ULLONG_MAX - ctx->count);
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, POLLIN);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);

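/*
 * Illustrative sketch (not part of this file): a driver signalling an
 * eventfd from atomic context, which is exactly what eventfd_signal() is
 * for. "struct my_dev" and its "trigger" field are hypothetical; the
 * context would have been obtained earlier with eventfd_ctx_fdget().
 *
 *	static irqreturn_t my_dev_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		eventfd_signal(dev->trigger, 1);
 *		return IRQ_HANDLED;
 *	}
 */
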
static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_get - Acquires a reference to the internal eventfd context.
 * @ctx: [in] Pointer to the eventfd context.
 *
 * Returns: In case of success, returns a pointer to the eventfd context.
 */
struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_get);

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_get() or eventfd_ctx_fdget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, POLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static unsigned int eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	unsigned int events = 0;
	unsigned long flags;

	poll_wait(file, &ctx->wqh, wait);

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	if (ctx->count > 0)
		events |= POLLIN;
	if (ctx->count == ULLONG_MAX)
		events |= POLLERR;
	if (ULLONG_MAX - 1 > ctx->count)
		events |= POLLOUT;
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return events;
}

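/*
 * Illustrative sketch (not part of this file): how the mask computed above
 * maps to a userspace poll(2) caller.
 *
 *	struct pollfd pfd = { .fd = efd, .events = POLLIN | POLLOUT };
 *
 *	poll(&pfd, 1, -1);
 *	// POLLIN:  count > 0, a read(2) would not block
 *	// POLLOUT: a write(2) of at least 1 would not block
 *	// POLLERR: count reached ULLONG_MAX (eventfd_signal() overflow)
 */
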
static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
	ctx->count -= *cnt;
}

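/*
 * Illustrative sketch (not part of this file): the EFD_SEMAPHORE behaviour
 * implemented above, as seen from userspace. Each read(2) decrements the
 * counter by one instead of resetting it.
 *
 *	int efd = eventfd(3, EFD_SEMAPHORE);
 *	uint64_t v;
 *
 *	read(efd, &v, sizeof(v));	// v == 1, count 3 -> 2
 *	read(efd, &v, sizeof(v));	// v == 1, count 2 -> 1
 */
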
/**
 * eventfd_ctx_remove_wait_queue - Read the current counter and remove the wait queue.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error codes:
 *
 * -EAGAIN      : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, POLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);

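/*
 * Illustrative sketch (not part of this file): a consumer (in the style of
 * an irqfd-like user) tearing down a wait queue entry it had hooked onto the
 * eventfd wait queue, while atomically consuming any pending counter value.
 * "item" and its fields are hypothetical.
 *
 *	__u64 cnt;
 *
 *	eventfd_ctx_remove_wait_queue(item->ctx, &item->wait, &cnt);
 *	if (cnt != 0)
 *		item->handle_pending(item);	// events fired before teardown
 */
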
/**
 * eventfd_ctx_read - Reads the eventfd counter, or waits if it is zero.
 * @ctx: [in] Pointer to eventfd context.
 * @no_wait: [in] Different from zero if the operation should not block.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error codes:
 *
 * -EAGAIN      : The operation would have blocked but @no_wait was non-zero.
 * -ERESTARTSYS : A signal interrupted the wait operation.
 *
 * If @no_wait is zero, the function might sleep until the eventfd internal
 * counter becomes greater than zero.
 */
ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt)
{
	ssize_t res;
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_irq(&ctx->wqh.lock);
	*cnt = 0;
	res = -EAGAIN;
	if (ctx->count > 0)
		res = 0;
	else if (!no_wait) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count > 0) {
				res = 0;
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res == 0)) {
		eventfd_ctx_do_read(ctx, cnt);
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, POLLOUT);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_read);

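/*
 * Illustrative sketch (not part of this file): a kernel-side consumer doing
 * a possibly blocking read of the counter through the context, independent
 * of any struct file. "ctx" is assumed to come from eventfd_ctx_fdget().
 *
 *	__u64 cnt;
 *	ssize_t res;
 *
 *	res = eventfd_ctx_read(ctx, 0, &cnt);	// no_wait == 0: may sleep
 *	if (res < 0)
 *		return res;		// -ERESTARTSYS if interrupted
 *	// cnt now holds the value read; the counter was reset (or
 *	// decremented by one in EFD_SEMAPHORE mode)
 */
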
static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 cnt;

	if (count < sizeof(cnt))
		return -EINVAL;
	res = eventfd_ctx_read(ctx, file->f_flags & O_NONBLOCK, &cnt);
	if (res < 0)
		return res;

	return put_user(cnt, (__u64 __user *) buf) ? -EFAULT : sizeof(cnt);
}

static ssize_t eventfd_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, POLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}

static const struct file_operations eventfd_fops = {
	.release	= eventfd_release,
	.poll		= eventfd_poll,
	.read		= eventfd_read,
	.write		= eventfd_write,
	.llseek		= noop_llseek,
};

/**
 * eventfd_fget - Acquire a reference of an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointer:
 *
 * -EBADF    : Invalid @fd file descriptor.
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointers returned by eventfd_fget().
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct file *file;
	struct eventfd_ctx *ctx;

	file = eventfd_fget(fd);
	if (IS_ERR(file))
		return (struct eventfd_ctx *) file;
	ctx = eventfd_ctx_get(file->private_data);
	fput(file);

	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);

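/*
 * Illustrative sketch (not part of this file): the typical lifecycle of a
 * reference obtained with eventfd_ctx_fdget(). "efd" is a hypothetical file
 * descriptor handed in from userspace (e.g. through an ioctl argument).
 *
 *	struct eventfd_ctx *ctx;
 *
 *	ctx = eventfd_ctx_fdget(efd);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	eventfd_signal(ctx, 1);
 *	...
 *	eventfd_ctx_put(ctx);	// drops the kref taken by eventfd_ctx_fdget()
 */
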
/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL   : The @file pointer is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	return eventfd_ctx_get(file->private_data);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

/**
 * eventfd_file_create - Creates an eventfd file pointer.
 * @count: Initial eventfd counter value.
 * @flags: Flags for the eventfd file.
 *
 * This function creates an eventfd file pointer, without installing it into
 * the fd table. This is useful when the eventfd file is used during the
 * initialization of data structures that require extra setup after the
 * eventfd creation. So the eventfd creation is split into the file pointer
 * creation phase, and the file descriptor installation phase. In this way
 * races with userspace closing the newly installed file descriptor can be
 * avoided.
 *
 * Returns an eventfd file pointer, or a proper error pointer.
 */
struct file *eventfd_file_create(unsigned int count, int flags)
{
	struct file *file;
	struct eventfd_ctx *ctx;

	/* Check the EFD_* constants for consistency. */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return ERR_PTR(-EINVAL);

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;

	file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx,
				  O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
	if (IS_ERR(file))
		eventfd_free_ctx(ctx);

	return file;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	int fd, error;
	struct file *file;

	error = get_unused_fd_flags(flags & EFD_SHARED_FCNTL_FLAGS);
	if (error < 0)
		return error;
	fd = error;

	file = eventfd_file_create(count, flags);
	if (IS_ERR(file)) {
		error = PTR_ERR(file);
		goto err_put_unused_fd;
	}
	fd_install(fd, file);

	return fd;

err_put_unused_fd:
	put_unused_fd(fd);

	return error;
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return sys_eventfd2(count, 0);
}

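/*
 * Illustrative sketch (not part of this file): the userspace view of these
 * two entry points; the glibc eventfd() wrapper prefers the eventfd2 syscall
 * so that flags can be passed.
 *
 *	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
 *	if (efd < 0)
 *		perror("eventfd");
 */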