/*
 *  fs/timerfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 *  Thanks to Thomas Gleixner for code reviews and useful comments.
 */
11 #include <linux/file.h>
12 #include <linux/poll.h>
13 #include <linux/init.h>
15 #include <linux/sched.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/list.h>
19 #include <linux/spinlock.h>
20 #include <linux/time.h>
21 #include <linux/hrtimer.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/timerfd.h>
24 #include <linux/syscalls.h>
25 #include <linux/rcupdate.h>
31 wait_queue_head_t wqh
;
36 struct list_head clist
;
/* Timers on this list are cancelled when the realtime clock is set. */
static LIST_HEAD(cancel_list);
/* Serializes additions to and removals from cancel_list. */
static DEFINE_SPINLOCK(cancel_lock);
44 * This gets called when the timer event triggers. We set the "expired"
45 * flag, but we do not re-arm the timer (in case it's necessary,
46 * tintv.tv64 != 0) until the timer is accessed.
48 static enum hrtimer_restart
timerfd_tmrproc(struct hrtimer
*htmr
)
50 struct timerfd_ctx
*ctx
= container_of(htmr
, struct timerfd_ctx
, tmr
);
53 spin_lock_irqsave(&ctx
->wqh
.lock
, flags
);
56 wake_up_locked(&ctx
->wqh
);
57 spin_unlock_irqrestore(&ctx
->wqh
.lock
, flags
);
59 return HRTIMER_NORESTART
;
63 * Called when the clock was set to cancel the timers in the cancel
64 * list. This will wake up processes waiting on these timers. The
65 * wake-up requires ctx->ticks to be non zero, therefore we increment
66 * it before calling wake_up_locked().
68 void timerfd_clock_was_set(void)
70 ktime_t moffs
= ktime_get_monotonic_offset();
71 struct timerfd_ctx
*ctx
;
75 list_for_each_entry_rcu(ctx
, &cancel_list
, clist
) {
76 if (!ctx
->might_cancel
)
78 spin_lock_irqsave(&ctx
->wqh
.lock
, flags
);
79 if (ctx
->moffs
.tv64
!= moffs
.tv64
) {
80 ctx
->moffs
.tv64
= KTIME_MAX
;
82 wake_up_locked(&ctx
->wqh
);
84 spin_unlock_irqrestore(&ctx
->wqh
.lock
, flags
);
89 static void timerfd_remove_cancel(struct timerfd_ctx
*ctx
)
91 if (ctx
->might_cancel
) {
92 ctx
->might_cancel
= false;
93 spin_lock(&cancel_lock
);
94 list_del_rcu(&ctx
->clist
);
95 spin_unlock(&cancel_lock
);
99 static bool timerfd_canceled(struct timerfd_ctx
*ctx
)
101 if (!ctx
->might_cancel
|| ctx
->moffs
.tv64
!= KTIME_MAX
)
103 ctx
->moffs
= ktime_get_monotonic_offset();
107 static void timerfd_setup_cancel(struct timerfd_ctx
*ctx
, int flags
)
109 if (ctx
->clockid
== CLOCK_REALTIME
&& (flags
& TFD_TIMER_ABSTIME
) &&
110 (flags
& TFD_TIMER_CANCEL_ON_SET
)) {
111 if (!ctx
->might_cancel
) {
112 ctx
->might_cancel
= true;
113 spin_lock(&cancel_lock
);
114 list_add_rcu(&ctx
->clist
, &cancel_list
);
115 spin_unlock(&cancel_lock
);
117 } else if (ctx
->might_cancel
) {
118 timerfd_remove_cancel(ctx
);
122 static ktime_t
timerfd_get_remaining(struct timerfd_ctx
*ctx
)
126 remaining
= hrtimer_expires_remaining(&ctx
->tmr
);
127 return remaining
.tv64
< 0 ? ktime_set(0, 0): remaining
;
130 static int timerfd_setup(struct timerfd_ctx
*ctx
, int flags
,
131 const struct itimerspec
*ktmr
)
133 enum hrtimer_mode htmode
;
135 int clockid
= ctx
->clockid
;
137 htmode
= (flags
& TFD_TIMER_ABSTIME
) ?
138 HRTIMER_MODE_ABS
: HRTIMER_MODE_REL
;
140 texp
= timespec_to_ktime(ktmr
->it_value
);
143 ctx
->tintv
= timespec_to_ktime(ktmr
->it_interval
);
144 hrtimer_init(&ctx
->tmr
, clockid
, htmode
);
145 hrtimer_set_expires(&ctx
->tmr
, texp
);
146 ctx
->tmr
.function
= timerfd_tmrproc
;
147 if (texp
.tv64
!= 0) {
148 hrtimer_start(&ctx
->tmr
, texp
, htmode
);
149 if (timerfd_canceled(ctx
))
155 static int timerfd_release(struct inode
*inode
, struct file
*file
)
157 struct timerfd_ctx
*ctx
= file
->private_data
;
159 timerfd_remove_cancel(ctx
);
160 hrtimer_cancel(&ctx
->tmr
);
165 static unsigned int timerfd_poll(struct file
*file
, poll_table
*wait
)
167 struct timerfd_ctx
*ctx
= file
->private_data
;
168 unsigned int events
= 0;
171 poll_wait(file
, &ctx
->wqh
, wait
);
173 spin_lock_irqsave(&ctx
->wqh
.lock
, flags
);
176 spin_unlock_irqrestore(&ctx
->wqh
.lock
, flags
);
181 static ssize_t
timerfd_read(struct file
*file
, char __user
*buf
, size_t count
,
184 struct timerfd_ctx
*ctx
= file
->private_data
;
188 if (count
< sizeof(ticks
))
190 spin_lock_irq(&ctx
->wqh
.lock
);
191 if (file
->f_flags
& O_NONBLOCK
)
194 res
= wait_event_interruptible_locked_irq(ctx
->wqh
, ctx
->ticks
);
197 * If clock has changed, we do not care about the
198 * ticks and we do not rearm the timer. Userspace must
201 if (timerfd_canceled(ctx
)) {
210 if (ctx
->expired
&& ctx
->tintv
.tv64
) {
212 * If tintv.tv64 != 0, this is a periodic timer that
213 * needs to be re-armed. We avoid doing it in the timer
214 * callback to avoid DoS attacks specifying a very
215 * short timer period.
217 ticks
+= hrtimer_forward_now(&ctx
->tmr
,
219 hrtimer_restart(&ctx
->tmr
);
224 spin_unlock_irq(&ctx
->wqh
.lock
);
226 res
= put_user(ticks
, (u64 __user
*) buf
) ? -EFAULT
: sizeof(ticks
);
/* File operations for timerfd descriptors; read-only, not seekable. */
static const struct file_operations timerfd_fops = {
	.release	= timerfd_release,
	.poll		= timerfd_poll,
	.read		= timerfd_read,
	.llseek		= noop_llseek,
};
237 static struct file
*timerfd_fget(int fd
)
243 return ERR_PTR(-EBADF
);
244 if (file
->f_op
!= &timerfd_fops
) {
246 return ERR_PTR(-EINVAL
);
252 SYSCALL_DEFINE2(timerfd_create
, int, clockid
, int, flags
)
255 struct timerfd_ctx
*ctx
;
257 /* Check the TFD_* constants for consistency. */
258 BUILD_BUG_ON(TFD_CLOEXEC
!= O_CLOEXEC
);
259 BUILD_BUG_ON(TFD_NONBLOCK
!= O_NONBLOCK
);
261 if ((flags
& ~TFD_CREATE_FLAGS
) ||
262 (clockid
!= CLOCK_MONOTONIC
&&
263 clockid
!= CLOCK_REALTIME
))
266 ctx
= kzalloc(sizeof(*ctx
), GFP_KERNEL
);
270 init_waitqueue_head(&ctx
->wqh
);
271 ctx
->clockid
= clockid
;
272 hrtimer_init(&ctx
->tmr
, clockid
, HRTIMER_MODE_ABS
);
273 ctx
->moffs
= ktime_get_monotonic_offset();
275 ufd
= anon_inode_getfd("[timerfd]", &timerfd_fops
, ctx
,
276 O_RDWR
| (flags
& TFD_SHARED_FCNTL_FLAGS
));
283 SYSCALL_DEFINE4(timerfd_settime
, int, ufd
, int, flags
,
284 const struct itimerspec __user
*, utmr
,
285 struct itimerspec __user
*, otmr
)
288 struct timerfd_ctx
*ctx
;
289 struct itimerspec ktmr
, kotmr
;
292 if (copy_from_user(&ktmr
, utmr
, sizeof(ktmr
)))
295 if ((flags
& ~TFD_SETTIME_FLAGS
) ||
296 !timespec_valid(&ktmr
.it_value
) ||
297 !timespec_valid(&ktmr
.it_interval
))
300 file
= timerfd_fget(ufd
);
302 return PTR_ERR(file
);
303 ctx
= file
->private_data
;
305 timerfd_setup_cancel(ctx
, flags
);
308 * We need to stop the existing timer before reprogramming
309 * it to the new values.
312 spin_lock_irq(&ctx
->wqh
.lock
);
313 if (hrtimer_try_to_cancel(&ctx
->tmr
) >= 0)
315 spin_unlock_irq(&ctx
->wqh
.lock
);
320 * If the timer is expired and it's periodic, we need to advance it
321 * because the caller may want to know the previous expiration time.
322 * We do not update "ticks" and "expired" since the timer will be
323 * re-programmed again in the following timerfd_setup() call.
325 if (ctx
->expired
&& ctx
->tintv
.tv64
)
326 hrtimer_forward_now(&ctx
->tmr
, ctx
->tintv
);
328 kotmr
.it_value
= ktime_to_timespec(timerfd_get_remaining(ctx
));
329 kotmr
.it_interval
= ktime_to_timespec(ctx
->tintv
);
332 * Re-program the timer to the new value ...
334 ret
= timerfd_setup(ctx
, flags
, &ktmr
);
336 spin_unlock_irq(&ctx
->wqh
.lock
);
338 if (otmr
&& copy_to_user(otmr
, &kotmr
, sizeof(kotmr
)))
344 SYSCALL_DEFINE2(timerfd_gettime
, int, ufd
, struct itimerspec __user
*, otmr
)
347 struct timerfd_ctx
*ctx
;
348 struct itimerspec kotmr
;
350 file
= timerfd_fget(ufd
);
352 return PTR_ERR(file
);
353 ctx
= file
->private_data
;
355 spin_lock_irq(&ctx
->wqh
.lock
);
356 if (ctx
->expired
&& ctx
->tintv
.tv64
) {
359 hrtimer_forward_now(&ctx
->tmr
, ctx
->tintv
) - 1;
360 hrtimer_restart(&ctx
->tmr
);
362 kotmr
.it_value
= ktime_to_timespec(timerfd_get_remaining(ctx
));
363 kotmr
.it_interval
= ktime_to_timespec(ctx
->tintv
);
364 spin_unlock_irq(&ctx
->wqh
.lock
);
367 return copy_to_user(otmr
, &kotmr
, sizeof(kotmr
)) ? -EFAULT
: 0;