/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <asm/uaccess.h>
#include <linux/errno.h>

static int check_clock(clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
        if (!p || (CPUCLOCK_PERTHREAD(which_clock) ?
                   p->tgid != current->tgid : p->tgid != pid)) {
                error = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return error;
}

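/*
 * For reference, the clockid_t validated above packs a PID and a clock
 * type into one integer.  A minimal sketch of the encoding, assuming the
 * CPUCLOCK_* and MAKE_*_CPUCLOCK macros from <linux/posix-timers.h>:
 *
 *      clockid_t which = MAKE_THREAD_CPUCLOCK(tid, CPUCLOCK_PROF);
 *
 *      CPUCLOCK_PID(which);            decodes back to tid
 *      CPUCLOCK_PERTHREAD(which);      nonzero for a thread, not process, clock
 *      CPUCLOCK_WHICH(which);          CPUCLOCK_PROF, _VIRT, or _SCHED
 *
 * A pid of 0 always means the caller's own thread or process, which is
 * why check_clock() accepts it without a task lookup.
 */
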
static inline union cpu_time_count
timespec_to_sample(clockid_t which_clock, const struct timespec *tp)
{
        union cpu_time_count ret;
        ret.sched = 0;          /* high half always zero when .cpu used */
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                ret.sched = tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
        } else {
                ret.cpu = timespec_to_cputime(tp);
        }
        return ret;
}

static void sample_to_timespec(clockid_t which_clock,
                               union cpu_time_count cpu,
                               struct timespec *tp)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                tp->tv_sec = div_long_long_rem(cpu.sched,
                                               NSEC_PER_SEC, &tp->tv_nsec);
        } else {
                cputime_to_timespec(cpu.cpu, tp);
        }
}

static inline int cpu_time_before(clockid_t which_clock,
                                  union cpu_time_count now,
                                  union cpu_time_count then)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                return now.sched < then.sched;
        } else {
                return cputime_lt(now.cpu, then.cpu);
        }
}

static inline void cpu_time_add(clockid_t which_clock,
                                union cpu_time_count *acc,
                                union cpu_time_count val)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                acc->sched += val.sched;
        } else {
                acc->cpu = cputime_add(acc->cpu, val.cpu);
        }
}

static inline union cpu_time_count cpu_time_sub(clockid_t which_clock,
                                                union cpu_time_count a,
                                                union cpu_time_count b)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                a.sched -= b.sched;
        } else {
                a.cpu = cputime_sub(a.cpu, b.cpu);
        }
        return a;
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static inline void bump_cpu_timer(struct k_itimer *timer,
                                  union cpu_time_count now)
{
        int i;

        if (timer->it.cpu.incr.sched == 0)
                return;

        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                unsigned long long delta, incr;

                if (now.sched < timer->it.cpu.expires.sched)
                        return;
                incr = timer->it.cpu.incr.sched;
                delta = now.sched + incr - timer->it.cpu.expires.sched;
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; incr < delta - incr; i++)
                        incr = incr << 1;
                for (; i >= 0; incr >>= 1, i--) {
                        if (delta < incr)
                                continue;
                        timer->it.cpu.expires.sched += incr;
                        timer->it_overrun += 1 << i;
                        delta -= incr;
                }
        } else {
                cputime_t delta, incr;

                if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
                        return;
                incr = timer->it.cpu.incr.cpu;
                delta = cputime_sub(cputime_add(now.cpu, incr),
                                    timer->it.cpu.expires.cpu);
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
                        incr = cputime_add(incr, incr);
                for (; i >= 0; incr = cputime_halve(incr), i--) {
                        if (cputime_le(delta, incr))
                                continue;
                        timer->it.cpu.expires.cpu =
                                cputime_add(timer->it.cpu.expires.cpu, incr);
                        timer->it_overrun += 1 << i;
                        delta = cputime_sub(delta, incr);
                }
        }
}

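/*
 * The doubling/halving loops above advance the expiry time by whole
 * multiples of the increment without ever multiplying, so nothing can
 * overflow.  A worked example on plain integers, with incr = 10 and
 * delta = now + incr - expires = 73:
 *
 *      doubling:       10 -> 20 -> 40, stopping with i = 2
 *      halving:        40 <= 73: expires += 40, overrun += 4, delta = 33
 *                      20 <= 33: expires += 20, overrun += 2, delta = 13
 *                      10 <= 13: expires += 10, overrun += 1, delta = 3
 *
 * Seven overruns are recorded and expires ends up at now + 7, the first
 * expiry point that is still in the future.
 */
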
static inline cputime_t prof_ticks(struct task_struct *p)
{
        return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
        return p->utime;
}
static inline unsigned long long sched_ns(struct task_struct *p)
{
        return (p == current) ? current_sched_time(p) : p->sched_time;
}

int posix_cpu_clock_getres(clockid_t which_clock, struct timespec *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't have any idea of its true resolution
                         * exported, but it is much finer than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}

int posix_cpu_clock_set(clockid_t which_clock, const struct timespec *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0) {
                error = -EPERM;
        }
        return error;
}

/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(clockid_t which_clock, struct task_struct *p,
                            union cpu_time_count *cpu)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = sched_ns(p);
                break;
        }
        return 0;
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading, and
 * p->sighand->siglock held.
 */
static int cpu_clock_sample_group_locked(unsigned int clock_idx,
                                         struct task_struct *p,
                                         union cpu_time_count *cpu)
{
        struct task_struct *t = p;
        switch (clock_idx) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
                do {
                        cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = p->signal->utime;
                do {
                        cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = p->signal->sched_time;
                /* Add in each other live thread. */
                while ((t = next_thread(t)) != p) {
                        cpu->sched += t->sched_time;
                }
                if (p->tgid == current->tgid) {
                        /*
                         * We're sampling ourselves, so include the
                         * cycles not yet banked.  We still omit
                         * other threads running on other CPUs,
                         * so the total can always be behind as
                         * much as max(nthreads-1,ncpus) * (NSEC_PER_SEC/HZ).
                         */
                        cpu->sched += current_sched_time(current);
                } else {
                        cpu->sched += p->sched_time;
                }
                break;
        }
        return 0;
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        int ret;
        unsigned long flags;
        spin_lock_irqsave(&p->sighand->siglock, flags);
        ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
                                            cpu);
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
        return ret;
}

int posix_cpu_clock_get(clockid_t which_clock, struct timespec *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int error = -EINVAL;
        union cpu_time_count rtn;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                if (CPUCLOCK_PERTHREAD(which_clock)) {
                        /*
                         * Sampling just ourselves we can do with no locking.
                         */
                        error = cpu_clock_sample(which_clock,
                                                 current, &rtn);
                } else {
                        read_lock(&tasklist_lock);
                        error = cpu_clock_sample_group(which_clock,
                                                       current, &rtn);
                        read_unlock(&tasklist_lock);
                }
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                read_lock(&tasklist_lock);
                p = find_task_by_pid(pid);
                if (p) {
                        if (CPUCLOCK_PERTHREAD(which_clock)) {
                                if (p->tgid == current->tgid) {
                                        error = cpu_clock_sample(which_clock,
                                                                 p, &rtn);
                                }
                        } else if (p->tgid == pid && p->signal) {
                                error = cpu_clock_sample_group(which_clock,
                                                               p, &rtn);
                        }
                }
                read_unlock(&tasklist_lock);
        }

        if (error)
                return error;
        sample_to_timespec(which_clock, rtn, tp);
        return 0;
}

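/*
 * Seen from user space, this is the clock_gettime(2) path.  A minimal
 * sketch (error handling omitted) that reads the caller's process CPU
 * clock:
 *
 *      #include <stdio.h>
 *      #include <time.h>
 *
 *      int main(void)
 *      {
 *              struct timespec ts;
 *              clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
 *              printf("CPU time: %ld.%09ld\n", (long) ts.tv_sec, ts.tv_nsec);
 *              return 0;
 *      }
 */
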
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);
        new_timer->it.cpu.incr.sched = 0;
        new_timer->it.cpu.expires.sched = 0;

        read_lock(&tasklist_lock);
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_pid(pid);
                        if (p && p->tgid != current->tgid)
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_pid(pid);
                        if (p && p->tgid != pid)
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return ret;
}

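/*
 * User space reaches this function through timer_create(2).  A minimal
 * sketch (error handling omitted) that creates a timer on the caller's
 * own thread CPU clock, delivering SIGPROF on expiry:
 *
 *      #include <signal.h>
 *      #include <time.h>
 *
 *      timer_t tid;
 *      struct sigevent sev = {
 *              .sigev_notify = SIGEV_SIGNAL,
 *              .sigev_signo  = SIGPROF,
 *      };
 *      timer_create(CLOCK_THREAD_CPUTIME_ID, &sev, &tid);
 */
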
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;

        if (timer->it.cpu.firing)
                return TIMER_RETRY;

        if (unlikely(p == NULL))
                return 0;

        if (!list_empty(&timer->it.cpu.entry)) {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * We raced with the reaping of the task.
                         * The deletion should have cleared us off the list.
                         */
                        BUG_ON(!list_empty(&timer->it.cpu.entry));
                } else {
                        /*
                         * Take us off the task's timer list.
                         */
                        spin_lock(&p->sighand->siglock);
                        list_del(&timer->it.cpu.entry);
                        spin_unlock(&p->sighand->siglock);
                }
                read_unlock(&tasklist_lock);
        }
        put_task_struct(p);

        return 0;
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
                           cputime_t utime, cputime_t stime,
                           unsigned long long sched_time)
{
        struct cpu_timer_list *timer, *next;
        cputime_t ptime = cputime_add(utime, stime);

        list_for_each_entry_safe(timer, next, head, entry) {
                put_task_struct(timer->task);
                timer->task = NULL;
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, ptime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         ptime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                put_task_struct(timer->task);
                timer->task = NULL;
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, utime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         utime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                put_task_struct(timer->task);
                timer->task = NULL;
                list_del_init(&timer->entry);
                if (timer->expires.sched < sched_time) {
                        timer->expires.sched = 0;
                } else {
                        timer->expires.sched -= sched_time;
                }
        }
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cleanup_timers(tsk->cpu_timers,
                       tsk->utime, tsk->stime, tsk->sched_time);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        cleanup_timers(tsk->signal->cpu_timers,
                       cputime_add(tsk->utime, tsk->signal->utime),
                       cputime_add(tsk->stime, tsk->signal->stime),
                       tsk->sched_time + tsk->signal->sched_time);
}

/*
 * Set the expiry times of all the threads in the process so one of them
 * will go off before the process cumulative expiry total is reached.
 */
static void process_timer_rebalance(struct task_struct *p,
                                    unsigned int clock_idx,
                                    union cpu_time_count expires,
                                    union cpu_time_count val)
{
        cputime_t ticks, left;
        unsigned long long ns, nsleft;
        struct task_struct *t = p;
        unsigned int nthreads = atomic_read(&p->signal->live);

        switch (clock_idx) {
        default:
                BUG();
                break;
        case CPUCLOCK_PROF:
                left = cputime_div(cputime_sub(expires.cpu, val.cpu),
                                   nthreads);
                do {
                        if (!unlikely(t->exit_state)) {
                                ticks = cputime_add(prof_ticks(t), left);
                                if (cputime_eq(t->it_prof_expires,
                                               cputime_zero) ||
                                    cputime_gt(t->it_prof_expires, ticks)) {
                                        t->it_prof_expires = ticks;
                                }
                        }
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_VIRT:
                left = cputime_div(cputime_sub(expires.cpu, val.cpu),
                                   nthreads);
                do {
                        if (!unlikely(t->exit_state)) {
                                ticks = cputime_add(virt_ticks(t), left);
                                if (cputime_eq(t->it_virt_expires,
                                               cputime_zero) ||
                                    cputime_gt(t->it_virt_expires, ticks)) {
                                        t->it_virt_expires = ticks;
                                }
                        }
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_SCHED:
                nsleft = expires.sched - val.sched;
                do_div(nsleft, nthreads);
                do {
                        if (!unlikely(t->exit_state)) {
                                ns = t->sched_time + nsleft;
                                if (t->it_sched_expires == 0 ||
                                    t->it_sched_expires > ns) {
                                        t->it_sched_expires = ns;
                                }
                        }
                        t = next_thread(t);
                } while (t != p);
                break;
        }
}

static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
        /*
         * That's all for this thread or process.
         * We leave our residual in expires to be reported.
         */
        put_task_struct(timer->it.cpu.task);
        timer->it.cpu.task = NULL;
        timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
                                             timer->it.cpu.expires,
                                             now);
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;
        unsigned long i;

        head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
                p->cpu_timers : p->signal->cpu_timers);
        head += CPUCLOCK_WHICH(timer->it_clock);

        BUG_ON(!irqs_disabled());
        spin_lock(&p->sighand->siglock);

        listpos = head;
        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                list_for_each_entry(next, head, entry) {
                        if (next->expires.sched > nt->expires.sched) {
                                listpos = &next->entry;
                                break;
                        }
                }
        } else {
                list_for_each_entry(next, head, entry) {
                        if (cputime_gt(next->expires.cpu, nt->expires.cpu)) {
                                listpos = &next->entry;
                                break;
                        }
                }
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                /*
                 * We are the new earliest-expiring timer.
                 * If we are a thread timer, there can always
                 * be a process timer telling us to stop earlier.
                 */
                if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_PROF:
                                if (cputime_eq(p->it_prof_expires,
                                               cputime_zero) ||
                                    cputime_gt(p->it_prof_expires,
                                               nt->expires.cpu))
                                        p->it_prof_expires = nt->expires.cpu;
                                break;
                        case CPUCLOCK_VIRT:
                                if (cputime_eq(p->it_virt_expires,
                                               cputime_zero) ||
                                    cputime_gt(p->it_virt_expires,
                                               nt->expires.cpu))
                                        p->it_virt_expires = nt->expires.cpu;
                                break;
                        case CPUCLOCK_SCHED:
                                if (p->it_sched_expires == 0 ||
                                    p->it_sched_expires > nt->expires.sched)
                                        p->it_sched_expires = nt->expires.sched;
                                break;
                        }
                } else {
                        /*
                         * For a process timer, we must balance
                         * all the live threads' expirations.
                         */
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_VIRT:
                                if (!cputime_eq(p->signal->it_virt_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_virt_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                goto rebalance;
                        case CPUCLOCK_PROF:
                                if (!cputime_eq(p->signal->it_prof_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_prof_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
                                if (i != RLIM_INFINITY &&
                                    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
                                        break;
                                goto rebalance;
                        case CPUCLOCK_SCHED:
                        rebalance:
                                process_timer_rebalance(
                                        timer->it.cpu.task,
                                        CPUCLOCK_WHICH(timer->it_clock),
                                        timer->it.cpu.expires, now);
                                break;
                        }
                }
        }

        spin_unlock(&p->sighand->siglock);
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires.sched = 0;
        } else if (timer->it.cpu.incr.sched == 0) {
                /*
                 * One-shot timer.  Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires.sched = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer.  But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
        }
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                        struct itimerspec *new, struct itimerspec *old)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count old_expires, new_expires, val;
        int ret;

        if (unlikely(p == NULL)) {
                /*
                 * Timer refers to a dead task's clock.
                 */
                return -ESRCH;
        }

        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

        read_lock(&tasklist_lock);
        /*
         * We need the tasklist_lock to protect against reaping that
         * clears p->signal.  If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(p->signal == NULL)) {
                read_unlock(&tasklist_lock);
                put_task_struct(p);
                timer->it.cpu.task = NULL;
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        BUG_ON(!irqs_disabled());
        spin_lock(&p->sighand->siglock);
        old_expires = timer->it.cpu.expires;
        list_del_init(&timer->it.cpu.entry);
        spin_unlock(&p->sighand->siglock);

        /*
         * We need to sample the current value to convert the new
         * value from relative to absolute, and to convert the
         * old value from absolute to relative.  To set a process
         * timer, we need a sample to balance the thread expiry
         * times (in arm_timer).  With an absolute time, we must
         * check if it's already passed.  In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_clock_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires.sched == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has
                         * overrun already.  If it has,
                         * we'll report it as having overrun
                         * and with the next reloaded timer
                         * already ticking, though we are
                         * swallowing that pending
                         * notification here to install the
                         * new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (cpu_time_before(timer->it_clock, val,
                                            timer->it.cpu.expires)) {
                                old_expires = cpu_time_sub(
                                        timer->it_clock,
                                        timer->it.cpu.expires, val);
                                sample_to_timespec(timer->it_clock,
                                                   old_expires,
                                                   &old->it_value);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(timer->it.cpu.firing)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                read_unlock(&tasklist_lock);
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
                goto out;
        }

        if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
                cpu_time_add(timer->it_clock, &new_expires, val);
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            cpu_time_before(timer->it_clock, val, new_expires)) {
                arm_timer(timer, val);
        }

        read_unlock(&tasklist_lock);

        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
                                                &new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            !cpu_time_before(timer->it_clock, val, new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
 out:
        if (old) {
                sample_to_timespec(timer->it_clock,
                                   timer->it.cpu.incr, &old->it_interval);
        }
        return ret;
}

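/*
 * The user-space entry point for the function above is timer_settime(2).
 * A minimal sketch (error handling omitted, assuming a timer_t tid from
 * timer_create) that arms a CPU timer for 250 ms and reloads it every
 * 250 ms thereafter:
 *
 *      struct itimerspec its = {
 *              .it_value    = { .tv_sec = 0, .tv_nsec = 250000000 },
 *              .it_interval = { .tv_sec = 0, .tv_nsec = 250000000 },
 *      };
 *      timer_settime(tid, 0, &its, NULL);
 *
 * Passing TIMER_ABSTIME as the flags argument makes it_value an absolute
 * reading of the clock rather than a relative delay.
 */
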
void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
        union cpu_time_count now;
        struct task_struct *p = timer->it.cpu.task;
        int clear_dead;

        /*
         * Easy part: convert the reload time.
         */
        sample_to_timespec(timer->it_clock,
                           timer->it.cpu.incr, &itp->it_interval);

        if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all. */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                return;
        }

        if (unlikely(p == NULL)) {
                /*
                 * This task already died and the timer will never fire.
                 * In this case, expires is actually the dead value.
                 */
        dead:
                sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                   &itp->it_value);
                return;
        }

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                clear_dead = p->exit_state;
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Call the timer disarmed, nothing else to do.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = NULL;
                        timer->it.cpu.expires.sched = 0;
                        read_unlock(&tasklist_lock);
                        goto dead;
                } else {
                        cpu_clock_sample_group(timer->it_clock, p, &now);
                        clear_dead = (unlikely(p->exit_state) &&
                                      thread_group_empty(p));
                }
                read_unlock(&tasklist_lock);
        }

        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                if (timer->it.cpu.incr.sched == 0 &&
                    cpu_time_before(timer->it_clock,
                                    timer->it.cpu.expires, now)) {
                        /*
                         * Do-nothing timer expired and has no reload,
                         * so it's as if it was never set.
                         */
                        timer->it.cpu.expires.sched = 0;
                        itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                        return;
                }
                /*
                 * Account for any expirations and reloads that should
                 * have happened.
                 */
                bump_cpu_timer(timer, now);
        }

        if (unlikely(clear_dead)) {
                /*
                 * We've noticed that the thread is dead, but
                 * not yet reaped.  Take this opportunity to
                 * drop our task ref.
                 */
                clear_dead_task(timer, now);
                goto dead;
        }

        if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
                sample_to_timespec(timer->it_clock,
                                   cpu_time_sub(timer->it_clock,
                                                timer->it.cpu.expires, now),
                                   &itp->it_value);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet.  Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        struct list_head *timers = tsk->cpu_timers;

        tsk->it_prof_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
                        tsk->it_prof_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        tsk->it_virt_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
                        tsk->it_virt_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        tsk->it_sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (tsk->sched_time < t->expires.sched) {
                        tsk->it_sched_expires = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }
}

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers lists onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        struct signal_struct *const sig = tsk->signal;
        cputime_t utime, stime, ptime, virt_expires, prof_expires;
        unsigned long long sched_time, sched_expires;
        struct task_struct *t;
        struct list_head *timers = sig->cpu_timers;

        /*
         * Don't sample the current process CPU clocks if there are no timers.
         */
        if (list_empty(&timers[CPUCLOCK_PROF]) &&
            cputime_eq(sig->it_prof_expires, cputime_zero) &&
            sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
            list_empty(&timers[CPUCLOCK_VIRT]) &&
            cputime_eq(sig->it_virt_expires, cputime_zero) &&
            list_empty(&timers[CPUCLOCK_SCHED]))
                return;

        /*
         * Collect the current process totals.
         */
        utime = sig->utime;
        stime = sig->stime;
        sched_time = sig->sched_time;
        t = tsk;
        do {
                utime = cputime_add(utime, t->utime);
                stime = cputime_add(stime, t->stime);
                sched_time += t->sched_time;
                t = next_thread(t);
        } while (t != tsk);
        ptime = cputime_add(utime, stime);

        prof_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (cputime_lt(ptime, t->expires.cpu)) {
                        prof_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        virt_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (cputime_lt(utime, t->expires.cpu)) {
                        virt_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (sched_time < t->expires.sched) {
                        sched_expires = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        /*
         * Check for the special case process timers.
         */
        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
                if (cputime_ge(ptime, sig->it_prof_expires)) {
                        /* ITIMER_PROF fires and reloads. */
                        sig->it_prof_expires = sig->it_prof_incr;
                        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
                                sig->it_prof_expires = cputime_add(
                                        sig->it_prof_expires, ptime);
                        }
                        __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
                }
                if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
                    (cputime_eq(prof_expires, cputime_zero) ||
                     cputime_lt(sig->it_prof_expires, prof_expires))) {
                        prof_expires = sig->it_prof_expires;
                }
        }
        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
                if (cputime_ge(utime, sig->it_virt_expires)) {
                        /* ITIMER_VIRTUAL fires and reloads. */
                        sig->it_virt_expires = sig->it_virt_incr;
                        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
                                sig->it_virt_expires = cputime_add(
                                        sig->it_virt_expires, utime);
                        }
                        __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
                }
                if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
                    (cputime_eq(virt_expires, cputime_zero) ||
                     cputime_lt(sig->it_virt_expires, virt_expires))) {
                        virt_expires = sig->it_virt_expires;
                }
        }
        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                cputime_t x;
                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (sig->rlim[RLIMIT_CPU].rlim_cur
                            < sig->rlim[RLIMIT_CPU].rlim_max) {
                                sig->rlim[RLIMIT_CPU].rlim_cur++;
                        }
                }
                x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
                if (cputime_eq(prof_expires, cputime_zero) ||
                    cputime_lt(x, prof_expires)) {
                        prof_expires = x;
                }
        }

        if (!cputime_eq(prof_expires, cputime_zero) ||
            !cputime_eq(virt_expires, cputime_zero) ||
            sched_expires != 0) {
                /*
                 * Rebalance the threads' expiry times for the remaining
                 * process CPU timers.
                 */
                cputime_t prof_left, virt_left, ticks;
                unsigned long long sched_left, sched;
                const unsigned int nthreads = atomic_read(&sig->live);

                prof_left = cputime_sub(prof_expires, utime);
                prof_left = cputime_sub(prof_left, stime);
                prof_left = cputime_div(prof_left, nthreads);
                virt_left = cputime_sub(virt_expires, utime);
                virt_left = cputime_div(virt_left, nthreads);
                if (sched_expires) {
                        sched_left = sched_expires - sched_time;
                        do_div(sched_left, nthreads);
                } else {
                        sched_left = 0;
                }
                t = tsk;
                do {
                        ticks = cputime_add(cputime_add(t->utime, t->stime),
                                            prof_left);
                        if (!cputime_eq(prof_expires, cputime_zero) &&
                            (cputime_eq(t->it_prof_expires, cputime_zero) ||
                             cputime_gt(t->it_prof_expires, ticks))) {
                                t->it_prof_expires = ticks;
                        }

                        ticks = cputime_add(t->utime, virt_left);
                        if (!cputime_eq(virt_expires, cputime_zero) &&
                            (cputime_eq(t->it_virt_expires, cputime_zero) ||
                             cputime_gt(t->it_virt_expires, ticks))) {
                                t->it_virt_expires = ticks;
                        }

                        sched = t->sched_time + sched_left;
                        if (sched_expires && (t->it_sched_expires == 0 ||
                                              t->it_sched_expires > sched)) {
                                t->it_sched_expires = sched;
                        }

                        do {
                                t = next_thread(t);
                        } while (unlikely(t->exit_state));
                } while (t != tsk);
        }
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count now;

        if (unlikely(p == NULL))
                /*
                 * The task was cleaned up already, no future firings.
                 */
                return;

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state)) {
                        clear_dead_task(timer, now);
                        return;
                }
                read_lock(&tasklist_lock); /* arm_timer needs it. */
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = p = NULL;
                        timer->it.cpu.expires.sched = 0;
                        read_unlock(&tasklist_lock);
                        return;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        /*
                         * We've noticed that the thread is dead, but
                         * not yet reaped.  Take this opportunity to
                         * drop our task ref.
                         */
                        clear_dead_task(timer, now);
                        read_unlock(&tasklist_lock);
                        return;
                }
                cpu_clock_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the tasklist_lock locked for the call below. */
        }

        /*
         * Now re-arm for the new expiry time.
         */
        arm_timer(timer, now);

        read_unlock(&tasklist_lock);
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;

        BUG_ON(!irqs_disabled());

#define UNEXPIRED(clock) \
        (cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
         cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))

        if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
            (tsk->it_sched_expires == 0 ||
             tsk->sched_time < tsk->it_sched_expires))
                return;

#undef UNEXPIRED

        BUG_ON(tsk->exit_state);

        /*
         * Double-check with locks held.
         */
        read_lock(&tasklist_lock);
        spin_lock(&tsk->sighand->siglock);

        /*
         * Here we take all the firing timers off tsk->cpu_timers[N] and
         * tsk->signal->cpu_timers[N], and put them on the firing list.
         */
        check_thread_timers(tsk, &firing);
        check_process_timers(tsk, &firing);

        /*
         * We must release these locks before taking any timer's lock.
         * There is a potential race with timer deletion here, as the
         * siglock now protects our private firing list.  We have set
         * the firing flag in each timer, so that a deletion attempt
         * that gets the timer lock before we do will give it up and
         * spin until we've taken care of that timer below.
         */
        spin_unlock(&tsk->sighand->siglock);
        read_unlock(&tasklist_lock);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int firing;
                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(firing >= 0)) {
                        cpu_timer_fire(timer);
                }
                spin_unlock(&timer->it_lock);
        }
}

/*
 * Set one of the process-wide special case CPU timers.
 * The tasklist_lock and tsk->sighand->siglock must be held by the caller.
 * The oldval argument is null for the RLIMIT_CPU timer, where *newval is
 * absolute; non-null for ITIMER_*, where *newval is relative and we update
 * it to be absolute, *oldval is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
{
        union cpu_time_count now;
        struct list_head *head;

        BUG_ON(clock_idx == CPUCLOCK_SCHED);
        cpu_clock_sample_group_locked(clock_idx, tsk, &now);

        if (oldval) {
                if (!cputime_eq(*oldval, cputime_zero)) {
                        if (cputime_le(*oldval, now.cpu)) {
                                /* Just about to fire. */
                                *oldval = jiffies_to_cputime(1);
                        } else {
                                *oldval = cputime_sub(*oldval, now.cpu);
                        }
                }

                if (cputime_eq(*newval, cputime_zero))
                        return;
                *newval = cputime_add(*newval, now.cpu);

                /*
                 * If the RLIMIT_CPU timer will expire before the
                 * ITIMER_PROF timer, we have nothing else to do.
                 */
                if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
                    < cputime_to_secs(*newval))
                        return;
        }

        /*
         * Check whether there are any process timers already set to fire
         * before this one.  If so, we don't have anything more to do.
         */
        head = &tsk->signal->cpu_timers[clock_idx];
        if (list_empty(head) ||
            cputime_ge(list_entry(head->next,
                                  struct cpu_timer_list, entry)->expires.cpu,
                       *newval)) {
                /*
                 * Rejigger each thread's expiry time so that one will
                 * notice before we hit the process-cumulative expiry time.
                 */
                union cpu_time_count expires = { .sched = 0 };
                expires.cpu = *newval;
                process_timer_rebalance(tsk, clock_idx, expires, now);
        }
}

static long posix_cpu_clock_nanosleep_restart(struct restart_block *);

int posix_cpu_nsleep(clockid_t which_clock, int flags,
                     struct timespec *rqtp)
{
        struct restart_block *restart_block =
                &current_thread_info()->restart_block;
        struct k_itimer timer;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == current->pid))
                return -EINVAL;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                struct timespec __user *rmtp;
                static struct itimerspec zero_it;
                struct itimerspec it = { .it_value = *rqtp,
                                         .it_interval = {} };

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, &it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires.sched == 0) {
                                /*
                                 * Our timer fired and was reset.
                                 */
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
                posix_cpu_timer_set(&timer, 0, &zero_it, &it);
                spin_unlock_irq(&timer.it_lock);

                if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                /*
                 * Report back to the user the time still remaining.
                 */
                rmtp = (struct timespec __user *) restart_block->arg1;
                if (rmtp != NULL && !(flags & TIMER_ABSTIME) &&
                    copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_clock_nanosleep_restart;
                /* Caller already set restart_block->arg1 */
                restart_block->arg0 = which_clock;
                restart_block->arg2 = rqtp->tv_sec;
                restart_block->arg3 = rqtp->tv_nsec;

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}

static long
posix_cpu_clock_nanosleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->arg0;
        struct timespec t = { .tv_sec = restart_block->arg2,
                              .tv_nsec = restart_block->arg3 };
        restart_block->fn = do_no_restart_syscall;
        return posix_cpu_nsleep(which_clock, TIMER_ABSTIME, &t);
}

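/*
 * From user space this pair implements clock_nanosleep(2) on a CPU
 * clock: the caller blocks until the clock has accumulated the requested
 * amount of time.  A minimal sketch (error handling omitted) that sleeps
 * for one second of process CPU time:
 *
 *      #include <time.h>
 *
 *      struct timespec req = { .tv_sec = 1, .tv_nsec = 0 };
 *      clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, NULL);
 *
 * Note that sleeping on the caller's own thread clock is rejected with
 * EINVAL, since the clock cannot advance while the thread sleeps.
 */
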
#define PROCESS_CLOCK   MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK    MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(clockid_t which_clock, struct timespec *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(clockid_t which_clock, struct timespec *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(clockid_t which_clock, int flags,
                              struct timespec *rqtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(clockid_t which_clock, struct timespec *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(clockid_t which_clock, struct timespec *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(clockid_t which_clock, int flags,
                             struct timespec *rqtp)
{
        /* Sleeping on the caller's own thread clock can never succeed. */
        return -EINVAL;
}

static __init int init_posix_cpu_timers(void)
{
        struct k_clock process = {
                .clock_getres = process_cpu_clock_getres,
                .clock_get = process_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = process_cpu_timer_create,
                .nsleep = process_cpu_nsleep,
        };
        struct k_clock thread = {
                .clock_getres = thread_cpu_clock_getres,
                .clock_get = thread_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = thread_cpu_timer_create,
                .nsleep = thread_cpu_nsleep,
        };

        register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

        return 0;
}
__initcall(init_posix_cpu_timers);