/*
 * Implement CPU time clocks for the POSIX clock interface.
 */
#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <asm/uaccess.h>
#include <linux/errno.h>
static int check_clock(const clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
        if (!p || (CPUCLOCK_PERTHREAD(which_clock) ?
                   p->tgid != current->tgid : p->tgid != pid)) {
                error = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return error;
}
static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
        union cpu_time_count ret;
        ret.sched = 0;          /* high half always zero when .cpu used */
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC
                            + tp->tv_nsec;
        } else {
                ret.cpu = timespec_to_cputime(tp);
        }
        return ret;
}
static void sample_to_timespec(const clockid_t which_clock,
                               union cpu_time_count cpu,
                               struct timespec *tp)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                tp->tv_sec = div_long_long_rem(cpu.sched,
                                               NSEC_PER_SEC, &tp->tv_nsec);
        } else {
                cputime_to_timespec(cpu.cpu, tp);
        }
}
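
/*
 * Illustrative note (added commentary, not from the original source):
 * timespec_to_sample() and sample_to_timespec() are inverses up to the
 * clock's granularity.  For CPUCLOCK_SCHED a { .tv_sec = 2,
 * .tv_nsec = 500000000 } timespec becomes the 64-bit nanosecond count
 * 2500000000; for the tick-based clocks the value is rounded through
 * cputime_t, so with HZ=1000 (an assumption) anything finer than a
 * millisecond is lost in the round trip.
 */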
static inline int cpu_time_before(const clockid_t which_clock,
                                  union cpu_time_count now,
                                  union cpu_time_count then)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                return now.sched < then.sched;
        } else {
                return cputime_lt(now.cpu, then.cpu);
        }
}
static inline void cpu_time_add(const clockid_t which_clock,
                                union cpu_time_count *acc,
                                union cpu_time_count val)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                acc->sched += val.sched;
        } else {
                acc->cpu = cputime_add(acc->cpu, val.cpu);
        }
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
                                                union cpu_time_count a,
                                                union cpu_time_count b)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                a.sched -= b.sched;
        } else {
                a.cpu = cputime_sub(a.cpu, b.cpu);
        }
        return a;
}
/*
 * Divide and limit the result to res >= 1
 *
 * This is necessary to prevent signal delivery starvation, when the result of
 * the division would be rounded down to 0.
 */
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
        cputime_t res = cputime_div(time, div);

        return max_t(cputime_t, res, 1);
}
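
/*
 * Illustrative example (added commentary, assuming jiffies-granularity
 * cputime_t): with 5 ticks left before a group-wide expiry and 8 live
 * threads, cputime_div() alone would hand each thread a 0-tick slice, so
 * the per-thread expiries set in process_timer_rebalance() would never
 * move forward; clamping the slice to 1 tick guarantees progress.
 */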
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
                           union cpu_time_count now)
{
        int i;

        if (timer->it.cpu.incr.sched == 0)
                return;

        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                unsigned long long delta, incr;

                if (now.sched < timer->it.cpu.expires.sched)
                        return;
                incr = timer->it.cpu.incr.sched;
                delta = now.sched + incr - timer->it.cpu.expires.sched;
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; incr < delta - incr; i++)
                        incr = incr << 1;
                for (; i >= 0; incr >>= 1, i--) {
                        if (delta < incr)
                                continue;
                        timer->it.cpu.expires.sched += incr;
                        timer->it_overrun += 1 << i;
                        delta -= incr;
                }
        } else {
                cputime_t delta, incr;

                if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
                        return;
                incr = timer->it.cpu.incr.cpu;
                delta = cputime_sub(cputime_add(now.cpu, incr),
                                    timer->it.cpu.expires.cpu);
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
                        incr = cputime_add(incr, incr);
                for (; i >= 0; incr = cputime_halve(incr), i--) {
                        if (cputime_lt(delta, incr))
                                continue;
                        timer->it.cpu.expires.cpu =
                                cputime_add(timer->it.cpu.expires.cpu, incr);
                        timer->it_overrun += 1 << i;
                        delta = cputime_sub(delta, incr);
                }
        }
}
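
/*
 * Worked example (added commentary, not from the original source), for
 * the CPUCLOCK_SCHED branch with incr = 1 unit, expires = 10, now = 14:
 * delta = 14 + 1 - 10 = 5.  The first loop doubles incr up to 4 (i = 2);
 * the second loop then adds 4 (it_overrun += 1 << 2) and 1
 * (it_overrun += 1 << 0), leaving expires = 15 > now and the overrun
 * count raised by 5, one per missed interval, in O(log(delta/incr))
 * steps instead of delta/incr individual reloads.
 */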
static inline cputime_t prof_ticks(struct task_struct *p)
{
        return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
        return p->utime;
}
static inline unsigned long long sched_ns(struct task_struct *p)
{
        return (p == current) ? current_sched_time(p) : p->sched_time;
}
int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't have any idea of its true resolution
                         * exported, but it is much more than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}
int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0) {
                error = -EPERM;
        }
        return error;
}
/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                            union cpu_time_count *cpu)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = sched_ns(p);
                break;
        }
        return 0;
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading, and p->sighand->siglock.
 */
static int cpu_clock_sample_group_locked(unsigned int clock_idx,
                                         struct task_struct *p,
                                         union cpu_time_count *cpu)
{
        struct task_struct *t = p;
        switch (clock_idx) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
                do {
                        cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = p->signal->utime;
                do {
                        cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = p->signal->sched_time;
                /* Add in each other live thread. */
                while ((t = next_thread(t)) != p) {
                        cpu->sched += t->sched_time;
                }
                cpu->sched += sched_ns(p);
                break;
        }
        return 0;
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        int ret;
        unsigned long flags;
        spin_lock_irqsave(&p->sighand->siglock, flags);
        ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
                                            cpu);
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
        return ret;
}
int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int error = -EINVAL;
        union cpu_time_count rtn;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                if (CPUCLOCK_PERTHREAD(which_clock)) {
                        /*
                         * Sampling just ourselves we can do with no locking.
                         */
                        error = cpu_clock_sample(which_clock,
                                                 current, &rtn);
                } else {
                        read_lock(&tasklist_lock);
                        error = cpu_clock_sample_group(which_clock,
                                                       current, &rtn);
                        read_unlock(&tasklist_lock);
                }
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                read_lock(&tasklist_lock);
                p = find_task_by_pid(pid);
                if (p) {
                        if (CPUCLOCK_PERTHREAD(which_clock)) {
                                if (p->tgid == current->tgid) {
                                        error = cpu_clock_sample(which_clock,
                                                                 p, &rtn);
                                }
                        } else if (p->tgid == pid && p->signal) {
                                error = cpu_clock_sample_group(which_clock,
                                                               p, &rtn);
                        }
                }
                read_unlock(&tasklist_lock);
        }

        if (error)
                return error;
        sample_to_timespec(which_clock, rtn, tp);
        return 0;
}
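
/*
 * Illustrative userspace view (an assumption about the callers, not part
 * of this file): this handler backs clock_gettime(2) for the CPU-time
 * clock IDs, e.g.
 *
 *      struct timespec ts;
 *      clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);  // pid == 0 group path
 *      clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);   // per-thread, lockless
 */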
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);
        new_timer->it.cpu.incr.sched = 0;
        new_timer->it.cpu.expires.sched = 0;

        read_lock(&tasklist_lock);
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_pid(pid);
                        if (p && p->tgid != current->tgid)
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_pid(pid);
                        if (p && p->tgid != pid)
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return ret;
}
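
/*
 * Illustrative userspace view (an assumption, not part of this file):
 * timer_create(2) reaches this function for CPU clocks, e.g.
 *
 *      timer_t tid;
 *      struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *                              .sigev_signo  = SIGPROF };
 *      timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
 */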
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        int ret = 0;

        if (likely(p != NULL)) {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * We raced with the reaping of the task.
                         * The deletion should have cleared us off the list.
                         */
                        BUG_ON(!list_empty(&timer->it.cpu.entry));
                } else {
                        spin_lock(&p->sighand->siglock);
                        if (timer->it.cpu.firing)
                                ret = TIMER_RETRY;
                        else
                                list_del(&timer->it.cpu.entry);
                        spin_unlock(&p->sighand->siglock);
                }
                read_unlock(&tasklist_lock);

                if (!ret)
                        put_task_struct(p);
        }

        return ret;
}
/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
                           cputime_t utime, cputime_t stime,
                           unsigned long long sched_time)
{
        struct cpu_timer_list *timer, *next;
        cputime_t ptime = cputime_add(utime, stime);

        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, ptime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         ptime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, utime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         utime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (timer->expires.sched < sched_time) {
                        timer->expires.sched = 0;
                } else {
                        timer->expires.sched -= sched_time;
                }
        }
}
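
/*
 * Note (added commentary): the three loops above walk head[CPUCLOCK_PROF],
 * head[CPUCLOCK_VIRT] and head[CPUCLOCK_SCHED] in index order via ++head,
 * charging each list against the matching accumulated total (utime+stime,
 * utime, and sched_time respectively).
 */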
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cleanup_timers(tsk->cpu_timers,
                       tsk->utime, tsk->stime, tsk->sched_time);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        cleanup_timers(tsk->signal->cpu_timers,
                       cputime_add(tsk->utime, tsk->signal->utime),
                       cputime_add(tsk->stime, tsk->signal->stime),
                       tsk->sched_time + tsk->signal->sched_time);
}
/*
 * Set the expiry times of all the threads in the process so one of them
 * will go off before the process cumulative expiry total is reached.
 */
static void process_timer_rebalance(struct task_struct *p,
                                    unsigned int clock_idx,
                                    union cpu_time_count expires,
                                    union cpu_time_count val)
{
        cputime_t ticks, left;
        unsigned long long ns, nsleft;
        struct task_struct *t = p;
        unsigned int nthreads = atomic_read(&p->signal->live);

        if (!nthreads)
                return;

        switch (clock_idx) {
        default:
                BUG();
                break;
        case CPUCLOCK_PROF:
                left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
                                            nthreads);
                do {
                        if (likely(!(t->flags & PF_EXITING))) {
                                ticks = cputime_add(prof_ticks(t), left);
                                if (cputime_eq(t->it_prof_expires,
                                               cputime_zero) ||
                                    cputime_gt(t->it_prof_expires, ticks)) {
                                        t->it_prof_expires = ticks;
                                }
                        }
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_VIRT:
                left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
                                            nthreads);
                do {
                        if (likely(!(t->flags & PF_EXITING))) {
                                ticks = cputime_add(virt_ticks(t), left);
                                if (cputime_eq(t->it_virt_expires,
                                               cputime_zero) ||
                                    cputime_gt(t->it_virt_expires, ticks)) {
                                        t->it_virt_expires = ticks;
                                }
                        }
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_SCHED:
                nsleft = expires.sched - val.sched;
                do_div(nsleft, nthreads);
                nsleft = max_t(unsigned long long, nsleft, 1);
                do {
                        if (likely(!(t->flags & PF_EXITING))) {
                                ns = t->sched_time + nsleft;
                                if (t->it_sched_expires == 0 ||
                                    t->it_sched_expires > ns) {
                                        t->it_sched_expires = ns;
                                }
                        }
                        t = next_thread(t);
                } while (t != p);
                break;
        }
}
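
/*
 * Illustrative example (added commentary, assuming CPUCLOCK_PROF): if the
 * group timer is 100 ticks away (expires.cpu - val.cpu == 100) and 4
 * threads are live, each thread gets it_prof_expires set to its own
 * prof_ticks() + 25.  Whichever thread accumulates those 25 ticks first
 * trips the per-thread check in run_posix_cpu_timers(), which re-samples
 * the group total; cputime_div_non_zero() guarantees the per-thread slice
 * is never 0.
 */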
static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
        /*
         * That's all for this thread or process.
         * We leave our residual in expires to be reported.
         */
        put_task_struct(timer->it.cpu.task);
        timer->it.cpu.task = NULL;
        timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
                                             timer->it.cpu.expires,
                                             now);
}
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;
        unsigned long i;

        if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
                return;

        head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
                p->cpu_timers : p->signal->cpu_timers);
        head += CPUCLOCK_WHICH(timer->it_clock);

        BUG_ON(!irqs_disabled());
        spin_lock(&p->sighand->siglock);

        listpos = head;
        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                list_for_each_entry(next, head, entry) {
                        if (next->expires.sched > nt->expires.sched)
                                break;
                        listpos = &next->entry;
                }
        } else {
                list_for_each_entry(next, head, entry) {
                        if (cputime_gt(next->expires.cpu, nt->expires.cpu))
                                break;
                        listpos = &next->entry;
                }
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                /*
                 * We are the new earliest-expiring timer.
                 * If we are a thread timer, there can always
                 * be a process timer telling us to stop earlier.
                 */
                if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_PROF:
                                if (cputime_eq(p->it_prof_expires,
                                               cputime_zero) ||
                                    cputime_gt(p->it_prof_expires,
                                               nt->expires.cpu))
                                        p->it_prof_expires = nt->expires.cpu;
                                break;
                        case CPUCLOCK_VIRT:
                                if (cputime_eq(p->it_virt_expires,
                                               cputime_zero) ||
                                    cputime_gt(p->it_virt_expires,
                                               nt->expires.cpu))
                                        p->it_virt_expires = nt->expires.cpu;
                                break;
                        case CPUCLOCK_SCHED:
                                if (p->it_sched_expires == 0 ||
                                    p->it_sched_expires > nt->expires.sched)
                                        p->it_sched_expires = nt->expires.sched;
                                break;
                        }
                } else {
                        /*
                         * For a process timer, we must balance
                         * all the live threads' expirations.
                         */
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_VIRT:
                                if (!cputime_eq(p->signal->it_virt_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_virt_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                goto rebalance;
                        case CPUCLOCK_PROF:
                                if (!cputime_eq(p->signal->it_prof_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_prof_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
                                if (i != RLIM_INFINITY &&
                                    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
                                        break;
                                goto rebalance;
                        case CPUCLOCK_SCHED:
                        rebalance:
                                process_timer_rebalance(
                                        timer->it.cpu.task,
                                        CPUCLOCK_WHICH(timer->it_clock),
                                        timer->it.cpu.expires, now);
                                break;
                        }
                }
        }

        spin_unlock(&p->sighand->siglock);
}
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires.sched = 0;
        } else if (timer->it.cpu.incr.sched == 0) {
                /*
                 * One-shot timer.  Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires.sched = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer.  But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
        }
}
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                        struct itimerspec *new, struct itimerspec *old)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count old_expires, new_expires, val;
        int ret;

        if (unlikely(p == NULL)) {
                /*
                 * Timer refers to a dead task's clock.
                 */
                return -ESRCH;
        }

        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

        read_lock(&tasklist_lock);
        /*
         * We need the tasklist_lock to protect against reaping that
         * clears p->signal.  If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(p->signal == NULL)) {
                read_unlock(&tasklist_lock);
                put_task_struct(p);
                timer->it.cpu.task = NULL;
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        BUG_ON(!irqs_disabled());

        ret = 0;
        spin_lock(&p->sighand->siglock);
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else
                list_del_init(&timer->it.cpu.entry);
        spin_unlock(&p->sighand->siglock);

        /*
         * We need to sample the current value to convert the new
         * value from relative to absolute, and to convert the
         * old value from absolute to relative.  To set a process
         * timer, we need a sample to balance the thread expiry
         * times (in arm_timer).  With an absolute time, we must
         * check if it's already passed.  In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_clock_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires.sched == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has
                         * overrun already.  If it has,
                         * we'll report it as having overrun
                         * and with the next reloaded timer
                         * already ticking, though we are
                         * swallowing that pending
                         * notification here to install the
                         * new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (cpu_time_before(timer->it_clock, val,
                                            timer->it.cpu.expires)) {
                                old_expires = cpu_time_sub(
                                        timer->it_clock,
                                        timer->it.cpu.expires, val);
                                sample_to_timespec(timer->it_clock,
                                                   old_expires,
                                                   &old->it_value);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                read_unlock(&tasklist_lock);
                goto out;
        }

        if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
                cpu_time_add(timer->it_clock, &new_expires, val);
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            cpu_time_before(timer->it_clock, val, new_expires)) {
                arm_timer(timer, val);
        }

        read_unlock(&tasklist_lock);

        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
                                                &new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            !cpu_time_before(timer->it_clock, val, new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
 out:
        if (old) {
                sample_to_timespec(timer->it_clock,
                                   timer->it.cpu.incr, &old->it_interval);
        }
        return ret;
}
void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
        union cpu_time_count now;
        struct task_struct *p = timer->it.cpu.task;
        int clear_dead;

        /*
         * Easy part: convert the reload time.
         */
        sample_to_timespec(timer->it_clock,
                           timer->it.cpu.incr, &itp->it_interval);

        if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all. */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                return;
        }

        if (unlikely(p == NULL)) {
                /*
                 * This task already died and the timer will never fire.
                 * In this case, expires is actually the dead value.
                 */
        dead:
                sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                   &itp->it_value);
                return;
        }

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                clear_dead = p->exit_state;
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Call the timer disarmed, nothing else to do.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = NULL;
                        timer->it.cpu.expires.sched = 0;
                        read_unlock(&tasklist_lock);
                        goto dead;
                } else {
                        cpu_clock_sample_group(timer->it_clock, p, &now);
                        clear_dead = (unlikely(p->exit_state) &&
                                      thread_group_empty(p));
                }
                read_unlock(&tasklist_lock);
        }

        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                if (timer->it.cpu.incr.sched == 0 &&
                    cpu_time_before(timer->it_clock,
                                    timer->it.cpu.expires, now)) {
                        /*
                         * Do-nothing timer expired and has no reload,
                         * so it's as if it was never set.
                         */
                        timer->it.cpu.expires.sched = 0;
                        itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                        return;
                }
                /*
                 * Account for any expirations and reloads that should
                 * have happened.
                 */
                bump_cpu_timer(timer, now);
        }

        if (unlikely(clear_dead)) {
                /*
                 * We've noticed that the thread is dead, but
                 * not yet reaped.  Take this opportunity to
                 * drop our task ref.
                 */
                clear_dead_task(timer, now);
                goto dead;
        }

        if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
                sample_to_timespec(timer->it_clock,
                                   cpu_time_sub(timer->it_clock,
                                                timer->it.cpu.expires, now),
                                   &itp->it_value);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet.  Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;

        maxfire = 20;
        tsk->it_prof_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
                        tsk->it_prof_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->it_virt_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
                        tsk->it_virt_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->it_sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || tsk->sched_time < t->expires.sched) {
                        tsk->it_sched_expires = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }
}
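
/*
 * Note (added commentary): maxfire bounds the work done in one interrupt
 * tick.  Each list is scanned for at most 20 iterations' worth of firings;
 * when the budget runs out, the next pending timer's expiry is recorded in
 * tsk->it_*_expires so a later tick resumes where this one stopped.
 */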
/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers[N] lists onto the firing list.
 * Per-thread timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        int maxfire;
        struct signal_struct *const sig = tsk->signal;
        cputime_t utime, stime, ptime, virt_expires, prof_expires;
        unsigned long long sched_time, sched_expires;
        struct task_struct *t;
        struct list_head *timers = sig->cpu_timers;

        /*
         * Don't sample the current process CPU clocks if there are no timers.
         */
        if (list_empty(&timers[CPUCLOCK_PROF]) &&
            cputime_eq(sig->it_prof_expires, cputime_zero) &&
            sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
            list_empty(&timers[CPUCLOCK_VIRT]) &&
            cputime_eq(sig->it_virt_expires, cputime_zero) &&
            list_empty(&timers[CPUCLOCK_SCHED]))
                return;

        /*
         * Collect the current process totals.
         */
        utime = sig->utime;
        stime = sig->stime;
        sched_time = sig->sched_time;
        t = tsk;
        do {
                utime = cputime_add(utime, t->utime);
                stime = cputime_add(stime, t->stime);
                sched_time += t->sched_time;
                t = next_thread(t);
        } while (t != tsk);
        ptime = cputime_add(utime, stime);

        maxfire = 20;
        prof_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
                        prof_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        virt_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
                        virt_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || sched_time < t->expires.sched) {
                        sched_expires = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        /*
         * Check for the special case process timers.
         */
        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
                if (cputime_ge(ptime, sig->it_prof_expires)) {
                        /* ITIMER_PROF fires and reloads. */
                        sig->it_prof_expires = sig->it_prof_incr;
                        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
                                sig->it_prof_expires = cputime_add(
                                        sig->it_prof_expires, ptime);
                        }
                        __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
                }
                if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
                    (cputime_eq(prof_expires, cputime_zero) ||
                     cputime_lt(sig->it_prof_expires, prof_expires))) {
                        prof_expires = sig->it_prof_expires;
                }
        }
        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
                if (cputime_ge(utime, sig->it_virt_expires)) {
                        /* ITIMER_VIRTUAL fires and reloads. */
                        sig->it_virt_expires = sig->it_virt_incr;
                        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
                                sig->it_virt_expires = cputime_add(
                                        sig->it_virt_expires, utime);
                        }
                        __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
                }
                if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
                    (cputime_eq(virt_expires, cputime_zero) ||
                     cputime_lt(sig->it_virt_expires, virt_expires))) {
                        virt_expires = sig->it_virt_expires;
                }
        }
        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                cputime_t x;
                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (sig->rlim[RLIMIT_CPU].rlim_cur
                            < sig->rlim[RLIMIT_CPU].rlim_max) {
                                sig->rlim[RLIMIT_CPU].rlim_cur++;
                        }
                }
                x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
                if (cputime_eq(prof_expires, cputime_zero) ||
                    cputime_lt(x, prof_expires)) {
                        prof_expires = x;
                }
        }

        if (!cputime_eq(prof_expires, cputime_zero) ||
            !cputime_eq(virt_expires, cputime_zero) ||
            sched_expires != 0) {
                /*
                 * Rebalance the threads' expiry times for the remaining
                 * process CPU timers.
                 */
                cputime_t prof_left, virt_left, ticks;
                unsigned long long sched_left, sched;
                const unsigned int nthreads = atomic_read(&sig->live);

                if (!nthreads)
                        return;

                prof_left = cputime_sub(prof_expires, utime);
                prof_left = cputime_sub(prof_left, stime);
                prof_left = cputime_div_non_zero(prof_left, nthreads);
                virt_left = cputime_sub(virt_expires, utime);
                virt_left = cputime_div_non_zero(virt_left, nthreads);
                if (sched_expires) {
                        sched_left = sched_expires - sched_time;
                        do_div(sched_left, nthreads);
                        sched_left = max_t(unsigned long long, sched_left, 1);
                } else {
                        sched_left = 0;
                }
                t = tsk;
                do {
                        if (unlikely(t->flags & PF_EXITING))
                                continue;

                        ticks = cputime_add(cputime_add(t->utime, t->stime),
                                            prof_left);
                        if (!cputime_eq(prof_expires, cputime_zero) &&
                            (cputime_eq(t->it_prof_expires, cputime_zero) ||
                             cputime_gt(t->it_prof_expires, ticks))) {
                                t->it_prof_expires = ticks;
                        }

                        ticks = cputime_add(t->utime, virt_left);
                        if (!cputime_eq(virt_expires, cputime_zero) &&
                            (cputime_eq(t->it_virt_expires, cputime_zero) ||
                             cputime_gt(t->it_virt_expires, ticks))) {
                                t->it_virt_expires = ticks;
                        }

                        sched = t->sched_time + sched_left;
                        if (sched_expires && (t->it_sched_expires == 0 ||
                                              t->it_sched_expires > sched)) {
                                t->it_sched_expires = sched;
                        }
                } while ((t = next_thread(t)) != tsk);
        }
}
/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count now;

        if (unlikely(p == NULL))
                /*
                 * The task was cleaned up already, no future firings.
                 */
                goto out;

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state)) {
                        clear_dead_task(timer, now);
                        goto out;
                }
                read_lock(&tasklist_lock); /* arm_timer needs it. */
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = p = NULL;
                        timer->it.cpu.expires.sched = 0;
                        goto out_unlock;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        /*
                         * We've noticed that the thread is dead, but
                         * not yet reaped.  Take this opportunity to
                         * drop our task ref.
                         */
                        clear_dead_task(timer, now);
                        goto out_unlock;
                }
                cpu_clock_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the tasklist_lock locked for the call below. */
        }

        /*
         * Now re-arm for the new expiry time.
         */
        arm_timer(timer, now);

out_unlock:
        read_unlock(&tasklist_lock);

out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
        ++timer->it_requeue_pending;
}
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;

        BUG_ON(!irqs_disabled());

#define UNEXPIRED(clock) \
                (cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
                 cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))

        if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
            (tsk->it_sched_expires == 0 ||
             tsk->sched_time < tsk->it_sched_expires))
                return;

#undef  UNEXPIRED

        /*
         * Double-check with locks held.
         */
        read_lock(&tasklist_lock);
        if (likely(tsk->signal != NULL)) {
                spin_lock(&tsk->sighand->siglock);

                /*
                 * Here we take off tsk->cpu_timers[N] and
                 * tsk->signal->cpu_timers[N] all the timers that are
                 * firing, and put them on the firing list.
                 */
                check_thread_timers(tsk, &firing);
                check_process_timers(tsk, &firing);

                /*
                 * We must release these locks before taking any timer's lock.
                 * There is a potential race with timer deletion here, as the
                 * siglock now protects our private firing list.  We have set
                 * the firing flag in each timer, so that a deletion attempt
                 * that gets the timer lock before we do will give it up and
                 * spin until we've taken care of that timer below.
                 */
                spin_unlock(&tsk->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int firing;
                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(firing >= 0)) {
                        cpu_timer_fire(timer);
                }
                spin_unlock(&timer->it_lock);
        }
}
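
/*
 * Note (added commentary): the UNEXPIRED() test above is the common-case
 * fast path.  It runs on every tick, so it reads only the cached
 * tsk->it_*_expires fields with no locks taken; the tasklist_lock and
 * siglock are acquired only once some cached expiry has actually passed.
 */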
/*
 * Set one of the process-wide special case CPU timers.
 * The tasklist_lock and tsk->sighand->siglock must be held by the caller.
 * The oldval argument is null for the RLIMIT_CPU timer, where *newval is
 * absolute; non-null for ITIMER_*, where *newval is relative and we update
 * it to be absolute, *oldval is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
{
        union cpu_time_count now;
        struct list_head *head;

        BUG_ON(clock_idx == CPUCLOCK_SCHED);
        cpu_clock_sample_group_locked(clock_idx, tsk, &now);

        if (oldval) {
                if (!cputime_eq(*oldval, cputime_zero)) {
                        if (cputime_le(*oldval, now.cpu)) {
                                /* Just about to fire. */
                                *oldval = jiffies_to_cputime(1);
                        } else {
                                *oldval = cputime_sub(*oldval, now.cpu);
                        }
                }

                if (cputime_eq(*newval, cputime_zero))
                        return;
                *newval = cputime_add(*newval, now.cpu);

                /*
                 * If the RLIMIT_CPU timer will expire before the
                 * ITIMER_PROF timer, we have nothing else to do.
                 */
                if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
                    < cputime_to_secs(*newval))
                        return;
        }

        /*
         * Check whether there are any process timers already set to fire
         * before this one.  If so, we don't have anything more to do.
         */
        head = &tsk->signal->cpu_timers[clock_idx];
        if (list_empty(head) ||
            cputime_ge(list_entry(head->next,
                                  struct cpu_timer_list, entry)->expires.cpu,
                       *newval)) {
                /*
                 * Rejigger each thread's expiry time so that one will
                 * notice before we hit the process-cumulative expiry time.
                 */
                union cpu_time_count expires = { .sched = 0 };
                expires.cpu = *newval;
                process_timer_rebalance(tsk, clock_idx, expires, now);
        }
}
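
/*
 * Caller sketch (an assumption about this era of the tree, not part of
 * this file): do_setitimer() uses this for ITIMER_PROF/ITIMER_VIRTUAL
 * with a non-null oldval, and the setrlimit() path uses it with a null
 * oldval and CPUCLOCK_PROF when RLIMIT_CPU is lowered, matching the
 * comment above.
 */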
static long posix_cpu_clock_nanosleep_restart(struct restart_block *);
int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                     struct timespec *rqtp, struct timespec __user *rmtp)
{
        struct restart_block *restart_block =
            &current_thread_info()->restart_block;
        struct k_itimer timer;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == current->pid))
                return -EINVAL;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec zero_it;
                struct itimerspec it = { .it_value = *rqtp,
                                         .it_interval = {} };

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, &it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires.sched == 0) {
                                /*
                                 * Our timer fired and was reset.
                                 */
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
                posix_cpu_timer_set(&timer, 0, &zero_it, &it);
                spin_unlock_irq(&timer.it_lock);

                if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL && !(flags & TIMER_ABSTIME) &&
                    copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_clock_nanosleep_restart;
                /* Caller already set restart_block->arg1 */
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = rqtp->tv_sec;
                restart_block->arg3 = rqtp->tv_nsec;

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}
static long
posix_cpu_clock_nanosleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->arg0;
        struct timespec __user *rmtp;
        struct timespec t;

        rmtp = (struct timespec __user *) restart_block->arg1;
        t.tv_sec = restart_block->arg2;
        t.tv_nsec = restart_block->arg3;

        restart_block->fn = do_no_restart_syscall;
        return posix_cpu_nsleep(which_clock, TIMER_ABSTIME, &t, rmtp);
}
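
/*
 * Illustrative userspace view (an assumption, not part of this file):
 * the pair above implements clock_nanosleep(2) on CPU clocks, e.g.
 * sleeping until this process has consumed 10s of CPU time:
 *
 *      struct timespec ts = { .tv_sec = 10, .tv_nsec = 0 };
 *      clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, TIMER_ABSTIME, &ts, NULL);
 *
 * Note that an interrupted relative sleep restarts as an absolute one
 * (rqtp is rewritten to the remaining expiry before returning), so
 * repeated signals do not stretch the total wait.
 */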
#define PROCESS_CLOCK   MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK    MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              struct timespec *rqtp,
                              struct timespec __user *rmtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
                             struct timespec *rqtp,
                             struct timespec __user *rmtp)
{
        return -EINVAL;
}
static __init int init_posix_cpu_timers(void)
{
        struct k_clock process = {
                .clock_getres = process_cpu_clock_getres,
                .clock_get = process_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = process_cpu_timer_create,
                .nsleep = process_cpu_nsleep,
        };
        struct k_clock thread = {
                .clock_getres = thread_cpu_clock_getres,
                .clock_get = thread_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = thread_cpu_timer_create,
                .nsleep = thread_cpu_nsleep,
        };

        register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

        return 0;
}
__initcall(init_posix_cpu_timers);