// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
{
	posix_cputimers_init(pct);
	if (cpu_limit != RLIM_INFINITY) {
		pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
		pct->timers_active = true;
	}
}

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
 * necessary. Needs siglock protection since other code may update the
 * expiration cache as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}
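
/*
 * Illustrative only (not part of this file): the usual way userspace
 * reaches update_rlimit_cpu() is setrlimit(2)/prlimit(2) on RLIMIT_CPU.
 * A minimal sketch:
 *
 *	struct rlimit rl = { .rlim_cur = 10, .rlim_max = 20 };
 *	setrlimit(RLIMIT_CPU, &rl);
 *
 * The soft limit (10 seconds of CPU time here) is converted to
 * nanoseconds above and lands in the CPUCLOCK_PROF expiry cache.
 */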

/*
 * Functions for validating access to tasks.
 */
static struct task_struct *lookup_task(const pid_t pid, bool thread,
				       bool gettime)
{
	struct task_struct *p;

	/*
	 * If the encoded PID is 0, then the timer is targeted at current
	 * or the process to which current belongs.
	 */
	if (!pid)
		return thread ? current : current->group_leader;

	p = find_task_by_vpid(pid);
	if (!p)
		return p;

	if (thread)
		return same_thread_group(p, current) ? p : NULL;

	if (gettime) {
		/*
		 * For clock_gettime(PROCESS) the task does not need to be
		 * the actual group leader. tsk->sighand gives
		 * access to the group's clock.
		 *
		 * Timers need the group leader because they take a
		 * reference on it and store the task pointer until the
		 * timer is destroyed.
		 */
		return (p == current || thread_group_leader(p)) ? p : NULL;
	}

	/*
	 * For processes, require that p is the group leader.
	 */
	return has_group_leader_pid(p) ? p : NULL;
}

static struct task_struct *__get_task_for_clock(const clockid_t clock,
						bool getref, bool gettime)
{
	const bool thread = !!CPUCLOCK_PERTHREAD(clock);
	const pid_t pid = CPUCLOCK_PID(clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
		return NULL;

	rcu_read_lock();
	p = lookup_task(pid, thread, gettime);
	if (p && getref)
		get_task_struct(p);
	rcu_read_unlock();
	return p;
}

static inline struct task_struct *get_task_for_clock(const clockid_t clock)
{
	return __get_task_for_clock(clock, true, false);
}

static inline struct task_struct *get_task_for_clock_get(const clockid_t clock)
{
	return __get_task_for_clock(clock, true, true);
}

static inline int validate_clock_permissions(const clockid_t clock)
{
	return __get_task_for_clock(clock, false, false) ? 0 : -EINVAL;
}
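
/*
 * For reference (encoding defined by the CPUCLOCK_* and make_*_cpuclock
 * helpers in posix-timers.h, not in this file): these clock IDs pack the
 * target PID and clock type into one clockid_t. The PID is stored
 * one's-complemented above the low three bits, bit 2 flags a per-thread
 * clock, and bits 0-1 select CPUCLOCK_PROF/VIRT/SCHED, e.g.:
 *
 *	clockid_t clk = make_process_cpuclock(pid, CPUCLOCK_VIRT);
 *
 * gives CPUCLOCK_PID(clk) == pid, CPUCLOCK_PERTHREAD(clk) == false and
 * CPUCLOCK_WHICH(clk) == CPUCLOCK_VIRT.
 */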

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	u64 delta, incr, expires = timer->it.cpu.node.expires;
	int i;

	if (!timer->it_interval)
		return expires;

	if (now < expires)
		return expires;

	incr = timer->it_interval;
	delta = now + incr - expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.node.expires += incr;
		timer->it_overrun += 1LL << i;
		delta -= incr;
	}
	return timer->it.cpu.node.expires;
}
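
/*
 * Worked example of the decomposition above (illustrative numbers only):
 * with incr == 10 and delta == 75, the first loop doubles incr to 40 and
 * stops with i == 2. The second loop then peels off 40 + 20 + 10,
 * advancing expires by 70 and adding 4 + 2 + 1 == 7 to it_overrun, i.e.
 * the largest whole number of periods <= delta, in O(log) steps rather
 * than one subtraction per missed period.
 */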

/* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
{
	return !(~pct->bases[CPUCLOCK_PROF].nextevt |
		 ~pct->bases[CPUCLOCK_VIRT].nextevt |
		 ~pct->bases[CPUCLOCK_SCHED].nextevt);
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	int error = validate_clock_permissions(which_clock);

	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
{
	int error = validate_clock_permissions(clock);

	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	return error ? : -EPERM;
}

/*
 * Sample a per-thread clock for the given task. clkid is validated.
 */
static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
{
	u64 utime, stime;

	if (clkid == CPUCLOCK_SCHED)
		return task_sched_runtime(p);

	task_cputime(p, &utime, &stime);

	switch (clkid) {
	case CPUCLOCK_PROF:
		return utime + stime;
	case CPUCLOCK_VIRT:
		return utime;
	default:
		WARN_ON_ONCE(1);
	}
	return 0;
}

static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
{
	samples[CPUCLOCK_PROF] = stime + utime;
	samples[CPUCLOCK_VIRT] = utime;
	samples[CPUCLOCK_SCHED] = rtime;
}

static void task_sample_cputime(struct task_struct *p, u64 *samples)
{
	u64 stime, utime;

	task_cputime(p, &utime, &stime);
	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
}

static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
				       u64 *samples)
{
	u64 stime, utime, rtime;

	utime = atomic64_read(&at->utime);
	stime = atomic64_read(&at->stime);
	rtime = atomic64_read(&at->sum_exec_runtime);
	store_samples(samples, stime, utime, rtime);
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
			      struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/**
 * thread_group_sample_cputime - Sample cputime for a given task
 * @tsk:	Task for which cputime needs to be sampled
 * @samples:	Storage for time samples
 *
 * Called from sys_getitimer() to calculate the expiry time of an active
 * timer. That means group cputime accounting is already active. Called
 * with task sighand lock held.
 *
 * Updates @samples with an uptodate sample of the thread group cputimes.
 */
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;

	WARN_ON_ONCE(!pct->timers_active);

	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

/**
 * thread_group_start_cputime - Start cputime and return a sample
 * @tsk:	Task for which cputime needs to be started
 * @samples:	Storage for time samples
 *
 * The thread group cputime accounting is avoided when there are no posix
 * CPU timers armed. Before starting a timer it's required to check whether
 * the time accounting is active. If not, a full update of the atomic
 * accounting store needs to be done and the accounting enabled.
 *
 * Updates @samples with an uptodate sample of the thread group cputimes.
 */
static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(pct->timers_active)) {
		struct task_cputime sum;

		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting timers_active without a lock. Ensure this
		 * only gets written to in one operation. We set it after
		 * update_gt_cputime() as a small optimization, but
		 * barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(pct->timers_active, true);
	}
	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
{
	struct task_cputime ct;

	thread_group_cputime(tsk, &ct);
	store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
}

/*
 * Sample a process (thread group) clock for the given task clkid. If the
 * group's cputime accounting is already enabled, read the atomic
 * store. Otherwise a full update is required. Task's sighand lock must be
 * held to protect the task traversal on a full update. clkid is already
 * validated.
 */
static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
				  bool start)
{
	struct thread_group_cputimer *cputimer = &p->signal->cputimer;
	struct posix_cputimers *pct = &p->signal->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];

	if (!READ_ONCE(pct->timers_active)) {
		if (start)
			thread_group_start_cputime(p, samples);
		else
			__thread_group_cputime(p, samples);
	} else {
		proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
	}

	return samples[clkid];
}

static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
{
	const clockid_t clkid = CPUCLOCK_WHICH(clock);
	struct task_struct *tsk;
	u64 t;

	tsk = get_task_for_clock_get(clock);
	if (!tsk)
		return -EINVAL;

	if (CPUCLOCK_PERTHREAD(clock))
		t = cpu_clock_sample(clkid, tsk);
	else
		t = cpu_clock_sample_group(clkid, tsk, false);
	put_task_struct(tsk);

	*tp = ns_to_timespec64(t);
	return 0;
}
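
/*
 * Illustrative only: this function is the kernel side of a userspace
 * call such as
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
 *
 * which is routed through the clock_process k_clock at the bottom of
 * this file and samples CPUCLOCK_SCHED for the whole thread group.
 */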

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	struct task_struct *p = get_task_for_clock(new_timer->it_clock);

	if (!p)
		return -EINVAL;

	new_timer->kclock = &clock_posix_cpu;
	timerqueue_init(&new_timer->it.cpu.node);
	new_timer->it.cpu.task = p;
	return 0;
}
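
/*
 * Illustrative only: a minimal userspace sketch that ends up in
 * posix_cpu_timer_create() and later posix_cpu_timer_set():
 *
 *	timer_t tid;
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGRTMIN,
 *	};
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 1 },	(one second of CPU time)
 *		.it_interval = { .tv_sec = 1 },	(then every second after)
 *	};
 *	timer_create(CLOCK_THREAD_CPUTIME_ID, &sev, &tid);
 *	timer_settime(tid, 0, &its, NULL);
 */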

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct cpu_timer *ctmr = &timer->it.cpu;
	struct task_struct *p = ctmr->task;
	struct sighand_struct *sighand;
	unsigned long flags;
	int ret = 0;

	if (WARN_ON_ONCE(!p))
		return -EINVAL;

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * This raced with the reaping of the task. The exit cleanup
		 * should have removed this timer from the timer queue.
		 */
		WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			cpu_timer_dequeue(ctmr);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}

static void cleanup_timerqueue(struct timerqueue_head *head)
{
	struct timerqueue_node *node;
	struct cpu_timer *ctmr;

	while ((node = timerqueue_getnext(head))) {
		timerqueue_del(head, node);
		ctmr = container_of(node, struct cpu_timer, node);
		ctmr->head = NULL;
	}
}

/*
 * Clean out CPU timers which are still armed when a thread exits. The
 * timers are only removed from the list. No other updates are done. The
 * corresponding posix timers are still accessible, but cannot be rearmed.
 *
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct posix_cputimers *pct)
{
	cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
	cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
	cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped. When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(&tsk->posix_cputimers);
}

void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(&tsk->signal->posix_cputimers);
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later. This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
	int clkidx = CPUCLOCK_WHICH(timer->it_clock);
	struct cpu_timer *ctmr = &timer->it.cpu;
	u64 newexp = cpu_timer_getexpires(ctmr);
	struct task_struct *p = ctmr->task;
	struct posix_cputimer_base *base;

	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		base = p->posix_cputimers.bases + clkidx;
	else
		base = p->signal->posix_cputimers.bases + clkidx;

	if (!cpu_timer_enqueue(&base->tqhead, ctmr))
		return;

	/*
	 * We are the new earliest-expiring POSIX 1.b timer, hence
	 * need to update expiration cache. Take into account that
	 * for process timers we share expiration cache with itimers
	 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
	 */
	if (newexp < base->nextevt)
		base->nextevt = newexp;

	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
	else
		tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	struct cpu_timer *ctmr = &timer->it.cpu;

	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user does not want any signal.
		 */
		cpu_timer_setexpires(ctmr, 0);
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		cpu_timer_setexpires(ctmr, 0);
	} else if (!timer->it_interval) {
		/*
		 * One-shot timer. Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		cpu_timer_setexpires(ctmr, 0);
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer. But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_rearm(timer);
		++timer->it_requeue_pending;
	}
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec64 *new, struct itimerspec64 *old)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	u64 old_expires, new_expires, old_incr, val;
	struct cpu_timer *ctmr = &timer->it.cpu;
	struct task_struct *p = ctmr->task;
	struct sighand_struct *sighand;
	unsigned long flags;
	int ret = 0;

	if (WARN_ON_ONCE(!p))
		return -EINVAL;

	/*
	 * Use the to_ktime conversion because that clamps the maximum
	 * value to KTIME_MAX and avoids multiplication overflows.
	 */
	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
	 * and p->signal->cpu_timers read/write in arm_timer()
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL))
		return -ESRCH;

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	old_incr = timer->it_interval;
	old_expires = cpu_timer_getexpires(ctmr);

	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else {
		cpu_timer_dequeue(ctmr);
	}

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative. To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer). With an absolute time, we must
	 * check if it's already passed. In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		val = cpu_clock_sample(clkid, p);
	else
		val = cpu_clock_sample_group(clkid, p, true);

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has overrun already.
			 * If it has, we'll report it as having overrun and
			 * with the next reloaded timer already ticking,
			 * though we are swallowing that pending
			 * notification here to install the new setting.
			 */
			u64 exp = bump_cpu_timer(timer, val);

			if (val < exp) {
				old_expires = exp - val;
				old->it_value = ns_to_timespec64(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	cpu_timer_setexpires(ctmr, new_expires);
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it_interval = timespec64_to_ktime(new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old) {
		old->it_interval = ns_to_timespec64(old_incr);
	}

	return ret;
}

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	struct cpu_timer *ctmr = &timer->it.cpu;
	u64 now, expires = cpu_timer_getexpires(ctmr);
	struct task_struct *p = ctmr->task;

	if (WARN_ON_ONCE(!p))
		return;

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ktime_to_timespec64(timer->it_interval);

	if (!expires)
		return;

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		now = cpu_clock_sample(clkid, p);
	} else {
		struct sighand_struct *sighand;
		unsigned long flags;

		/*
		 * Protect against sighand release/switch in exit/exec and
		 * also make timer sampling safe if it ends up calling
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Disarm the timer, nothing else to do.
			 */
			cpu_timer_setexpires(ctmr, 0);
			return;
		} else {
			now = cpu_clock_sample_group(clkid, p, false);
			unlock_task_sighand(p, &flags);
		}
	}

	if (now < expires) {
		itp->it_value = ns_to_timespec64(expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet. Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

#define MAX_COLLECTED	20

static u64 collect_timerqueue(struct timerqueue_head *head,
			      struct list_head *firing, u64 now)
{
	struct timerqueue_node *next;
	int i = 0;

	while ((next = timerqueue_getnext(head))) {
		struct cpu_timer *ctmr;
		u64 expires;

		ctmr = container_of(next, struct cpu_timer, node);
		expires = cpu_timer_getexpires(ctmr);
		/* Limit the number of timers to expire at once */
		if (++i == MAX_COLLECTED || now < expires)
			return expires;

		ctmr->firing = 1;
		cpu_timer_dequeue(ctmr);
		list_add_tail(&ctmr->elist, firing);
	}

	return U64_MAX;
}

static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
				    struct list_head *firing)
{
	struct posix_cputimer_base *base = pct->bases;
	int i;

	for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
		base->nextevt = collect_timerqueue(&base->tqhead, firing,
						   samples[i]);
	}
}

static inline void check_dl_overrun(struct task_struct *tsk)
{
	if (tsk->dl.dl_overrun) {
		tsk->dl.dl_overrun = 0;
		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
	}
}

static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
{
	if (time < limit)
		return false;

	if (print_fatal_signals) {
		pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
			rt ? "RT" : "CPU", hard ? "hard" : "soft",
			current->comm, task_pid_nr(current));
	}
	__group_send_sig_info(signo, SEND_SIG_PRIV, current);
	return true;
}
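
/*
 * Illustrative only: with RLIMIT_CPU set to { .rlim_cur = 2, .rlim_max = 5 },
 * the callers below deliver SIGXCPU once 2 seconds of profiled CPU time
 * have been consumed (re-arming one second later each time), and SIGKILL
 * once 5 seconds have been consumed.
 */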

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct posix_cputimers *pct = &tsk->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];
	unsigned long soft;

	if (dl_task(tsk))
		check_dl_overrun(tsk);

	if (expiry_cache_is_inactive(pct))
		return;

	task_sample_cputime(tsk, samples);
	collect_posix_cputimers(pct, samples, firing);

	/*
	 * Check for the special case thread timers.
	 */
	soft = task_rlimit(tsk, RLIMIT_RTTIME);
	if (soft != RLIM_INFINITY) {
		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);

		/* At the hard limit, send SIGKILL. No further action. */
		if (hard != RLIM_INFINITY &&
		    check_rlimit(rttime, hard, SIGKILL, true, true))
			return;

		/* At the soft limit, send a SIGXCPU every second */
		if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
			soft += USEC_PER_SEC;
			tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
		}
	}

	if (expiry_cache_is_inactive(pct))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static inline void stop_process_timers(struct signal_struct *sig)
{
	struct posix_cputimers *pct = &sig->posix_cputimers;

	/* Turn off the active flag. This is done without locking. */
	WRITE_ONCE(pct->timers_active, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr)
			it->expires += it->incr;
		else
			it->expires = 0;

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    task_tgid(tsk), cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && it->expires < *expires)
		*expires = it->expires;
}

/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the tsk->*_timers list onto the firing list. Per-thread timers
 * have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	struct posix_cputimers *pct = &sig->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];
	unsigned long soft;

	/*
	 * If there are no active process wide timers (POSIX 1.b, itimers,
	 * RLIMIT_CPU) nothing to check. Also skip the process wide timer
	 * processing when there is already another task handling them.
	 */
	if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	pct->expiry_active = true;

	/*
	 * Collect the current process totals. Group accounting is active
	 * so the sample can be taken directly.
	 */
	proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
	collect_posix_cputimers(pct, samples, firing);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
			 &pct->bases[CPUCLOCK_PROF].nextevt,
			 samples[CPUCLOCK_PROF], SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
			 &pct->bases[CPUCLOCK_VIRT].nextevt,
			 samples[CPUCLOCK_VIRT], SIGVTALRM);

	soft = task_rlimit(tsk, RLIMIT_CPU);
	if (soft != RLIM_INFINITY) {
		/* RLIMIT_CPU is in seconds. Samples are nanoseconds */
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
		u64 ptime = samples[CPUCLOCK_PROF];
		u64 softns = (u64)soft * NSEC_PER_SEC;
		u64 hardns = (u64)hard * NSEC_PER_SEC;

		/* At the hard limit, send SIGKILL. No further action. */
		if (hard != RLIM_INFINITY &&
		    check_rlimit(ptime, hardns, SIGKILL, false, true))
			return;

		/* At the soft limit, send a SIGXCPU every second */
		if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
			sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
			softns += NSEC_PER_SEC;
		}

		/* Update the expiry cache */
		if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
			pct->bases[CPUCLOCK_PROF].nextevt = softns;
	}

	if (expiry_cache_is_inactive(pct))
		stop_process_timers(sig);

	pct->expiry_active = false;
}

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	struct cpu_timer *ctmr = &timer->it.cpu;
	struct task_struct *p = ctmr->task;
	struct sighand_struct *sighand;
	unsigned long flags;
	u64 now;

	if (WARN_ON_ONCE(!p))
		return;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		now = cpu_clock_sample(clkid, p);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state))
			return;

		/* Protect timer list r/w in arm_timer() */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL))
			return;
	} else {
		/*
		 * Protect arm_timer() and timer sampling in case of call to
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			cpu_timer_setexpires(ctmr, 0);
			return;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/* If the process is dying, no need to rearm */
			goto unlock;
		}
		now = cpu_clock_sample_group(clkid, p, true);
		bump_cpu_timer(timer, now);
		/* Leave the sighand locked for the call below. */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer);
unlock:
	unlock_task_sighand(p, &flags);
}

/**
 * task_cputimers_expired - Check whether posix CPU timers are expired
 *
 * @samples:	Array of current samples for the CPUCLOCK clocks
 * @pct:	Pointer to a posix_cputimers container
 *
 * Returns true if any member of @samples is greater than or equal to the
 * corresponding member of @pct->bases[CLK].nextevt. False otherwise.
 */
static inline bool
task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
{
	int i;

	for (i = 0; i < CPUCLOCK_MAX; i++) {
		if (samples[i] >= pct->bases[i].nextevt)
			return true;
	}

	return false;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers. If both are zero (there are no
 * timers set) return false. Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times. Return
 * true if a timer has expired, else return false.
 */
static inline bool fastpath_timer_check(struct task_struct *tsk)
{
	struct posix_cputimers *pct = &tsk->posix_cputimers;
	struct signal_struct *sig;

	if (!expiry_cache_is_inactive(pct)) {
		u64 samples[CPUCLOCK_MAX];

		task_sample_cputime(tsk, samples);
		if (task_cputimers_expired(samples, pct))
			return true;
	}

	sig = tsk->signal;
	pct = &sig->posix_cputimers;
	/*
	 * Check if thread group timers expired when timers are active and
	 * no other thread in the group is already handling expiry for
	 * thread group cputimers. These fields are read without the
	 * sighand lock. However, this is fine because this is meant to be
	 * a fastpath heuristic to determine whether we should try to
	 * acquire the sighand lock to handle timer expiry.
	 *
	 * In the worst case scenario, if concurrently timers_active is set
	 * or expiry_active is cleared, but the current thread doesn't see
	 * the change yet, the timer checks are delayed until the next
	 * thread in the group gets a scheduler interrupt to handle the
	 * timer. This isn't an issue in practice because these types of
	 * delays with signals actually getting sent are expected.
	 */
	if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
		u64 samples[CPUCLOCK_MAX];

		proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
					   samples);

		if (task_cputimers_expired(samples, pct))
			return true;
	}

	if (dl_task(tsk) && tsk->dl.dl_overrun)
		return true;

	return false;
}

/*
 * This is called from the timer interrupt handler. The irq handler has
 * already updated our counts. We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(void)
{
	struct task_struct *tsk = current;
	struct k_itimer *timer, *next;
	unsigned long flags;
	LIST_HEAD(firing);

	lockdep_assert_irqs_disabled();

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers. If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);

	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list. We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us. We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.elist);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun. So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
			   u64 *newval, u64 *oldval)
{
	u64 now, *nextevt;

	if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
		return;

	nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
	now = cpu_clock_sample_group(clkid, tsk, true);

	if (oldval) {
		/*
		 * We are setting itimer. The *oldval is absolute and we update
		 * it to be relative, *newval argument is relative and we update
		 * it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = TICK_NSEC;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			return;
		*newval += now;
	}

	/*
	 * Update expiration cache if this is the earliest timer. CPUCLOCK_PROF
	 * expiry cache is also used by RLIMIT_CPU!
	 */
	if (*newval < *nextevt)
		*nextevt = *newval;

	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}
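
/*
 * Illustrative only: besides RLIMIT_CPU updates, this is reached from
 * the setitimer(2) path, e.g.:
 *
 *	struct itimerval itv = {
 *		.it_value    = { .tv_sec = 2 },
 *		.it_interval = { .tv_sec = 2 },
 *	};
 *	setitimer(ITIMER_PROF, &itv, NULL);
 *
 * which arms the CPUCLOCK_PROF itimer and delivers SIGPROF every two
 * seconds of process CPU time.
 */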

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct itimerspec64 it;
	struct k_itimer timer;
	u64 expires;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;

	if (!error) {
		static struct itimerspec64 zero_it;
		struct restart_block *restart;

		memset(&it, 0, sizeof(it));
		it.it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (!cpu_timer_getexpires(&timer.it.cpu)) {
				/*
				 * Our timer fired and was reset, below
				 * deletion can not fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		expires = cpu_timer_getexpires(&timer.it.cpu);
		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion can not fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle case when timer was or is in the
			 * middle of firing. In other cases we already freed
			 * resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
		/*
		 * Report back to the user the time still remaining.
		 */
		restart = &current->restart_block;
		restart->nanosleep.expires = expires;
		if (restart->nanosleep.type != TT_NONE)
			error = nanosleep_copyout(restart, &it.it_value);
	}

	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct restart_block *restart_block = &current->restart_block;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp);

	if (error == -ERESTART_RESTARTBLOCK) {
		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
	}
	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec64 t;

	t = ns_to_timespec64(restart_block->nanosleep.expires);

	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}
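
/*
 * Illustrative only: the nanosleep machinery above services calls such as
 *
 *	struct timespec ts = { .tv_sec = 1 };
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &ts, NULL);
 *
 * i.e. "sleep until this process has consumed one more second of CPU
 * time", implemented with the on-stack temporary timer in
 * do_cpu_nanosleep().
 */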

#define PROCESS_CLOCK	make_process_cpuclock(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	make_thread_cpuclock(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec64 *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec64 *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      const struct timespec64 *rqtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec64 *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec64 *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}

const struct k_clock clock_posix_cpu = {
	.clock_getres		= posix_cpu_clock_getres,
	.clock_set		= posix_cpu_clock_set,
	.clock_get		= posix_cpu_clock_get,
	.timer_create		= posix_cpu_timer_create,
	.nsleep			= posix_cpu_nsleep,
	.timer_set		= posix_cpu_timer_set,
	.timer_del		= posix_cpu_timer_del,
	.timer_get		= posix_cpu_timer_get,
	.timer_rearm		= posix_cpu_timer_rearm,
};

const struct k_clock clock_process = {
	.clock_getres		= process_cpu_clock_getres,
	.clock_get		= process_cpu_clock_get,
	.timer_create		= process_cpu_timer_create,
	.nsleep			= process_cpu_nsleep,
};

const struct k_clock clock_thread = {
	.clock_getres		= thread_cpu_clock_getres,
	.clock_get		= thread_cpu_clock_get,
	.timer_create		= thread_cpu_timer_create,
};