/* Helper code for POSIX timer implementation on NPTL.
   Copyright (C) 2000-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sysdep.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

#include "posix-timer.h"
#include <timer_routines.h>

#ifndef DELAYTIMER_MAX
# define DELAYTIMER_MAX INT_MAX
#endif

/* Number of threads used.  */
#define THREAD_MAXNODES 16

/* Array containing the descriptors for the used threads.  */
static struct thread_node thread_array[THREAD_MAXNODES];

/* Static array with the structures for all the timers.  */
struct timer_node __timer_array[TIMER_MAX];

/* Global lock to protect operation on the lists.  */
pthread_mutex_t __timer_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Variable to protect initialization.  */
pthread_once_t __timer_init_once_control = PTHREAD_ONCE_INIT;

/* Nonzero if initialization of timer implementation failed.  */
int __timer_init_failed;

/* Node for the thread used to deliver signals.  */
struct thread_node __timer_signal_thread_rclk;

/* Lists to keep free and used timers and threads.  */
static struct list_head timer_free_list;
static struct list_head thread_free_list;
static struct list_head thread_active_list;

#ifdef __NR_rt_sigqueueinfo
extern int __syscall_rt_sigqueueinfo (int, int, siginfo_t *);
#endif

/* List handling functions.  */

static inline void
list_append (struct list_head *list, struct list_head *newp)
{
  newp->prev = list->prev;
  newp->next = list;
  list->prev->next = newp;
  list->prev = newp;
}

static inline void
list_insbefore (struct list_head *list, struct list_head *newp)
{
  list_append (list, newp);
}

/*
 * Like list_unlink_ip, except that calling it on a node that
 * is already unlinked is disastrous rather than a noop.
 */

static inline void
list_unlink (struct list_head *list)
{
  struct list_head *lnext = list->next, *lprev = list->prev;

  lnext->prev = lprev;
  lprev->next = lnext;
}

static inline struct list_head *
list_first (struct list_head *list)
{
  return list->next;
}

static inline struct list_head *
list_null (struct list_head *list)
{
  return list;
}

static inline struct list_head *
list_next (struct list_head *list)
{
  return list->next;
}

static inline int
list_isempty (struct list_head *list)
{
  return list->next == list;
}
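
/* The lists above are intrusive, circular, doubly-linked lists in which
   the head node itself acts as the sentinel.  Assuming INIT_LIST_HEAD
   (defined elsewhere) points a node at itself, as in the usual intrusive
   list idiom, an empty queue satisfies

       INIT_LIST_HEAD (&q);
       assert (list_isempty (&q));
       assert (list_first (&q) == list_null (&q));

   which is why the loops below compare iterators against
   list_null (head) rather than against NULL.  */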

/* Functions built on top of the list functions.  */
static inline struct thread_node *
thread_links2ptr (struct list_head *list)
{
  return (struct thread_node *) ((char *) list
                                 - offsetof (struct thread_node, links));
}

static inline struct timer_node *
timer_links2ptr (struct list_head *list)
{
  return (struct timer_node *) ((char *) list
                                - offsetof (struct timer_node, links));
}
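
/* These *_links2ptr helpers are the usual container_of idiom: given a
   pointer to the embedded `links' member, they recover the enclosing
   thread_node or timer_node, so one list_head implementation serves
   both object pools.  */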

/* Initialize a newly allocated thread structure.  */
static void
thread_init (struct thread_node *thread, const pthread_attr_t *attr,
             clockid_t clock_id)
{
  if (attr != NULL)
    thread->attr = *attr;
  else
    {
      pthread_attr_init (&thread->attr);
      pthread_attr_setdetachstate (&thread->attr, PTHREAD_CREATE_DETACHED);
    }

  thread->exists = 0;
  INIT_LIST_HEAD (&thread->timer_queue);
  pthread_cond_init (&thread->cond, 0);
  thread->current_timer = 0;
  thread->captured = pthread_self ();
  thread->clock_id = clock_id;
}

/* Initialize the global lists, and acquire global resources.  Error
   reporting is done by storing a non-zero value to the global variable
   timer_init_failed.  */
static void
init_module (void)
{
  int i;

  INIT_LIST_HEAD (&timer_free_list);
  INIT_LIST_HEAD (&thread_free_list);
  INIT_LIST_HEAD (&thread_active_list);

  for (i = 0; i < TIMER_MAX; ++i)
    {
      list_append (&timer_free_list, &__timer_array[i].links);
      __timer_array[i].inuse = TIMER_FREE;
    }

  for (i = 0; i < THREAD_MAXNODES; ++i)
    list_append (&thread_free_list, &thread_array[i].links);

  thread_init (&__timer_signal_thread_rclk, 0, CLOCK_REALTIME);
}
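
/* Since both pools are static arrays, TIMER_MAX and THREAD_MAXNODES are
   hard upper bounds on the number of live timers and helper threads;
   allocation in this module never calls malloc, it only moves nodes
   between the free and active lists.  */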

/* This is a handler executed in a child process after a fork()
   occurs.  It reinitializes the module, resetting all of the data
   structures to their initial state.  The mutex is initialized in
   case it was locked in the parent process.  */
static void
reinit_after_fork (void)
{
  init_module ();
  pthread_mutex_init (&__timer_mutex, 0);
}

/* Called once from pthread_once in timer_init.  This initializes the
   module and ensures that reinit_after_fork will be executed in any
   child process.  */
void
__timer_init_once (void)
{
  init_module ();
  pthread_atfork (0, 0, reinit_after_fork);
}
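
/* Illustrative sketch, an assumption about the front ends that live in
   other files rather than code copied from them: a caller such as
   timer_create is expected to guard itself with

       pthread_once (&__timer_init_once_control, __timer_init_once);
       if (__timer_init_failed)
         return -1;      (after setting a suitable errno, e.g. EAGAIN)

   so that init_module runs exactly once per process and initialization
   failures are reported through __timer_init_failed.  */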

/* Deinitialize a thread that is about to be deallocated.  */
static void
thread_deinit (struct thread_node *thread)
{
  assert (list_isempty (&thread->timer_queue));
  pthread_cond_destroy (&thread->cond);
}

/* Allocate a thread structure from the global free list.  Global
   mutex lock must be held by caller.  The thread is moved to
   the active list.  */
struct thread_node *
__timer_thread_alloc (const pthread_attr_t *desired_attr, clockid_t clock_id)
{
  struct list_head *node = list_first (&thread_free_list);

  if (node != list_null (&thread_free_list))
    {
      struct thread_node *thread = thread_links2ptr (node);
      list_unlink (node);
      thread_init (thread, desired_attr, clock_id);
      list_append (&thread_active_list, node);
      return thread;
    }

  return 0;
}

/* Return a thread structure to the global free list.  Global lock
   must be held by caller.  */
void
__timer_thread_dealloc (struct thread_node *thread)
{
  thread_deinit (thread);
  list_unlink (&thread->links);
  list_append (&thread_free_list, &thread->links);
}

/* Each of our threads which terminates executes this cleanup
   handler.  We never terminate threads ourselves; if a thread gets here
   it means that the evil application has killed it.  If the thread has
   timers, these require servicing and so we must hire a replacement
   thread right away.  We must also unblock another thread that may
   have been waiting for this thread to finish servicing a timer (see
   timer_delete()).  */
static void
thread_cleanup (void *val)
{
  if (val != NULL)
    {
      struct thread_node *thread = val;

      /* How did the signal thread get killed?  */
      assert (thread != &__timer_signal_thread_rclk);

      pthread_mutex_lock (&__timer_mutex);

      thread->exists = 0;

      /* We are no longer processing a timer event.  */
      thread->current_timer = 0;

      if (list_isempty (&thread->timer_queue))
        __timer_thread_dealloc (thread);
      else
        (void) __timer_thread_start (thread);

      pthread_mutex_unlock (&__timer_mutex);

      /* Unblock potentially blocked timer_delete().  */
      pthread_cond_broadcast (&thread->cond);
    }
}

/* Handle a timer which is supposed to go off now.  */
static void
thread_expire_timer (struct thread_node *self, struct timer_node *timer)
{
  self->current_timer = timer; /* Lets timer_delete know timer is running.  */

  pthread_mutex_unlock (&__timer_mutex);

  switch (__builtin_expect (timer->event.sigev_notify, SIGEV_SIGNAL))
    {
    case SIGEV_NONE:
      break;

    case SIGEV_SIGNAL:
#ifdef __NR_rt_sigqueueinfo
      {
        siginfo_t info;

        /* First, clear the siginfo_t structure, so that we don't pass our
           stack content to other tasks.  */
        memset (&info, 0, sizeof (siginfo_t));
        /* We must pass the information about the data in a siginfo_t
           value.  */
        info.si_signo = timer->event.sigev_signo;
        info.si_code = SI_TIMER;
        info.si_pid = timer->creator_pid;
        info.si_uid = getuid ();
        info.si_value = timer->event.sigev_value;

        INLINE_SYSCALL (rt_sigqueueinfo, 3, info.si_pid, info.si_signo, &info);
      }
#else
      if (pthread_kill (self->captured, timer->event.sigev_signo) != 0)
        {
          if (pthread_kill (self->id, timer->event.sigev_signo) != 0)
            abort ();
        }
#endif
      break;

    case SIGEV_THREAD:
      timer->event.sigev_notify_function (timer->event.sigev_value);
      break;

    default:
      assert (! "unknown event");
      break;
    }

  pthread_mutex_lock (&__timer_mutex);

  self->current_timer = 0;

  pthread_cond_broadcast (&self->cond);
}
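
/* Note: with SIGEV_THREAD notification this implementation invokes the
   user's sigev_notify_function directly on the worker thread that owns
   the timer, with __timer_mutex released around the callback, rather
   than spawning a new thread for every expiry.  */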

/* Thread function; executed by each timer thread.  The job of this
   function is to wait on the thread's timer queue and expire the
   timers in chronological order as close to their scheduled time as
   possible.  */
static void
__attribute__ ((noreturn))
thread_func (void *arg)
{
  struct thread_node *self = arg;

  /* Register cleanup handler, in case rogue application terminates
     this thread.  (This cannot happen to __timer_signal_thread, which
     doesn't invoke application callbacks).  */

  pthread_cleanup_push (thread_cleanup, self);

  pthread_mutex_lock (&__timer_mutex);

  while (1)
    {
      struct list_head *first;
      struct timer_node *timer = NULL;

      /* While the timer queue is not empty, inspect the first node.  */
      first = list_first (&self->timer_queue);
      if (first != list_null (&self->timer_queue))
        {
          struct timespec now;

          timer = timer_links2ptr (first);

          /* This assumes that the elements of the list of one thread
             are all for the same clock.  */
          __clock_gettime (timer->clock, &now);

          while (1)
            {
              /* If the timer is due or overdue, remove it from the queue.
                 If it's a periodic timer, re-compute its new time and
                 requeue it.  Either way, perform the timer expiry.  */
              if (timespec_compare (&now, &timer->expirytime) < 0)
                break;

              list_unlink_ip (first);

              if (__builtin_expect (timer->value.it_interval.tv_sec, 0) != 0
                  || timer->value.it_interval.tv_nsec != 0)
                {
                  /* Advance the expiry time by whole intervals until it
                     lies in the future, counting the skipped intervals
                     as overruns (capped at DELAYTIMER_MAX).  */
                  timer->overrun_count = 0;
                  timespec_add (&timer->expirytime, &timer->expirytime,
                                &timer->value.it_interval);
                  while (timespec_compare (&timer->expirytime, &now) < 0)
                    {
                      timespec_add (&timer->expirytime, &timer->expirytime,
                                    &timer->value.it_interval);
                      if (timer->overrun_count < DELAYTIMER_MAX)
                        ++timer->overrun_count;
                    }
                  __timer_thread_queue_timer (self, timer);
                }

              thread_expire_timer (self, timer);

              first = list_first (&self->timer_queue);
              if (first == list_null (&self->timer_queue))
                break;

              timer = timer_links2ptr (first);
            }
        }

      /* If the queue is not empty, wait until the expiry time of the
         first node.  Otherwise wait indefinitely.  Insertions at the
         head of the queue must wake up the thread by broadcasting
         this condition variable.  */
      if (timer != NULL)
        pthread_cond_timedwait (&self->cond, &__timer_mutex,
                                &timer->expirytime);
      else
        pthread_cond_wait (&self->cond, &__timer_mutex);
    }
  /* This macro will never be executed since the while loop loops
     forever - but we have to add it for proper nesting.  */
  pthread_cleanup_pop (1);
}

/* Enqueue a timer in wakeup order in the thread's timer queue.
   Returns 1 if the timer was inserted at the head of the queue,
   causing the queue's next wakeup time to change.  */
int
__timer_thread_queue_timer (struct thread_node *thread,
                            struct timer_node *insert)
{
  struct list_head *iter;
  int athead = 1;

  for (iter = list_first (&thread->timer_queue);
       iter != list_null (&thread->timer_queue);
       iter = list_next (iter))
    {
      struct timer_node *timer = timer_links2ptr (iter);

      if (timespec_compare (&insert->expirytime, &timer->expirytime) < 0)
        break;

      athead = 0;
    }

  list_insbefore (iter, &insert->links);
  return athead;
}
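
/* Sketch of the intended calling convention, an assumption about the
   timer_settime front end rather than code from this file:

       if (__timer_thread_queue_timer (thread, timer))
         __timer_thread_wakeup (thread);

   i.e. when the new timer becomes the earliest entry, the worker thread
   is woken so it can recompute its pthread_cond_timedwait deadline.  */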

/* Start a thread and associate it with the given thread node.  Global
   lock must be held by caller.  */
int
__timer_thread_start (struct thread_node *thread)
{
  int retval = 1;
  sigset_t set, oset;

  assert (!thread->exists);
  thread->exists = 1;

  /* Block all signals so that the new thread starts with a fully
     blocked signal mask; the caller's mask is restored afterwards.  */
  sigfillset (&set);
  pthread_sigmask (SIG_SETMASK, &set, &oset);

  if (pthread_create (&thread->id, &thread->attr,
                      (void *(*) (void *)) thread_func, thread) != 0)
    {
      thread->exists = 0;
      retval = -1;
    }

  pthread_sigmask (SIG_SETMASK, &oset, NULL);

  return retval;
}

void
__timer_thread_wakeup (struct thread_node *thread)
{
  pthread_cond_broadcast (&thread->cond);
}

/* Search the list of active threads and find one which has matching
   attributes.  Global mutex lock must be held by caller.  */
struct thread_node *
__timer_thread_find_matching (const pthread_attr_t *desired_attr,
                              clockid_t desired_clock_id)
{
  struct list_head *iter = list_first (&thread_active_list);

  while (iter != list_null (&thread_active_list))
    {
      struct thread_node *candidate = thread_links2ptr (iter);

      if (thread_attr_compare (desired_attr, &candidate->attr)
          && desired_clock_id == candidate->clock_id)
        return candidate;

      iter = list_next (iter);
    }

  return NULL;
}

/* Grab a free timer structure from the global free list.  The global
   lock must be held by the caller.  */
struct timer_node *
__timer_alloc (void)
{
  struct list_head *node = list_first (&timer_free_list);

  if (node != list_null (&timer_free_list))
    {
      struct timer_node *timer = timer_links2ptr (node);
      list_unlink_ip (node);
      timer->inuse = TIMER_INUSE;
      timer->refcount = 1;
      return timer;
    }

  return NULL;
}

/* Return a timer structure to the global free list.  The global lock
   must be held by the caller.  */
void
__timer_dealloc (struct timer_node *timer)
{
  assert (timer->refcount == 0);
  timer->thread = NULL;  /* Break association between timer and thread.  */
  timer->inuse = TIMER_FREE;
  list_append (&timer_free_list, &timer->links);
}

/* Thread cancellation handler which unlocks a mutex.  */
void
__timer_mutex_cancel_handler (void *arg)
{
  pthread_mutex_unlock (arg);
}
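
/* Illustrative usage sketch, an assumption about the front ends in
   other files rather than code taken from them: a cancellable critical
   section on the global lock is typically written as

       pthread_cleanup_push (__timer_mutex_cancel_handler, &__timer_mutex);
       pthread_mutex_lock (&__timer_mutex);
       ...
       pthread_mutex_unlock (&__timer_mutex);
       pthread_cleanup_pop (0);

   so that __timer_mutex is released even if the calling thread is
   cancelled while blocked inside this module.  */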