/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * Copyright (c) 2007 Jakub Jermar
 * Copyright (c) 2018 CZ.NIC, z.s.p.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <adt/list.h>
#include <as.h>
#include <assert.h>
#include <atomic.h>
#include <context.h>
#include <fibril.h>
#include <ipc/ipc.h>
#include <stack.h>
#include <stdlib.h>
#include <time.h>
#include <tls.h>
#include <libarch/faddr.h>

#include "../private/thread.h"
#include "../private/futex.h"
#include "../private/fibril.h"
#include "../private/libc.h"

#define DPRINTF(...) ((void)0)
/** Member of timeout_list. */
typedef struct {
	link_t link;

	struct timespec expires;
	fibril_event_t *event;
} _timeout_t;

/** Member of ipc_waiter_list. */
typedef struct {
	link_t link;

	fibril_event_t event;
	ipc_call_t *call;
	errno_t rc;
} _ipc_waiter_t;

/** Member of ipc_buffer_list and ipc_buffer_free_list. */
typedef struct {
	link_t link;

	ipc_call_t call;
	errno_t rc;
} _ipc_buffer_t;

typedef enum {
	SWITCH_FROM_DEAD,
	SWITCH_FROM_HELPER,
	SWITCH_FROM_YIELD,
	SWITCH_FROM_BLOCKED,
} _switch_type_t;
static bool multithreaded = false;

/* This futex serializes access to global data. */
static futex_t fibril_futex = FUTEX_INITIALIZER;
static futex_t ready_semaphore = FUTEX_INITIALIZE(0);
/* Token count used instead of ready_semaphore while single-threaded. */
static long ready_st_count;
static LIST_INITIALIZE(ready_list);
static LIST_INITIALIZE(fibril_list);
static LIST_INITIALIZE(timeout_list);

static futex_t ipc_lists_futex = FUTEX_INITIALIZER;
static LIST_INITIALIZE(ipc_waiter_list);
static LIST_INITIALIZE(ipc_buffer_list);
static LIST_INITIALIZE(ipc_buffer_free_list);
/* Only used as unique markers for triggered events. */
static fibril_t _fibril_event_triggered;
static fibril_t _fibril_event_timed_out;
#define _EVENT_INITIAL   (NULL)
#define _EVENT_TRIGGERED (&_fibril_event_triggered)
#define _EVENT_TIMED_OUT (&_fibril_event_timed_out)
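
/*
 * Informative sketch of the event life cycle as implemented by
 * _fibril_trigger_internal() below ("waiter" stands for a pointer to
 * the sleeping fibril stored in event->fibril):
 *
 *   _EVENT_INITIAL --wait--> waiter --trigger--> _EVENT_TRIGGERED
 *   _EVENT_INITIAL --trigger--> _EVENT_TRIGGERED   (wakeup remembered)
 *   waiter --timeout--> _EVENT_TIMED_OUT --trigger--> _EVENT_TRIGGERED
 */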
static inline void _ready_debug_check(void)
{
	assert(!multithreaded);
	long count = (long) list_count(&ready_list) +
	    (long) list_count(&ipc_buffer_free_list);
	assert(ready_st_count == count);
}
static inline long _ready_count(void)
{
	/*
	 * The number of available tokens is always equal to the number
	 * of fibrils in the ready list + the number of free IPC buffer
	 * buckets.
	 */

	if (multithreaded)
		return atomic_get(&ready_semaphore.val);

	_ready_debug_check();
	return ready_st_count;
}
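
/*
 * Worked example of the token invariant (illustrative only): with two
 * fibrils in ready_list and all IPC_BUFFER_COUNT buffer buckets free,
 * _ready_count() is 2 + IPC_BUFFER_COUNT. A thread that successfully
 * downs ready_semaphore is therefore guaranteed either a ready fibril
 * to run or a free bucket, i.e. the right to enter ipc_wait().
 */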
static inline void _ready_up(void)
{
	if (multithreaded) {
		futex_up(&ready_semaphore);
	} else {
		ready_st_count++;
		_ready_debug_check();
	}
}
static inline errno_t _ready_down(const struct timespec *expires)
{
	if (multithreaded)
		return futex_down_timeout(&ready_semaphore, expires);

	ready_st_count--;
	_ready_debug_check();
	return EOK;
}
static atomic_t threads_in_ipc_wait = { 0 };
/** Function that spans the whole life-cycle of a fibril.
 *
 * Each fibril begins execution in this function. Then the function
 * implementing the fibril logic is called. After its return, the return
 * value is saved. The fibril then switches to another fibril, which cleans
 * up after it.
 */
static void _fibril_main(void)
{
	/* fibril_futex is locked when a fibril is started. */
	futex_unlock(&fibril_futex);

	fibril_t *fibril = fibril_self();

	/* Call the implementing function. */
	fibril_exit(fibril->func(fibril->arg));

	/* Not reached */
}
/** Allocate a fibril structure and TCB, but don't do anything else with it. */
fibril_t *fibril_alloc(void)
{
	tcb_t *tcb = tls_make(__progsymbols.elfstart);
	if (!tcb)
		return NULL;

	fibril_t *fibril = calloc(1, sizeof(fibril_t));
	if (!fibril) {
		tls_free(tcb);
		return NULL;
	}

	tcb->fibril_data = fibril;
	fibril->tcb = tcb;
	fibril->is_freeable = true;

	fibril_setup(fibril);
	return fibril;
}
/**
 * Put the fibril into fibril_list.
 */
void fibril_setup(fibril_t *f)
{
	futex_lock(&fibril_futex);
	list_append(&f->all_link, &fibril_list);
	futex_unlock(&fibril_futex);
}
void fibril_teardown(fibril_t *fibril)
{
	futex_lock(&fibril_futex);
	list_remove(&fibril->all_link);
	futex_unlock(&fibril_futex);

	if (fibril->is_freeable) {
		tls_free(fibril->tcb);
		free(fibril);
	}
}
/**
 * Event notification with a given reason.
 *
 * @param reason Reason of the notification.
 *               Can be either _EVENT_TRIGGERED or _EVENT_TIMED_OUT.
 */
static fibril_t *_fibril_trigger_internal(fibril_event_t *event,
    fibril_t *reason)
{
	assert(reason != _EVENT_INITIAL);
	assert(reason == _EVENT_TIMED_OUT || reason == _EVENT_TRIGGERED);

	futex_assert_is_locked(&fibril_futex);

	if (event->fibril == _EVENT_INITIAL) {
		event->fibril = reason;
		return NULL;
	}

	if (event->fibril == _EVENT_TIMED_OUT) {
		assert(reason == _EVENT_TRIGGERED);
		event->fibril = reason;
		return NULL;
	}

	if (event->fibril == _EVENT_TRIGGERED) {
		/* Already triggered. Nothing to do. */
		return NULL;
	}

	fibril_t *f = event->fibril;
	event->fibril = reason;

	assert(f->sleep_event == event);
	return f;
}
static errno_t _ipc_wait(ipc_call_t *call, const struct timespec *expires)
{
	if (!expires)
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);

	if (expires->tv_sec == 0)
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);

	struct timespec now;
	getuptime(&now);

	if (ts_gteq(&now, expires))
		return ipc_wait(call, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING);

	return ipc_wait(call, NSEC2USEC(ts_sub_diff(expires, &now)),
	    SYNCH_FLAGS_NONE);
}
/**
 * Waits until a ready fibril is added to the list, or an IPC message arrives.
 *
 * Returns NULL on timeout, and may also return NULL if returning from IPC
 * wait after new ready fibrils are added.
 */
static fibril_t *_ready_list_pop(const struct timespec *expires, bool locked)
{
	if (locked) {
		futex_assert_is_locked(&fibril_futex);
		assert(expires);
		/* Must be nonblocking. */
		assert(expires->tv_sec == 0);
	} else {
		futex_assert_is_not_locked(&fibril_futex);
	}

	errno_t rc = _ready_down(expires);
	if (rc != EOK)
		return NULL;

	/*
	 * Once we acquire a token from ready_semaphore, there are two options.
	 * Either there is a ready fibril in the list, or it's our turn to
	 * call `ipc_wait_cycle()`. There is one extra token on the semaphore
	 * for each entry of the call buffer.
	 */

	if (!locked)
		futex_lock(&fibril_futex);
	fibril_t *f = list_pop(&ready_list, fibril_t, link);
	if (!f)
		atomic_inc(&threads_in_ipc_wait);
	if (!locked)
		futex_unlock(&fibril_futex);

	if (f)
		return f;

	if (!multithreaded)
		assert(list_empty(&ipc_buffer_list));

	/* No fibril is ready, IPC wait it is. */
	ipc_call_t call = { 0 };
	rc = _ipc_wait(&call, expires);

	atomic_dec(&threads_in_ipc_wait);

	if (rc != EOK && rc != ENOENT) {
		/* Return the token and bail out. */
		_ready_up();
		return NULL;
	}

	/*
	 * We might get ENOENT due to a poke.
	 * In that case, we propagate the null call out of fibril_ipc_wait(),
	 * because poke must result in that call returning.
	 */

	/*
	 * If a fibril is already waiting for IPC, we wake up the fibril,
	 * and return the token to ready_semaphore.
	 * If there is no fibril waiting, we pop a buffer bucket and
	 * put our call there. The token then returns when the bucket is
	 * freed.
	 */

	if (!locked)
		futex_lock(&fibril_futex);

	futex_lock(&ipc_lists_futex);

	_ipc_waiter_t *w = list_pop(&ipc_waiter_list, _ipc_waiter_t, link);
	if (w) {
		*w->call = call;
		w->rc = rc;

		/* We switch to the woken up fibril immediately if possible. */
		f = _fibril_trigger_internal(&w->event, _EVENT_TRIGGERED);

		/* Return the token. */
		_ready_up();
	} else {
		_ipc_buffer_t *buf = list_pop(&ipc_buffer_free_list,
		    _ipc_buffer_t, link);
		assert(buf);
		*buf = (_ipc_buffer_t) { .call = call, .rc = rc };
		list_append(&buf->link, &ipc_buffer_list);
	}

	futex_unlock(&ipc_lists_futex);

	if (!locked)
		futex_unlock(&fibril_futex);

	return f;
}
static fibril_t *_ready_list_pop_nonblocking(bool locked)
{
	struct timespec tv = { .tv_sec = 0, .tv_nsec = 0 };
	return _ready_list_pop(&tv, locked);
}
static void _ready_list_push(fibril_t *f)
{
	if (!f)
		return;

	futex_assert_is_locked(&fibril_futex);

	/* Enqueue in ready_list. */
	list_append(&f->link, &ready_list);
	_ready_up();

	if (atomic_get(&threads_in_ipc_wait)) {
		DPRINTF("Poking.\n");
		/* Wakeup one thread sleeping in SYS_IPC_WAIT. */
		ipc_poke();
	}
}
/* Blocks the current fibril until an IPC call arrives. */
static errno_t _wait_ipc(ipc_call_t *call, const struct timespec *expires)
{
	futex_assert_is_not_locked(&fibril_futex);

	futex_lock(&ipc_lists_futex);
	_ipc_buffer_t *buf = list_pop(&ipc_buffer_list, _ipc_buffer_t, link);
	if (buf) {
		*call = buf->call;
		errno_t rc = buf->rc;

		/* Return to freelist. */
		list_append(&buf->link, &ipc_buffer_free_list);
		/* Return IPC wait token. */
		_ready_up();

		futex_unlock(&ipc_lists_futex);
		return rc;
	}

	_ipc_waiter_t w = { .call = call };
	list_append(&w.link, &ipc_waiter_list);
	futex_unlock(&ipc_lists_futex);

	errno_t rc = fibril_wait_timeout(&w.event, expires);
	if (rc == EOK)
		return w.rc;

	futex_lock(&ipc_lists_futex);
	if (link_in_use(&w.link))
		list_remove(&w.link);
	else
		rc = w.rc;
	futex_unlock(&ipc_lists_futex);
	return rc;
}
/** Fire all timeouts that expired. */
static struct timespec *_handle_expired_timeouts(struct timespec *next_timeout)
{
	struct timespec ts;
	getuptime(&ts);

	futex_lock(&fibril_futex);

	while (!list_empty(&timeout_list)) {
		link_t *cur = list_first(&timeout_list);
		_timeout_t *to = list_get_instance(cur, _timeout_t, link);

		if (ts_gt(&to->expires, &ts)) {
			*next_timeout = to->expires;
			futex_unlock(&fibril_futex);
			return next_timeout;
		}

		list_remove(&to->link);

		_ready_list_push(_fibril_trigger_internal(
		    to->event, _EVENT_TIMED_OUT));
	}

	futex_unlock(&fibril_futex);
	return NULL;
}
/**
 * Clean up after a dead fibril from which we restored context, if any.
 * Called after a switch is made and fibril_futex is unlocked.
 */
static void _fibril_cleanup_dead(void)
{
	fibril_t *srcf = fibril_self();
	if (!srcf->clean_after_me)
		return;

	void *stack = srcf->clean_after_me->stack;
	assert(stack);
	as_area_destroy(stack);
	fibril_teardown(srcf->clean_after_me);
	srcf->clean_after_me = NULL;
}
/** Switch to a fibril. */
static void _fibril_switch_to(_switch_type_t type, fibril_t *dstf, bool locked)
{
	assert(fibril_self()->rmutex_locks == 0);

	if (!locked)
		futex_lock(&fibril_futex);
	else
		futex_assert_is_locked(&fibril_futex);

	fibril_t *srcf = fibril_self();
	assert(srcf);
	assert(dstf);

	switch (type) {
	case SWITCH_FROM_YIELD:
		_ready_list_push(srcf);
		break;
	case SWITCH_FROM_DEAD:
		dstf->clean_after_me = srcf;
		break;
	case SWITCH_FROM_HELPER:
	case SWITCH_FROM_BLOCKED:
		break;
	}

	dstf->thread_ctx = srcf->thread_ctx;
	srcf->thread_ctx = NULL;

	/* Just some bookkeeping to allow better debugging of futex locks. */
	futex_give_to(&fibril_futex, dstf);

	/* Swap to the next fibril. */
	context_swap(&srcf->ctx, &dstf->ctx);

	assert(srcf == fibril_self());
	assert(srcf->thread_ctx);

	if (!locked) {
		/* Must be after context_swap()! */
		futex_unlock(&fibril_futex);
		_fibril_cleanup_dead();
	}
}
/**
 * Main function for a helper fibril.
 * The helper fibril executes on threads in the lightweight fibril pool when
 * there is no fibril ready to run. Its only purpose is to block until
 * another fibril is ready, or a timeout expires, or an IPC message arrives.
 *
 * There is at most one helper fibril per thread.
 */
static errno_t _helper_fibril_fn(void *arg)
{
	/* Set itself as the thread's own context. */
	fibril_self()->thread_ctx = fibril_self();

	(void) arg;

	struct timespec next_timeout;
	while (true) {
		struct timespec *to = _handle_expired_timeouts(&next_timeout);
		fibril_t *f = _ready_list_pop(to, false);
		if (f)
			_fibril_switch_to(SWITCH_FROM_HELPER, f, false);
	}

	return EOK;
}
/** Create a new fibril.
 *
 * @param func Implementing function of the new fibril.
 * @param arg Argument to pass to func.
 * @param stksz Stack size in bytes.
 *
 * @return 0 on failure, or the fibril ID of the new fibril.
 */
fid_t fibril_create_generic(errno_t (*func)(void *), void *arg, size_t stksz)
{
	fibril_t *fibril;

	fibril = fibril_alloc();
	if (fibril == NULL)
		return 0;

	fibril->stack_size = (stksz == FIBRIL_DFLT_STK_SIZE) ?
	    stack_size_get() : stksz;
	fibril->stack = as_area_create(AS_AREA_ANY, fibril->stack_size,
	    AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE | AS_AREA_GUARD |
	    AS_AREA_LATE_RESERVE, AS_AREA_UNPAGED);
	if (fibril->stack == AS_MAP_FAILED) {
		fibril_teardown(fibril);
		return 0;
	}

	fibril->func = func;
	fibril->arg = arg;

	context_create_t sctx = {
		.fn = _fibril_main,
		.stack_base = fibril->stack,
		.stack_size = fibril->stack_size,
		.tls = fibril->tcb,
	};

	context_create(&fibril->ctx, &sctx);
	return (fid_t) fibril;
}
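
/*
 * Illustrative use (a sketch, not part of this file; my_fibril_fn() is
 * hypothetical): create a fibril with the default stack size and make it
 * ready to run.
 *
 *	static errno_t my_fibril_fn(void *arg)
 *	{
 *		(void) arg;
 *		return EOK;
 *	}
 *
 *	fid_t fid = fibril_create_generic(my_fibril_fn, NULL,
 *	    FIBRIL_DFLT_STK_SIZE);
 *	if (fid != 0)
 *		fibril_start((fibril_t *) fid);
 */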
/** Delete a fibril that has never run.
 *
 * Free resources of a fibril that has been created with fibril_create()
 * but never started using fibril_start().
 *
 * @param fid Pointer to the fibril structure of the fibril to be
 *            destroyed.
 */
void fibril_destroy(fid_t fid)
{
	fibril_t *fibril = (fibril_t *) fid;

	assert(!fibril->is_running);
	assert(fibril->stack);
	as_area_destroy(fibril->stack);
	fibril_teardown(fibril);
}
static void _insert_timeout(_timeout_t *timeout)
{
	futex_assert_is_locked(&fibril_futex);
	assert(timeout);

	/* Keep timeout_list sorted by expiration time. */
	link_t *tmp = timeout_list.head.next;
	while (tmp != &timeout_list.head) {
		_timeout_t *cur = list_get_instance(tmp, _timeout_t, link);

		if (ts_gteq(&cur->expires, &timeout->expires))
			break;

		tmp = tmp->next;
	}

	list_insert_before(&timeout->link, tmp);
}
/**
 * Same as `fibril_wait_for()`, except with a timeout.
 *
 * It is guaranteed that timing out cannot cause another thread's
 * `fibril_notify()` to be lost. I.e. the function returns success if and
 * only if `fibril_notify()` was called after the last call to
 * wait/wait_timeout returned, and before the call timed out.
 *
 * @return ETIMEOUT if timed out. EOK otherwise.
 */
errno_t fibril_wait_timeout(fibril_event_t *event,
    const struct timespec *expires)
{
	assert(fibril_self()->rmutex_locks == 0);

	DPRINTF("### Fibril %p sleeping on event %p.\n", fibril_self(), event);

	if (!fibril_self()->thread_ctx) {
		fibril_self()->thread_ctx =
		    fibril_create_generic(_helper_fibril_fn, NULL, PAGE_SIZE);
		if (!fibril_self()->thread_ctx)
			return ENOMEM;
	}

	futex_lock(&fibril_futex);

	if (event->fibril == _EVENT_TRIGGERED) {
		DPRINTF("### Already triggered. Returning.\n");
		event->fibril = _EVENT_INITIAL;
		futex_unlock(&fibril_futex);
		return EOK;
	}

	assert(event->fibril == _EVENT_INITIAL);

	fibril_t *srcf = fibril_self();
	fibril_t *dstf = NULL;

	/*
	 * We cannot block here waiting for another fibril becoming
	 * ready, since that would require unlocking the fibril_futex,
	 * and that in turn would allow another thread to restore
	 * the source fibril before this thread finished switching.
	 *
	 * Instead, we switch to an internal "helper" fibril whose only
	 * job is to wait for an event, freeing the source fibril for
	 * wakeups. There is always one for each running thread.
	 */

	dstf = _ready_list_pop_nonblocking(true);
	if (!dstf) {
		// XXX: It is possible for the _ready_list_pop_nonblocking() to
		//      check for IPC, find a pending message, and trigger the
		//      event on which we are currently trying to sleep.
		if (event->fibril == _EVENT_TRIGGERED) {
			event->fibril = _EVENT_INITIAL;
			futex_unlock(&fibril_futex);
			return EOK;
		}

		dstf = srcf->thread_ctx;
		assert(dstf);
	}

	_timeout_t timeout = { 0 };
	if (expires) {
		timeout.expires = *expires;
		timeout.event = event;
		_insert_timeout(&timeout);
	}

	event->fibril = srcf;
	srcf->sleep_event = event;

	assert(event->fibril != _EVENT_INITIAL);

	_fibril_switch_to(SWITCH_FROM_BLOCKED, dstf, true);

	assert(event->fibril != srcf);
	assert(event->fibril != _EVENT_INITIAL);
	assert(event->fibril == _EVENT_TIMED_OUT ||
	    event->fibril == _EVENT_TRIGGERED);

	list_remove(&timeout.link);
	errno_t rc = (event->fibril == _EVENT_TIMED_OUT) ? ETIMEOUT : EOK;
	event->fibril = _EVENT_INITIAL;

	futex_unlock(&fibril_futex);
	_fibril_cleanup_dead();
	return rc;
}
void fibril_wait_for(fibril_event_t *event)
{
	assert(fibril_self()->rmutex_locks == 0);

	(void) fibril_wait_timeout(event, NULL);
}
/**
 * Wake up the fibril waiting for the given event.
 * Up to one wakeup is remembered if the fibril is not currently waiting.
 *
 * This function is safe for use under restricted mutex lock.
 */
void fibril_notify(fibril_event_t *event)
{
	futex_lock(&fibril_futex);
	_ready_list_push(_fibril_trigger_internal(event, _EVENT_TRIGGERED));
	futex_unlock(&fibril_futex);
}
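
/*
 * Illustrative wait/notify pairing (a sketch, not part of this file):
 *
 *	fibril_event_t done = FIBRIL_EVENT_INIT;
 *
 *	// Consumer fibril:
 *	fibril_wait_for(&done);   // blocks until notified
 *
 *	// Producer fibril (or another thread):
 *	fibril_notify(&done);     // wakes the consumer, or remembers one
 *	                          // wakeup if nobody is waiting yet
 */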
/** Start a fibril that has not been running yet. */
void fibril_start(fibril_t *fibril)
{
	futex_lock(&fibril_futex);
	assert(!fibril->is_running);
	fibril->is_running = true;

	if (!link_in_use(&fibril->all_link))
		list_append(&fibril->all_link, &fibril_list);

	_ready_list_push(fibril);

	futex_unlock(&fibril_futex);
}
/** Start a fibril that has not been running yet. (obsolete) */
void fibril_add_ready(fibril_t *fibril)
{
	fibril_start(fibril);
}
/** @return the currently running fibril. */
fibril_t *fibril_self(void)
{
	assert(__tcb_is_set());
	tcb_t *tcb = __tcb_get();
	assert(tcb->fibril_data);
	return tcb->fibril_data;
}
/**
 * Obsolete, use fibril_self().
 *
 * @return ID of the currently running fibril.
 */
fid_t fibril_get_id(void)
{
	return (fid_t) fibril_self();
}
/**
 * Switch to another fibril, if one is ready to run.
 * Has no effect on a heavy fibril.
 */
void fibril_yield(void)
{
	if (fibril_self()->rmutex_locks > 0)
		return;

	fibril_t *f = _ready_list_pop_nonblocking(false);
	if (f)
		_fibril_switch_to(SWITCH_FROM_YIELD, f, false);
}
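
/*
 * Illustrative use (a sketch; work_remains() and do_work_unit() are
 * hypothetical): a long-running computation can stay cooperative by
 * yielding between units of work.
 *
 *	while (work_remains()) {
 *		do_work_unit();
 *		fibril_yield();
 *	}
 */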
static void _runner_fn(void *arg)
{
	_helper_fibril_fn(arg);
}
/**
 * Spawn a given number of runners (i.e. OS threads) immediately, and
 * unconditionally. This is meant to be used for tests and debugging.
 * Regular programs should just use `fibril_enable_multithreaded()`.
 *
 * @param n Number of runners to spawn.
 * @return Number of runners successfully spawned.
 */
int fibril_test_spawn_runners(int n)
{
	assert(fibril_self()->rmutex_locks == 0);

	if (!multithreaded) {
		_ready_debug_check();
		atomic_set(&ready_semaphore.val, ready_st_count);
		multithreaded = true;
	}

	errno_t rc;

	for (int i = 0; i < n; i++) {
		thread_id_t tid;
		rc = thread_create(_runner_fn, NULL, "fibril runner", &tid);
		if (rc != EOK)
			return i;
		thread_detach(tid);
	}

	return n;
}
/**
 * Opt-in to have more than one runner thread.
 *
 * Currently, a task only ever runs in one thread because multithreading
 * might break some existing code.
 *
 * Eventually, the number of runner threads for a given task should become
 * configurable in the environment, and this function will become a no-op.
 */
void fibril_enable_multithreaded(void)
{
	// TODO: Implement better.
	//       For now, 4 total runners is a sensible default.
	if (!multithreaded) {
		fibril_test_spawn_runners(3);
	}
}
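
/*
 * Illustrative use (a sketch): a program opts in once, early in main(),
 * before it spawns the fibrils that should run in parallel.
 *
 *	int main(int argc, char *argv[])
 *	{
 *		fibril_enable_multithreaded();
 *		// ... create and start fibrils ...
 *	}
 */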
void fibril_detach(fid_t f)
{
	// TODO: Currently all fibrils are detached by default, but they
	//       won't always be. Code that explicitly spawns fibrils with
	//       limited lifetime should call this function.
	(void) f;
}
/**
 * Exit a fibril. Never returns.
 *
 * @param retval Value to return from fibril_join() called on this fibril.
 */
_Noreturn void fibril_exit(long retval)
{
	// TODO: implement fibril_join() and remember retval
	(void) retval;

	fibril_t *f = _ready_list_pop_nonblocking(false);
	if (!f)
		f = fibril_self()->thread_ctx;

	_fibril_switch_to(SWITCH_FROM_DEAD, f, false);
	__builtin_unreachable();
}
void __fibrils_init(void)
{
	/*
	 * We allow a fixed, small amount of parallelism for IPC reads, but
	 * since IPC is currently serialized in kernel, there's not much
	 * we can get from more threads reading messages.
	 */

#define IPC_BUFFER_COUNT 1024
	static _ipc_buffer_t buffers[IPC_BUFFER_COUNT];

	for (int i = 0; i < IPC_BUFFER_COUNT; i++) {
		list_append(&buffers[i].link, &ipc_buffer_free_list);
	}
}
void fibril_usleep(usec_t timeout)
{
	struct timespec expires;
	getuptime(&expires);
	ts_add_diff(&expires, USEC2NSEC(timeout));

	fibril_event_t event = FIBRIL_EVENT_INIT;
	fibril_wait_timeout(&event, &expires);
}
void fibril_sleep(sec_t sec)
{
	struct timespec expires;
	getuptime(&expires);
	expires.tv_sec += sec;

	fibril_event_t event = FIBRIL_EVENT_INIT;
	fibril_wait_timeout(&event, &expires);
}
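
/*
 * Both sleep functions wait on a fresh event that nobody ever notifies,
 * so the wait can only end by timing out at the computed deadline. The
 * same pattern works for any absolute-deadline wait (illustrative):
 *
 *	struct timespec deadline;
 *	getuptime(&deadline);
 *	ts_add_diff(&deadline, USEC2NSEC(1000));
 *	fibril_event_t ev = FIBRIL_EVENT_INIT;
 *	fibril_wait_timeout(&ev, &deadline);  // returns ETIMEOUT after ~1 ms
 */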
void fibril_ipc_poke(void)
{
	DPRINTF("Poking.\n");
	/* Wakeup one thread sleeping in SYS_IPC_WAIT. */
	ipc_poke();
}
errno_t fibril_ipc_wait(ipc_call_t *call, const struct timespec *expires)
{
	return _wait_ipc(call, expires);
}