/*
 * Copyright (c) 2009 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
35 #include <fibril_synch.h>
43 #include <stacktrace.h>
46 #include "private/async.h"
48 static void optimize_execution_power(void)
51 * When waking up a worker fibril previously blocked in fibril
52 * synchronization, chances are that there is an idle manager fibril
53 * waiting for IPC, that could start executing the awakened worker
54 * fibril right away. We try to detect this and bring the manager
55 * fibril back to fruitful work.
57 if (atomic_get(&threads_in_ipc_wait
) > 0)
61 static void print_deadlock(fibril_owner_info_t
*oi
)
63 fibril_t
*f
= (fibril_t
*) fibril_get_id();
65 printf("Deadlock detected.\n");
68 printf("Fibril %p waits for primitive %p.\n", f
, oi
);
70 while (oi
&& oi
->owned_by
) {
71 printf("Primitive %p is owned by fibril %p.\n",
73 if (oi
->owned_by
== f
)
75 stacktrace_print_fp_pc(context_get_fp(&oi
->owned_by
->ctx
),
76 oi
->owned_by
->ctx
.pc
);
77 printf("Fibril %p waits for primitive %p.\n",
78 oi
->owned_by
, oi
->owned_by
->waits_for
);
79 oi
= oi
->owned_by
->waits_for
;
84 static void check_for_deadlock(fibril_owner_info_t
*oi
)
86 while (oi
&& oi
->owned_by
) {
87 if (oi
->owned_by
== (fibril_t
*) fibril_get_id()) {
91 oi
= oi
->owned_by
->waits_for
;
96 void fibril_mutex_initialize(fibril_mutex_t
*fm
)
98 fm
->oi
.owned_by
= NULL
;
100 list_initialize(&fm
->waiters
);
103 void fibril_mutex_lock(fibril_mutex_t
*fm
)
105 fibril_t
*f
= (fibril_t
*) fibril_get_id();
107 futex_down(&async_futex
);
108 if (fm
->counter
-- <= 0) {
111 awaiter_initialize(&wdata
);
112 wdata
.fid
= fibril_get_id();
113 wdata
.wu_event
.inlist
= true;
114 list_append(&wdata
.wu_event
.link
, &fm
->waiters
);
115 check_for_deadlock(&fm
->oi
);
116 f
->waits_for
= &fm
->oi
;
117 fibril_switch(FIBRIL_TO_MANAGER
);
120 futex_up(&async_futex
);
124 bool fibril_mutex_trylock(fibril_mutex_t
*fm
)
128 futex_down(&async_futex
);
129 if (fm
->counter
> 0) {
131 fm
->oi
.owned_by
= (fibril_t
*) fibril_get_id();
134 futex_up(&async_futex
);
139 static void _fibril_mutex_unlock_unsafe(fibril_mutex_t
*fm
)
141 if (fm
->counter
++ < 0) {
146 tmp
= list_first(&fm
->waiters
);
148 wdp
= list_get_instance(tmp
, awaiter_t
, wu_event
.link
);
150 wdp
->wu_event
.inlist
= false;
152 f
= (fibril_t
*) wdp
->fid
;
156 list_remove(&wdp
->wu_event
.link
);
157 fibril_add_ready(wdp
->fid
);
158 optimize_execution_power();
160 fm
->oi
.owned_by
= NULL
;
164 void fibril_mutex_unlock(fibril_mutex_t
*fm
)
166 assert(fibril_mutex_is_locked(fm
));
167 futex_down(&async_futex
);
168 _fibril_mutex_unlock_unsafe(fm
);
169 futex_up(&async_futex
);
172 bool fibril_mutex_is_locked(fibril_mutex_t
*fm
)
176 futex_down(&async_futex
);
177 if (fm
->counter
<= 0)
179 futex_up(&async_futex
);
184 void fibril_rwlock_initialize(fibril_rwlock_t
*frw
)
186 frw
->oi
.owned_by
= NULL
;
189 list_initialize(&frw
->waiters
);
192 void fibril_rwlock_read_lock(fibril_rwlock_t
*frw
)
194 fibril_t
*f
= (fibril_t
*) fibril_get_id();
196 futex_down(&async_futex
);
200 awaiter_initialize(&wdata
);
201 wdata
.fid
= (fid_t
) f
;
202 wdata
.wu_event
.inlist
= true;
203 f
->flags
&= ~FIBRIL_WRITER
;
204 list_append(&wdata
.wu_event
.link
, &frw
->waiters
);
205 check_for_deadlock(&frw
->oi
);
206 f
->waits_for
= &frw
->oi
;
207 fibril_switch(FIBRIL_TO_MANAGER
);
209 /* Consider the first reader the owner. */
210 if (frw
->readers
++ == 0)
211 frw
->oi
.owned_by
= f
;
212 futex_up(&async_futex
);
216 void fibril_rwlock_write_lock(fibril_rwlock_t
*frw
)
218 fibril_t
*f
= (fibril_t
*) fibril_get_id();
220 futex_down(&async_futex
);
221 if (frw
->writers
|| frw
->readers
) {
224 awaiter_initialize(&wdata
);
225 wdata
.fid
= (fid_t
) f
;
226 wdata
.wu_event
.inlist
= true;
227 f
->flags
|= FIBRIL_WRITER
;
228 list_append(&wdata
.wu_event
.link
, &frw
->waiters
);
229 check_for_deadlock(&frw
->oi
);
230 f
->waits_for
= &frw
->oi
;
231 fibril_switch(FIBRIL_TO_MANAGER
);
233 frw
->oi
.owned_by
= f
;
235 futex_up(&async_futex
);
239 static void _fibril_rwlock_common_unlock(fibril_rwlock_t
*frw
)
241 futex_down(&async_futex
);
243 if (--frw
->readers
) {
244 if (frw
->oi
.owned_by
== (fibril_t
*) fibril_get_id()) {
246 * If this reader firbril was considered the
247 * owner of this rwlock, clear the ownership
248 * information even if there are still more
251 * This is the limitation of the detection
252 * mechanism rooted in the fact that tracking
253 * all readers would require dynamically
254 * allocated memory for keeping linkage info.
256 frw
->oi
.owned_by
= NULL
;
264 assert(!frw
->readers
&& !frw
->writers
);
266 frw
->oi
.owned_by
= NULL
;
268 while (!list_empty(&frw
->waiters
)) {
269 link_t
*tmp
= list_first(&frw
->waiters
);
273 wdp
= list_get_instance(tmp
, awaiter_t
, wu_event
.link
);
274 f
= (fibril_t
*) wdp
->fid
;
278 if (f
->flags
& FIBRIL_WRITER
) {
282 wdp
->wu_event
.inlist
= false;
283 list_remove(&wdp
->wu_event
.link
);
284 fibril_add_ready(wdp
->fid
);
286 frw
->oi
.owned_by
= f
;
287 optimize_execution_power();
291 wdp
->wu_event
.inlist
= false;
292 list_remove(&wdp
->wu_event
.link
);
293 fibril_add_ready(wdp
->fid
);
294 if (frw
->readers
++ == 0) {
295 /* Consider the first reader the owner. */
296 frw
->oi
.owned_by
= f
;
298 optimize_execution_power();
302 futex_up(&async_futex
);
305 void fibril_rwlock_read_unlock(fibril_rwlock_t
*frw
)
307 assert(fibril_rwlock_is_read_locked(frw
));
308 _fibril_rwlock_common_unlock(frw
);
311 void fibril_rwlock_write_unlock(fibril_rwlock_t
*frw
)
313 assert(fibril_rwlock_is_write_locked(frw
));
314 _fibril_rwlock_common_unlock(frw
);
317 bool fibril_rwlock_is_read_locked(fibril_rwlock_t
*frw
)
321 futex_down(&async_futex
);
324 futex_up(&async_futex
);
329 bool fibril_rwlock_is_write_locked(fibril_rwlock_t
*frw
)
333 futex_down(&async_futex
);
335 assert(frw
->writers
== 1);
338 futex_up(&async_futex
);
343 bool fibril_rwlock_is_locked(fibril_rwlock_t
*frw
)
345 return fibril_rwlock_is_read_locked(frw
) ||
346 fibril_rwlock_is_write_locked(frw
);
349 void fibril_condvar_initialize(fibril_condvar_t
*fcv
)
351 list_initialize(&fcv
->waiters
);
355 fibril_condvar_wait_timeout(fibril_condvar_t
*fcv
, fibril_mutex_t
*fm
,
360 assert(fibril_mutex_is_locked(fm
));
365 awaiter_initialize(&wdata
);
366 wdata
.fid
= fibril_get_id();
367 wdata
.to_event
.inlist
= timeout
> 0;
368 wdata
.wu_event
.inlist
= true;
370 futex_down(&async_futex
);
372 getuptime(&wdata
.to_event
.expires
);
373 tv_add_diff(&wdata
.to_event
.expires
, timeout
);
374 async_insert_timeout(&wdata
);
376 list_append(&wdata
.wu_event
.link
, &fcv
->waiters
);
377 _fibril_mutex_unlock_unsafe(fm
);
378 fibril_switch(FIBRIL_TO_MANAGER
);
379 fibril_mutex_lock(fm
);
381 /* async_futex not held after fibril_switch() */
382 futex_down(&async_futex
);
383 if (wdata
.to_event
.inlist
)
384 list_remove(&wdata
.to_event
.link
);
385 if (wdata
.wu_event
.inlist
)
386 list_remove(&wdata
.wu_event
.link
);
387 futex_up(&async_futex
);
389 return wdata
.to_event
.occurred
? ETIMEOUT
: EOK
;
392 void fibril_condvar_wait(fibril_condvar_t
*fcv
, fibril_mutex_t
*fm
)
396 rc
= fibril_condvar_wait_timeout(fcv
, fm
, 0);
400 static void _fibril_condvar_wakeup_common(fibril_condvar_t
*fcv
, bool once
)
405 futex_down(&async_futex
);
406 while (!list_empty(&fcv
->waiters
)) {
407 tmp
= list_first(&fcv
->waiters
);
408 wdp
= list_get_instance(tmp
, awaiter_t
, wu_event
.link
);
409 list_remove(&wdp
->wu_event
.link
);
410 wdp
->wu_event
.inlist
= false;
413 fibril_add_ready(wdp
->fid
);
414 optimize_execution_power();
419 futex_up(&async_futex
);
422 void fibril_condvar_signal(fibril_condvar_t
*fcv
)
424 _fibril_condvar_wakeup_common(fcv
, true);
427 void fibril_condvar_broadcast(fibril_condvar_t
*fcv
)
429 _fibril_condvar_wakeup_common(fcv
, false);
436 static errno_t
fibril_timer_func(void *arg
)
438 fibril_timer_t
*timer
= (fibril_timer_t
*) arg
;
441 fibril_mutex_lock(timer
->lockp
);
443 while (timer
->state
!= fts_cleanup
) {
444 switch (timer
->state
) {
447 fibril_condvar_wait(&timer
->cv
, timer
->lockp
);
450 rc
= fibril_condvar_wait_timeout(&timer
->cv
,
451 timer
->lockp
, timer
->delay
);
452 if (rc
== ETIMEOUT
&& timer
->state
== fts_active
) {
453 timer
->state
= fts_fired
;
454 timer
->handler_fid
= fibril_get_id();
455 fibril_mutex_unlock(timer
->lockp
);
456 timer
->fun(timer
->arg
);
457 fibril_mutex_lock(timer
->lockp
);
458 timer
->handler_fid
= 0;
468 /* Acknowledge timer fibril has finished cleanup. */
469 timer
->state
= fts_clean
;
470 fibril_condvar_broadcast(&timer
->cv
);
471 fibril_mutex_unlock(timer
->lockp
);
476 /** Create new timer.
478 * @return New timer on success, @c NULL if out of memory.
480 fibril_timer_t
*fibril_timer_create(fibril_mutex_t
*lock
)
483 fibril_timer_t
*timer
;
485 timer
= calloc(1, sizeof(fibril_timer_t
));
489 fid
= fibril_create(fibril_timer_func
, (void *) timer
);
495 fibril_mutex_initialize(&timer
->lock
);
496 fibril_condvar_initialize(&timer
->cv
);
499 timer
->state
= fts_not_set
;
500 timer
->lockp
= (lock
!= NULL
) ? lock
: &timer
->lock
;
502 fibril_add_ready(fid
);
508 * @param timer Timer, must not be active or accessed by other threads.
510 void fibril_timer_destroy(fibril_timer_t
*timer
)
512 fibril_mutex_lock(timer
->lockp
);
513 assert(timer
->state
== fts_not_set
|| timer
->state
== fts_fired
);
515 /* Request timer fibril to terminate. */
516 timer
->state
= fts_cleanup
;
517 fibril_condvar_broadcast(&timer
->cv
);
519 /* Wait for timer fibril to terminate */
520 while (timer
->state
!= fts_clean
)
521 fibril_condvar_wait(&timer
->cv
, timer
->lockp
);
522 fibril_mutex_unlock(timer
->lockp
);
529 * Set timer to execute a callback function after the specified
533 * @param delay Delay in microseconds
534 * @param fun Callback function
535 * @param arg Argument for @a fun
537 void fibril_timer_set(fibril_timer_t
*timer
, suseconds_t delay
,
538 fibril_timer_fun_t fun
, void *arg
)
540 fibril_mutex_lock(timer
->lockp
);
541 fibril_timer_set_locked(timer
, delay
, fun
, arg
);
542 fibril_mutex_unlock(timer
->lockp
);
545 /** Set locked timer.
547 * Set timer to execute a callback function after the specified
548 * interval. Must be called when the timer is locked.
551 * @param delay Delay in microseconds
552 * @param fun Callback function
553 * @param arg Argument for @a fun
555 void fibril_timer_set_locked(fibril_timer_t
*timer
, suseconds_t delay
,
556 fibril_timer_fun_t fun
, void *arg
)
558 assert(fibril_mutex_is_locked(timer
->lockp
));
559 assert(timer
->state
== fts_not_set
|| timer
->state
== fts_fired
);
560 timer
->state
= fts_active
;
561 timer
->delay
= delay
;
564 fibril_condvar_broadcast(&timer
->cv
);
569 * Clears (cancels) timer and returns last state of the timer.
570 * This can be one of:
571 * - fts_not_set If the timer has not been set or has been cleared
572 * - fts_active Timer was set but did not fire
573 * - fts_fired Timer fired
576 * @return Last timer state
578 fibril_timer_state_t
fibril_timer_clear(fibril_timer_t
*timer
)
580 fibril_timer_state_t old_state
;
582 fibril_mutex_lock(timer
->lockp
);
583 old_state
= fibril_timer_clear_locked(timer
);
584 fibril_mutex_unlock(timer
->lockp
);
589 /** Clear locked timer.
591 * Clears (cancels) timer and returns last state of the timer.
592 * This can be one of:
593 * - fts_not_set If the timer has not been set or has been cleared
594 * - fts_active Timer was set but did not fire
595 * - fts_fired Timer fired
596 * Must be called when the timer is locked.
599 * @return Last timer state
601 fibril_timer_state_t
fibril_timer_clear_locked(fibril_timer_t
*timer
)
603 fibril_timer_state_t old_state
;
605 assert(fibril_mutex_is_locked(timer
->lockp
));
607 while (timer
->handler_fid
!= 0) {
608 if (timer
->handler_fid
== fibril_get_id()) {
609 printf("Deadlock detected.\n");
611 printf("Fibril %zx is trying to clear timer %p from "
612 "inside its handler %p.\n",
613 fibril_get_id(), timer
, timer
->fun
);
617 fibril_condvar_wait(&timer
->cv
, timer
->lockp
);
620 old_state
= timer
->state
;
621 timer
->state
= fts_not_set
;
626 fibril_condvar_broadcast(&timer
->cv
);