Use `errno_t` in all uspace and kernel code.
[helenos.git] / uspace / lib / c / generic / fibril_synch.c
blob6c40a82aa46f806ab63c53d23c373cbbe3394d40
1 /*
2 * Copyright (c) 2009 Jakub Jermar
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** @addtogroup libc
 * @{
 */
/** @file
 */
35 #include <fibril_synch.h>
36 #include <fibril.h>
37 #include <async.h>
38 #include <adt/list.h>
39 #include <futex.h>
40 #include <sys/time.h>
41 #include <errno.h>
42 #include <assert.h>
43 #include <stacktrace.h>
44 #include <stdlib.h>
45 #include <stdio.h>
46 #include "private/async.h"
48 static void optimize_execution_power(void)
51 * When waking up a worker fibril previously blocked in fibril
52 * synchronization, chances are that there is an idle manager fibril
53 * waiting for IPC, that could start executing the awakened worker
54 * fibril right away. We try to detect this and bring the manager
55 * fibril back to fruitful work.
57 if (atomic_get(&threads_in_ipc_wait) > 0)
58 async_poke();
61 static void print_deadlock(fibril_owner_info_t *oi)
63 fibril_t *f = (fibril_t *) fibril_get_id();
65 printf("Deadlock detected.\n");
66 stacktrace_print();
68 printf("Fibril %p waits for primitive %p.\n", f, oi);
70 while (oi && oi->owned_by) {
71 printf("Primitive %p is owned by fibril %p.\n",
72 oi, oi->owned_by);
73 if (oi->owned_by == f)
74 break;
75 stacktrace_print_fp_pc(context_get_fp(&oi->owned_by->ctx),
76 oi->owned_by->ctx.pc);
77 printf("Fibril %p waits for primitive %p.\n",
78 oi->owned_by, oi->owned_by->waits_for);
79 oi = oi->owned_by->waits_for;
84 static void check_for_deadlock(fibril_owner_info_t *oi)
86 while (oi && oi->owned_by) {
87 if (oi->owned_by == (fibril_t *) fibril_get_id()) {
88 print_deadlock(oi);
89 abort();
91 oi = oi->owned_by->waits_for;
96 void fibril_mutex_initialize(fibril_mutex_t *fm)
98 fm->oi.owned_by = NULL;
99 fm->counter = 1;
100 list_initialize(&fm->waiters);
103 void fibril_mutex_lock(fibril_mutex_t *fm)
105 fibril_t *f = (fibril_t *) fibril_get_id();
107 futex_down(&async_futex);
108 if (fm->counter-- <= 0) {
109 awaiter_t wdata;
111 awaiter_initialize(&wdata);
112 wdata.fid = fibril_get_id();
113 wdata.wu_event.inlist = true;
114 list_append(&wdata.wu_event.link, &fm->waiters);
115 check_for_deadlock(&fm->oi);
116 f->waits_for = &fm->oi;
117 fibril_switch(FIBRIL_TO_MANAGER);
118 } else {
119 fm->oi.owned_by = f;
120 futex_up(&async_futex);
124 bool fibril_mutex_trylock(fibril_mutex_t *fm)
126 bool locked = false;
128 futex_down(&async_futex);
129 if (fm->counter > 0) {
130 fm->counter--;
131 fm->oi.owned_by = (fibril_t *) fibril_get_id();
132 locked = true;
134 futex_up(&async_futex);
136 return locked;
139 static void _fibril_mutex_unlock_unsafe(fibril_mutex_t *fm)
141 if (fm->counter++ < 0) {
142 link_t *tmp;
143 awaiter_t *wdp;
144 fibril_t *f;
146 tmp = list_first(&fm->waiters);
147 assert(tmp != NULL);
148 wdp = list_get_instance(tmp, awaiter_t, wu_event.link);
149 wdp->active = true;
150 wdp->wu_event.inlist = false;
152 f = (fibril_t *) wdp->fid;
153 fm->oi.owned_by = f;
154 f->waits_for = NULL;
156 list_remove(&wdp->wu_event.link);
157 fibril_add_ready(wdp->fid);
158 optimize_execution_power();
159 } else {
160 fm->oi.owned_by = NULL;
164 void fibril_mutex_unlock(fibril_mutex_t *fm)
166 assert(fibril_mutex_is_locked(fm));
167 futex_down(&async_futex);
168 _fibril_mutex_unlock_unsafe(fm);
169 futex_up(&async_futex);
172 bool fibril_mutex_is_locked(fibril_mutex_t *fm)
174 bool locked = false;
176 futex_down(&async_futex);
177 if (fm->counter <= 0)
178 locked = true;
179 futex_up(&async_futex);
181 return locked;
184 void fibril_rwlock_initialize(fibril_rwlock_t *frw)
186 frw->oi.owned_by = NULL;
187 frw->writers = 0;
188 frw->readers = 0;
189 list_initialize(&frw->waiters);
192 void fibril_rwlock_read_lock(fibril_rwlock_t *frw)
194 fibril_t *f = (fibril_t *) fibril_get_id();
196 futex_down(&async_futex);
197 if (frw->writers) {
198 awaiter_t wdata;
200 awaiter_initialize(&wdata);
201 wdata.fid = (fid_t) f;
202 wdata.wu_event.inlist = true;
203 f->flags &= ~FIBRIL_WRITER;
204 list_append(&wdata.wu_event.link, &frw->waiters);
205 check_for_deadlock(&frw->oi);
206 f->waits_for = &frw->oi;
207 fibril_switch(FIBRIL_TO_MANAGER);
208 } else {
209 /* Consider the first reader the owner. */
210 if (frw->readers++ == 0)
211 frw->oi.owned_by = f;
212 futex_up(&async_futex);
216 void fibril_rwlock_write_lock(fibril_rwlock_t *frw)
218 fibril_t *f = (fibril_t *) fibril_get_id();
220 futex_down(&async_futex);
221 if (frw->writers || frw->readers) {
222 awaiter_t wdata;
224 awaiter_initialize(&wdata);
225 wdata.fid = (fid_t) f;
226 wdata.wu_event.inlist = true;
227 f->flags |= FIBRIL_WRITER;
228 list_append(&wdata.wu_event.link, &frw->waiters);
229 check_for_deadlock(&frw->oi);
230 f->waits_for = &frw->oi;
231 fibril_switch(FIBRIL_TO_MANAGER);
232 } else {
233 frw->oi.owned_by = f;
234 frw->writers++;
235 futex_up(&async_futex);
239 static void _fibril_rwlock_common_unlock(fibril_rwlock_t *frw)
241 futex_down(&async_futex);
242 if (frw->readers) {
243 if (--frw->readers) {
244 if (frw->oi.owned_by == (fibril_t *) fibril_get_id()) {
246 * If this reader firbril was considered the
247 * owner of this rwlock, clear the ownership
248 * information even if there are still more
249 * readers.
251 * This is the limitation of the detection
252 * mechanism rooted in the fact that tracking
253 * all readers would require dynamically
254 * allocated memory for keeping linkage info.
256 frw->oi.owned_by = NULL;
258 goto out;
260 } else {
261 frw->writers--;
264 assert(!frw->readers && !frw->writers);
266 frw->oi.owned_by = NULL;
268 while (!list_empty(&frw->waiters)) {
269 link_t *tmp = list_first(&frw->waiters);
270 awaiter_t *wdp;
271 fibril_t *f;
273 wdp = list_get_instance(tmp, awaiter_t, wu_event.link);
274 f = (fibril_t *) wdp->fid;
276 f->waits_for = NULL;
278 if (f->flags & FIBRIL_WRITER) {
279 if (frw->readers)
280 break;
281 wdp->active = true;
282 wdp->wu_event.inlist = false;
283 list_remove(&wdp->wu_event.link);
284 fibril_add_ready(wdp->fid);
285 frw->writers++;
286 frw->oi.owned_by = f;
287 optimize_execution_power();
288 break;
289 } else {
290 wdp->active = true;
291 wdp->wu_event.inlist = false;
292 list_remove(&wdp->wu_event.link);
293 fibril_add_ready(wdp->fid);
294 if (frw->readers++ == 0) {
295 /* Consider the first reader the owner. */
296 frw->oi.owned_by = f;
298 optimize_execution_power();
301 out:
302 futex_up(&async_futex);
305 void fibril_rwlock_read_unlock(fibril_rwlock_t *frw)
307 assert(fibril_rwlock_is_read_locked(frw));
308 _fibril_rwlock_common_unlock(frw);
311 void fibril_rwlock_write_unlock(fibril_rwlock_t *frw)
313 assert(fibril_rwlock_is_write_locked(frw));
314 _fibril_rwlock_common_unlock(frw);
317 bool fibril_rwlock_is_read_locked(fibril_rwlock_t *frw)
319 bool locked = false;
321 futex_down(&async_futex);
322 if (frw->readers)
323 locked = true;
324 futex_up(&async_futex);
326 return locked;
329 bool fibril_rwlock_is_write_locked(fibril_rwlock_t *frw)
331 bool locked = false;
333 futex_down(&async_futex);
334 if (frw->writers) {
335 assert(frw->writers == 1);
336 locked = true;
338 futex_up(&async_futex);
340 return locked;
343 bool fibril_rwlock_is_locked(fibril_rwlock_t *frw)
345 return fibril_rwlock_is_read_locked(frw) ||
346 fibril_rwlock_is_write_locked(frw);
349 void fibril_condvar_initialize(fibril_condvar_t *fcv)
351 list_initialize(&fcv->waiters);
354 errno_t
355 fibril_condvar_wait_timeout(fibril_condvar_t *fcv, fibril_mutex_t *fm,
356 suseconds_t timeout)
358 awaiter_t wdata;
360 assert(fibril_mutex_is_locked(fm));
362 if (timeout < 0)
363 return ETIMEOUT;
365 awaiter_initialize(&wdata);
366 wdata.fid = fibril_get_id();
367 wdata.to_event.inlist = timeout > 0;
368 wdata.wu_event.inlist = true;
370 futex_down(&async_futex);
371 if (timeout) {
372 getuptime(&wdata.to_event.expires);
373 tv_add_diff(&wdata.to_event.expires, timeout);
374 async_insert_timeout(&wdata);
376 list_append(&wdata.wu_event.link, &fcv->waiters);
377 _fibril_mutex_unlock_unsafe(fm);
378 fibril_switch(FIBRIL_TO_MANAGER);
379 fibril_mutex_lock(fm);
381 /* async_futex not held after fibril_switch() */
382 futex_down(&async_futex);
383 if (wdata.to_event.inlist)
384 list_remove(&wdata.to_event.link);
385 if (wdata.wu_event.inlist)
386 list_remove(&wdata.wu_event.link);
387 futex_up(&async_futex);
389 return wdata.to_event.occurred ? ETIMEOUT : EOK;
392 void fibril_condvar_wait(fibril_condvar_t *fcv, fibril_mutex_t *fm)
394 errno_t rc;
396 rc = fibril_condvar_wait_timeout(fcv, fm, 0);
397 assert(rc == EOK);
400 static void _fibril_condvar_wakeup_common(fibril_condvar_t *fcv, bool once)
402 link_t *tmp;
403 awaiter_t *wdp;
405 futex_down(&async_futex);
406 while (!list_empty(&fcv->waiters)) {
407 tmp = list_first(&fcv->waiters);
408 wdp = list_get_instance(tmp, awaiter_t, wu_event.link);
409 list_remove(&wdp->wu_event.link);
410 wdp->wu_event.inlist = false;
411 if (!wdp->active) {
412 wdp->active = true;
413 fibril_add_ready(wdp->fid);
414 optimize_execution_power();
415 if (once)
416 break;
419 futex_up(&async_futex);
422 void fibril_condvar_signal(fibril_condvar_t *fcv)
424 _fibril_condvar_wakeup_common(fcv, true);
427 void fibril_condvar_broadcast(fibril_condvar_t *fcv)
429 _fibril_condvar_wakeup_common(fcv, false);
432 /** Timer fibril.
434 * @param arg Timer
436 static errno_t fibril_timer_func(void *arg)
438 fibril_timer_t *timer = (fibril_timer_t *) arg;
439 errno_t rc;
441 fibril_mutex_lock(timer->lockp);
443 while (timer->state != fts_cleanup) {
444 switch (timer->state) {
445 case fts_not_set:
446 case fts_fired:
447 fibril_condvar_wait(&timer->cv, timer->lockp);
448 break;
449 case fts_active:
450 rc = fibril_condvar_wait_timeout(&timer->cv,
451 timer->lockp, timer->delay);
452 if (rc == ETIMEOUT && timer->state == fts_active) {
453 timer->state = fts_fired;
454 timer->handler_fid = fibril_get_id();
455 fibril_mutex_unlock(timer->lockp);
456 timer->fun(timer->arg);
457 fibril_mutex_lock(timer->lockp);
458 timer->handler_fid = 0;
460 break;
461 case fts_cleanup:
462 case fts_clean:
463 assert(false);
464 break;
468 /* Acknowledge timer fibril has finished cleanup. */
469 timer->state = fts_clean;
470 fibril_condvar_broadcast(&timer->cv);
471 fibril_mutex_unlock(timer->lockp);
473 return 0;
476 /** Create new timer.
478 * @return New timer on success, @c NULL if out of memory.
480 fibril_timer_t *fibril_timer_create(fibril_mutex_t *lock)
482 fid_t fid;
483 fibril_timer_t *timer;
485 timer = calloc(1, sizeof(fibril_timer_t));
486 if (timer == NULL)
487 return NULL;
489 fid = fibril_create(fibril_timer_func, (void *) timer);
490 if (fid == 0) {
491 free(timer);
492 return NULL;
495 fibril_mutex_initialize(&timer->lock);
496 fibril_condvar_initialize(&timer->cv);
498 timer->fibril = fid;
499 timer->state = fts_not_set;
500 timer->lockp = (lock != NULL) ? lock : &timer->lock;
502 fibril_add_ready(fid);
503 return timer;
506 /** Destroy timer.
508 * @param timer Timer, must not be active or accessed by other threads.
510 void fibril_timer_destroy(fibril_timer_t *timer)
512 fibril_mutex_lock(timer->lockp);
513 assert(timer->state == fts_not_set || timer->state == fts_fired);
515 /* Request timer fibril to terminate. */
516 timer->state = fts_cleanup;
517 fibril_condvar_broadcast(&timer->cv);
519 /* Wait for timer fibril to terminate */
520 while (timer->state != fts_clean)
521 fibril_condvar_wait(&timer->cv, timer->lockp);
522 fibril_mutex_unlock(timer->lockp);
524 free(timer);
527 /** Set timer.
529 * Set timer to execute a callback function after the specified
530 * interval.
532 * @param timer Timer
533 * @param delay Delay in microseconds
534 * @param fun Callback function
535 * @param arg Argument for @a fun
537 void fibril_timer_set(fibril_timer_t *timer, suseconds_t delay,
538 fibril_timer_fun_t fun, void *arg)
540 fibril_mutex_lock(timer->lockp);
541 fibril_timer_set_locked(timer, delay, fun, arg);
542 fibril_mutex_unlock(timer->lockp);
545 /** Set locked timer.
547 * Set timer to execute a callback function after the specified
548 * interval. Must be called when the timer is locked.
550 * @param timer Timer
551 * @param delay Delay in microseconds
552 * @param fun Callback function
553 * @param arg Argument for @a fun
555 void fibril_timer_set_locked(fibril_timer_t *timer, suseconds_t delay,
556 fibril_timer_fun_t fun, void *arg)
558 assert(fibril_mutex_is_locked(timer->lockp));
559 assert(timer->state == fts_not_set || timer->state == fts_fired);
560 timer->state = fts_active;
561 timer->delay = delay;
562 timer->fun = fun;
563 timer->arg = arg;
564 fibril_condvar_broadcast(&timer->cv);
567 /** Clear timer.
569 * Clears (cancels) timer and returns last state of the timer.
570 * This can be one of:
571 * - fts_not_set If the timer has not been set or has been cleared
572 * - fts_active Timer was set but did not fire
573 * - fts_fired Timer fired
575 * @param timer Timer
576 * @return Last timer state
578 fibril_timer_state_t fibril_timer_clear(fibril_timer_t *timer)
580 fibril_timer_state_t old_state;
582 fibril_mutex_lock(timer->lockp);
583 old_state = fibril_timer_clear_locked(timer);
584 fibril_mutex_unlock(timer->lockp);
586 return old_state;
589 /** Clear locked timer.
591 * Clears (cancels) timer and returns last state of the timer.
592 * This can be one of:
593 * - fts_not_set If the timer has not been set or has been cleared
594 * - fts_active Timer was set but did not fire
595 * - fts_fired Timer fired
596 * Must be called when the timer is locked.
598 * @param timer Timer
599 * @return Last timer state
601 fibril_timer_state_t fibril_timer_clear_locked(fibril_timer_t *timer)
603 fibril_timer_state_t old_state;
605 assert(fibril_mutex_is_locked(timer->lockp));
607 while (timer->handler_fid != 0) {
608 if (timer->handler_fid == fibril_get_id()) {
609 printf("Deadlock detected.\n");
610 stacktrace_print();
611 printf("Fibril %zx is trying to clear timer %p from "
612 "inside its handler %p.\n",
613 fibril_get_id(), timer, timer->fun);
614 abort();
617 fibril_condvar_wait(&timer->cv, timer->lockp);
620 old_state = timer->state;
621 timer->state = fts_not_set;
623 timer->delay = 0;
624 timer->fun = NULL;
625 timer->arg = NULL;
626 fibril_condvar_broadcast(&timer->cv);
628 return old_state;
/** @}
 */