From 897fd8f11ec6c9c4a63697b4ccc6ab67c8be92f2 Mon Sep 17 00:00:00 2001
From: Jiří Zárevúcky
Date: Tue, 19 Dec 2017 19:18:15 +0100
Subject: [PATCH] Use <errno.h> instead of special ESYNCH_xx error codes.

---
 abi/include/abi/synch.h                  | 17 ------
 kernel/arch/ia32/src/smp/smp.c           |  3 +-
 kernel/arch/sparc64/src/smp/sun4u/smp.c  |  4 +-
 kernel/arch/sparc64/src/smp/sun4v/smp.c  |  4 +-
 kernel/generic/include/synch/semaphore.h |  5 +-
 kernel/generic/include/synch/waitq.h     |  8 +--
 kernel/generic/src/ipc/ipc.c             |  6 +--
 kernel/generic/src/proc/thread.c         |  6 +--
 kernel/generic/src/synch/condvar.c       | 20 ++++---
 kernel/generic/src/synch/futex.c         |  9 ++--
 kernel/generic/src/synch/mutex.c         |  8 +--
 kernel/generic/src/synch/rcu.c           | 12 ++---
 kernel/generic/src/synch/semaphore.c     |  2 +-
 kernel/generic/src/synch/waitq.c         | 93 ++++++++++++++++++--------------
 kernel/generic/src/synch/workqueue.c     |  3 +-
 kernel/generic/src/sysinfo/stats.c       |  8 +--
 kernel/generic/src/udebug/udebug.c       |  6 +--
 kernel/test/synch/rcu1.c                 |  4 +-
 kernel/test/synch/semaphore2.c           |  2 +-
 uspace/lib/c/include/futex.h             | 12 +++--
 20 files changed, 119 insertions(+), 113 deletions(-)

diff --git a/abi/include/abi/synch.h b/abi/include/abi/synch.h
index b83f78de6..819e12b14 100644
--- a/abi/include/abi/synch.h
+++ b/abi/include/abi/synch.h
@@ -45,23 +45,6 @@
 /** Interruptible operation. */
 #define SYNCH_FLAGS_INTERRUPTIBLE (1 << 1)
 
-/** Could not satisfy the request without going to sleep. */
-#define ESYNCH_WOULD_BLOCK 1
-/** Timeout occurred. */
-#define ESYNCH_TIMEOUT 2
-/** Sleep was interrupted. */
-#define ESYNCH_INTERRUPTED 4
-/** Operation succeeded without sleeping. */
-#define ESYNCH_OK_ATOMIC 8
-/** Operation succeeded and did sleep. */
-#define ESYNCH_OK_BLOCKED 16
-
-#define SYNCH_FAILED(rc) \
-    ((rc) & (ESYNCH_WOULD_BLOCK | ESYNCH_TIMEOUT | ESYNCH_INTERRUPTED))
-
-#define SYNCH_OK(rc) \
-    ((rc) & (ESYNCH_OK_ATOMIC | ESYNCH_OK_BLOCKED))
-
 #endif
 
 /** @}
diff --git a/kernel/arch/ia32/src/smp/smp.c b/kernel/arch/ia32/src/smp/smp.c
index 13b752ed8..66ecb7951 100644
--- a/kernel/arch/ia32/src/smp/smp.c
+++ b/kernel/arch/ia32/src/smp/smp.c
@@ -38,6 +38,7 @@
 #include
 #include
 #include
+#include <errno.h>
 #include
 #include
 #include
@@ -177,7 +178,7 @@ void kmp(void *arg __attribute__((unused)))
          * supposed to wake us up.
          */
         if (waitq_sleep_timeout(&ap_completion_wq, 1000000,
-            SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT) {
+            SYNCH_FLAGS_NONE, NULL) == ETIMEOUT) {
             log(LF_ARCH, LVL_NOTE, "%s: waiting for cpu%u "
                 "(APIC ID = %d) timed out", __FUNCTION__, i,
                 ops->cpu_apic_id(i));
diff --git a/kernel/arch/sparc64/src/smp/sun4u/smp.c b/kernel/arch/sparc64/src/smp/sun4u/smp.c
index e839987a1..8695c171e 100644
--- a/kernel/arch/sparc64/src/smp/sun4u/smp.c
+++ b/kernel/arch/sparc64/src/smp/sun4u/smp.c
@@ -105,8 +105,8 @@ static void wakeup_cpu(ofw_tree_node_t *node)
 
     waking_up_mid = mid;
 
-    if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) ==
-        ESYNCH_TIMEOUT)
+    if (waitq_sleep_timeout(&ap_completion_wq, 1000000,
+        SYNCH_FLAGS_NONE, NULL) == ETIMEOUT)
         log(LF_ARCH, LVL_NOTE, "%s: waiting for processor (mid = %" PRIu32
             ") timed out", __func__, mid);
 }
diff --git a/kernel/arch/sparc64/src/smp/sun4v/smp.c b/kernel/arch/sparc64/src/smp/sun4v/smp.c
index c108fa1a2..a552e04bc 100644
--- a/kernel/arch/sparc64/src/smp/sun4v/smp.c
+++ b/kernel/arch/sparc64/src/smp/sun4v/smp.c
@@ -372,8 +372,8 @@ static bool wake_cpu(uint64_t cpuid)
         return false;
 #endif
 
-    if (waitq_sleep_timeout(&ap_completion_wq, 10000000, SYNCH_FLAGS_NONE) ==
-        ESYNCH_TIMEOUT)
+    if (waitq_sleep_timeout(&ap_completion_wq, 10000000,
+        SYNCH_FLAGS_NONE, NULL) == ETIMEOUT)
         printf("%s: waiting for processor (cpuid = %" PRIu64
             ") timed out\n", __func__, cpuid);
 
diff --git a/kernel/generic/include/synch/semaphore.h b/kernel/generic/include/synch/semaphore.h
index fca31f3a6..7e3b0da84 100644
--- a/kernel/generic/include/synch/semaphore.h
+++ b/kernel/generic/include/synch/semaphore.h
@@ -35,6 +35,7 @@
 #ifndef KERN_SEMAPHORE_H_
 #define KERN_SEMAPHORE_H_
 
+#include <errno.h>
 #include
 #include
 #include
@@ -53,8 +54,8 @@ typedef struct {
     _semaphore_down_timeout((s), (usec), SYNCH_FLAGS_NONE)
 
 #define semaphore_down_interruptable(s) \
-    (ESYNCH_INTERRUPTED != _semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, \
-        SYNCH_FLAGS_INTERRUPTIBLE))
+    (_semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, \
+        SYNCH_FLAGS_INTERRUPTIBLE) != EINTR)
 
 extern void semaphore_initialize(semaphore_t *, int);
 extern int _semaphore_down_timeout(semaphore_t *, uint32_t, unsigned int);
diff --git a/kernel/generic/include/synch/waitq.h b/kernel/generic/include/synch/waitq.h
index bca971655..58bc4481a 100644
--- a/kernel/generic/include/synch/waitq.h
+++ b/kernel/generic/include/synch/waitq.h
@@ -66,15 +66,15 @@ typedef struct {
 } waitq_t;
 
 #define waitq_sleep(wq) \
-    waitq_sleep_timeout((wq), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
+    waitq_sleep_timeout((wq), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, NULL)
 
 struct thread;
 
 extern void waitq_initialize(waitq_t *);
-extern int waitq_sleep_timeout(waitq_t *, uint32_t, unsigned int);
+extern int waitq_sleep_timeout(waitq_t *, uint32_t, unsigned int, bool *);
 extern ipl_t waitq_sleep_prepare(waitq_t *);
-extern int waitq_sleep_timeout_unsafe(waitq_t *, uint32_t, unsigned int);
-extern void waitq_sleep_finish(waitq_t *, int, ipl_t);
+extern int waitq_sleep_timeout_unsafe(waitq_t *, uint32_t, unsigned int, bool *);
+extern void waitq_sleep_finish(waitq_t *, bool, ipl_t);
 extern void waitq_wakeup(waitq_t *, wakeup_mode_t);
 extern void _waitq_wakeup_unsafe(waitq_t *, wakeup_mode_t);
 extern void waitq_interrupt_sleep(struct thread *);
diff --git a/kernel/generic/src/ipc/ipc.c b/kernel/generic/src/ipc/ipc.c
index 9fb0648b1..11f4c679c 100644
--- a/kernel/generic/src/ipc/ipc.c
+++ b/kernel/generic/src/ipc/ipc.c
@@ -537,8 +537,8 @@ call_t *ipc_wait_for_call(answerbox_t *box,
     uint32_t usec, unsigned int flags)
     int rc;
 
 restart:
-    rc = waitq_sleep_timeout(&box->wq, usec, flags);
-    if (SYNCH_FAILED(rc))
+    rc = waitq_sleep_timeout(&box->wq, usec, flags, NULL);
+    if (rc != EOK)
         return NULL;
 
     irq_spinlock_lock(&box->lock, true);
@@ -637,7 +637,7 @@ restart_phones:
     while (!list_empty(&box->connected_phones)) {
         phone = list_get_instance(list_first(&box->connected_phones),
             phone_t, link);
 
-        if (SYNCH_FAILED(mutex_trylock(&phone->lock))) {
+        if (mutex_trylock(&phone->lock) != EOK) {
             irq_spinlock_unlock(&box->lock, true);
             DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD);
             goto restart_phones;
diff --git a/kernel/generic/src/proc/thread.c b/kernel/generic/src/proc/thread.c
index bf72f5e9a..99e56130e 100644
--- a/kernel/generic/src/proc/thread.c
+++ b/kernel/generic/src/proc/thread.c
@@ -547,7 +547,7 @@ restart:
 /** Interrupts an existing thread so that it may exit as soon as possible.
  *
  * Threads that are blocked waiting for a synchronization primitive
- * are woken up with a return code of ESYNCH_INTERRUPTED if the
+ * are woken up with a return code of EINTR if the
  * blocking call was interruptable. See waitq_sleep_timeout().
  *
  * The caller must guarantee the thread object is valid during the entire
@@ -652,7 +652,7 @@ int thread_join_timeout(thread_t *thread, uint32_t usec, unsigned int flags)
     assert(!thread->detached);
     irq_spinlock_unlock(&thread->lock, true);
 
-    return waitq_sleep_timeout(&thread->join_wq, usec, flags);
+    return waitq_sleep_timeout(&thread->join_wq, usec, flags, NULL);
 }
 
 /** Detach thread.
@@ -699,7 +699,7 @@ void thread_usleep(uint32_t usec)
 
     waitq_initialize(&wq);
 
-    (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING);
+    (void) waitq_sleep_timeout(&wq, usec, SYNCH_FLAGS_NON_BLOCKING, NULL);
 }
 
 static bool thread_walker(avltree_node_t *node, void *arg)
diff --git a/kernel/generic/src/synch/condvar.c b/kernel/generic/src/synch/condvar.c
index 519c1ff09..d73922fac 100644
--- a/kernel/generic/src/synch/condvar.c
+++ b/kernel/generic/src/synch/condvar.c
@@ -79,7 +79,7 @@ void condvar_broadcast(condvar_t *cv)
 *
 * For exact description of meaning of possible combinations of usec and flags,
 * see comment for waitq_sleep_timeout(). Note that when
- * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
+ * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
 * returned.
 *
 * @return See comment for waitq_sleep_timeout().
@@ -88,15 +88,17 @@ int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, int flags)
 {
     int rc;
     ipl_t ipl;
+    bool blocked;
 
     ipl = waitq_sleep_prepare(&cv->wq);
     /* Unlock only after the waitq is locked so we don't miss a wakeup. */
     mutex_unlock(mtx);
 
     cv->wq.missed_wakeups = 0;    /* Enforce blocking. */
-    rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
+    rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
+    assert(blocked || rc != EOK);
 
-    waitq_sleep_finish(&cv->wq, rc, ipl);
+    waitq_sleep_finish(&cv->wq, blocked, ipl);
     /* Lock only after releasing the waitq to avoid a possible deadlock. */
     mutex_lock(mtx);
 
@@ -116,7 +118,7 @@ int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, int flags)
 *
 * For exact description of meaning of possible combinations of usec and flags,
 * see comment for waitq_sleep_timeout(). Note that when
- * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
+ * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
 * returned.
 *
 * @return See comment for waitq_sleep_timeout().
@@ -126,16 +128,18 @@ int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
 {
     int rc;
     ipl_t ipl;
-
+    bool blocked;
+
     ipl = waitq_sleep_prepare(&cv->wq);
     /* Unlock only after the waitq is locked so we don't miss a wakeup. */
     spinlock_unlock(lock);
 
     cv->wq.missed_wakeups = 0;    /* Enforce blocking. */
-    rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags);
+    rc = waitq_sleep_timeout_unsafe(&cv->wq, usec, flags, &blocked);
+    assert(blocked || rc != EOK);
 
-    waitq_sleep_finish(&cv->wq, rc, ipl);
+    waitq_sleep_finish(&cv->wq, blocked, ipl);
     /* Lock only after releasing the waitq to avoid a possible deadlock. */
     spinlock_lock(lock);
@@ -151,7 +155,7 @@ int _condvar_wait_timeout_spinlock_impl(condvar_t *cv, spinlock_t *lock,
 *
 * For exact description of meaning of possible combinations of usec and flags,
 * see comment for waitq_sleep_timeout(). Note that when
- * SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
+ * SYNCH_FLAGS_NON_BLOCKING is specified here, EAGAIN is always
 * returned.
 *
 * @return See comment for waitq_sleep_timeout().
diff --git a/kernel/generic/src/synch/futex.c b/kernel/generic/src/synch/futex.c
index d5f6ab93b..831c3e71a 100644
--- a/kernel/generic/src/synch/futex.c
+++ b/kernel/generic/src/synch/futex.c
@@ -394,8 +394,8 @@ static futex_t *get_and_cache_futex(uintptr_t phys_addr, uintptr_t uaddr)
 * @param uaddr Userspace address of the futex counter.
 *
 * @return If there is no physical mapping for uaddr ENOENT is
- *         returned. Otherwise returns a wait result as defined in
- *         synch.h.
+ *         returned. Otherwise returns the return value of
+ *         waitq_sleep_timeout().
 */
 sysarg_t sys_futex_sleep(uintptr_t uaddr)
 {
@@ -408,7 +408,8 @@ sysarg_t sys_futex_sleep(uintptr_t uaddr)
     udebug_stoppable_begin();
 #endif
 
-    int rc = waitq_sleep_timeout(&futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE);
+    int rc = waitq_sleep_timeout(
+        &futex->wq, 0, SYNCH_FLAGS_INTERRUPTIBLE, NULL);
 
 #ifdef CONFIG_UDEBUG
     udebug_stoppable_end();
@@ -429,7 +430,7 @@ sysarg_t sys_futex_wakeup(uintptr_t uaddr)
 
     if (futex) {
         waitq_wakeup(&futex->wq, WAKEUP_FIRST);
-        return 0;
+        return EOK;
     } else {
         return (sysarg_t) ENOENT;
     }
diff --git a/kernel/generic/src/synch/mutex.c b/kernel/generic/src/synch/mutex.c
index e0b6593a2..6cc273458 100644
--- a/kernel/generic/src/synch/mutex.c
+++ b/kernel/generic/src/synch/mutex.c
@@ -36,6 +36,7 @@
 */
 
 #include
+#include <errno.h>
 #include
 #include
 #include
@@ -94,10 +95,10 @@ int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, unsigned int flags)
 
         if (mtx->owner == THREAD) {
             mtx->nesting++;
-            return ESYNCH_OK_ATOMIC;
+            return EOK;
         } else {
             rc = _semaphore_down_timeout(&mtx->sem, usec, flags);
-            if (SYNCH_OK(rc)) {
+            if (rc == EOK) {
                 mtx->owner = THREAD;
                 mtx->nesting = 1;
             }
@@ -118,8 +119,7 @@ int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, unsigned int flags)
                 deadlock_reported = true;
             }
             rc = semaphore_trydown(&mtx->sem);
-        } while (SYNCH_FAILED(rc) &&
-            !(flags & SYNCH_FLAGS_NON_BLOCKING));
+        } while (rc != EOK && !(flags & SYNCH_FLAGS_NON_BLOCKING));
         if (deadlock_reported)
             printf("cpu%u: not deadlocked\n", CPU->id);
     }
diff --git a/kernel/generic/src/synch/rcu.c b/kernel/generic/src/synch/rcu.c
index 0736116c2..47a0b53d4 100644
--- a/kernel/generic/src/synch/rcu.c
+++ b/kernel/generic/src/synch/rcu.c
@@ -959,7 +959,7 @@ static bool wait_for_cur_cbs_gp_end(bool expedite, rcu_gp_t *completed_gp)
         int ret = _condvar_wait_timeout_spinlock(&rcu.gp_ended, &rcu.gp_lock,
             SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
 
-        if (ret == ESYNCH_INTERRUPTED) {
+        if (ret == EINTR) {
             spinlock_unlock(&rcu.gp_lock);
             return false;
         }
@@ -1017,13 +1017,13 @@ static bool gp_sleep(bool *expedite)
             DETECT_SLEEP_MS * 1000, SYNCH_FLAGS_INTERRUPTIBLE);
 
         /* rcu.expedite_now was signaled. */
-        if (ret == ESYNCH_OK_BLOCKED) {
+        if (ret == EOK) {
             *expedite = true;
         }
 
         spinlock_unlock(&rcu.gp_lock);
 
-        return (ret != ESYNCH_INTERRUPTED);
+        return (ret != EINTR);
     }
 }
@@ -1270,7 +1270,7 @@ static bool cv_wait_for_gp(rcu_gp_t wait_on_gp)
     while (rcu.completed_gp < wait_on_gp && !interrupted) {
         int ret = _condvar_wait_timeout_spinlock(&rcu.gp_ended,
             &rcu.gp_lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
 
-        interrupted = (ret == ESYNCH_INTERRUPTED);
+        interrupted = (ret == EINTR);
     }
 
     return interrupted;
@@ -1331,7 +1331,7 @@ static bool wait_for_detect_req(void)
         int ret = _condvar_wait_timeout_spinlock(&rcu.req_gp_changed,
             &rcu.gp_lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
 
-        interrupted = (ret == ESYNCH_INTERRUPTED);
+        interrupted = (ret == EINTR);
     }
 
     return !interrupted;
@@ -1405,7 +1405,7 @@ static bool gp_sleep(void)
 
     spinlock_unlock(&rcu.gp_lock);
 
-    return (ret != ESYNCH_INTERRUPTED);
+    return (ret != EINTR);
 }
 
 /** Actively interrupts and checks the offending cpus for quiescent states. */
diff --git a/kernel/generic/src/synch/semaphore.c b/kernel/generic/src/synch/semaphore.c
index c3fafe651..559159d3d 100644
--- a/kernel/generic/src/synch/semaphore.c
+++ b/kernel/generic/src/synch/semaphore.c
@@ -72,7 +72,7 @@ void semaphore_initialize(semaphore_t *sem, int val)
 */
 int _semaphore_down_timeout(semaphore_t *sem, uint32_t usec, unsigned int flags)
 {
-    return waitq_sleep_timeout(&sem->wq, usec, flags);
+    return waitq_sleep_timeout(&sem->wq, usec, flags, NULL);
 }
 
 /** Semaphore up
diff --git a/kernel/generic/src/synch/waitq.c b/kernel/generic/src/synch/waitq.c
index c8f5deb99..4e36300e2 100644
--- a/kernel/generic/src/synch/waitq.c
+++ b/kernel/generic/src/synch/waitq.c
@@ -44,6 +44,7 @@
 */
 
 #include
+#include <errno.h>
 #include
 #include
 #include
@@ -237,6 +238,10 @@ void waitq_unsleep(waitq_t *wq)
 * @param usec Timeout in microseconds.
 * @param flags Specify mode of the sleep.
 *
+ * @param[out] blocked On return, regardless of the return code,
+ *                     `*blocked` is set to `true` iff the thread went to
+ *                     sleep.
+ *
 * The sleep can be interrupted only if the
 * SYNCH_FLAGS_INTERRUPTIBLE bit is specified in flags.
 *
@@ -250,25 +255,30 @@ void waitq_unsleep(waitq_t *wq)
 * If usec is zero and the SYNCH_FLAGS_NON_BLOCKING bit is set in flags, the
 * call will immediately return, reporting either success or failure.
 *
- * @return ESYNCH_WOULD_BLOCK, meaning that the sleep failed because at the
- *         time of the call there was no pending wakeup
- * @return ESYNCH_TIMEOUT, meaning that the sleep timed out.
- * @return ESYNCH_INTERRUPTED, meaning that somebody interrupted the sleeping
- *         thread.
- * @return ESYNCH_OK_ATOMIC, meaning that the sleep succeeded and that there
- *         was a pending wakeup at the time of the call. The caller was not put
- *         asleep at all.
- * @return ESYNCH_OK_BLOCKED, meaning that the sleep succeeded; the full sleep
- *         was attempted.
+ * @return EAGAIN, meaning that the sleep failed because it was requested
+ *         as SYNCH_FLAGS_NON_BLOCKING, but there was no pending wakeup.
+ * @return ETIMEOUT, meaning that the sleep timed out.
+ * @return EINTR, meaning that somebody interrupted the sleeping
+ *         thread. Check the value of `*blocked` to see if the thread slept,
+ *         or if a pending interrupt forced it to return immediately.
+ * @return EOK, meaning that none of the above conditions occurred, and the
+ *         thread was woken up successfully by `waitq_wakeup()`. Check
+ *         the value of `*blocked` to see if the thread slept or if
+ *         the wakeup was already pending.
 *
 */
-int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags)
+int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
 {
     assert((!PREEMPTION_DISABLED) || (PARAM_NON_BLOCKING(flags, usec)));
 
     ipl_t ipl = waitq_sleep_prepare(wq);
-    int rc = waitq_sleep_timeout_unsafe(wq, usec, flags);
-    waitq_sleep_finish(wq, rc, ipl);
+    bool nblocked;
+    int rc = waitq_sleep_timeout_unsafe(wq, usec, flags, &nblocked);
+    waitq_sleep_finish(wq, nblocked, ipl);
+
+    if (blocked != NULL) {
+        *blocked = nblocked;
+    }
 
     return rc;
 }
@@ -319,34 +329,29 @@ restart:
 * to the call to waitq_sleep_prepare(). If necessary, the wait queue
 * lock is released.
 *
- * @param wq Wait queue.
- * @param rc Return code of waitq_sleep_timeout_unsafe().
- * @param ipl Interrupt level returned by waitq_sleep_prepare().
+ * @param wq Wait queue.
+ * @param blocked Out parameter of waitq_sleep_timeout_unsafe().
+ * @param ipl Interrupt level returned by waitq_sleep_prepare().
 *
 */
-void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
+void waitq_sleep_finish(waitq_t *wq, bool blocked, ipl_t ipl)
 {
-    switch (rc) {
-    case ESYNCH_WOULD_BLOCK:
-    case ESYNCH_OK_ATOMIC:
-        irq_spinlock_unlock(&wq->lock, false);
-        break;
-    default:
-        /*
+    if (blocked) {
+        /*
         * Wait for a waitq_wakeup() or waitq_unsleep() to complete
         * before returning from waitq_sleep() to the caller. Otherwise
        * the caller might expect that the wait queue is no longer used
        * and deallocate it (although the wakeup on a another cpu has
-        * not yet completed and is using the wait queue).
-        *
-        * Note that we have to do this for ESYNCH_OK_BLOCKED and
-        * ESYNCH_INTERRUPTED, but not necessarily for ESYNCH_TIMEOUT
-        * where the timeout handler stops using the waitq before waking
-        * us up. To be on the safe side, ensure the waitq is not in use
-        * anymore in this case as well.
+        * not yet completed and is using the wait queue).
+        *
+        * Note that we have to do this for EOK and EINTR, but not
+        * necessarily for ETIMEOUT where the timeout handler stops
+        * using the waitq before waking us up. To be on the safe side,
+        * ensure the waitq is not in use anymore in this case as well.
         */
         waitq_complete_wakeup(wq);
-        break;
+    } else {
+        irq_spinlock_unlock(&wq->lock, false);
     }
 
     interrupts_restore(ipl);
@@ -362,19 +367,23 @@ void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl)
 * @param usec See waitq_sleep_timeout().
 * @param flags See waitq_sleep_timeout().
 *
+ * @param[out] blocked See waitq_sleep_timeout().
+ *
 * @return See waitq_sleep_timeout().
 *
 */
-int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags)
+int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags, bool *blocked)
 {
+    *blocked = false;
+
     /* Checks whether to go to sleep at all */
     if (wq->missed_wakeups) {
         wq->missed_wakeups--;
-        return ESYNCH_OK_ATOMIC;
+        return EOK;
     } else {
         if (PARAM_NON_BLOCKING(flags, usec)) {
             /* Return immediately instead of going to sleep */
-            return ESYNCH_WOULD_BLOCK;
+            return EAGAIN;
         }
     }
@@ -391,8 +400,7 @@ int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, unsigned int flags)
         */
         if (THREAD->interrupted) {
             irq_spinlock_unlock(&THREAD->lock, false);
-            irq_spinlock_unlock(&wq->lock, false);
-            return ESYNCH_INTERRUPTED;
+            return EINTR;
         }
 
         /*
@@ -404,7 +412,7 @@
             /* Short emulation of scheduler() return code. */
             THREAD->last_cycle = get_cycle();
             irq_spinlock_unlock(&THREAD->lock, false);
-            return ESYNCH_INTERRUPTED;
+            return EINTR;
         }
     } else
         THREAD->sleep_interruptible = false;
@@ -415,7 +423,7 @@
             /* Short emulation of scheduler() return code. */
             THREAD->last_cycle = get_cycle();
             irq_spinlock_unlock(&THREAD->lock, false);
-            return ESYNCH_TIMEOUT;
+            return ETIMEOUT;
         }
 
         THREAD->timeout_pending = true;
@@ -432,12 +440,17 @@
     THREAD->state = Sleeping;
     THREAD->sleep_queue = wq;
 
+    /* Must be before entry to scheduler, because there are multiple
+     * return vectors.
+     */
+    *blocked = true;
+
     irq_spinlock_unlock(&THREAD->lock, false);
 
     /* wq->lock is released in scheduler_separated_stack() */
     scheduler();
 
-    return ESYNCH_OK_BLOCKED;
+    return EOK;
 }
 
 /** Wake up first thread sleeping in a wait queue
diff --git a/kernel/generic/src/synch/workqueue.c b/kernel/generic/src/synch/workqueue.c
index 193adc90a..5a8bb858a 100644
--- a/kernel/generic/src/synch/workqueue.c
+++ b/kernel/generic/src/synch/workqueue.c
@@ -37,6 +37,7 @@
 */
 
 #include
+#include <errno.h>
 #include
 #include
 #include
@@ -896,7 +897,7 @@ static bool dequeue_add_req(nonblock_adder_t *info, struct work_queue **pworkq)
         int ret = _condvar_wait_timeout_irq_spinlock(&info->req_cv,
             &info->lock, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_INTERRUPTIBLE);
 
-        stop = (ret == ESYNCH_INTERRUPTED);
+        stop = (ret == EINTR);
     }
 
     if (!stop) {
diff --git a/kernel/generic/src/sysinfo/stats.c b/kernel/generic/src/sysinfo/stats.c
index f67116ab9..c40217b5b 100644
--- a/kernel/generic/src/sysinfo/stats.c
+++ b/kernel/generic/src/sysinfo/stats.c
@@ -156,7 +156,7 @@ static size_t get_task_virtmem(as_t *as)
     * object, return inexact statistics by skipping the respective object.
     */
 
-    if (SYNCH_FAILED(mutex_trylock(&as->lock)))
+    if (mutex_trylock(&as->lock) != EOK)
         return 0;
 
     size_t pages = 0;
@@ -168,7 +168,7 @@ static size_t get_task_virtmem(as_t *as)
     for (i = 0; i < node->keys; i++) {
         as_area_t *area = node->value[i];
 
-        if (SYNCH_FAILED(mutex_trylock(&area->lock)))
+        if (mutex_trylock(&area->lock) != EOK)
             continue;
 
         pages += area->pages;
@@ -197,7 +197,7 @@ static size_t get_task_resmem(as_t *as)
     * object, return inexact statistics by skipping the respective object.
     */
 
-    if (SYNCH_FAILED(mutex_trylock(&as->lock)))
+    if (mutex_trylock(&as->lock) != EOK)
         return 0;
 
     size_t pages = 0;
@@ -208,7 +208,7 @@
     for (i = 0; i < node->keys; i++) {
         as_area_t *area = node->value[i];
 
-        if (SYNCH_FAILED(mutex_trylock(&area->lock)))
+        if (mutex_trylock(&area->lock) != EOK)
             continue;
 
         pages += area->resident;
diff --git a/kernel/generic/src/udebug/udebug.c b/kernel/generic/src/udebug/udebug.c
index 79bd10660..e52ec947d 100644
--- a/kernel/generic/src/udebug/udebug.c
+++ b/kernel/generic/src/udebug/udebug.c
@@ -97,9 +97,9 @@ static void udebug_wait_for_go(waitq_t *wq)
     ipl_t ipl = waitq_sleep_prepare(wq);
 
     wq->missed_wakeups = 0;    /* Enforce blocking. */
-    int rc = waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE);
-
-    waitq_sleep_finish(wq, rc, ipl);
+    bool blocked;
+    (void) waitq_sleep_timeout_unsafe(wq, SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE, &blocked);
+    waitq_sleep_finish(wq, blocked, ipl);
 }
 
 /** Start of stoppable section.
diff --git a/kernel/test/synch/rcu1.c b/kernel/test/synch/rcu1.c
index 4112759a4..df8f41959 100644
--- a/kernel/test/synch/rcu1.c
+++ b/kernel/test/synch/rcu1.c
@@ -113,9 +113,9 @@ static void join_all(void)
     bool joined = false;
     do {
         int ret = thread_join_timeout(thread[i], 5 * 1000 * 1000, 0);
-        joined = (ret != ESYNCH_TIMEOUT);
+        joined = (ret != ETIMEOUT);
 
-        if (ret == ESYNCH_OK_BLOCKED) {
+        if (ret == EOK) {
             TPRINTF("%zu threads remain\n", thread_cnt - i - 1);
         }
     } while (!joined);
diff --git a/kernel/test/synch/semaphore2.c b/kernel/test/synch/semaphore2.c
index 4304a6c9f..870638a05 100644
--- a/kernel/test/synch/semaphore2.c
+++ b/kernel/test/synch/semaphore2.c
@@ -69,7 +69,7 @@ static void consumer(void *arg)
     to = random(20000);
     TPRINTF("cpu%u, tid %" PRIu64 " down+ (%d)\n", CPU->id, THREAD->tid, to);
     rc = semaphore_down_timeout(&sem, to);
-    if (SYNCH_FAILED(rc)) {
+    if (rc != EOK) {
         TPRINTF("cpu%u, tid %" PRIu64 " down!\n", CPU->id, THREAD->tid);
         return;
     }
diff --git a/uspace/lib/c/include/futex.h b/uspace/lib/c/include/futex.h
index f1e948ba9..66fffbe58 100644
--- a/uspace/lib/c/include/futex.h
+++ b/uspace/lib/c/include/futex.h
@@ -36,6 +36,7 @@
 #define LIBC_FUTEX_H_
 
 #include
+#include <errno.h>
 #include
 
 typedef struct futex {
@@ -120,8 +121,8 @@ static inline int futex_trydown(futex_t *futex)
 * @param futex Futex.
 *
 * @return ENOENT if there is no such virtual address.
- * @return Zero in the uncontended case.
- * @return Otherwise one of ESYNCH_OK_ATOMIC or ESYNCH_OK_BLOCKED.
+ * @return EOK on success.
+ * @return Error code from <errno.h> otherwise.
 *
 */
 static inline int futex_down(futex_t *futex)
@@ -129,7 +130,7 @@ static inline int futex_down(futex_t *futex)
     if ((atomic_signed_t) atomic_predec(&futex->val) < 0)
         return __SYSCALL1(SYS_FUTEX_SLEEP, (sysarg_t) &futex->val.count);
 
-    return 0;
+    return EOK;
 }
 
 /** Up the futex.
@@ -137,7 +138,8 @@ static inline int futex_down(futex_t *futex)
 * @param futex Futex.
 *
 * @return ENOENT if there is no such virtual address.
- * @return Zero in the uncontended case.
+ * @return EOK on success.
+ * @return Error code from <errno.h> otherwise.
 *
 */
 static inline int futex_up(futex_t *futex)
@@ -145,7 +147,7 @@ static inline int futex_up(futex_t *futex)
     if ((atomic_signed_t) atomic_postinc(&futex->val) < 0)
        return __SYSCALL1(SYS_FUTEX_WAKEUP, (sysarg_t) &futex->val.count);
 
-    return 0;
+    return EOK;
 }
 
 #endif
-- 
2.11.4.GIT
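
Editor's usage sketch (not part of the commit above): the patch makes
waitq_sleep_timeout() report standard <errno.h> codes -- EOK, EAGAIN,
ETIMEOUT, EINTR -- and adds an optional bool out-parameter that tells the
caller whether the thread actually went to sleep. The minimal caller below
is illustrative only; the function name example_wait(), the one-second
timeout, and the include paths are assumptions, not code from the patch.

#include <errno.h>
#include <synch/waitq.h>

/* Hypothetical caller: wait up to one second for an event on wq.
 * Returns true iff a wakeup was delivered. */
static bool example_wait(waitq_t *wq)
{
    bool blocked;
    int rc = waitq_sleep_timeout(wq, 1000000,
        SYNCH_FLAGS_INTERRUPTIBLE, &blocked);

    switch (rc) {
    case EOK:
        /* Woken by waitq_wakeup(); if !blocked, the wakeup was already
         * pending and the thread never actually slept. */
        return true;
    case ETIMEOUT:
        /* The one-second timeout expired. */
        return false;
    case EINTR:
        /* Interrupted; if !blocked, a pending interrupt forced an
         * immediate return before the thread went to sleep. */
        return false;
    default:
        /* EAGAIN is only possible with SYNCH_FLAGS_NON_BLOCKING,
         * which this caller does not request. */
        return false;
    }
}

Callers that do not care whether the thread blocked simply pass NULL for the
new parameter, as most call sites converted by this patch do; only condvars
and udebug, which must know whether to wait for a pending wakeup to
complete, inspect it.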