From 59295bbf66f0c9ce4024363e92b81d45ae2db851 Mon Sep 17 00:00:00 2001
From: schulz <schulz@fb15a70f-31f2-0310-bbcc-cdcc74a49acc>
Date: Fri, 1 May 2015 11:41:04 +0000
Subject: [PATCH] Simple spinlock support

git-svn-id: https://svn.aros.org/svn/aros/trunk/AROS@50546 fb15a70f-31f2-0310-bbcc-cdcc74a49acc
---
 arch/arm-native/kernel/spinislocked.c    |  5 +++-
 arch/arm-native/kernel/spinlock.c        | 36 +++++++++++++++++++++++
 arch/arm-native/kernel/spintrylock.c     | 49 +++++++++++++++++++++++++++++++-
 arch/arm-native/kernel/spinunlock.c      | 29 ++++++++++++++++++-
 compiler/include/aros/types/spinlock_s.h |  3 ++
 5 files changed, 119 insertions(+), 3 deletions(-)

diff --git a/arch/arm-native/kernel/spinislocked.c b/arch/arm-native/kernel/spinislocked.c
index 1a5c82061c..dfc229ad46 100644
--- a/arch/arm-native/kernel/spinislocked.c
+++ b/arch/arm-native/kernel/spinislocked.c
@@ -17,7 +17,10 @@ AROS_LH1(int, KrnSpinIsLocked,
 {
     AROS_LIBFUNC_INIT
 
-    return 0;
+    if (lock->lock == 0)
+        return 0;
+    else
+        return 1;
 
     AROS_LIBFUNC_EXIT
 }
diff --git a/arch/arm-native/kernel/spinlock.c b/arch/arm-native/kernel/spinlock.c
index c035d8a274..8423e69aa5 100644
--- a/arch/arm-native/kernel/spinlock.c
+++ b/arch/arm-native/kernel/spinlock.c
@@ -7,6 +7,8 @@
 #include <aros/kernel.h>
 #include <aros/libcall.h>
 
+#include <asm/arm/cpu.h>
+
 #include <kernel_base.h>
 #include <kernel_debug.h>
 
@@ -18,6 +20,40 @@ AROS_LH2(spinlock_t *, KrnSpinLock,
 {
     AROS_LIBFUNC_INIT
 
+    unsigned long lock_value, result;
+
+    if (mode == SPINLOCK_MODE_WRITE)
+    {
+        asm volatile(
+        "1:     ldrex   %0, [%1]     \n\t" // Load the lock value and gain exclusive access to memory
+        "       teq     %0, #0       \n\t" // Is the value 0? It means the lock was free
+        "       wfene                \n\t" // Wait For Event if the lock was not free
+        "       strexeq %0, %2, [%1] \n\t" // Store value to memory and check if it succeeded
+        "       teq     %0, #0       \n\t" // Test if the write succeeded
+        "       bne     1b           \n\t" // Try again if locking failed
+        : "=&r"(lock_value)
+        : "r"(&lock->lock), "r"(0x80000000)
+        : "cc"
+        );
+    }
+    else
+    {
+        asm volatile(
+        "1:     ldrex   %0, [%2]     \n\t" // Load the lock value and gain exclusive access to memory
+        "       adds    %0, %0, #1   \n\t" // Increase the lock value and update the condition flags
+        "       wfemi                \n\t" // Wait For Event if the lock value was negative (write-locked)
+        "       strexpl %1, %0, [%2] \n\t" // Store value to memory if positive and check the result
+        "       rsbpls  %0, %1, #0   \n\t" // Reverse subtract and update the condition flags: if the strex
+                                           // write succeeded, %1 contains 0 and #0 - %1 clears the N flag;
+                                           // if it failed, %1 contains 1 and #0 - %1 sets the N flag (0xffffffff)
+        "       bmi     1b           \n\t" // Try again if the N flag is set (lock value or write failure)
+        : "=&r"(lock_value), "=&r"(result)
+        : "r"(&lock->lock)
+        : "cc"
+        );
+    }
+
+    dmb();
 
     return lock;
 
     AROS_LIBFUNC_EXIT
diff --git a/arch/arm-native/kernel/spintrylock.c b/arch/arm-native/kernel/spintrylock.c
index 5d31665a9c..6a7d4bb5e8 100644
--- a/arch/arm-native/kernel/spintrylock.c
+++ b/arch/arm-native/kernel/spintrylock.c
@@ -7,6 +7,8 @@
 #include <aros/kernel.h>
 #include <aros/libcall.h>
 
+#include <asm/arm/cpu.h>
+
 #include <kernel_base.h>
 #include <kernel_debug.h>
 
@@ -18,7 +20,52 @@ AROS_LH2(spinlock_t *, KrnSpinTryLock,
 {
     AROS_LIBFUNC_INIT
 
-    return lock;
+    unsigned long lock_value, tmp;
+
+    if (mode == SPINLOCK_MODE_WRITE)
+    {
+        do
+        {
+            asm volatile(
+            "       ldrex   %0, [%2]     \n\t" // Load the lock value and gain exclusive access to memory
+            "       teq     %0, #0       \n\t" // Is the value 0? It means the lock was free
+            "       strexeq %1, %3, [%2] \n\t" // Store value to memory and check if it succeeded
+            : "=&r"(lock_value), "=&r"(tmp)
+            : "r"(&lock->lock), "r"(0x80000000), "1"(0)
+            : "cc"
+            );
+        } while(tmp);
+
+        if (lock_value == 0)
+        {
+            dmb();
+            return lock;
+        }
+        else
+            return NULL;
+    }
+    else
+    {
+        do
+        {
+            asm volatile(
+            "       ldrex   %0, [%2]     \n\t" // Load the lock value and gain exclusive access to memory
+            "       adds    %0, %0, #1   \n\t" // Increase the lock value and update the condition flags
+            "       strexpl %1, %0, [%2] \n\t" // Store value to memory if positive and check the result
+            : "=&r"(lock_value), "=&r"(tmp)
+            : "r"(&lock->lock), "1"(0)
+            : "cc"
+            );
+        } while(tmp);
+
+        if (lock_value < 0x80000000)
+        {
+            dmb();
+            return lock;
+        }
+        else
+            return NULL;
+    }
 
     AROS_LIBFUNC_EXIT
 }
diff --git a/arch/arm-native/kernel/spinunlock.c b/arch/arm-native/kernel/spinunlock.c
index ad3e9d307c..532354ee9e 100644
--- a/arch/arm-native/kernel/spinunlock.c
+++ b/arch/arm-native/kernel/spinunlock.c
@@ -17,7 +17,34 @@ AROS_LH1(void, KrnSpinUnLock,
 {
     AROS_LIBFUNC_INIT
 
-    return;
+    /*
+     * Are we releasing a write lock? Just zero out the value. Also send an event so that other
+     * cores waiting for the lock wake up.
+     */
+    if (lock->lock & 0x80000000)
+    {
+        lock->lock = 0;
+        asm volatile("sev");
+    }
+    else
+    {
+        unsigned long lock_value, write_result;
+
+        asm volatile(
+        "1:     ldrex   %0, [%2]     \n\t" // Read the lock value and gain exclusive access to memory
+        "       sub     %0, %0, #1   \n\t" // Decrease the lock value
+        "       strex   %1, %0, [%2] \n\t" // Try to update the lock value
+        "       teq     %1, #0       \n\t" // Test if the write succeeded
+        "       bne     1b           \n\t" // Try again if the write failed
+        : "=&r"(lock_value), "=&r"(write_result)
+        : "r"(&lock->lock)
+        : "cc"
+        );
+
+        /* Send an event to the other cores if the lock is now free */
+        if (lock_value == 0)
+            asm volatile("sev");
+    }
 
     AROS_LIBFUNC_EXIT
 }
diff --git a/compiler/include/aros/types/spinlock_s.h b/compiler/include/aros/types/spinlock_s.h
index 839b0ad9eb..487787e423 100644
--- a/compiler/include/aros/types/spinlock_s.h
+++ b/compiler/include/aros/types/spinlock_s.h
@@ -7,4 +7,7 @@ typedef struct {
     volatile unsigned long lock;
 } spinlock_t;
 
+#define SPINLOCK_MODE_READ  0
+#define SPINLOCK_MODE_WRITE 1
+
 #endif /* ! _AROS_TYPES_SPINLOCK_S_H_ */
-- 
2.11.4.GIT
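
For readers unfamiliar with the lock-word encoding used throughout this patch: bit 31 of
the word marks a held write lock, while bits 0-30 count the readers currently inside.
The stand-alone C11 sketch below expresses the same protocol with portable atomics. It
is an illustration only, not AROS code; the demo_* names are invented for this note, and
the real implementation above uses LDREX/STREX with WFE/SEV signalling instead of
compare-and-swap loops.

#include <stdatomic.h>
#include <stdbool.h>

#define WRITE_LOCK_BIT 0x80000000UL

typedef struct { _Atomic unsigned long lock; } demo_spinlock_t;

/* One attempt to take the lock; returns true on success. */
static bool demo_try_lock(demo_spinlock_t *l, bool write)
{
    unsigned long old = atomic_load_explicit(&l->lock, memory_order_relaxed);

    if (write)
    {
        /* A writer needs the whole word to be 0: no readers, no writer. */
        if (old != 0)
            return false;
        return atomic_compare_exchange_strong_explicit(&l->lock, &old, WRITE_LOCK_BIT,
                    memory_order_acquire, memory_order_relaxed);
    }

    /* A reader only needs the write bit to be clear; it bumps the reader count. */
    if (old & WRITE_LOCK_BIT)
        return false;
    return atomic_compare_exchange_strong_explicit(&l->lock, &old, old + 1,
                memory_order_acquire, memory_order_relaxed);
}

/* Spinning acquire; the ARM code replaces this busy loop with wfe/sev. */
static void demo_lock(demo_spinlock_t *l, bool write)
{
    while (!demo_try_lock(l, write))
        ;
}

static void demo_unlock(demo_spinlock_t *l)
{
    if (atomic_load_explicit(&l->lock, memory_order_relaxed) & WRITE_LOCK_BIT)
        atomic_store_explicit(&l->lock, 0, memory_order_release);      /* drop the write lock */
    else
        atomic_fetch_sub_explicit(&l->lock, 1, memory_order_release);  /* one reader leaves */
}

Note how this mirrors KrnSpinLock and KrnSpinUnLock: a writer stores 0x80000000 only
when the word was 0, a reader increments the word only while it is non-negative, and
unlock either clears the word (write) or decrements it (read), waking waiters once the
lock becomes free.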