2 Copyright (C) 2014 Szilard Biro
4 This software is provided 'as-is', without any express or implied
5 warranty. In no event will the authors be held liable for any damages
6 arising from the use of this software.
8 Permission is granted to anyone to use this software for any purpose,
9 including commercial applications, and to alter it and redistribute it
10 freely, subject to the following restrictions:
12 1. The origin of this software must not be misrepresented; you must not
13 claim that you wrote the original software. If you use this software
14 in a product, an acknowledgment in the product documentation would be
15 appreciated but is not required.
16 2. Altered source versions must be plainly marked as such, and must not be
17 misrepresented as being the original software.
18 3. This notice may not be removed or altered from any source distribution.
24 #include <dos/dostags.h>
25 #include <proto/exec.h>
26 #include <proto/dos.h>
27 #include <proto/timer.h>
29 #include <aros/symbolsets.h>
30 #define TIMESPEC_TO_TIMEVAL(tv, ts) { \
31 (tv)->tv_sec = (ts)->tv_sec; \
32 (tv)->tv_usec = (ts)->tv_nsec / 1000; }
34 #include <constructor.h>
35 #define StackSwapArgs PPCStackSwapArgs
36 #define NewStackSwap NewPPCStackSwap
48 #define SIGB_PARENT SIGBREAKB_CTRL_F
49 #define SIGF_PARENT (1 << SIGB_PARENT)
50 #define SIGB_COND_FALLBACK SIGBREAKB_CTRL_E
51 #define SIGF_COND_FALLBACK (1 << SIGB_COND_FALLBACK)
52 #define SIGB_TIMER_FALLBACK SIGBREAKB_CTRL_D
53 #define SIGF_TIMER_FALLBACK (1 << SIGB_TIMER_FALLBACK)
56 #define PTHREAD_FIRST_THREAD_ID (1)
57 #define PTHREAD_BARRIER_FLAG (1UL << 31)
59 //#define USE_ASYNC_CANCEL
70 void (*destructor
)(void *);
77 void (*routine
)(void *);
83 void *(*start
)(void *);
91 void *tlsvalues
[PTHREAD_KEYS_MAX
];
92 struct MinList cleanup
;
98 static ThreadInfo threads
[PTHREAD_THREADS_MAX
];
99 static struct SignalSemaphore thread_sem
;
100 static TLSKey tlskeys
[PTHREAD_KEYS_MAX
];
101 static struct SignalSemaphore tls_sem
;
107 static int SemaphoreIsInvalid(struct SignalSemaphore
*sem
)
109 DB2(bug("%s(%p)\n", __FUNCTION__
, sem
));
111 return (!sem
|| sem
->ss_Link
.ln_Type
!= NT_SIGNALSEM
|| sem
->ss_WaitQueue
.mlh_Tail
!= NULL
);
114 static int SemaphoreIsMine(struct SignalSemaphore
*sem
)
118 DB2(bug("%s(%p)\n", __FUNCTION__
, sem
));
122 return (sem
&& sem
->ss_NestCount
> 0 && sem
->ss_Owner
== me
);
125 static ThreadInfo
*GetThreadInfo(pthread_t thread
)
127 ThreadInfo
*inf
= NULL
;
129 DB2(bug("%s(%u)\n", __FUNCTION__
, thread
));
131 // TODO: more robust error handling?
132 if (thread
< PTHREAD_THREADS_MAX
)
133 inf
= &threads
[thread
];
138 static pthread_t
GetThreadId(struct Task
*task
)
142 DB2(bug("%s(%p)\n", __FUNCTION__
, task
));
144 ObtainSemaphoreShared(&thread_sem
);
146 // First thread id will be 1 so that it is different than default value of pthread_t
147 for (i
= PTHREAD_FIRST_THREAD_ID
; i
< PTHREAD_THREADS_MAX
; i
++)
149 if (threads
[i
].task
== task
)
153 ReleaseSemaphore(&thread_sem
);
#if defined __mc68000__
/* No CAS instruction on m68k */
// NOTE(review): bodies reconstructed — plain m68k has no CAS, so atomicity is
// emulated by disabling interrupts around the read-modify-write. Confirm
// against the original implementation.
static int __m68k_sync_val_compare_and_swap(int *v, int o, int n)
{
    int ret;

    Disable();
    if ((ret = *v) == o)
        *v = n;
    Enable();

    return ret;
}
#undef __sync_val_compare_and_swap
#define __sync_val_compare_and_swap(v, o, n) __m68k_sync_val_compare_and_swap(v, o, n)

static int __m68k_sync_lock_test_and_set(int *v, int n)
{
    Disable();
    *v = n;
    Enable();

    return n;
}
#undef __sync_lock_test_and_set
#define __sync_lock_test_and_set(v, n) __m68k_sync_lock_test_and_set(v, n)
#undef __sync_lock_release
#define __sync_lock_release(v) __m68k_sync_lock_test_and_set(v, 0)

static inline int __m68k_sync_add_and_fetch(int *v, int n)
{
    int ret;

    Disable();
    ret = (*v += n);
    Enable();

    return ret;
}
#undef __sync_add_and_fetch
#define __sync_add_and_fetch(v, n) __m68k_sync_add_and_fetch(v, n)
#undef __sync_sub_and_fetch
#define __sync_sub_and_fetch(v, n) __m68k_sync_add_and_fetch(v, -(n))
#endif
206 // Thread specific data functions
209 int pthread_key_create(pthread_key_t
*key
, void (*destructor
)(void *))
214 D(bug("%s(%p, %p)\n", __FUNCTION__
, key
, destructor
));
219 ObtainSemaphore(&tls_sem
);
221 for (i
= 0; i
< PTHREAD_KEYS_MAX
; i
++)
223 if (tlskeys
[i
].used
== FALSE
)
227 if (i
== PTHREAD_KEYS_MAX
)
229 ReleaseSemaphore(&tls_sem
);
235 tls
->destructor
= destructor
;
237 ReleaseSemaphore(&tls_sem
);
244 int pthread_key_delete(pthread_key_t key
)
248 D(bug("%s(%u)\n", __FUNCTION__
, key
));
250 if (key
>= PTHREAD_KEYS_MAX
)
255 ObtainSemaphore(&tls_sem
);
257 if (tls
->used
== FALSE
)
259 ReleaseSemaphore(&tls_sem
);
264 tls
->destructor
= NULL
;
266 ReleaseSemaphore(&tls_sem
);
271 int pthread_setspecific(pthread_key_t key
, const void *value
)
277 D(bug("%s(%u)\n", __FUNCTION__
, key
));
279 if (key
>= PTHREAD_KEYS_MAX
)
282 thread
= pthread_self();
285 ObtainSemaphoreShared(&tls_sem
);
287 if (tls
->used
== FALSE
)
289 ReleaseSemaphore(&tls_sem
);
293 ReleaseSemaphore(&tls_sem
);
295 inf
= GetThreadInfo(thread
);
296 inf
->tlsvalues
[key
] = (void *)value
;
301 void *pthread_getspecific(pthread_key_t key
)
307 D(bug("%s(%u)\n", __FUNCTION__
, key
));
309 if (key
>= PTHREAD_KEYS_MAX
)
312 thread
= pthread_self();
313 inf
= GetThreadInfo(thread
);
314 value
= inf
->tlsvalues
[key
];
320 // Mutex attribute functions
323 int pthread_mutexattr_init(pthread_mutexattr_t
*attr
)
325 D(bug("%s(%p)\n", __FUNCTION__
, attr
));
330 attr
->kind
= PTHREAD_MUTEX_DEFAULT
;
335 int pthread_mutexattr_destroy(pthread_mutexattr_t
*attr
)
337 D(bug("%s(%p)\n", __FUNCTION__
, attr
));
342 memset(attr
, 0, sizeof(pthread_mutexattr_t
));
347 int pthread_mutexattr_gettype(pthread_mutexattr_t
*attr
, int *kind
)
349 D(bug("%s(%p, %p)\n", __FUNCTION__
, attr
, kind
));
360 int pthread_mutexattr_settype(pthread_mutexattr_t
*attr
, int kind
)
362 D(bug("%s(%p)\n", __FUNCTION__
, attr
));
364 if (attr
== NULL
|| !(kind
>= PTHREAD_MUTEX_NORMAL
&& kind
<= PTHREAD_MUTEX_ERRORCHECK
))
376 static int _pthread_mutex_init(pthread_mutex_t
*mutex
, const pthread_mutexattr_t
*attr
, BOOL staticinit
)
378 DB2(bug("%s(%p, %p)\n", __FUNCTION__
, mutex
, attr
));
384 mutex
->kind
= attr
->kind
;
385 else if (!staticinit
)
386 mutex
->kind
= PTHREAD_MUTEX_DEFAULT
;
387 InitSemaphore(&mutex
->semaphore
);
393 int pthread_mutex_init(pthread_mutex_t
*mutex
, const pthread_mutexattr_t
*attr
)
395 D(bug("%s(%p, %p)\n", __FUNCTION__
, mutex
, attr
));
397 return _pthread_mutex_init(mutex
, attr
, FALSE
);
400 int pthread_mutex_destroy(pthread_mutex_t
*mutex
)
402 D(bug("%s(%p)\n", __FUNCTION__
, mutex
));
407 // probably a statically allocated mutex
408 if (SemaphoreIsInvalid(&mutex
->semaphore
))
411 if (/*mutex->incond ||*/ AttemptSemaphore(&mutex
->semaphore
) == FALSE
)
416 ReleaseSemaphore(&mutex
->semaphore
);
420 ReleaseSemaphore(&mutex
->semaphore
);
421 memset(mutex
, 0, sizeof(pthread_mutex_t
));
426 int pthread_mutex_lock(pthread_mutex_t
*mutex
)
428 D(bug("%s(%p)\n", __FUNCTION__
, mutex
));
433 // initialize static mutexes
434 if (SemaphoreIsInvalid(&mutex
->semaphore
))
435 _pthread_mutex_init(mutex
, NULL
, TRUE
);
437 // normal mutexes would simply deadlock here
438 if (mutex
->kind
== PTHREAD_MUTEX_ERRORCHECK
&& SemaphoreIsMine(&mutex
->semaphore
))
441 ObtainSemaphore(&mutex
->semaphore
);
446 int pthread_mutex_timedlock(pthread_mutex_t
*mutex
, const struct timespec
*abstime
)
448 struct timeval end
, now
;
451 D(bug("%s(%p, %p)\n", __FUNCTION__
, mutex
, abstime
));
457 return pthread_mutex_lock(mutex
);
458 /*else if (abstime.tv_nsec < 0 || abstime.tv_nsec >= 1000000000)
461 TIMESPEC_TO_TIMEVAL(&end
, abstime
);
463 // busy waiting is not very nice, but ObtainSemaphore doesn't support timeouts
464 while ((result
= pthread_mutex_trylock(mutex
)) == EBUSY
)
467 gettimeofday(&now
, NULL
);
468 if (timercmp(&end
, &now
, <))
475 int pthread_mutex_trylock(pthread_mutex_t
*mutex
)
479 D(bug("%s(%p)\n", __FUNCTION__
, mutex
));
484 // initialize static mutexes
485 if (SemaphoreIsInvalid(&mutex
->semaphore
))
486 _pthread_mutex_init(mutex
, NULL
, TRUE
);
488 if (mutex
->kind
!= PTHREAD_MUTEX_RECURSIVE
&& SemaphoreIsMine(&mutex
->semaphore
))
491 ret
= AttemptSemaphore(&mutex
->semaphore
);
493 return (ret
== TRUE
) ? 0 : EBUSY
;
496 int pthread_mutex_unlock(pthread_mutex_t
*mutex
)
498 D(bug("%s(%p)\n", __FUNCTION__
, mutex
));
503 // initialize static mutexes
504 if (SemaphoreIsInvalid(&mutex
->semaphore
))
505 _pthread_mutex_init(mutex
, NULL
, TRUE
);
507 if (mutex
->kind
!= PTHREAD_MUTEX_NORMAL
&& !SemaphoreIsMine(&mutex
->semaphore
))
510 ReleaseSemaphore(&mutex
->semaphore
);
516 // Condition variable attribute functions
519 int pthread_condattr_init(pthread_condattr_t
*attr
)
521 D(bug("%s(%p)\n", __FUNCTION__
, attr
));
526 memset(attr
, 0, sizeof(pthread_condattr_t
));
531 int pthread_condattr_destroy(pthread_condattr_t
*attr
)
533 D(bug("%s(%p)\n", __FUNCTION__
, attr
));
538 memset(attr
, 0, sizeof(pthread_condattr_t
));
544 // Condition variable functions
547 int pthread_cond_init(pthread_cond_t
*cond
, const pthread_condattr_t
*attr
)
549 D(bug("%s(%p, %p)\n", __FUNCTION__
, cond
, attr
));
554 InitSemaphore(&cond
->semaphore
);
555 NEWLIST((struct List
*)&cond
->waiters
);
560 int pthread_cond_destroy(pthread_cond_t
*cond
)
562 D(bug("%s(%p)\n", __FUNCTION__
, cond
));
567 // probably a statically allocated condition
568 if (SemaphoreIsInvalid(&cond
->semaphore
))
571 if (AttemptSemaphore(&cond
->semaphore
) == FALSE
)
574 if (!IsListEmpty((struct List
*)&cond
->waiters
))
576 ReleaseSemaphore(&cond
->semaphore
);
580 ReleaseSemaphore(&cond
->semaphore
);
581 memset(cond
, 0, sizeof(pthread_cond_t
));
586 static int _pthread_cond_timedwait(pthread_cond_t
*cond
, pthread_mutex_t
*mutex
, const struct timespec
*abstime
, BOOL relative
)
592 struct MsgPort timermp
;
593 struct timerequest timerio
;
596 DB2(bug("%s(%p, %p, %p)\n", __FUNCTION__
, cond
, mutex
, abstime
));
598 if (cond
== NULL
|| mutex
== NULL
)
601 pthread_testcancel();
603 // initialize static conditions
604 if (SemaphoreIsInvalid(&cond
->semaphore
))
605 pthread_cond_init(cond
, NULL
);
607 task
= FindTask(NULL
);
612 timermp
.mp_Node
.ln_Type
= NT_MSGPORT
;
613 timermp
.mp_Node
.ln_Pri
= 0;
614 timermp
.mp_Node
.ln_Name
= NULL
;
615 timermp
.mp_Flags
= PA_SIGNAL
;
616 timermp
.mp_SigTask
= task
;
617 signal
= AllocSignal(-1);
620 signal
= SIGB_TIMER_FALLBACK
;
621 SetSignal(SIGF_TIMER_FALLBACK
, 0);
623 timermp
.mp_SigBit
= signal
;
624 NEWLIST(&timermp
.mp_MsgList
);
627 timerio
.tr_node
.io_Message
.mn_Node
.ln_Type
= NT_MESSAGE
;
628 timerio
.tr_node
.io_Message
.mn_Node
.ln_Pri
= 0;
629 timerio
.tr_node
.io_Message
.mn_Node
.ln_Name
= NULL
;
630 timerio
.tr_node
.io_Message
.mn_ReplyPort
= &timermp
;
631 timerio
.tr_node
.io_Message
.mn_Length
= sizeof(struct timerequest
);
634 if (OpenDevice((STRPTR
)TIMERNAME
, UNIT_MICROHZ
, &timerio
.tr_node
, 0) != 0)
636 if (timermp
.mp_SigBit
!= SIGB_TIMER_FALLBACK
)
637 FreeSignal(timermp
.mp_SigBit
);
642 // prepare the device command and send it
643 timerio
.tr_node
.io_Command
= TR_ADDREQUEST
;
644 timerio
.tr_node
.io_Flags
= 0;
645 TIMESPEC_TO_TIMEVAL(&timerio
.tr_time
, abstime
);
648 struct timeval starttime
;
649 // absolute time has to be converted to relative
650 // GetSysTime can't be used due to the timezone offset in abstime
651 gettimeofday(&starttime
, NULL
);
652 timersub(&timerio
.tr_time
, &starttime
, &timerio
.tr_time
);
654 timermask
= 1 << timermp
.mp_SigBit
;
656 SendIO((struct IORequest
*)&timerio
);
659 // prepare a waiter node
661 signal
= AllocSignal(-1);
664 signal
= SIGB_COND_FALLBACK
;
665 SetSignal(SIGF_COND_FALLBACK
, 0);
667 waiter
.sigmask
= 1 << signal
;
668 sigs
|= waiter
.sigmask
;
670 // add it to the end of the list
671 ObtainSemaphore(&cond
->semaphore
);
672 AddTail((struct List
*)&cond
->waiters
, (struct Node
*)&waiter
);
673 ReleaseSemaphore(&cond
->semaphore
);
675 // wait for the condition to be signalled or the timeout
677 pthread_mutex_unlock(mutex
);
679 pthread_mutex_lock(mutex
);
682 // remove the node from the list
683 ObtainSemaphore(&cond
->semaphore
);
684 Remove((struct Node
*)&waiter
);
685 ReleaseSemaphore(&cond
->semaphore
);
687 if (signal
!= SIGB_COND_FALLBACK
)
692 // clean up the timerequest
693 if (!CheckIO((struct IORequest
*)&timerio
))
695 AbortIO((struct IORequest
*)&timerio
);
696 WaitIO((struct IORequest
*)&timerio
);
698 CloseDevice((struct IORequest
*)&timerio
);
700 if (timermp
.mp_SigBit
!= SIGB_TIMER_FALLBACK
)
701 FreeSignal(timermp
.mp_SigBit
);
704 if (sigs
& timermask
)
711 int pthread_cond_timedwait(pthread_cond_t
*cond
, pthread_mutex_t
*mutex
, const struct timespec
*abstime
)
713 D(bug("%s(%p, %p, %p)\n", __FUNCTION__
, cond
, mutex
, abstime
));
715 return _pthread_cond_timedwait(cond
, mutex
, abstime
, FALSE
);
718 int pthread_cond_timedwait_relative_np(pthread_cond_t
*cond
, pthread_mutex_t
*mutex
, const struct timespec
*reltime
)
720 D(bug("%s(%p, %p, %p)\n", __FUNCTION__
, cond
, mutex
, reltime
));
722 return _pthread_cond_timedwait(cond
, mutex
, reltime
, TRUE
);
725 int pthread_cond_wait(pthread_cond_t
*cond
, pthread_mutex_t
*mutex
)
727 D(bug("%s(%p)\n", __FUNCTION__
, cond
));
729 return _pthread_cond_timedwait(cond
, mutex
, NULL
, FALSE
);
732 static int _pthread_cond_broadcast(pthread_cond_t
*cond
, BOOL onlyfirst
)
736 DB2(bug("%s(%p, %d)\n", __FUNCTION__
, cond
, onlyfirst
));
741 // initialize static conditions
742 if (SemaphoreIsInvalid(&cond
->semaphore
))
743 pthread_cond_init(cond
, NULL
);
745 // signal the waiting threads
746 ObtainSemaphore(&cond
->semaphore
);
747 ForeachNode(&cond
->waiters
, waiter
)
749 Signal(waiter
->task
, waiter
->sigmask
);
750 if (onlyfirst
) break;
752 ReleaseSemaphore(&cond
->semaphore
);
757 int pthread_cond_signal(pthread_cond_t
*cond
)
759 D(bug("%s(%p)\n", __FUNCTION__
, cond
));
761 return _pthread_cond_broadcast(cond
, TRUE
);
764 int pthread_cond_broadcast(pthread_cond_t
*cond
)
766 D(bug("%s(%p)\n", __FUNCTION__
, cond
));
768 return _pthread_cond_broadcast(cond
, FALSE
);
775 int pthread_barrier_init(pthread_barrier_t
*barrier
, const pthread_barrierattr_t
*attr
, unsigned int count
)
777 D(bug("%s(%p, %p, %u)\n", __FUNCTION__
, barrier
, attr
, count
));
779 if (barrier
== NULL
|| count
== 0)
782 barrier
->curr_height
= count
;
783 barrier
->total_height
= PTHREAD_BARRIER_FLAG
;
784 pthread_cond_init(&barrier
->breeched
, NULL
);
785 pthread_mutex_init(&barrier
->lock
, NULL
);
790 int pthread_barrier_destroy(pthread_barrier_t
*barrier
)
792 D(bug("%s(%p)\n", __FUNCTION__
, barrier
));
797 if (pthread_mutex_trylock(&barrier
->lock
) != 0)
800 if (barrier
->total_height
> PTHREAD_BARRIER_FLAG
)
802 pthread_mutex_unlock(&barrier
->lock
);
806 pthread_mutex_unlock(&barrier
->lock
);
808 if (pthread_cond_destroy(&barrier
->breeched
) != 0)
811 pthread_mutex_destroy(&barrier
->lock
);
812 barrier
->curr_height
= barrier
->total_height
= 0;
817 int pthread_barrier_wait(pthread_barrier_t
*barrier
)
819 D(bug("%s(%p)\n", __FUNCTION__
, barrier
));
824 pthread_mutex_lock(&barrier
->lock
);
826 // wait until everyone exits the barrier
827 while (barrier
->total_height
> PTHREAD_BARRIER_FLAG
)
828 pthread_cond_wait(&barrier
->breeched
, &barrier
->lock
);
830 // are we the first to enter?
831 if (barrier
->total_height
== PTHREAD_BARRIER_FLAG
) barrier
->total_height
= 0;
833 barrier
->total_height
++;
835 if (barrier
->total_height
== barrier
->curr_height
)
837 barrier
->total_height
+= PTHREAD_BARRIER_FLAG
- 1;
838 pthread_cond_broadcast(&barrier
->breeched
);
840 pthread_mutex_unlock(&barrier
->lock
);
842 return PTHREAD_BARRIER_SERIAL_THREAD
;
846 // wait until enough threads enter the barrier
847 while (barrier
->total_height
< PTHREAD_BARRIER_FLAG
)
848 pthread_cond_wait(&barrier
->breeched
, &barrier
->lock
);
850 barrier
->total_height
--;
852 // get entering threads to wake up
853 if (barrier
->total_height
== PTHREAD_BARRIER_FLAG
)
854 pthread_cond_broadcast(&barrier
->breeched
);
856 pthread_mutex_unlock(&barrier
->lock
);
863 // Read-write lock attribute functions
866 int pthread_rwlockattr_init(pthread_rwlockattr_t
*attr
)
868 D(bug("%s(%p)\n", __FUNCTION__
, attr
));
873 memset(attr
, 0, sizeof(pthread_rwlockattr_t
));
878 int pthread_rwlockattr_destroy(pthread_rwlockattr_t
*attr
)
880 D(bug("%s(%p)\n", __FUNCTION__
, attr
));
885 memset(attr
, 0, sizeof(pthread_rwlockattr_t
));
891 // Read-write lock functions
894 int pthread_rwlock_init(pthread_rwlock_t
*lock
, const pthread_rwlockattr_t
*attr
)
896 D(bug("%s(%p, %p)\n", __FUNCTION__
, lock
, attr
));
901 InitSemaphore(&lock
->semaphore
);
906 int pthread_rwlock_destroy(pthread_rwlock_t
*lock
)
908 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
913 // probably a statically allocated rwlock
914 if (SemaphoreIsInvalid(&lock
->semaphore
))
917 if (AttemptSemaphore(&lock
->semaphore
) == FALSE
)
920 ReleaseSemaphore(&lock
->semaphore
);
921 memset(lock
, 0, sizeof(pthread_rwlock_t
));
926 int pthread_rwlock_tryrdlock(pthread_rwlock_t
*lock
)
930 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
935 // initialize static rwlocks
936 if (SemaphoreIsInvalid(&lock
->semaphore
))
937 pthread_rwlock_init(lock
, NULL
);
939 ret
= AttemptSemaphoreShared(&lock
->semaphore
);
941 return (ret
== TRUE
) ? 0 : EBUSY
;
944 int pthread_rwlock_trywrlock(pthread_rwlock_t
*lock
)
948 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
953 // initialize static rwlocks
954 if (SemaphoreIsInvalid(&lock
->semaphore
))
955 pthread_rwlock_init(lock
, NULL
);
957 ret
= AttemptSemaphore(&lock
->semaphore
);
959 return (ret
== TRUE
) ? 0 : EBUSY
;
962 int pthread_rwlock_rdlock(pthread_rwlock_t
*lock
)
964 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
969 pthread_testcancel();
971 // initialize static rwlocks
972 if (SemaphoreIsInvalid(&lock
->semaphore
))
973 pthread_rwlock_init(lock
, NULL
);
975 // we might already have a write lock
976 if (SemaphoreIsMine(&lock
->semaphore
))
979 ObtainSemaphoreShared(&lock
->semaphore
);
984 int pthread_rwlock_timedrdlock(pthread_rwlock_t
*lock
, const struct timespec
*abstime
)
986 struct timeval end
, now
;
989 D(bug("%s(%p, %p)\n", __FUNCTION__
, lock
, abstime
));
995 return pthread_rwlock_rdlock(lock
);
997 pthread_testcancel();
999 TIMESPEC_TO_TIMEVAL(&end
, abstime
);
1001 // busy waiting is not very nice, but ObtainSemaphore doesn't support timeouts
1002 while ((result
= pthread_rwlock_tryrdlock(lock
)) == EBUSY
)
1005 gettimeofday(&now
, NULL
);
1006 if (timercmp(&end
, &now
, <))
1013 int pthread_rwlock_wrlock(pthread_rwlock_t
*lock
)
1015 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
1020 pthread_testcancel();
1022 // initialize static rwlocks
1023 if (SemaphoreIsInvalid(&lock
->semaphore
))
1024 pthread_rwlock_init(lock
, NULL
);
1026 if (SemaphoreIsMine(&lock
->semaphore
))
1029 ObtainSemaphore(&lock
->semaphore
);
1034 int pthread_rwlock_timedwrlock(pthread_rwlock_t
*lock
, const struct timespec
*abstime
)
1036 struct timeval end
, now
;
1039 D(bug("%s(%p, %p)\n", __FUNCTION__
, lock
, abstime
));
1044 if (abstime
== NULL
)
1045 return pthread_rwlock_wrlock(lock
);
1047 pthread_testcancel();
1049 TIMESPEC_TO_TIMEVAL(&end
, abstime
);
1051 // busy waiting is not very nice, but ObtainSemaphore doesn't support timeouts
1052 while ((result
= pthread_rwlock_trywrlock(lock
)) == EBUSY
)
1055 gettimeofday(&now
, NULL
);
1056 if (timercmp(&end
, &now
, <))
1063 int pthread_rwlock_unlock(pthread_rwlock_t
*lock
)
1065 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
1070 // initialize static rwlocks
1071 if (SemaphoreIsInvalid(&lock
->semaphore
))
1072 pthread_rwlock_init(lock
, NULL
);
1074 //if (!SemaphoreIsMine(&lock->semaphore))
1075 // if no one has obtained the semaphore don't unlock the rwlock
1076 // this can be a leap of faith because we don't maintain a separate list of readers
1077 if (lock
->semaphore
.ss_NestCount
< 1)
1080 ReleaseSemaphore(&lock
->semaphore
);
1086 // Spinlock functions
1089 int pthread_spin_init(pthread_spinlock_t
*lock
, int pshared
)
1091 D(bug("%s(%p, %d)\n", __FUNCTION__
, lock
, pshared
));
1101 int pthread_spin_destroy(pthread_spinlock_t
*lock
)
1103 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
1108 int pthread_spin_lock(pthread_spinlock_t
*lock
)
1110 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
1115 while (__sync_lock_test_and_set((int *)lock
, 1))
1116 sched_yield(); // TODO: don't yield the CPU every iteration
1121 int pthread_spin_trylock(pthread_spinlock_t
*lock
)
1123 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
1128 if (__sync_lock_test_and_set((int *)lock
, 1))
1134 int pthread_spin_unlock(pthread_spinlock_t
*lock
)
1136 D(bug("%s(%p)\n", __FUNCTION__
, lock
));
1141 __sync_lock_release((int *)lock
);
1147 // Thread attribute functions
1150 int pthread_attr_init(pthread_attr_t
*attr
)
1154 D(bug("%s(%p)\n", __FUNCTION__
, attr
));
1159 memset(attr
, 0, sizeof(pthread_attr_t
));
1160 // inherit the priority and stack size of the parent thread
1161 task
= FindTask(NULL
);
1162 attr
->param
.sched_priority
= task
->tc_Node
.ln_Pri
;
1163 attr
->stacksize
= (UBYTE
*)task
->tc_SPUpper
- (UBYTE
*)task
->tc_SPLower
;
1168 int pthread_attr_destroy(pthread_attr_t
*attr
)
1170 D(bug("%s(%p)\n", __FUNCTION__
, attr
));
1175 memset(attr
, 0, sizeof(pthread_attr_t
));
1180 int pthread_attr_getdetachstate(const pthread_attr_t
*attr
, int *detachstate
)
1182 D(bug("%s(%p, %p)\n", __FUNCTION__
, attr
, detachstate
));
1187 if (detachstate
!= NULL
)
1188 *detachstate
= attr
->detachstate
;
1193 int pthread_attr_setdetachstate(pthread_attr_t
*attr
, int detachstate
)
1195 D(bug("%s(%p, %d)\n", __FUNCTION__
, attr
, detachstate
));
1197 if (attr
== NULL
|| detachstate
!= PTHREAD_CREATE_JOINABLE
)
1200 attr
->detachstate
= detachstate
;
1205 int pthread_attr_getstack(const pthread_attr_t
*attr
, void **stackaddr
, size_t *stacksize
)
1207 D(bug("%s(%p, %p, %p)\n", __FUNCTION__
, attr
, stackaddr
, stacksize
));
1212 if (stackaddr
!= NULL
)
1213 *stackaddr
= attr
->stackaddr
;
1215 if (stacksize
!= NULL
)
1216 *stacksize
= attr
->stacksize
;
1221 int pthread_attr_setstack(pthread_attr_t
*attr
, void *stackaddr
, size_t stacksize
)
1223 D(bug("%s(%p, %p, %u)\n", __FUNCTION__
, attr
, stackaddr
, stacksize
));
1225 if (attr
== NULL
|| (stackaddr
!= NULL
&& stacksize
== 0))
1228 attr
->stackaddr
= stackaddr
;
1229 attr
->stacksize
= stacksize
;
1234 int pthread_attr_getstacksize(const pthread_attr_t
*attr
, size_t *stacksize
)
1236 D(bug("%s(%p, %p)\n", __FUNCTION__
, attr
, stacksize
));
1238 return pthread_attr_getstack(attr
, NULL
, stacksize
);
1241 int pthread_attr_setstacksize(pthread_attr_t
*attr
, size_t stacksize
)
1243 D(bug("%s(%p, %u)\n", __FUNCTION__
, attr
, stacksize
));
1245 return pthread_attr_setstack(attr
, NULL
, stacksize
);
1248 int pthread_attr_getschedparam(const pthread_attr_t
*attr
, struct sched_param
*param
)
1250 D(bug("%s(%p, %p)\n", __FUNCTION__
, attr
, param
));
1256 *param
= attr
->param
;
1261 int pthread_attr_setschedparam(pthread_attr_t
*attr
, const struct sched_param
*param
)
1263 D(bug("%s(%p, %p)\n", __FUNCTION__
, attr
, param
));
1265 if (attr
== NULL
|| param
== NULL
)
1268 attr
->param
= *param
;
#ifdef USE_ASYNC_CANCEL
// Task exception handler used for asynchronous cancellation: when
// pthread_cancel() raises SIGBREAKF_CTRL_C on the target, this runs in the
// target's context and calls pthread_testcancel().
// NOTE(review): the MorphOS/AROS conditional skeleton was reconstructed
// from a damaged listing — confirm the #ifdef structure against the original.
#ifdef __MORPHOS__
static ULONG CancelHandlerFunc(void);
static struct EmulLibEntry CancelHandler =
{
    TRAP_LIB, 0, (void (*)(void))CancelHandlerFunc
};
static ULONG CancelHandlerFunc(void)
{
    ULONG signals = (ULONG)REG_D0;
    APTR data = (APTR)REG_A1;
    struct ExecBase *SysBase = (struct ExecBase *)REG_A6;
#else
AROS_UFH3S(ULONG, CancelHandler,
    AROS_UFHA(ULONG, signals, D0),
    AROS_UFHA(APTR, data, A1),
    AROS_UFHA(struct ExecBase *, SysBase, A6))
{
    AROS_USERFUNC_INIT
#endif

    DB2(bug("%s(%u, %p, %p)\n", __FUNCTION__, signals, data, SysBase));

    pthread_testcancel();

    return signals;

#ifndef __MORPHOS__
    AROS_USERFUNC_EXIT
#endif
}
#endif
1309 static void StarterFunc(void)
1313 int foundkey
= TRUE
;
1314 #ifdef USE_ASYNC_CANCEL
1318 DB2(bug("%s()\n", __FUNCTION__
));
1320 inf
= (ThreadInfo
*)FindTask(NULL
)->tc_UserData
;
1322 //inf->task->tc_Node.ln_Name[inf->oldlen];
1324 // we have to set the priority here to avoid race conditions
1325 SetTaskPri(inf
->task
, inf
->attr
.param
.sched_priority
);
1327 #ifdef USE_ASYNC_CANCEL
1328 // set the exception handler for async cancellation
1329 oldexcept
= inf
->task
->tc_ExceptCode
;
1331 inf
->task
->tc_ExceptCode
= &AROS_ASMSYMNAME(CancelHandler
);
1333 inf
->task
->tc_ExceptCode
= &CancelHandler
;
1335 SetExcept(SIGBREAKF_CTRL_C
, SIGBREAKF_CTRL_C
);
1338 // set a jump point for pthread_exit
1339 if (!setjmp(inf
->jmp
))
1341 // custom stack requires special handling
1342 if (inf
->attr
.stackaddr
!= NULL
&& inf
->attr
.stacksize
> 0)
1344 struct StackSwapArgs swapargs
;
1345 struct StackSwapStruct stack
;
1347 swapargs
.Args
[0] = (IPTR
)inf
->arg
;
1348 stack
.stk_Lower
= inf
->attr
.stackaddr
;
1349 stack
.stk_Upper
= (APTR
)((IPTR
)stack
.stk_Lower
+ inf
->attr
.stacksize
);
1350 stack
.stk_Pointer
= stack
.stk_Upper
;
1352 inf
->ret
= (void *)NewStackSwap(&stack
, inf
->start
, &swapargs
);
1356 inf
->ret
= inf
->start(inf
->arg
);
1360 #ifdef USE_ASYNC_CANCEL
1361 // remove the exception handler
1362 SetExcept(0, SIGBREAKF_CTRL_C
);
1363 inf
->task
->tc_ExceptCode
= oldexcept
;
1366 // destroy all non-NULL TLS key values
1367 // since the destructors can set the keys themselves, we have to do multiple iterations
1368 ObtainSemaphoreShared(&tls_sem
);
1369 for (j
= 0; foundkey
&& j
< PTHREAD_DESTRUCTOR_ITERATIONS
; j
++)
1372 for (i
= 0; i
< PTHREAD_KEYS_MAX
; i
++)
1374 if (tlskeys
[i
].used
&& tlskeys
[i
].destructor
&& inf
->tlsvalues
[i
])
1376 void *oldvalue
= inf
->tlsvalues
[i
];
1377 inf
->tlsvalues
[i
] = NULL
;
1378 tlskeys
[i
].destructor(oldvalue
);
1383 ReleaseSemaphore(&tls_sem
);
1385 // tell the parent thread that we are done
1387 inf
->finished
= TRUE
;
1388 Signal(inf
->parent
, SIGF_PARENT
);
1391 int pthread_create(pthread_t
*thread
, const pthread_attr_t
*attr
, void *(*start
)(void *), void *arg
)
1396 pthread_t threadnew
;
1398 D(bug("%s(%p, %p, %p, %p)\n", __FUNCTION__
, thread
, attr
, start
, arg
));
1400 if (thread
== NULL
|| start
== NULL
)
1403 ObtainSemaphore(&thread_sem
);
1405 // grab an empty thread slot
1406 threadnew
= GetThreadId(NULL
);
1407 if (threadnew
== PTHREAD_THREADS_MAX
)
1409 ReleaseSemaphore(&thread_sem
);
1413 // prepare the ThreadInfo structure
1414 inf
= GetThreadInfo(threadnew
);
1415 memset(inf
, 0, sizeof(ThreadInfo
));
1418 inf
->parent
= FindTask(NULL
);
1422 pthread_attr_init(&inf
->attr
);
1423 NEWLIST((struct List
*)&inf
->cleanup
);
1424 inf
->cancelstate
= PTHREAD_CANCEL_ENABLE
;
1425 inf
->canceltype
= PTHREAD_CANCEL_DEFERRED
;
1427 // let's trick CreateNewProc into allocating a larger buffer for the name
1428 snprintf(name
, sizeof(name
), "pthread thread #%d", threadnew
);
1429 oldlen
= strlen(name
);
1430 memset(name
+ oldlen
, ' ', sizeof(name
) - oldlen
- 1);
1431 name
[sizeof(name
) - 1] = '\0';
1433 // start the child thread
1434 inf
->task
= (struct Task
*)CreateNewProcTags(NP_Entry
, StarterFunc
,
1436 NP_CodeType
, CODETYPE_PPC
,
1437 (inf
->attr
.stackaddr
== NULL
&& inf
->attr
.stacksize
> 0) ? NP_PPCStackSize
: TAG_IGNORE
, inf
->attr
.stacksize
,
1439 (inf
->attr
.stackaddr
== NULL
&& inf
->attr
.stacksize
> 0) ? NP_StackSize
: TAG_IGNORE
, inf
->attr
.stacksize
,
1448 ReleaseSemaphore(&thread_sem
);
1452 ReleaseSemaphore(&thread_sem
);
1454 *thread
= threadnew
;
1459 int pthread_detach(pthread_t thread
)
1461 D(bug("%s(%u) not implemented\n", __FUNCTION__
, thread
));
1466 int pthread_join(pthread_t thread
, void **value_ptr
)
1470 D(bug("%s(%u, %p)\n", __FUNCTION__
, thread
, value_ptr
));
1472 inf
= GetThreadInfo(thread
);
1474 if (inf
== NULL
|| inf
->parent
== NULL
)
1477 pthread_testcancel();
1479 while (!inf
->finished
)
1483 *value_ptr
= inf
->ret
;
1485 ObtainSemaphore(&thread_sem
);
1486 memset(inf
, 0, sizeof(ThreadInfo
));
1487 ReleaseSemaphore(&thread_sem
);
1492 int pthread_equal(pthread_t t1
, pthread_t t2
)
1494 D(bug("%s(%u, %u)\n", __FUNCTION__
, t1
, t2
));
1499 pthread_t
pthread_self(void)
1504 D(bug("%s()\n", __FUNCTION__
));
1506 task
= FindTask(NULL
);
1507 thread
= GetThreadId(task
);
1509 // add non-pthread processes to our list, so we can handle the main thread
1510 if (thread
== PTHREAD_THREADS_MAX
)
1514 ObtainSemaphore(&thread_sem
);
1515 thread
= GetThreadId(NULL
);
1516 if (thread
== PTHREAD_THREADS_MAX
)
1518 // TODO: pthread_self is supposed to always succeed, but we can fail
1519 // here if we run out of thread slots
1520 // this can only happen if too many non-pthread processes call
1522 //ReleaseSemaphore(&thread_sem);
1526 inf
= GetThreadInfo(thread
);
1527 memset(inf
, 0, sizeof(ThreadInfo
));
1528 NEWLIST((struct List
*)&inf
->cleanup
);
1530 ReleaseSemaphore(&thread_sem
);
1536 int pthread_cancel(pthread_t thread
)
1540 D(bug("%s(%u)\n", __FUNCTION__
, thread
));
1542 inf
= GetThreadInfo(thread
);
1544 if (inf
== NULL
|| inf
->parent
== NULL
|| inf
->canceled
== TRUE
)
1547 inf
->canceled
= TRUE
;
1549 // we might have to cancel the thread immediately
1550 if (inf
->canceltype
== PTHREAD_CANCEL_ASYNCHRONOUS
&& inf
->cancelstate
== PTHREAD_CANCEL_ENABLE
)
1554 task
= FindTask(NULL
);
1556 if (inf
->task
== task
)
1557 pthread_testcancel(); // cancel ourselves
1559 Signal(inf
->task
, SIGBREAKF_CTRL_C
); // trigger the exception handler
1565 int pthread_setcancelstate(int state
, int *oldstate
)
1570 D(bug("%s(%d, %p)\n", __FUNCTION__
, state
, oldstate
));
1572 if (state
!= PTHREAD_CANCEL_ENABLE
&& state
!= PTHREAD_CANCEL_DISABLE
)
1575 thread
= pthread_self();
1576 inf
= GetThreadInfo(thread
);
1579 *oldstate
= inf
->cancelstate
;
1581 inf
->cancelstate
= state
;
1586 int pthread_setcanceltype(int type
, int *oldtype
)
1591 D(bug("%s(%d, %p)\n", __FUNCTION__
, type
, oldtype
));
1593 if (type
!= PTHREAD_CANCEL_DEFERRED
&& type
!= PTHREAD_CANCEL_ASYNCHRONOUS
)
1596 thread
= pthread_self();
1597 inf
= GetThreadInfo(thread
);
1600 *oldtype
= inf
->canceltype
;
1602 inf
->canceltype
= type
;
1607 void pthread_testcancel(void)
1612 D(bug("%s()\n", __FUNCTION__
));
1614 thread
= pthread_self();
1615 inf
= GetThreadInfo(thread
);
1617 if (inf
->canceled
&& (inf
->cancelstate
== PTHREAD_CANCEL_ENABLE
))
1618 pthread_exit(PTHREAD_CANCELED
);
1621 void pthread_exit(void *value_ptr
)
1625 CleanupHandler
*handler
;
1627 D(bug("%s(%p)\n", __FUNCTION__
, value_ptr
));
1629 thread
= pthread_self();
1630 inf
= GetThreadInfo(thread
);
1631 inf
->ret
= value_ptr
;
1633 // execute the clean-up handlers
1634 while ((handler
= (CleanupHandler
*)RemTail((struct List
*)&inf
->cleanup
)))
1635 if (handler
->routine
)
1636 handler
->routine(handler
->arg
);
1638 longjmp(inf
->jmp
, 1);
1641 static void OnceCleanup(void *arg
)
1643 pthread_once_t
*once_control
;
1645 DB2(bug("%s(%p)\n", __FUNCTION__
, arg
));
1647 once_control
= (pthread_once_t
*)arg
;
1648 pthread_spin_unlock(&once_control
->lock
);
1651 int pthread_once(pthread_once_t
*once_control
, void (*init_routine
)(void))
1653 D(bug("%s(%p, %p)\n", __FUNCTION__
, once_control
, init_routine
));
1655 if (once_control
== NULL
|| init_routine
== NULL
)
1658 if (__sync_val_compare_and_swap(&once_control
->started
, FALSE
, TRUE
))
1660 pthread_spin_lock(&once_control
->lock
);
1661 if (!once_control
->done
)
1663 pthread_cleanup_push(OnceCleanup
, once_control
);
1665 pthread_cleanup_pop(0);
1666 once_control
->done
= TRUE
;
1668 pthread_spin_unlock(&once_control
->lock
);
1675 // Scheduling functions
1678 int pthread_setschedparam(pthread_t thread
, int policy
, const struct sched_param
*param
)
1682 D(bug("%s(%u, %d, %p)\n", __FUNCTION__
, thread
, policy
, param
));
1687 inf
= GetThreadInfo(thread
);
1692 SetTaskPri(inf
->task
, param
->sched_priority
);
1698 // Non-portable functions
1700 int pthread_setname_np(pthread_t thread
, const char *name
)
1706 D(bug("%s(%u, %s)\n", __FUNCTION__
, thread
, name
));
1711 inf
= GetThreadInfo(thread
);
1716 currentname
= GetNodeName(inf
->task
);
1718 if (inf
->parent
== NULL
)
1719 namelen
= strlen(currentname
) + 1;
1723 if (strlen(name
) + 1 > namelen
)
1726 strncpy(currentname
, name
, namelen
);
1731 int pthread_getname_np(pthread_t thread
, char *name
, size_t len
)
1736 D(bug("%s(%u, %p, %u)\n", __FUNCTION__
, thread
, name
, len
));
1738 if (name
== NULL
|| len
== 0)
1741 inf
= GetThreadInfo(thread
);
1746 currentname
= GetNodeName(inf
->task
);
1748 if (strlen(currentname
) + 1 > len
)
1751 // TODO: partially copy the name?
1752 strncpy(name
, currentname
, len
);
1758 // Cancellation cleanup
1761 void pthread_cleanup_push(void (*routine
)(void *), void *arg
)
1765 CleanupHandler
*handler
;
1767 D(bug("%s(%p, %p)\n", __FUNCTION__
, routine
, arg
));
1769 if (routine
== NULL
)
1772 handler
= malloc(sizeof(CleanupHandler
));
1774 if (handler
== NULL
)
1777 thread
= pthread_self();
1778 inf
= GetThreadInfo(thread
);
1780 handler
->routine
= routine
;
1782 AddTail((struct List
*)&inf
->cleanup
, (struct Node
*)handler
);
1785 void pthread_cleanup_pop(int execute
)
1789 CleanupHandler
*handler
;
1791 D(bug("%s(%d)\n", __FUNCTION__
, execute
));
1793 thread
= pthread_self();
1794 inf
= GetThreadInfo(thread
);
1795 handler
= (CleanupHandler
*)RemTail((struct List
*)&inf
->cleanup
);
1797 if (handler
&& handler
->routine
&& execute
)
1798 handler
->routine(handler
->arg
);
1807 int pthread_kill(pthread_t thread
, int sig
)
1809 D(bug("%s(%u, %d) not implemented\n", __FUNCTION__
, thread
, sig
));
1815 // Constructors, destructors
1818 static int _Init_Func(void)
1820 DB2(bug("%s()\n", __FUNCTION__
));
1822 //memset(&threads, 0, sizeof(threads));
1823 InitSemaphore(&thread_sem
);
1824 InitSemaphore(&tls_sem
);
1825 // reserve ID 0 for the main thread
1831 static void _Exit_Func(void)
1837 DB2(bug("%s()\n", __FUNCTION__
));
1839 // wait for the threads?
1841 for (i
= 0; i
< PTHREAD_THREADS_MAX
; i
++)
1842 pthread_join(i
, NULL
);
1847 ADD2INIT(_Init_Func
, 0);
1848 ADD2EXIT(_Exit_Func
, 0);
1850 static CONSTRUCTOR_P(_Init_Func
, 100)
1852 return !_Init_Func();
1855 static DESTRUCTOR_P(_Exit_Func
, 100)