/*
  Copyright (C) 2014 Szilard Biro

  This software is provided 'as-is', without any express or implied
  warranty. In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.
*/
#ifdef __MORPHOS__
#include <sys/time.h>
#endif
#include <dos/dostags.h>
#include <proto/exec.h>
#include <proto/dos.h>
#include <proto/timer.h>
#ifdef __AROS__
#include <aros/symbolsets.h>
#define TIMESPEC_TO_TIMEVAL(tv, ts) { \
    (tv)->tv_sec = (ts)->tv_sec; \
    (tv)->tv_usec = (ts)->tv_nsec / 1000; }
#else
#include <constructor.h>
#define StackSwapArgs PPCStackSwapArgs
#define NewStackSwap NewPPCStackSwap
#endif

#include <setjmp.h>
#include <string.h>
#include <stdio.h>
#include <signal.h>
#include <stdlib.h>

#include "pthread.h"
#include "debug.h"

#define SIGB_PARENT SIGBREAKB_CTRL_F
#define SIGF_PARENT (1 << SIGB_PARENT)
#define SIGB_COND_FALLBACK SIGBREAKB_CTRL_E
#define SIGF_COND_FALLBACK (1 << SIGB_COND_FALLBACK)
#define SIGB_TIMER_FALLBACK SIGBREAKB_CTRL_D
#define SIGF_TIMER_FALLBACK (1 << SIGB_TIMER_FALLBACK)

#define NAMELEN 32
#define PTHREAD_FIRST_THREAD_ID (1)
#define PTHREAD_BARRIER_FLAG (1UL << 31)

//#define USE_ASYNC_CANCEL
typedef struct
{
    struct MinNode node;
    struct Task *task;
    ULONG sigmask;
} CondWaiter;

typedef struct
{
    void (*destructor)(void *);
    BOOL used;
} TLSKey;

typedef struct
{
    struct MinNode node;
    void (*routine)(void *);
    void *arg;
} CleanupHandler;

typedef struct
{
    void *(*start)(void *);
    void *arg;
    struct Task *parent;
    int finished;
    struct Task *task;
    void *ret;
    jmp_buf jmp;
    pthread_attr_t attr;
    void *tlsvalues[PTHREAD_KEYS_MAX];
    struct MinList cleanup;
    int cancelstate;
    int canceltype;
    int canceled;
} ThreadInfo;

static ThreadInfo threads[PTHREAD_THREADS_MAX];
static struct SignalSemaphore thread_sem;
static TLSKey tlskeys[PTHREAD_KEYS_MAX];
static struct SignalSemaphore tls_sem;
//
// Helper functions
//

static int SemaphoreIsInvalid(struct SignalSemaphore *sem)
{
    DB2(bug("%s(%p)\n", __FUNCTION__, sem));

    return (!sem || sem->ss_Link.ln_Type != NT_SIGNALSEM || sem->ss_WaitQueue.mlh_Tail != NULL);
}

static int SemaphoreIsMine(struct SignalSemaphore *sem)
{
    struct Task *me;

    DB2(bug("%s(%p)\n", __FUNCTION__, sem));

    me = FindTask(NULL);

    return (sem && sem->ss_NestCount > 0 && sem->ss_Owner == me);
}

static ThreadInfo *GetThreadInfo(pthread_t thread)
{
    ThreadInfo *inf = NULL;

    DB2(bug("%s(%u)\n", __FUNCTION__, thread));

    // TODO: more robust error handling?
    if (thread < PTHREAD_THREADS_MAX)
        inf = &threads[thread];

    return inf;
}

static pthread_t GetThreadId(struct Task *task)
{
    pthread_t i;

    DB2(bug("%s(%p)\n", __FUNCTION__, task));

    ObtainSemaphoreShared(&thread_sem);

    // First thread id will be 1 so that it is different than default value of pthread_t
    for (i = PTHREAD_FIRST_THREAD_ID; i < PTHREAD_THREADS_MAX; i++)
    {
        if (threads[i].task == task)
            break;
    }

    ReleaseSemaphore(&thread_sem);

    return i;
}
#if defined __mc68000__
/* No CAS instruction on m68k */
static int __m68k_sync_val_compare_and_swap(int *v, int o, int n)
{
    int ret;

    Disable();
    // return the previous value, like the GCC builtin this stands in for
    ret = (*v);
    if (ret == (o))
        (*v) = (n);
    Enable();

    return ret;
}
#undef __sync_val_compare_and_swap
#define __sync_val_compare_and_swap(v, o, n) __m68k_sync_val_compare_and_swap(v, o, n)

static int __m68k_sync_lock_test_and_set(int *v, int n)
{
    int ret;

    Disable();
    // an atomic exchange: return the previous value, not n, because
    // pthread_spin_lock() relies on it to detect a successful acquisition
    ret = (*v);
    (*v) = (n);
    Enable();

    return ret;
}
#undef __sync_lock_test_and_set
#define __sync_lock_test_and_set(v, n) __m68k_sync_lock_test_and_set(v, n)
#undef __sync_lock_release
#define __sync_lock_release(v) __m68k_sync_lock_test_and_set(v, 0)

static inline int __m68k_sync_add_and_fetch(int *v, int n)
{
    int ret;

    Disable();
    (*v) += (n);
    ret = (*v);
    Enable();

    return ret;
}
#undef __sync_add_and_fetch
#define __sync_add_and_fetch(v, n) __m68k_sync_add_and_fetch(v, n)
#undef __sync_sub_and_fetch
#define __sync_sub_and_fetch(v, n) __m68k_sync_add_and_fetch(v, -(n))
#endif
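
/*
 * The wrappers above substitute for GCC's __sync_* builtins, which the m68k
 * backend does not provide; atomicity comes from bracketing the access with
 * Disable()/Enable(). A minimal usage sketch (illustrative only, not part of
 * the library):
 *
 *     static int counter = 0;
 *     int newval = __sync_add_and_fetch(&counter, 1);  // atomically ++counter
 *     int old = __sync_lock_test_and_set(&counter, 0); // swap in 0, get old value
 */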
//
// Thread specific data functions
//

int pthread_key_create(pthread_key_t *key, void (*destructor)(void *))
{
    TLSKey *tls;
    int i;

    D(bug("%s(%p, %p)\n", __FUNCTION__, key, destructor));

    if (key == NULL)
        return EINVAL;

    ObtainSemaphore(&tls_sem);

    for (i = 0; i < PTHREAD_KEYS_MAX; i++)
    {
        if (tlskeys[i].used == FALSE)
            break;
    }

    if (i == PTHREAD_KEYS_MAX)
    {
        ReleaseSemaphore(&tls_sem);
        return EAGAIN;
    }

    tls = &tlskeys[i];
    tls->used = TRUE;
    tls->destructor = destructor;

    ReleaseSemaphore(&tls_sem);

    *key = i;

    return 0;
}

int pthread_key_delete(pthread_key_t key)
{
    TLSKey *tls;

    D(bug("%s(%u)\n", __FUNCTION__, key));

    if (key >= PTHREAD_KEYS_MAX)
        return EINVAL;

    tls = &tlskeys[key];

    ObtainSemaphore(&tls_sem);

    if (tls->used == FALSE)
    {
        ReleaseSemaphore(&tls_sem);
        return EINVAL;
    }

    tls->used = FALSE;
    tls->destructor = NULL;

    ReleaseSemaphore(&tls_sem);

    return 0;
}

int pthread_setspecific(pthread_key_t key, const void *value)
{
    pthread_t thread;
    ThreadInfo *inf;
    TLSKey *tls;

    D(bug("%s(%u)\n", __FUNCTION__, key));

    if (key >= PTHREAD_KEYS_MAX)
        return EINVAL;

    thread = pthread_self();
    tls = &tlskeys[key];

    ObtainSemaphoreShared(&tls_sem);

    if (tls->used == FALSE)
    {
        ReleaseSemaphore(&tls_sem);
        return EINVAL;
    }

    ReleaseSemaphore(&tls_sem);

    inf = GetThreadInfo(thread);
    inf->tlsvalues[key] = (void *)value;

    return 0;
}

void *pthread_getspecific(pthread_key_t key)
{
    pthread_t thread;
    ThreadInfo *inf;
    void *value = NULL;

    D(bug("%s(%u)\n", __FUNCTION__, key));

    if (key >= PTHREAD_KEYS_MAX)
        return NULL;

    thread = pthread_self();
    inf = GetThreadInfo(thread);
    value = inf->tlsvalues[key];

    return value;
}
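
/*
 * Usage sketch for the key functions above (illustrative only): each thread
 * that calls pthread_setspecific() gets its own private value, and the
 * destructor runs on that thread's copy when it exits (see StarterFunc).
 *
 *     static pthread_key_t key;
 *
 *     pthread_key_create(&key, free);        // destructor frees the value
 *     pthread_setspecific(key, malloc(64));  // per-thread buffer
 *     char *buf = pthread_getspecific(key);  // this thread's copy
 */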
//
// Mutex attribute functions
//

int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    D(bug("%s(%p)\n", __FUNCTION__, attr));

    if (attr == NULL)
        return EINVAL;

    attr->kind = PTHREAD_MUTEX_DEFAULT;

    return 0;
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    D(bug("%s(%p)\n", __FUNCTION__, attr));

    if (attr == NULL)
        return EINVAL;

    memset(attr, 0, sizeof(pthread_mutexattr_t));

    return 0;
}

int pthread_mutexattr_gettype(pthread_mutexattr_t *attr, int *kind)
{
    D(bug("%s(%p, %p)\n", __FUNCTION__, attr, kind));

    if (attr == NULL)
        return EINVAL;

    if (kind)
        *kind = attr->kind;

    return 0;
}
int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int kind)
{
    D(bug("%s(%p, %d)\n", __FUNCTION__, attr, kind));

    if (attr == NULL || !(kind >= PTHREAD_MUTEX_NORMAL && kind <= PTHREAD_MUTEX_ERRORCHECK))
        return EINVAL;

    attr->kind = kind;

    return 0;
}
//
// Mutex functions
//

static int _pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr, BOOL staticinit)
{
    DB2(bug("%s(%p, %p)\n", __FUNCTION__, mutex, attr));

    if (mutex == NULL)
        return EINVAL;

    if (attr)
        mutex->kind = attr->kind;
    else if (!staticinit)
        mutex->kind = PTHREAD_MUTEX_DEFAULT;
    InitSemaphore(&mutex->semaphore);
    mutex->incond = 0;

    return 0;
}

int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
{
    D(bug("%s(%p, %p)\n", __FUNCTION__, mutex, attr));

    return _pthread_mutex_init(mutex, attr, FALSE);
}

int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
    D(bug("%s(%p)\n", __FUNCTION__, mutex));

    if (mutex == NULL)
        return EINVAL;

    // probably a statically allocated mutex
    if (SemaphoreIsInvalid(&mutex->semaphore))
        return 0;

    if (/*mutex->incond ||*/ AttemptSemaphore(&mutex->semaphore) == FALSE)
        return EBUSY;

    if (mutex->incond)
    {
        ReleaseSemaphore(&mutex->semaphore);
        return EBUSY;
    }

    ReleaseSemaphore(&mutex->semaphore);
    memset(mutex, 0, sizeof(pthread_mutex_t));

    return 0;
}

int pthread_mutex_lock(pthread_mutex_t *mutex)
{
    D(bug("%s(%p)\n", __FUNCTION__, mutex));

    if (mutex == NULL)
        return EINVAL;

    // initialize static mutexes
    if (SemaphoreIsInvalid(&mutex->semaphore))
        _pthread_mutex_init(mutex, NULL, TRUE);

    // normal mutexes would simply deadlock here
    if (mutex->kind == PTHREAD_MUTEX_ERRORCHECK && SemaphoreIsMine(&mutex->semaphore))
        return EDEADLK;

    ObtainSemaphore(&mutex->semaphore);

    return 0;
}

int pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
{
    struct timeval end, now;
    int result;

    D(bug("%s(%p, %p)\n", __FUNCTION__, mutex, abstime));

    if (mutex == NULL)
        return EINVAL;

    if (abstime == NULL)
        return pthread_mutex_lock(mutex);
    /*else if (abstime.tv_nsec < 0 || abstime.tv_nsec >= 1000000000)
        return EINVAL;*/

    TIMESPEC_TO_TIMEVAL(&end, abstime);

    // busy waiting is not very nice, but ObtainSemaphore doesn't support timeouts
    while ((result = pthread_mutex_trylock(mutex)) == EBUSY)
    {
        sched_yield();
        gettimeofday(&now, NULL);
        if (timercmp(&end, &now, <))
            return ETIMEDOUT;
    }

    return result;
}

int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    ULONG ret;

    D(bug("%s(%p)\n", __FUNCTION__, mutex));

    if (mutex == NULL)
        return EINVAL;

    // initialize static mutexes
    if (SemaphoreIsInvalid(&mutex->semaphore))
        _pthread_mutex_init(mutex, NULL, TRUE);

    if (mutex->kind != PTHREAD_MUTEX_RECURSIVE && SemaphoreIsMine(&mutex->semaphore))
        return EBUSY;

    ret = AttemptSemaphore(&mutex->semaphore);

    return (ret == TRUE) ? 0 : EBUSY;
}

int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
    D(bug("%s(%p)\n", __FUNCTION__, mutex));

    if (mutex == NULL)
        return EINVAL;

    // initialize static mutexes
    if (SemaphoreIsInvalid(&mutex->semaphore))
        _pthread_mutex_init(mutex, NULL, TRUE);

    if (mutex->kind != PTHREAD_MUTEX_NORMAL && !SemaphoreIsMine(&mutex->semaphore))
        return EPERM;

    ReleaseSemaphore(&mutex->semaphore);

    return 0;
}
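
/*
 * Usage sketch (illustrative only, assuming the usual PTHREAD_MUTEX_INITIALIZER
 * from pthread.h): a statically initialized mutex guarding a shared counter.
 * The first lock operation takes the lazy-init path above, because the static
 * initializer leaves the embedded semaphore in an invalid state.
 *
 *     static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *     static int shared;
 *
 *     pthread_mutex_lock(&lock);
 *     shared++;
 *     pthread_mutex_unlock(&lock);
 */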
//
// Condition variable attribute functions
//

int pthread_condattr_init(pthread_condattr_t *attr)
{
    D(bug("%s(%p)\n", __FUNCTION__, attr));

    if (attr == NULL)
        return EINVAL;

    memset(attr, 0, sizeof(pthread_condattr_t));

    return 0;
}

int pthread_condattr_destroy(pthread_condattr_t *attr)
{
    D(bug("%s(%p)\n", __FUNCTION__, attr));

    if (attr == NULL)
        return EINVAL;

    memset(attr, 0, sizeof(pthread_condattr_t));

    return 0;
}
//
// Condition variable functions
//

int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
{
    D(bug("%s(%p, %p)\n", __FUNCTION__, cond, attr));

    if (cond == NULL)
        return EINVAL;

    InitSemaphore(&cond->semaphore);
    NEWLIST((struct List *)&cond->waiters);

    return 0;
}

int pthread_cond_destroy(pthread_cond_t *cond)
{
    D(bug("%s(%p)\n", __FUNCTION__, cond));

    if (cond == NULL)
        return EINVAL;

    // probably a statically allocated condition
    if (SemaphoreIsInvalid(&cond->semaphore))
        return 0;

    if (AttemptSemaphore(&cond->semaphore) == FALSE)
        return EBUSY;

    if (!IsListEmpty((struct List *)&cond->waiters))
    {
        ReleaseSemaphore(&cond->semaphore);
        return EBUSY;
    }

    ReleaseSemaphore(&cond->semaphore);
    memset(cond, 0, sizeof(pthread_cond_t));

    return 0;
}
static int _pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, BOOL relative)
{
    CondWaiter waiter;
    BYTE signal;
    ULONG sigs = 0;
    ULONG timermask = 0;
    struct MsgPort timermp;
    struct timerequest timerio;
    struct Task *task;

    DB2(bug("%s(%p, %p, %p)\n", __FUNCTION__, cond, mutex, abstime));

    if (cond == NULL || mutex == NULL)
        return EINVAL;

    pthread_testcancel();

    // initialize static conditions
    if (SemaphoreIsInvalid(&cond->semaphore))
        pthread_cond_init(cond, NULL);

    task = FindTask(NULL);

    if (abstime)
    {
        // prepare MsgPort
        timermp.mp_Node.ln_Type = NT_MSGPORT;
        timermp.mp_Node.ln_Pri = 0;
        timermp.mp_Node.ln_Name = NULL;
        timermp.mp_Flags = PA_SIGNAL;
        timermp.mp_SigTask = task;
        signal = AllocSignal(-1);
        if (signal == -1)
        {
            signal = SIGB_TIMER_FALLBACK;
            SetSignal(SIGF_TIMER_FALLBACK, 0);
        }
        timermp.mp_SigBit = signal;
        NEWLIST(&timermp.mp_MsgList);

        // prepare IORequest
        timerio.tr_node.io_Message.mn_Node.ln_Type = NT_MESSAGE;
        timerio.tr_node.io_Message.mn_Node.ln_Pri = 0;
        timerio.tr_node.io_Message.mn_Node.ln_Name = NULL;
        timerio.tr_node.io_Message.mn_ReplyPort = &timermp;
        timerio.tr_node.io_Message.mn_Length = sizeof(struct timerequest);

        // open timer.device
        if (OpenDevice((STRPTR)TIMERNAME, UNIT_MICROHZ, &timerio.tr_node, 0) != 0)
        {
            if (timermp.mp_SigBit != SIGB_TIMER_FALLBACK)
                FreeSignal(timermp.mp_SigBit);

            return EINVAL;
        }

        // prepare the device command and send it
        timerio.tr_node.io_Command = TR_ADDREQUEST;
        timerio.tr_node.io_Flags = 0;
        TIMESPEC_TO_TIMEVAL(&timerio.tr_time, abstime);
        if (!relative)
        {
            struct timeval starttime;
            // absolute time has to be converted to relative
            // GetSysTime can't be used due to the timezone offset in abstime
            gettimeofday(&starttime, NULL);
            timersub(&timerio.tr_time, &starttime, &timerio.tr_time);
        }
        timermask = 1 << timermp.mp_SigBit;
        sigs |= timermask;
        SendIO((struct IORequest *)&timerio);
    }

    // prepare a waiter node
    waiter.task = task;
    signal = AllocSignal(-1);
    if (signal == -1)
    {
        signal = SIGB_COND_FALLBACK;
        SetSignal(SIGF_COND_FALLBACK, 0);
    }
    waiter.sigmask = 1 << signal;
    sigs |= waiter.sigmask;

    // add it to the end of the list
    ObtainSemaphore(&cond->semaphore);
    AddTail((struct List *)&cond->waiters, (struct Node *)&waiter);
    ReleaseSemaphore(&cond->semaphore);

    // wait for the condition to be signalled or the timeout
    mutex->incond++;
    pthread_mutex_unlock(mutex);
    sigs = Wait(sigs);
    pthread_mutex_lock(mutex);
    mutex->incond--;

    // remove the node from the list
    ObtainSemaphore(&cond->semaphore);
    Remove((struct Node *)&waiter);
    ReleaseSemaphore(&cond->semaphore);

    if (signal != SIGB_COND_FALLBACK)
        FreeSignal(signal);

    if (abstime)
    {
        // clean up the timerequest
        if (!CheckIO((struct IORequest *)&timerio))
        {
            AbortIO((struct IORequest *)&timerio);
            WaitIO((struct IORequest *)&timerio);
        }
        CloseDevice((struct IORequest *)&timerio);

        if (timermp.mp_SigBit != SIGB_TIMER_FALLBACK)
            FreeSignal(timermp.mp_SigBit);

        // did we timeout?
        if (sigs & timermask)
            return ETIMEDOUT;
    }

    return 0;
}
int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime)
{
    D(bug("%s(%p, %p, %p)\n", __FUNCTION__, cond, mutex, abstime));

    return _pthread_cond_timedwait(cond, mutex, abstime, FALSE);
}

int pthread_cond_timedwait_relative_np(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *reltime)
{
    D(bug("%s(%p, %p, %p)\n", __FUNCTION__, cond, mutex, reltime));

    return _pthread_cond_timedwait(cond, mutex, reltime, TRUE);
}

int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
    D(bug("%s(%p)\n", __FUNCTION__, cond));

    return _pthread_cond_timedwait(cond, mutex, NULL, FALSE);
}

static int _pthread_cond_broadcast(pthread_cond_t *cond, BOOL onlyfirst)
{
    CondWaiter *waiter;

    DB2(bug("%s(%p, %d)\n", __FUNCTION__, cond, onlyfirst));

    if (cond == NULL)
        return EINVAL;

    // initialize static conditions
    if (SemaphoreIsInvalid(&cond->semaphore))
        pthread_cond_init(cond, NULL);

    // signal the waiting threads
    ObtainSemaphore(&cond->semaphore);
    ForeachNode(&cond->waiters, waiter)
    {
        Signal(waiter->task, waiter->sigmask);
        if (onlyfirst) break;
    }
    ReleaseSemaphore(&cond->semaphore);

    return 0;
}

int pthread_cond_signal(pthread_cond_t *cond)
{
    D(bug("%s(%p)\n", __FUNCTION__, cond));

    return _pthread_cond_broadcast(cond, TRUE);
}

int pthread_cond_broadcast(pthread_cond_t *cond)
{
    D(bug("%s(%p)\n", __FUNCTION__, cond));

    return _pthread_cond_broadcast(cond, FALSE);
}
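
/*
 * Usage sketch for the condition variable calls above (illustrative only):
 * the classic wait-in-a-loop pattern. Wakeups are delivered as exec signals
 * to the tasks registered in cond->waiters, so a woken thread must still
 * re-check its predicate under the mutex.
 *
 *     // waiting side
 *     pthread_mutex_lock(&lock);
 *     while (!ready)
 *         pthread_cond_wait(&cond, &lock);  // releases the mutex while waiting
 *     pthread_mutex_unlock(&lock);
 *
 *     // signalling side
 *     pthread_mutex_lock(&lock);
 *     ready = 1;
 *     pthread_cond_signal(&cond);
 *     pthread_mutex_unlock(&lock);
 */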
//
// Barrier functions
//

int pthread_barrier_init(pthread_barrier_t *barrier, const pthread_barrierattr_t *attr, unsigned int count)
{
    D(bug("%s(%p, %p, %u)\n", __FUNCTION__, barrier, attr, count));

    if (barrier == NULL || count == 0)
        return EINVAL;

    barrier->curr_height = count;
    barrier->total_height = PTHREAD_BARRIER_FLAG;
    pthread_cond_init(&barrier->breeched, NULL);
    pthread_mutex_init(&barrier->lock, NULL);

    return 0;
}

int pthread_barrier_destroy(pthread_barrier_t *barrier)
{
    D(bug("%s(%p)\n", __FUNCTION__, barrier));

    if (barrier == NULL)
        return EINVAL;

    if (pthread_mutex_trylock(&barrier->lock) != 0)
        return EBUSY;

    if (barrier->total_height > PTHREAD_BARRIER_FLAG)
    {
        pthread_mutex_unlock(&barrier->lock);
        return EBUSY;
    }

    pthread_mutex_unlock(&barrier->lock);

    if (pthread_cond_destroy(&barrier->breeched) != 0)
        return EBUSY;

    pthread_mutex_destroy(&barrier->lock);
    barrier->curr_height = barrier->total_height = 0;

    return 0;
}

int pthread_barrier_wait(pthread_barrier_t *barrier)
{
    D(bug("%s(%p)\n", __FUNCTION__, barrier));

    if (barrier == NULL)
        return EINVAL;

    pthread_mutex_lock(&barrier->lock);

    // wait until everyone exits the barrier
    while (barrier->total_height > PTHREAD_BARRIER_FLAG)
        pthread_cond_wait(&barrier->breeched, &barrier->lock);

    // are we the first to enter?
    if (barrier->total_height == PTHREAD_BARRIER_FLAG) barrier->total_height = 0;

    barrier->total_height++;

    if (barrier->total_height == barrier->curr_height)
    {
        barrier->total_height += PTHREAD_BARRIER_FLAG - 1;
        pthread_cond_broadcast(&barrier->breeched);

        pthread_mutex_unlock(&barrier->lock);

        return PTHREAD_BARRIER_SERIAL_THREAD;
    }
    else
    {
        // wait until enough threads enter the barrier
        while (barrier->total_height < PTHREAD_BARRIER_FLAG)
            pthread_cond_wait(&barrier->breeched, &barrier->lock);

        barrier->total_height--;

        // get entering threads to wake up
        if (barrier->total_height == PTHREAD_BARRIER_FLAG)
            pthread_cond_broadcast(&barrier->breeched);

        pthread_mutex_unlock(&barrier->lock);

        return 0;
    }
}
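
/*
 * Note on the accounting above: total_height counts the arriving threads
 * while it is below PTHREAD_BARRIER_FLAG. When the last expected thread
 * arrives it adds PTHREAD_BARRIER_FLAG - 1, pushing the counter to
 * PTHREAD_BARRIER_FLAG + (count - 1); each leaving waiter then decrements
 * it until it settles back at PTHREAD_BARRIER_FLAG, which marks the barrier
 * as reusable for the next cycle. A usage sketch (illustrative only):
 *
 *     static pthread_barrier_t barrier;  // pthread_barrier_init(&barrier, NULL, 3)
 *
 *     // in each of the 3 workers:
 *     int rc = pthread_barrier_wait(&barrier);
 *     if (rc == PTHREAD_BARRIER_SERIAL_THREAD)
 *         ;  // exactly one thread per cycle gets this return value
 */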
//
// Read-write lock attribute functions
//

int pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
    D(bug("%s(%p)\n", __FUNCTION__, attr));

    if (attr == NULL)
        return EINVAL;

    memset(attr, 0, sizeof(pthread_rwlockattr_t));

    return 0;
}

int pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
    D(bug("%s(%p)\n", __FUNCTION__, attr));

    if (attr == NULL)
        return EINVAL;

    memset(attr, 0, sizeof(pthread_rwlockattr_t));

    return 0;
}
//
// Read-write lock functions
//

int pthread_rwlock_init(pthread_rwlock_t *lock, const pthread_rwlockattr_t *attr)
{
    D(bug("%s(%p, %p)\n", __FUNCTION__, lock, attr));

    if (lock == NULL)
        return EINVAL;

    InitSemaphore(&lock->semaphore);

    return 0;
}

int pthread_rwlock_destroy(pthread_rwlock_t *lock)
{
    D(bug("%s(%p)\n", __FUNCTION__, lock));

    if (lock == NULL)
        return EINVAL;

    // probably a statically allocated rwlock
    if (SemaphoreIsInvalid(&lock->semaphore))
        return 0;

    if (AttemptSemaphore(&lock->semaphore) == FALSE)
        return EBUSY;

    ReleaseSemaphore(&lock->semaphore);
    memset(lock, 0, sizeof(pthread_rwlock_t));

    return 0;
}

int pthread_rwlock_tryrdlock(pthread_rwlock_t *lock)
{
    ULONG ret;

    D(bug("%s(%p)\n", __FUNCTION__, lock));

    if (lock == NULL)
        return EINVAL;

    // initialize static rwlocks
    if (SemaphoreIsInvalid(&lock->semaphore))
        pthread_rwlock_init(lock, NULL);

    ret = AttemptSemaphoreShared(&lock->semaphore);

    return (ret == TRUE) ? 0 : EBUSY;
}

int pthread_rwlock_trywrlock(pthread_rwlock_t *lock)
{
    ULONG ret;

    D(bug("%s(%p)\n", __FUNCTION__, lock));

    if (lock == NULL)
        return EINVAL;

    // initialize static rwlocks
    if (SemaphoreIsInvalid(&lock->semaphore))
        pthread_rwlock_init(lock, NULL);

    ret = AttemptSemaphore(&lock->semaphore);

    return (ret == TRUE) ? 0 : EBUSY;
}

int pthread_rwlock_rdlock(pthread_rwlock_t *lock)
{
    D(bug("%s(%p)\n", __FUNCTION__, lock));

    if (lock == NULL)
        return EINVAL;

    pthread_testcancel();

    // initialize static rwlocks
    if (SemaphoreIsInvalid(&lock->semaphore))
        pthread_rwlock_init(lock, NULL);

    // we might already have a write lock
    if (SemaphoreIsMine(&lock->semaphore))
        return EDEADLK;

    ObtainSemaphoreShared(&lock->semaphore);

    return 0;
}

int pthread_rwlock_timedrdlock(pthread_rwlock_t *lock, const struct timespec *abstime)
{
    struct timeval end, now;
    int result;

    D(bug("%s(%p, %p)\n", __FUNCTION__, lock, abstime));

    if (lock == NULL)
        return EINVAL;

    if (abstime == NULL)
        return pthread_rwlock_rdlock(lock);

    pthread_testcancel();

    TIMESPEC_TO_TIMEVAL(&end, abstime);

    // busy waiting is not very nice, but ObtainSemaphore doesn't support timeouts
    while ((result = pthread_rwlock_tryrdlock(lock)) == EBUSY)
    {
        sched_yield();
        gettimeofday(&now, NULL);
        if (timercmp(&end, &now, <))
            return ETIMEDOUT;
    }

    return result;
}

int pthread_rwlock_wrlock(pthread_rwlock_t *lock)
{
    D(bug("%s(%p)\n", __FUNCTION__, lock));

    if (lock == NULL)
        return EINVAL;

    pthread_testcancel();

    // initialize static rwlocks
    if (SemaphoreIsInvalid(&lock->semaphore))
        pthread_rwlock_init(lock, NULL);

    if (SemaphoreIsMine(&lock->semaphore))
        return EDEADLK;

    ObtainSemaphore(&lock->semaphore);

    return 0;
}

int pthread_rwlock_timedwrlock(pthread_rwlock_t *lock, const struct timespec *abstime)
{
    struct timeval end, now;
    int result;

    D(bug("%s(%p, %p)\n", __FUNCTION__, lock, abstime));

    if (lock == NULL)
        return EINVAL;

    if (abstime == NULL)
        return pthread_rwlock_wrlock(lock);

    pthread_testcancel();

    TIMESPEC_TO_TIMEVAL(&end, abstime);

    // busy waiting is not very nice, but ObtainSemaphore doesn't support timeouts
    while ((result = pthread_rwlock_trywrlock(lock)) == EBUSY)
    {
        sched_yield();
        gettimeofday(&now, NULL);
        if (timercmp(&end, &now, <))
            return ETIMEDOUT;
    }

    return result;
}

int pthread_rwlock_unlock(pthread_rwlock_t *lock)
{
    D(bug("%s(%p)\n", __FUNCTION__, lock));

    if (lock == NULL)
        return EINVAL;

    // initialize static rwlocks
    if (SemaphoreIsInvalid(&lock->semaphore))
        pthread_rwlock_init(lock, NULL);

    //if (!SemaphoreIsMine(&lock->semaphore))
    // if no one has obtained the semaphore don't unlock the rwlock
    // this can be a leap of faith because we don't maintain a separate list of readers
    if (lock->semaphore.ss_NestCount < 1)
        return EPERM;

    ReleaseSemaphore(&lock->semaphore);

    return 0;
}
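
/*
 * Usage sketch (illustrative only, assuming pthread.h provides
 * PTHREAD_RWLOCK_INITIALIZER; the lazy-init path above covers the static
 * case either way): readers share the lock via ObtainSemaphoreShared(),
 * while a writer gets exclusive access.
 *
 *     static pthread_rwlock_t rwlock = PTHREAD_RWLOCK_INITIALIZER;
 *
 *     pthread_rwlock_rdlock(&rwlock);   // shared: many readers at once
 *     // ... read ...
 *     pthread_rwlock_unlock(&rwlock);
 *
 *     pthread_rwlock_wrlock(&rwlock);   // exclusive: single writer
 *     // ... write ...
 *     pthread_rwlock_unlock(&rwlock);
 */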
//
// Spinlock functions
//

int pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{
    D(bug("%s(%p, %d)\n", __FUNCTION__, lock, pshared));

    if (lock == NULL)
        return EINVAL;

    *lock = 0;

    return 0;
}

int pthread_spin_destroy(pthread_spinlock_t *lock)
{
    D(bug("%s(%p)\n", __FUNCTION__, lock));

    return 0;
}

int pthread_spin_lock(pthread_spinlock_t *lock)
{
    D(bug("%s(%p)\n", __FUNCTION__, lock));

    if (lock == NULL)
        return EINVAL;

    while (__sync_lock_test_and_set((int *)lock, 1))
        sched_yield(); // TODO: don't yield the CPU every iteration

    return 0;
}

int pthread_spin_trylock(pthread_spinlock_t *lock)
{
    D(bug("%s(%p)\n", __FUNCTION__, lock));

    if (lock == NULL)
        return EINVAL;

    if (__sync_lock_test_and_set((int *)lock, 1))
        return EBUSY;

    return 0;
}

int pthread_spin_unlock(pthread_spinlock_t *lock)
{
    D(bug("%s(%p)\n", __FUNCTION__, lock));

    if (lock == NULL)
        return EINVAL;

    __sync_lock_release((int *)lock);

    return 0;
}
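
/*
 * Usage sketch (illustrative only): this implementation yields the CPU
 * between acquisition attempts rather than busy-spinning, so it is safe on
 * a single-CPU system, but it is still best kept for very short critical
 * sections.
 *
 *     static pthread_spinlock_t slock;
 *
 *     pthread_spin_init(&slock, 0);
 *     pthread_spin_lock(&slock);
 *     // ... touch a few words of shared state ...
 *     pthread_spin_unlock(&slock);
 */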
//
// Thread attribute functions
//

int pthread_attr_init(pthread_attr_t *attr)
{
    struct Task *task;

    D(bug("%s(%p)\n", __FUNCTION__, attr));

    if (attr == NULL)
        return EINVAL;

    memset(attr, 0, sizeof(pthread_attr_t));
    // inherit the priority and stack size of the parent thread
    task = FindTask(NULL);
    attr->param.sched_priority = task->tc_Node.ln_Pri;
    attr->stacksize = (UBYTE *)task->tc_SPUpper - (UBYTE *)task->tc_SPLower;

    return 0;
}

int pthread_attr_destroy(pthread_attr_t *attr)
{
    D(bug("%s(%p)\n", __FUNCTION__, attr));

    if (attr == NULL)
        return EINVAL;

    memset(attr, 0, sizeof(pthread_attr_t));

    return 0;
}

int pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
    D(bug("%s(%p, %p)\n", __FUNCTION__, attr, detachstate));

    if (attr == NULL)
        return EINVAL;

    if (detachstate != NULL)
        *detachstate = attr->detachstate;

    return 0;
}

int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
    D(bug("%s(%p, %d)\n", __FUNCTION__, attr, detachstate));

    if (attr == NULL || detachstate != PTHREAD_CREATE_JOINABLE)
        return EINVAL;

    attr->detachstate = detachstate;

    return 0;
}

int pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
    D(bug("%s(%p, %p, %p)\n", __FUNCTION__, attr, stackaddr, stacksize));

    if (attr == NULL)
        return EINVAL;

    if (stackaddr != NULL)
        *stackaddr = attr->stackaddr;

    if (stacksize != NULL)
        *stacksize = attr->stacksize;

    return 0;
}

int pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
    D(bug("%s(%p, %p, %u)\n", __FUNCTION__, attr, stackaddr, stacksize));

    if (attr == NULL || (stackaddr != NULL && stacksize == 0))
        return EINVAL;

    attr->stackaddr = stackaddr;
    attr->stacksize = stacksize;

    return 0;
}

int pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    D(bug("%s(%p, %p)\n", __FUNCTION__, attr, stacksize));

    return pthread_attr_getstack(attr, NULL, stacksize);
}

int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    D(bug("%s(%p, %u)\n", __FUNCTION__, attr, stacksize));

    return pthread_attr_setstack(attr, NULL, stacksize);
}

int pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
    D(bug("%s(%p, %p)\n", __FUNCTION__, attr, param));

    if (attr == NULL)
        return EINVAL;

    if (param != NULL)
        *param = attr->param;

    return 0;
}

int pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
    D(bug("%s(%p, %p)\n", __FUNCTION__, attr, param));

    if (attr == NULL || param == NULL)
        return EINVAL;

    attr->param = *param;

    return 0;
}
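
/*
 * Usage sketch (illustrative only): the attribute object is copied into the
 * ThreadInfo at creation time, so it can be reused or destroyed as soon as
 * pthread_create() has returned.
 *
 *     pthread_attr_t attr;
 *     struct sched_param sp;
 *
 *     pthread_attr_init(&attr);             // inherits parent priority/stack
 *     pthread_attr_setstacksize(&attr, 256 * 1024);
 *     sp.sched_priority = 5;                // exec task priority
 *     pthread_attr_setschedparam(&attr, &sp);
 *     // pass &attr to pthread_create(), then pthread_attr_destroy(&attr)
 */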
//
// Thread functions
//

#ifdef USE_ASYNC_CANCEL
#ifdef __MORPHOS__
static ULONG CancelHandlerFunc(void);
static struct EmulLibEntry CancelHandler =
{
    TRAP_LIB, 0, (void (*)(void))CancelHandlerFunc
};
static ULONG CancelHandlerFunc(void)
{
    ULONG signals = (ULONG)REG_D0;
    APTR data = (APTR)REG_A1;
    struct ExecBase *SysBase = (struct ExecBase *)REG_A6;
#else
AROS_UFH3S(ULONG, CancelHandler,
    AROS_UFHA(ULONG, signals, D0),
    AROS_UFHA(APTR, data, A1),
    AROS_UFHA(struct ExecBase *, SysBase, A6))
{
    AROS_USERFUNC_INIT
#endif

    DB2(bug("%s(%u, %p, %p)\n", __FUNCTION__, signals, data, SysBase));

    pthread_testcancel();

    return signals;
#ifdef __AROS__
    AROS_USERFUNC_EXIT
#endif
}
#endif
static void StarterFunc(void)
{
    ThreadInfo *inf;
    int i, j;
    int foundkey = TRUE;
#ifdef USE_ASYNC_CANCEL
    APTR oldexcept;
#endif

    DB2(bug("%s()\n", __FUNCTION__));

    inf = (ThreadInfo *)FindTask(NULL)->tc_UserData;
    // trim the name
    //inf->task->tc_Node.ln_Name[inf->oldlen];

    // we have to set the priority here to avoid race conditions
    SetTaskPri(inf->task, inf->attr.param.sched_priority);

#ifdef USE_ASYNC_CANCEL
    // set the exception handler for async cancellation
    oldexcept = inf->task->tc_ExceptCode;
#ifdef __AROS__
    inf->task->tc_ExceptCode = &AROS_ASMSYMNAME(CancelHandler);
#else
    inf->task->tc_ExceptCode = &CancelHandler;
#endif
    SetExcept(SIGBREAKF_CTRL_C, SIGBREAKF_CTRL_C);
#endif

    // set a jump point for pthread_exit
    if (!setjmp(inf->jmp))
    {
        // custom stack requires special handling
        if (inf->attr.stackaddr != NULL && inf->attr.stacksize > 0)
        {
            struct StackSwapArgs swapargs;
            struct StackSwapStruct stack;

            swapargs.Args[0] = (IPTR)inf->arg;
            stack.stk_Lower = inf->attr.stackaddr;
            stack.stk_Upper = (APTR)((IPTR)stack.stk_Lower + inf->attr.stacksize);
            stack.stk_Pointer = stack.stk_Upper;

            inf->ret = (void *)NewStackSwap(&stack, inf->start, &swapargs);
        }
        else
        {
            inf->ret = inf->start(inf->arg);
        }
    }

#ifdef USE_ASYNC_CANCEL
    // remove the exception handler
    SetExcept(0, SIGBREAKF_CTRL_C);
    inf->task->tc_ExceptCode = oldexcept;
#endif

    // destroy all non-NULL TLS key values
    // since the destructors can set the keys themselves, we have to do multiple iterations
    ObtainSemaphoreShared(&tls_sem);
    for (j = 0; foundkey && j < PTHREAD_DESTRUCTOR_ITERATIONS; j++)
    {
        foundkey = FALSE;
        for (i = 0; i < PTHREAD_KEYS_MAX; i++)
        {
            if (tlskeys[i].used && tlskeys[i].destructor && inf->tlsvalues[i])
            {
                void *oldvalue = inf->tlsvalues[i];
                inf->tlsvalues[i] = NULL;
                tlskeys[i].destructor(oldvalue);
                foundkey = TRUE;
            }
        }
    }
    ReleaseSemaphore(&tls_sem);

    // tell the parent thread that we are done
    Forbid();
    inf->finished = TRUE;
    Signal(inf->parent, SIGF_PARENT);
}
int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start)(void *), void *arg)
{
    ThreadInfo *inf;
    char name[NAMELEN];
    size_t oldlen;
    pthread_t threadnew;

    D(bug("%s(%p, %p, %p, %p)\n", __FUNCTION__, thread, attr, start, arg));

    if (thread == NULL || start == NULL)
        return EINVAL;

    ObtainSemaphore(&thread_sem);

    // grab an empty thread slot
    threadnew = GetThreadId(NULL);
    if (threadnew == PTHREAD_THREADS_MAX)
    {
        ReleaseSemaphore(&thread_sem);
        return EAGAIN;
    }

    // prepare the ThreadInfo structure
    inf = GetThreadInfo(threadnew);
    memset(inf, 0, sizeof(ThreadInfo));
    inf->start = start;
    inf->arg = arg;
    inf->parent = FindTask(NULL);
    if (attr)
        inf->attr = *attr;
    else
        pthread_attr_init(&inf->attr);
    NEWLIST((struct List *)&inf->cleanup);
    inf->cancelstate = PTHREAD_CANCEL_ENABLE;
    inf->canceltype = PTHREAD_CANCEL_DEFERRED;

    // let's trick CreateNewProc into allocating a larger buffer for the name
    snprintf(name, sizeof(name), "pthread thread #%d", threadnew);
    oldlen = strlen(name);
    memset(name + oldlen, ' ', sizeof(name) - oldlen - 1);
    name[sizeof(name) - 1] = '\0';

    // start the child thread
    inf->task = (struct Task *)CreateNewProcTags(NP_Entry, StarterFunc,
#ifdef __MORPHOS__
        NP_CodeType, CODETYPE_PPC,
        (inf->attr.stackaddr == NULL && inf->attr.stacksize > 0) ? NP_PPCStackSize : TAG_IGNORE, inf->attr.stacksize,
#else
        (inf->attr.stackaddr == NULL && inf->attr.stacksize > 0) ? NP_StackSize : TAG_IGNORE, inf->attr.stacksize,
#endif
        NP_UserData, inf,
        NP_Name, name,
        TAG_DONE);

    if (!inf->task)
    {
        inf->parent = NULL;
        ReleaseSemaphore(&thread_sem);
        return EAGAIN;
    }

    ReleaseSemaphore(&thread_sem);

    *thread = threadnew;

    return 0;
}
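
/*
 * A minimal usage sketch for pthread_create() (illustrative only): the
 * worker runs on its own DOS process and the creator blocks in
 * pthread_join() until StarterFunc signals completion.
 *
 *     static void *worker(void *arg)
 *     {
 *         return (void *)((IPTR)arg + 1);
 *     }
 *
 *     pthread_t tid;
 *     void *ret;
 *     if (pthread_create(&tid, NULL, worker, (void *)41) == 0)
 *     {
 *         pthread_join(tid, &ret);  // ret == (void *)42
 *     }
 */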
int pthread_detach(pthread_t thread)
{
    D(bug("%s(%u) not implemented\n", __FUNCTION__, thread));

    return ESRCH;
}

int pthread_join(pthread_t thread, void **value_ptr)
{
    ThreadInfo *inf;

    D(bug("%s(%u, %p)\n", __FUNCTION__, thread, value_ptr));

    inf = GetThreadInfo(thread);

    if (inf == NULL || inf->parent == NULL)
        return ESRCH;

    pthread_testcancel();

    while (!inf->finished)
        Wait(SIGF_PARENT);

    if (value_ptr)
        *value_ptr = inf->ret;

    ObtainSemaphore(&thread_sem);
    memset(inf, 0, sizeof(ThreadInfo));
    ReleaseSemaphore(&thread_sem);

    return 0;
}
int pthread_equal(pthread_t t1, pthread_t t2)
{
    D(bug("%s(%u, %u)\n", __FUNCTION__, t1, t2));

    return (t1 == t2);
}

pthread_t pthread_self(void)
{
    struct Task *task;
    pthread_t thread;

    D(bug("%s()\n", __FUNCTION__));

    task = FindTask(NULL);
    thread = GetThreadId(task);

    // add non-pthread processes to our list, so we can handle the main thread
    if (thread == PTHREAD_THREADS_MAX)
    {
        ThreadInfo *inf;

        ObtainSemaphore(&thread_sem);
        thread = GetThreadId(NULL);
        if (thread == PTHREAD_THREADS_MAX)
        {
            // TODO: pthread_self is supposed to always succeed, but we can fail
            // here if we run out of thread slots
            // this can only happen if too many non-pthread processes call
            // this function
            //ReleaseSemaphore(&thread_sem);
            //return EAGAIN;
            abort();
        }
        inf = GetThreadInfo(thread);
        memset(inf, 0, sizeof(ThreadInfo));
        NEWLIST((struct List *)&inf->cleanup);
        inf->task = task;
        ReleaseSemaphore(&thread_sem);
    }

    return thread;
}
int pthread_cancel(pthread_t thread)
{
    ThreadInfo *inf;

    D(bug("%s(%u)\n", __FUNCTION__, thread));

    inf = GetThreadInfo(thread);

    if (inf == NULL || inf->parent == NULL || inf->canceled == TRUE)
        return ESRCH;

    inf->canceled = TRUE;

    // we might have to cancel the thread immediately
    if (inf->canceltype == PTHREAD_CANCEL_ASYNCHRONOUS && inf->cancelstate == PTHREAD_CANCEL_ENABLE)
    {
        struct Task *task;

        task = FindTask(NULL);

        if (inf->task == task)
            pthread_testcancel(); // cancel ourselves
        else
            Signal(inf->task, SIGBREAKF_CTRL_C); // trigger the exception handler
    }

    return 0;
}

int pthread_setcancelstate(int state, int *oldstate)
{
    pthread_t thread;
    ThreadInfo *inf;

    D(bug("%s(%d, %p)\n", __FUNCTION__, state, oldstate));

    if (state != PTHREAD_CANCEL_ENABLE && state != PTHREAD_CANCEL_DISABLE)
        return EINVAL;

    thread = pthread_self();
    inf = GetThreadInfo(thread);

    if (oldstate)
        *oldstate = inf->cancelstate;

    inf->cancelstate = state;

    return 0;
}

int pthread_setcanceltype(int type, int *oldtype)
{
    pthread_t thread;
    ThreadInfo *inf;

    D(bug("%s(%d, %p)\n", __FUNCTION__, type, oldtype));

    if (type != PTHREAD_CANCEL_DEFERRED && type != PTHREAD_CANCEL_ASYNCHRONOUS)
        return EINVAL;

    thread = pthread_self();
    inf = GetThreadInfo(thread);

    if (oldtype)
        *oldtype = inf->canceltype;

    inf->canceltype = type;

    return 0;
}

void pthread_testcancel(void)
{
    pthread_t thread;
    ThreadInfo *inf;

    D(bug("%s()\n", __FUNCTION__));

    thread = pthread_self();
    inf = GetThreadInfo(thread);

    if (inf->canceled && (inf->cancelstate == PTHREAD_CANCEL_ENABLE))
        pthread_exit(PTHREAD_CANCELED);
}
void pthread_exit(void *value_ptr)
{
    pthread_t thread;
    ThreadInfo *inf;
    CleanupHandler *handler;

    D(bug("%s(%p)\n", __FUNCTION__, value_ptr));

    thread = pthread_self();
    inf = GetThreadInfo(thread);
    inf->ret = value_ptr;

    // execute the clean-up handlers
    while ((handler = (CleanupHandler *)RemTail((struct List *)&inf->cleanup)))
        if (handler->routine)
            handler->routine(handler->arg);

    longjmp(inf->jmp, 1);
}

static void OnceCleanup(void *arg)
{
    pthread_once_t *once_control;

    DB2(bug("%s(%p)\n", __FUNCTION__, arg));

    once_control = (pthread_once_t *)arg;
    pthread_spin_unlock(&once_control->lock);
}
int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
    D(bug("%s(%p, %p)\n", __FUNCTION__, once_control, init_routine));

    if (once_control == NULL || init_routine == NULL)
        return EINVAL;

    // the first caller (CAS returns the old value, FALSE) runs the init
    // routine; latecomers that arrive before it has finished serialize on
    // the spinlock and wait, while everyone else takes the fast path once
    // 'done' is set
    if (__sync_val_compare_and_swap(&once_control->started, FALSE, TRUE) == FALSE || !once_control->done)
    {
        pthread_spin_lock(&once_control->lock);
        if (!once_control->done)
        {
            pthread_cleanup_push(OnceCleanup, once_control);
            (*init_routine)();
            pthread_cleanup_pop(0);
            once_control->done = TRUE;
        }
        pthread_spin_unlock(&once_control->lock);
    }

    return 0;
}
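
/*
 * Usage sketch (illustrative only, assuming the usual PTHREAD_ONCE_INIT from
 * pthread.h): any number of threads may race to pthread_once(), but the init
 * routine runs exactly once and every caller returns only after it has
 * completed.
 *
 *     static pthread_once_t once = PTHREAD_ONCE_INIT;
 *
 *     static void init_tables(void) { ... }
 *
 *     pthread_once(&once, init_tables);  // safe from any number of threads
 */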
//
// Scheduling functions
//

int pthread_setschedparam(pthread_t thread, int policy, const struct sched_param *param)
{
    ThreadInfo *inf;

    D(bug("%s(%u, %d, %p)\n", __FUNCTION__, thread, policy, param));

    if (param == NULL)
        return EINVAL;

    inf = GetThreadInfo(thread);

    if (inf == NULL)
        return ESRCH;

    SetTaskPri(inf->task, param->sched_priority);

    return 0;
}
//
// Non-portable functions
//

int pthread_setname_np(pthread_t thread, const char *name)
{
    ThreadInfo *inf;
    char *currentname;
    size_t namelen;

    D(bug("%s(%u, %s)\n", __FUNCTION__, thread, name));

    if (name == NULL)
        return ERANGE;

    inf = GetThreadInfo(thread);

    if (inf == NULL)
        return ERANGE;

    currentname = GetNodeName(inf->task);

    if (inf->parent == NULL)
        namelen = strlen(currentname) + 1;
    else
        namelen = NAMELEN;

    if (strlen(name) + 1 > namelen)
        return ERANGE;

    strncpy(currentname, name, namelen);

    return 0;
}

int pthread_getname_np(pthread_t thread, char *name, size_t len)
{
    ThreadInfo *inf;
    char *currentname;

    D(bug("%s(%u, %p, %u)\n", __FUNCTION__, thread, name, len));

    if (name == NULL || len == 0)
        return ERANGE;

    inf = GetThreadInfo(thread);

    if (inf == NULL)
        return ERANGE;

    currentname = GetNodeName(inf->task);

    if (strlen(currentname) + 1 > len)
        return ERANGE;

    // TODO: partially copy the name?
    strncpy(name, currentname, len);

    return 0;
}
//
// Cancellation cleanup
//

void pthread_cleanup_push(void (*routine)(void *), void *arg)
{
    pthread_t thread;
    ThreadInfo *inf;
    CleanupHandler *handler;

    D(bug("%s(%p, %p)\n", __FUNCTION__, routine, arg));

    if (routine == NULL)
        return;

    handler = malloc(sizeof(CleanupHandler));

    if (handler == NULL)
        return;

    thread = pthread_self();
    inf = GetThreadInfo(thread);

    handler->routine = routine;
    handler->arg = arg;
    AddTail((struct List *)&inf->cleanup, (struct Node *)handler);
}

void pthread_cleanup_pop(int execute)
{
    pthread_t thread;
    ThreadInfo *inf;
    CleanupHandler *handler;

    D(bug("%s(%d)\n", __FUNCTION__, execute));

    thread = pthread_self();
    inf = GetThreadInfo(thread);
    handler = (CleanupHandler *)RemTail((struct List *)&inf->cleanup);

    if (handler && handler->routine && execute)
        handler->routine(handler->arg);

    free(handler);
}
//
// Signalling
//

int pthread_kill(pthread_t thread, int sig)
{
    D(bug("%s(%u, %d) not implemented\n", __FUNCTION__, thread, sig));

    return EINVAL;
}
//
// Constructors, destructors
//

static int _Init_Func(void)
{
    DB2(bug("%s()\n", __FUNCTION__));

    //memset(&threads, 0, sizeof(threads));
    InitSemaphore(&thread_sem);
    InitSemaphore(&tls_sem);
    // reserve ID 0 for the main thread
    //pthread_self();

    return TRUE;
}

static void _Exit_Func(void)
{
#if 0
    pthread_t i;
#endif

    DB2(bug("%s()\n", __FUNCTION__));

    // wait for the threads?
#if 0
    for (i = 0; i < PTHREAD_THREADS_MAX; i++)
        pthread_join(i, NULL);
#endif
}

#ifdef __AROS__
ADD2INIT(_Init_Func, 0);
ADD2EXIT(_Exit_Func, 0);
#else
static CONSTRUCTOR_P(_Init_Func, 100)
{
    return !_Init_Func();
}

static DESTRUCTOR_P(_Exit_Func, 100)
{
    _Exit_Func();
}
#endif