some further WIP polish localization.
[AROS.git] / compiler / pthread / pthread.c
blobbfc047971c896528cd94bf452b06817607d14519
1 /*
2 Copyright (C) 2014 Szilard Biro
4 This software is provided 'as-is', without any express or implied
5 warranty. In no event will the authors be held liable for any damages
6 arising from the use of this software.
8 Permission is granted to anyone to use this software for any purpose,
9 including commercial applications, and to alter it and redistribute it
10 freely, subject to the following restrictions:
12 1. The origin of this software must not be misrepresented; you must not
13 claim that you wrote the original software. If you use this software
14 in a product, an acknowledgment in the product documentation would be
15 appreciated but is not required.
16 2. Altered source versions must be plainly marked as such, and must not be
17 misrepresented as being the original software.
18 3. This notice may not be removed or altered from any source distribution.
21 #ifdef __MORPHOS__
22 #include <sys/time.h>
23 #endif
24 #include <dos/dostags.h>
25 #include <proto/exec.h>
26 #include <proto/dos.h>
27 #include <proto/timer.h>
29 #include <string.h>
30 #include <stdio.h>
31 #include <signal.h>
32 #include <stdlib.h>
34 #include "pthread_intern.h"
35 #include "debug.h"
//#define USE_ASYNC_CANCEL

// Global thread table; the slot index doubles as the pthread_t id.
ThreadInfo threads[PTHREAD_THREADS_MAX];
// Guards concurrent access to threads[].
struct SignalSemaphore thread_sem;
// Process-wide table of thread-specific-data keys.
TLSKey tlskeys[PTHREAD_KEYS_MAX];
// Guards concurrent access to tlskeys[].
struct SignalSemaphore tls_sem;
46 // Helper functions
49 int SemaphoreIsInvalid(struct SignalSemaphore *sem)
51 DB2(bug("%s(%p)\n", __FUNCTION__, sem));
53 return (!sem || sem->ss_Link.ln_Type != NT_SIGNALSEM || sem->ss_WaitQueue.mlh_Tail != NULL);
56 int SemaphoreIsMine(struct SignalSemaphore *sem)
58 struct Task *me;
60 DB2(bug("%s(%p)\n", __FUNCTION__, sem));
62 me = FindTask(NULL);
64 return (sem && sem->ss_NestCount > 0 && sem->ss_Owner == me);
67 ThreadInfo *GetThreadInfo(pthread_t thread)
69 ThreadInfo *inf = NULL;
71 DB2(bug("%s(%u)\n", __FUNCTION__, thread));
73 // TODO: more robust error handling?
74 if (thread < PTHREAD_THREADS_MAX)
75 inf = &threads[thread];
77 return inf;
80 pthread_t GetThreadId(struct Task *task)
82 pthread_t i;
84 DB2(bug("%s(%p)\n", __FUNCTION__, task));
86 ObtainSemaphoreShared(&thread_sem);
88 // First thread id will be 1 so that it is different than default value of pthread_t
89 for (i = PTHREAD_FIRST_THREAD_ID; i < PTHREAD_THREADS_MAX; i++)
91 if (threads[i].task == task)
92 break;
95 ReleaseSemaphore(&thread_sem);
97 return i;
#if defined __mc68000__
/* No CAS instruction on m68k */
/* The GCC __sync builtins are emulated by briefly disabling interrupts,
   which makes the read-modify-write sequence atomic on a single-CPU
   68k system. */
static int __m68k_sync_val_compare_and_swap(int *v, int o, int n)
{
	int ret;

	Disable();
	if ((*v) == (o))
		(*v) = (n);
	// NOTE(review): this returns the value AFTER a successful swap (n),
	// whereas the real GCC builtin returns the value BEFORE the swap (o).
	// Callers written against builtin semantics behave differently on
	// m68k — confirm before relying on the return value.
	ret = (*v);
	Enable();

	return ret;
}
#undef __sync_val_compare_and_swap
#define __sync_val_compare_and_swap(v, o, n) __m68k_sync_val_compare_and_swap(v, o, n)

// Atomically store n and return it (acquire-style test-and-set).
static int __m68k_sync_lock_test_and_set(int *v, int n)
{
	Disable();
	(*v) = (n);
	Enable();

	return n;
}
#undef __sync_lock_test_and_set
#define __sync_lock_test_and_set(v, n) __m68k_sync_lock_test_and_set(v, n)
#undef __sync_lock_release
#define __sync_lock_release(v) __m68k_sync_lock_test_and_set(v, 0)

// Atomically add n and return the new value.
static inline int __m68k_sync_add_and_fetch(int *v, int n)
{
	int ret;

	Disable();
	(*v) += (n);
	ret = (*v);
	Enable();

	return ret;
}
#undef __sync_add_and_fetch
#define __sync_add_and_fetch(v, n) __m68k_sync_add_and_fetch(v, n)
#undef __sync_sub_and_fetch
// Subtraction is just addition of the negated operand.
#define __sync_sub_and_fetch(v, n) __m68k_sync_add_and_fetch(v, -(n))
#endif
148 // Thread specific data functions
151 int pthread_key_delete(pthread_key_t key)
153 TLSKey *tls;
155 D(bug("%s(%u)\n", __FUNCTION__, key));
157 if (key >= PTHREAD_KEYS_MAX)
158 return EINVAL;
160 tls = &tlskeys[key];
162 ObtainSemaphore(&tls_sem);
164 if (tls->used == FALSE)
166 ReleaseSemaphore(&tls_sem);
167 return EINVAL;
170 tls->used = FALSE;
171 tls->destructor = NULL;
173 ReleaseSemaphore(&tls_sem);
175 return 0;
179 // Mutex attribute functions
182 int pthread_mutexattr_gettype(pthread_mutexattr_t *attr, int *kind)
184 D(bug("%s(%p, %p)\n", __FUNCTION__, attr, kind));
186 if (attr == NULL)
187 return EINVAL;
189 if (kind)
190 *kind = attr->kind;
192 return 0;
196 // Mutex functions
199 int pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
201 struct timeval end, now;
202 int result;
204 D(bug("%s(%p, %p)\n", __FUNCTION__, mutex, abstime));
206 if (mutex == NULL)
207 return EINVAL;
209 if (abstime == NULL)
210 return pthread_mutex_lock(mutex);
211 /*else if (abstime.tv_nsec < 0 || abstime.tv_nsec >= 1000000000)
212 return EINVAL;*/
214 TIMESPEC_TO_TIMEVAL(&end, abstime);
216 // busy waiting is not very nice, but ObtainSemaphore doesn't support timeouts
217 while ((result = pthread_mutex_trylock(mutex)) == EBUSY)
219 sched_yield();
220 gettimeofday(&now, NULL);
221 if (timercmp(&end, &now, <))
222 return ETIMEDOUT;
225 return result;
229 // Condition variable attribute functions
232 int pthread_condattr_init(pthread_condattr_t *attr)
234 D(bug("%s(%p)\n", __FUNCTION__, attr));
236 if (attr == NULL)
237 return EINVAL;
239 memset(attr, 0, sizeof(pthread_condattr_t));
241 return 0;
244 int pthread_condattr_destroy(pthread_condattr_t *attr)
246 D(bug("%s(%p)\n", __FUNCTION__, attr));
248 if (attr == NULL)
249 return EINVAL;
251 memset(attr, 0, sizeof(pthread_condattr_t));
253 return 0;
257 // Condition variable functions
// Non-portable variant of pthread_cond_timedwait() where the timeout is
// relative to the current time rather than an absolute wall-clock value.
int pthread_cond_timedwait_relative_np(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *reltime)
{
	D(bug("%s(%p, %p, %p)\n", __FUNCTION__, cond, mutex, reltime));

	// final TRUE selects relative-timeout handling in the shared helper
	return _pthread_cond_timedwait(cond, mutex, reltime, TRUE);
}
269 // Barrier functions
272 int pthread_barrier_init(pthread_barrier_t *barrier, const pthread_barrierattr_t *attr, unsigned int count)
274 D(bug("%s(%p, %p, %u)\n", __FUNCTION__, barrier, attr, count));
276 if (barrier == NULL || count == 0)
277 return EINVAL;
279 barrier->curr_height = count;
280 barrier->total_height = PTHREAD_BARRIER_FLAG;
281 pthread_cond_init(&barrier->breeched, NULL);
282 pthread_mutex_init(&barrier->lock, NULL);
284 return 0;
// Tear down a barrier. Fails with EBUSY while any thread is still
// waiting at it or draining out of it.
int pthread_barrier_destroy(pthread_barrier_t *barrier)
{
	D(bug("%s(%p)\n", __FUNCTION__, barrier));

	if (barrier == NULL)
		return EINVAL;

	// take the lock to make sure nobody is manipulating the counters
	if (pthread_mutex_trylock(&barrier->lock) != 0)
		return EBUSY;

	// a count above the flag value means threads are still draining
	if (barrier->total_height > PTHREAD_BARRIER_FLAG)
	{
		pthread_mutex_unlock(&barrier->lock);
		return EBUSY;
	}

	pthread_mutex_unlock(&barrier->lock);

	if (pthread_cond_destroy(&barrier->breeched) != 0)
		return EBUSY;

	pthread_mutex_destroy(&barrier->lock);
	barrier->curr_height = barrier->total_height = 0;

	return 0;
}
// Block until curr_height threads have arrived. Exactly one caller gets
// PTHREAD_BARRIER_SERIAL_THREAD; the rest get 0.
// Protocol: total_height counts arrivals while < PTHREAD_BARRIER_FLAG;
// once everyone arrived it is offset by the flag and counts threads
// draining back out, so late arrivals of the next round wait first.
int pthread_barrier_wait(pthread_barrier_t *barrier)
{
	D(bug("%s(%p)\n", __FUNCTION__, barrier));

	if (barrier == NULL)
		return EINVAL;

	pthread_mutex_lock(&barrier->lock);

	// wait until everyone exits the barrier
	while (barrier->total_height > PTHREAD_BARRIER_FLAG)
		pthread_cond_wait(&barrier->breeched, &barrier->lock);

	// are we the first to enter?
	if (barrier->total_height == PTHREAD_BARRIER_FLAG) barrier->total_height = 0;

	barrier->total_height++;

	if (barrier->total_height == barrier->curr_height)
	{
		// last arrival: flip the counter into drain mode and wake everyone
		barrier->total_height += PTHREAD_BARRIER_FLAG - 1;
		pthread_cond_broadcast(&barrier->breeched);

		pthread_mutex_unlock(&barrier->lock);

		return PTHREAD_BARRIER_SERIAL_THREAD;
	}
	else
	{
		// wait until enough threads enter the barrier
		while (barrier->total_height < PTHREAD_BARRIER_FLAG)
			pthread_cond_wait(&barrier->breeched, &barrier->lock);

		barrier->total_height--;

		// get entering threads to wake up
		if (barrier->total_height == PTHREAD_BARRIER_FLAG)
			pthread_cond_broadcast(&barrier->breeched);

		pthread_mutex_unlock(&barrier->lock);

		return 0;
	}
}
360 // Read-write lock attribute functions
363 int pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
365 D(bug("%s(%p)\n", __FUNCTION__, attr));
367 if (attr == NULL)
368 return EINVAL;
370 memset(attr, 0, sizeof(pthread_rwlockattr_t));
372 return 0;
375 int pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
377 D(bug("%s(%p)\n", __FUNCTION__, attr));
379 if (attr == NULL)
380 return EINVAL;
382 memset(attr, 0, sizeof(pthread_rwlockattr_t));
384 return 0;
388 // Read-write lock functions
// Initialize a read-write lock; the attribute object is ignored.
// A SignalSemaphore already supports shared (read) and exclusive
// (write) ownership, so it maps directly onto a rwlock.
int pthread_rwlock_init(pthread_rwlock_t *lock, const pthread_rwlockattr_t *attr)
{
	D(bug("%s(%p, %p)\n", __FUNCTION__, lock, attr));

	if (lock == NULL)
		return EINVAL;

	InitSemaphore(&lock->semaphore);

	return 0;
}
// Destroy a rwlock. Returns EBUSY if it is currently held.
int pthread_rwlock_destroy(pthread_rwlock_t *lock)
{
	D(bug("%s(%p)\n", __FUNCTION__, lock));

	if (lock == NULL)
		return EINVAL;

	// probably a statically allocated rwlock
	if (SemaphoreIsInvalid(&lock->semaphore))
		return 0;

	// if we can't briefly acquire it exclusively, someone still holds it
	if (AttemptSemaphore(&lock->semaphore) == FALSE)
		return EBUSY;

	ReleaseSemaphore(&lock->semaphore);
	memset(lock, 0, sizeof(pthread_rwlock_t));

	return 0;
}
423 int pthread_rwlock_tryrdlock(pthread_rwlock_t *lock)
425 ULONG ret;
427 D(bug("%s(%p)\n", __FUNCTION__, lock));
429 if (lock == NULL)
430 return EINVAL;
432 // initialize static rwlocks
433 if (SemaphoreIsInvalid(&lock->semaphore))
434 pthread_rwlock_init(lock, NULL);
436 ret = AttemptSemaphoreShared(&lock->semaphore);
438 return (ret == TRUE) ? 0 : EBUSY;
441 int pthread_rwlock_trywrlock(pthread_rwlock_t *lock)
443 ULONG ret;
445 D(bug("%s(%p)\n", __FUNCTION__, lock));
447 if (lock == NULL)
448 return EINVAL;
450 // initialize static rwlocks
451 if (SemaphoreIsInvalid(&lock->semaphore))
452 pthread_rwlock_init(lock, NULL);
454 ret = AttemptSemaphore(&lock->semaphore);
456 return (ret == TRUE) ? 0 : EBUSY;
// Acquire a shared (read) lock, blocking until available.
// This is a cancellation point.
int pthread_rwlock_rdlock(pthread_rwlock_t *lock)
{
	D(bug("%s(%p)\n", __FUNCTION__, lock));

	if (lock == NULL)
		return EINVAL;

	pthread_testcancel();

	// initialize static rwlocks
	if (SemaphoreIsInvalid(&lock->semaphore))
		pthread_rwlock_init(lock, NULL);

	// we might already have a write lock
	if (SemaphoreIsMine(&lock->semaphore))
		return EDEADLK;

	ObtainSemaphoreShared(&lock->semaphore);

	return 0;
}
481 int pthread_rwlock_timedrdlock(pthread_rwlock_t *lock, const struct timespec *abstime)
483 struct timeval end, now;
484 int result;
486 D(bug("%s(%p, %p)\n", __FUNCTION__, lock, abstime));
488 if (lock == NULL)
489 return EINVAL;
491 if (abstime == NULL)
492 return pthread_rwlock_rdlock(lock);
494 pthread_testcancel();
496 TIMESPEC_TO_TIMEVAL(&end, abstime);
498 // busy waiting is not very nice, but ObtainSemaphore doesn't support timeouts
499 while ((result = pthread_rwlock_tryrdlock(lock)) == EBUSY)
501 sched_yield();
502 gettimeofday(&now, NULL);
503 if (timercmp(&end, &now, <))
504 return ETIMEDOUT;
507 return result;
// Acquire an exclusive (write) lock, blocking until available.
// This is a cancellation point.
int pthread_rwlock_wrlock(pthread_rwlock_t *lock)
{
	D(bug("%s(%p)\n", __FUNCTION__, lock));

	if (lock == NULL)
		return EINVAL;

	pthread_testcancel();

	// initialize static rwlocks
	if (SemaphoreIsInvalid(&lock->semaphore))
		pthread_rwlock_init(lock, NULL);

	// relocking a semaphore we already own would deadlock or nest wrongly
	if (SemaphoreIsMine(&lock->semaphore))
		return EDEADLK;

	ObtainSemaphore(&lock->semaphore);

	return 0;
}
531 int pthread_rwlock_timedwrlock(pthread_rwlock_t *lock, const struct timespec *abstime)
533 struct timeval end, now;
534 int result;
536 D(bug("%s(%p, %p)\n", __FUNCTION__, lock, abstime));
538 if (lock == NULL)
539 return EINVAL;
541 if (abstime == NULL)
542 return pthread_rwlock_wrlock(lock);
544 pthread_testcancel();
546 TIMESPEC_TO_TIMEVAL(&end, abstime);
548 // busy waiting is not very nice, but ObtainSemaphore doesn't support timeouts
549 while ((result = pthread_rwlock_trywrlock(lock)) == EBUSY)
551 sched_yield();
552 gettimeofday(&now, NULL);
553 if (timercmp(&end, &now, <))
554 return ETIMEDOUT;
557 return result;
// Release a read or write lock held by the caller.
int pthread_rwlock_unlock(pthread_rwlock_t *lock)
{
	D(bug("%s(%p)\n", __FUNCTION__, lock));

	if (lock == NULL)
		return EINVAL;

	// initialize static rwlocks
	if (SemaphoreIsInvalid(&lock->semaphore))
		pthread_rwlock_init(lock, NULL);

	//if (!SemaphoreIsMine(&lock->semaphore))
	// if no one has obtained the semaphore don't unlock the rwlock
	// this can be a leap of faith because we don't maintain a separate list of readers
	if (lock->semaphore.ss_NestCount < 1)
		return EPERM;

	ReleaseSemaphore(&lock->semaphore);

	return 0;
}
583 // Spinlock functions
586 int pthread_spin_init(pthread_spinlock_t *lock, int pshared)
588 D(bug("%s(%p, %d)\n", __FUNCTION__, lock, pshared));
590 if (lock == NULL)
591 return EINVAL;
593 *lock = 0;
595 return 0;
// No resources are attached to a spinlock, so destruction is a no-op.
int pthread_spin_destroy(pthread_spinlock_t *lock)
{
	D(bug("%s(%p)\n", __FUNCTION__, lock));

	return 0;
}
// Busy-wait until the lock is acquired.
int pthread_spin_lock(pthread_spinlock_t *lock)
{
	D(bug("%s(%p)\n", __FUNCTION__, lock));

	if (lock == NULL)
		return EINVAL;

	// test-and-set returns the previous value; non-zero means held
	while (__sync_lock_test_and_set((int *)lock, 1))
		sched_yield(); // TODO: don't yield the CPU every iteration

	return 0;
}
618 int pthread_spin_trylock(pthread_spinlock_t *lock)
620 D(bug("%s(%p)\n", __FUNCTION__, lock));
622 if (lock == NULL)
623 return EINVAL;
625 if (__sync_lock_test_and_set((int *)lock, 1))
626 return EBUSY;
628 return 0;
// Release the spinlock (atomic store of 0).
int pthread_spin_unlock(pthread_spinlock_t *lock)
{
	D(bug("%s(%p)\n", __FUNCTION__, lock));

	if (lock == NULL)
		return EINVAL;

	__sync_lock_release((int *)lock);

	return 0;
}
644 // Thread attribute functions
647 int pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
649 D(bug("%s(%p, %p)\n", __FUNCTION__, attr, detachstate));
651 if (attr == NULL)
652 return EINVAL;
654 if (detachstate != NULL)
655 *detachstate = attr->detachstate;
657 return 0;
// Set the detach-state attribute.
// NOTE(review): only PTHREAD_CREATE_JOINABLE is accepted;
// PTHREAD_CREATE_DETACHED is rejected with EINVAL, presumably because
// creating threads detached is unsupported here — confirm against
// pthread_create() before changing.
int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
	D(bug("%s(%p, %d)\n", __FUNCTION__, attr, detachstate));

	if (attr == NULL || detachstate != PTHREAD_CREATE_JOINABLE)
		return EINVAL;

	attr->detachstate = detachstate;

	return 0;
}
672 int pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
674 D(bug("%s(%p, %p, %p)\n", __FUNCTION__, attr, stackaddr, stacksize));
676 if (attr == NULL)
677 return EINVAL;
679 if (stackaddr != NULL)
680 *stackaddr = attr->stackaddr;
682 if (stacksize != NULL)
683 *stacksize = attr->stacksize;
685 return 0;
688 int pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
690 D(bug("%s(%p, %p, %u)\n", __FUNCTION__, attr, stackaddr, stacksize));
692 if (attr == NULL || (stackaddr != NULL && stacksize == 0))
693 return EINVAL;
695 attr->stackaddr = stackaddr;
696 attr->stacksize = stacksize;
698 return 0;
// Convenience wrapper: report only the stack-size part of the attributes.
int pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
	D(bug("%s(%p, %p)\n", __FUNCTION__, attr, stacksize));

	return pthread_attr_getstack(attr, NULL, stacksize);
}
708 int pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
710 D(bug("%s(%p, %p)\n", __FUNCTION__, attr, param));
712 if (attr == NULL)
713 return EINVAL;
715 if (param != NULL)
716 *param = attr->param;
718 return 0;
721 int pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
723 D(bug("%s(%p, %p)\n", __FUNCTION__, attr, param));
725 if (attr == NULL || param == NULL)
726 return EINVAL;
728 attr->param = *param;
730 return 0;
734 // Thread functions
#ifdef USE_ASYNC_CANCEL
// Task exception handler used for asynchronous cancellation: whenever
// the cancel signal is raised, check for a pending cancel request.
#ifdef __MORPHOS__
// MorphOS invokes exception handlers through a 68k gate, so the
// PPC-native function is wrapped in an EmulLibEntry trampoline.
static ULONG CancelHandlerFunc(void);
static struct EmulLibEntry CancelHandler =
{
	TRAP_LIB, 0, (void (*)(void))CancelHandlerFunc
};

static ULONG CancelHandlerFunc(void)
{
	ULONG signals = (ULONG)REG_D0;
	APTR data = (APTR)REG_A1;
	struct ExecBase *SysBase = (struct ExecBase *)REG_A6;
#else
AROS_UFH3S(ULONG, CancelHandler,
	AROS_UFHA(ULONG, signals, D0),
	AROS_UFHA(APTR, data, A1),
	AROS_UFHA(struct ExecBase *, SysBase, A6))
{
	AROS_USERFUNC_INIT
#endif

	DB2(bug("%s(%u, %p, %p)\n", __FUNCTION__, signals, data, SysBase));

	// act on a pending cancellation as soon as it is signalled
	pthread_testcancel();

	// returning the signal mask re-enables the exception
	return signals;

#ifdef __AROS__
	AROS_USERFUNC_EXIT
#endif
}
#endif
769 int pthread_detach(pthread_t thread)
771 ThreadInfo *inf;
773 D(bug("%s(%u, %p)\n", __FUNCTION__, thread, value_ptr));
775 inf = GetThreadInfo(thread);
777 if (inf == NULL)
778 return ESRCH;
780 inf->detached = TRUE;
782 return 0;
// Explicit cancellation point: terminate the calling thread via
// pthread_exit(PTHREAD_CANCELED) if a cancel request is pending and
// cancellation is enabled.
void pthread_testcancel(void)
{
	pthread_t thread;
	ThreadInfo *inf;

	D(bug("%s()\n", __FUNCTION__));

	thread = pthread_self();
	// NOTE(review): inf is assumed non-NULL because pthread_self() should
	// always return a valid slot for the calling task — confirm
	inf = GetThreadInfo(thread);

	if (inf->canceled && (inf->cancelstate == PTHREAD_CANCEL_ENABLE))
		pthread_exit(PTHREAD_CANCELED);
}
799 static void OnceCleanup(void *arg)
801 pthread_once_t *once_control;
803 DB2(bug("%s(%p)\n", __FUNCTION__, arg));
805 once_control = (pthread_once_t *)arg;
806 pthread_spin_unlock(&once_control->lock);
809 int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
811 D(bug("%s(%p, %p)\n", __FUNCTION__, once_control, init_routine));
813 if (once_control == NULL || init_routine == NULL)
814 return EINVAL;
816 if (__sync_val_compare_and_swap(&once_control->started, FALSE, TRUE))
818 pthread_spin_lock(&once_control->lock);
819 if (!once_control->done)
821 pthread_cleanup_push(OnceCleanup, once_control);
822 (*init_routine)();
823 pthread_cleanup_pop(0);
824 once_control->done = TRUE;
826 pthread_spin_unlock(&once_control->lock);
829 return 0;
833 // Scheduling functions
// Apply scheduling parameters to a running thread.
// The policy argument is ignored; sched_priority maps directly onto the
// exec task priority.
int pthread_setschedparam(pthread_t thread, int policy, const struct sched_param *param)
{
	ThreadInfo *inf;

	D(bug("%s(%u, %d, %p)\n", __FUNCTION__, thread, policy, param));

	if (param == NULL)
		return EINVAL;

	inf = GetThreadInfo(thread);

	if (inf == NULL)
		return ESRCH;

	SetTaskPri(inf->task, param->sched_priority);

	return 0;
}
855 int pthread_getschedparam(pthread_t thread, int *policy, struct sched_param *param)
857 ThreadInfo *inf;
859 D(bug("%s(%u, %d, %p)\n", __FUNCTION__, thread, policy, param));
861 if ((param == NULL) || (policy == NULL))
862 return EINVAL;
864 inf = GetThreadInfo(thread);
866 if (inf == NULL)
867 return ESRCH;
869 param->sched_priority = inf->task->tc_Node.ln_Pri;
870 *policy = 1;
872 return 0;
876 // Non-portable functions
// Rename the exec task backing the given thread.
// Returns ERANGE when the name is missing, the thread is unknown, or
// the new name does not fit the available buffer.
int pthread_setname_np(pthread_t thread, const char *name)
{
	ThreadInfo *inf;
	char *currentname;
	size_t namelen;

	D(bug("%s(%u, %s)\n", __FUNCTION__, thread, name));

	if (name == NULL)
		return ERANGE;

	inf = GetThreadInfo(thread);

	if (inf == NULL)
		return ERANGE;

	currentname = GetNodeName(inf->task);

	// NOTE(review): a thread without a parent is presumably the main
	// thread, whose name buffer is system-owned — the new name may not
	// grow past the current one; pthread-created tasks appear to have a
	// NAMELEN-sized buffer of their own. Confirm against pthread_create.
	if (inf->parent == NULL)
		namelen = strlen(currentname) + 1;
	else
		namelen = NAMELEN;

	if (strlen(name) + 1 > namelen)
		return ERANGE;

	// the length check above guarantees the copy is NUL-terminated
	strncpy(currentname, name, namelen);

	return 0;
}
909 int pthread_getname_np(pthread_t thread, char *name, size_t len)
911 ThreadInfo *inf;
912 char *currentname;
914 D(bug("%s(%u, %p, %u)\n", __FUNCTION__, thread, name, len));
916 if (name == NULL || len == 0)
917 return ERANGE;
919 inf = GetThreadInfo(thread);
921 if (inf == NULL)
922 return ERANGE;
924 currentname = GetNodeName(inf->task);
926 if (strlen(currentname) + 1 > len)
927 return ERANGE;
929 // TODO: partially copy the name?
930 strncpy(name, currentname, len);
932 return 0;
936 // Cancellation cleanup
939 void pthread_cleanup_push(void (*routine)(void *), void *arg)
941 pthread_t thread;
942 ThreadInfo *inf;
943 CleanupHandler *handler;
945 D(bug("%s(%p, %p)\n", __FUNCTION__, routine, arg));
947 if (routine == NULL)
948 return;
950 handler = malloc(sizeof(CleanupHandler));
952 if (handler == NULL)
953 return;
955 thread = pthread_self();
956 inf = GetThreadInfo(thread);
958 handler->routine = routine;
959 handler->arg = arg;
960 AddTail((struct List *)&inf->cleanup, (struct Node *)handler);
963 void pthread_cleanup_pop(int execute)
965 pthread_t thread;
966 ThreadInfo *inf;
967 CleanupHandler *handler;
969 D(bug("%s(%d)\n", __FUNCTION__, execute));
971 thread = pthread_self();
972 inf = GetThreadInfo(thread);
973 handler = (CleanupHandler *)RemTail((struct List *)&inf->cleanup);
975 if (handler && handler->routine && execute)
976 handler->routine(handler->arg);
978 free(handler);
982 // Signalling
// Signal delivery to threads is not supported by this implementation;
// always fails with EINVAL.
int pthread_kill(pthread_t thread, int sig)
{
	D(bug("%s(%u, %d) not implemented\n", __FUNCTION__, thread, sig));

	return EINVAL;
}
993 // Constructors, destructors
// Library/program startup hook: set up the global semaphores guarding
// the thread and TLS tables.
static int _Init_Func(void)
{
	DB2(bug("%s()\n", __FUNCTION__));

	//memset(&threads, 0, sizeof(threads));
	InitSemaphore(&thread_sem);
	InitSemaphore(&tls_sem);
	// reserve ID 0 for the main thread
	//pthread_self();

	return TRUE;
}
// Library/program exit hook; currently nothing is torn down.
static void _Exit_Func(void)
{
#if 0
	pthread_t i;
#endif

	DB2(bug("%s()\n", __FUNCTION__));

	// wait for the threads?
#if 0
	for (i = 0; i < PTHREAD_THREADS_MAX; i++)
		pthread_join(i, NULL);
#endif
}
#ifdef __AROS__
// AROS: register via the autoinit linker sets
ADD2INIT(_Init_Func, 0);
ADD2EXIT(_Exit_Func, 0);
#else
// MorphOS/AmigaOS: a constructor must return 0 on success, hence the
// negation of _Init_Func()'s TRUE/FALSE result
static CONSTRUCTOR_P(_Init_Func, 100)
{
	return !_Init_Func();
}

static DESTRUCTOR_P(_Exit_Func, 100)
{
	_Exit_Func();
}
#endif