/* src/runtime/pthreads_win32.c */
#include "sbcl.h"
#ifdef LISP_FEATURE_SB_THREAD /* entire file */

#define PTHREAD_INTERNALS
#include "pthreads_win32.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#ifdef PTHREAD_DEBUG_OUTPUT
#define pthshow(fmt,...)                        \
  do {                                          \
    fprintf(stderr, fmt "\n", __VA_ARGS__);     \
    fflush(stderr);                             \
  } while (0)

#define DEBUG_OWN(cs) do { (cs)->owner = pthread_self(); } while(0)
#define DEBUG_RELEASE(cs) do { (cs)->owner = 0; } while(0)

#else
#define pthshow(fmt,...) do {} while (0)
#define DEBUG_OWN(cs) do {} while(0)
#define DEBUG_RELEASE(cs) do {} while(0)
#endif
struct freelist_cell {
    struct freelist_cell * next;
    void* data;
};

struct freelist {
    void* (*create_fn)();
    pthread_mutex_t lock;
    struct freelist_cell * empty;
    struct freelist_cell * full;
    unsigned int count;
};
/* NB: the macro previously hardwired event_create instead of using its
   create_fn argument; fixed to actually honor the parameter. */
#define FREELIST_INITIALIZER(create_fn)                 \
    {                                                   \
        (create_fn), PTHREAD_MUTEX_INITIALIZER,         \
        NULL, NULL, 0                                   \
    }
static void* freelist_get(struct freelist *fl)
{
    void* result = NULL;
    if (fl->full) {
        pthread_mutex_lock(&fl->lock);
        if (fl->full) {
            struct freelist_cell *cell = fl->full;
            fl->full = cell->next;
            result = cell->data;
            cell->next = fl->empty;
            fl->empty = cell;
        }
        pthread_mutex_unlock(&fl->lock);
    }
    if (!result) {
        result = fl->create_fn();
    }
    return result;
}
static void freelist_return(struct freelist *fl, void* data)
{
    struct freelist_cell* cell = NULL;
    if (fl->empty) {
        pthread_mutex_lock(&fl->lock);
        if (fl->empty) {
            cell = fl->empty;
            fl->empty = cell->next;
            goto add_locked;
        }
        pthread_mutex_unlock(&fl->lock);
    }
    if (!cell) {
        int i, n = 32;
        cell = malloc(sizeof(*cell)*n);
        for (i = 0; i < (n-1); ++i)
            cell[i].next = &cell[i+1];
        cell[i].next = NULL;
    }

    pthread_mutex_lock(&fl->lock);
    ++fl->count;
    /* If the cell came from a freshly malloc'd batch (the only way to
       reach here with a non-NULL chain), push the spare cells onto the
       empty list instead of leaking them; only cell[0] is used now. */
    if (cell->next) {
        struct freelist_cell *last = cell->next;
        while (last->next)
            last = last->next;
        last->next = fl->empty;
        fl->empty = cell->next;
    }
 add_locked:
    cell->data = data;
    cell->next = fl->full;
    fl->full = cell;
    pthread_mutex_unlock(&fl->lock);
}
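
/* A minimal usage sketch of the freelist above (illustrative only,
   compiled out): a freelist caches objects produced by its create_fn,
   so a get/return cycle only hits the allocator on a cold cache.
   `demo_create' is a hypothetical factory, not part of this file. */
#if 0
static void* demo_create() { return malloc(64); }
static struct freelist demo_fl = FREELIST_INITIALIZER(demo_create);

static void demo_freelist_cycle()
{
    void *obj = freelist_get(&demo_fl);   /* reuses a cached object if any */
    /* ... use obj ... */
    freelist_return(&demo_fl, obj);       /* cache it for the next caller */
}
#endif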
int pthread_attr_init(pthread_attr_t *attr)
{
    attr->stack_size = 0;
    return 0;
}

int pthread_attr_destroy(pthread_attr_t *attr)
{
    return 0;
}

int pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
    fprintf(stderr, "pthread_attr_setstack called\n");
    ExitProcess(1);
    return 0;
}

int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    attr->stack_size = stacksize;
    return 0;
}
typedef unsigned char boolean;

/* TLS management internals */

static DWORD thread_self_tls_index;

static void (*tls_destructors[PTHREAD_KEYS_MAX])(void*);
static boolean tls_used[PTHREAD_KEYS_MAX];
static pthread_key_t tls_max_used_key;
static pthread_mutex_t thread_key_lock = PTHREAD_MUTEX_INITIALIZER;
static void tls_call_destructors();

static pthread_t tls_impersonate(pthread_t other) {
    pthread_t old = pthread_self();
    TlsSetValue(thread_self_tls_index, other);
    return old;
}
static void do_nothing() {}
/* Fiber context hooks */
void (*pthread_save_context_hook)() = do_nothing;
void (*pthread_restore_context_hook)() = do_nothing;
/* Some parts of the pthread_np API provide access to Windows NT Fibers
   (cooperatively scheduled coroutines). Each fiber is wrapped in its
   own pthread.

   Fibers may be entered by different threads during their lifetime,
   i.e. they are orthogonal to threads.

   Contrary to the raw NT Fibers API, we distinguish two kinds of
   objects: fibers-created-as-fibers and everything else (a thread that
   is not a fiber, a thread converted to a fiber, a noticed system
   thread). Consequently, though there is no "main fiber" in NT,
   there _is_ a main pthread for each (wrapped) system thread, living
   or dying with this system thread. It may be converted to a fiber,
   but its "fiberness" is incidental, serving only to make it possible
   to switch into other fibers or create them.

   Any fiber that is currently running belongs to some thread
   (fiber-created-as-thread, to be exact). Call it the FCAT group.

   [1] Entrance lock: prevents double entry.

   [2] Suspend for fibers -> "try locking the entrance lock; if that
   fails, do a real thread suspend".

   [3] Resume for fibers -> two strategies, depending on what [2] did.

   [4] Exit/death for fibers -> switch to the fiber's FCAT group.

   [2], [3], [4] don't apply to threads-converted-to-fibers: a full
   stop/resume is done on them if there is no cooperatively-accessed
   published context (of which see below). */
void pthread_np_suspend(pthread_t thread)
{
    pthread_mutex_lock(&thread->fiber_lock);
    if (thread->fiber_group) {
        CONTEXT context;
        SuspendThread(thread->fiber_group->handle);
        context.ContextFlags = CONTEXT_FULL;
        GetThreadContext(thread->fiber_group->handle, &context);
    }
}
/* Momentary suspend/getcontext/resume without locking or preventing
   fiber reentrance. This call is for asymmetric synchronization,
   ensuring that the thread sees global state before doing any
   globally visible stores. */
void pthread_np_serialize(pthread_t thread)
{
    CONTEXT winctx;
    winctx.ContextFlags = CONTEXT_INTEGER;
    if (!thread->created_as_fiber) {
        SuspendThread(thread->handle);
        GetThreadContext(thread->handle, &winctx);
        ResumeThread(thread->handle);
    }
}
int pthread_np_get_thread_context(pthread_t thread, CONTEXT* context)
{
    context->ContextFlags = CONTEXT_FULL;
    return thread->fiber_group &&
        GetThreadContext(thread->fiber_group->handle, context) != 0;
}
void pthread_np_resume(pthread_t thread)
{
    HANDLE host_thread = thread->fiber_group ? thread->fiber_group->handle : NULL;
    /* Unlock first, _then_ resume, or we may end up accessing a freed
       pthread structure (e.g. at startup with CREATE_SUSPENDED) */
    pthread_mutex_unlock(&thread->fiber_lock);
    if (host_thread) {
        ResumeThread(host_thread);
    }
}
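
/* Illustrative pairing (compiled out): pthread_np_suspend above leaves
   thread->fiber_lock held, and pthread_np_resume releases it, so the
   two calls must bracket one another on the same pthread (recall that
   these CRITICAL_SECTION mutexes hang if unlocked elsewhere). */
#if 0
static void demo_stop_and_inspect(pthread_t other)
{
    pthread_np_suspend(other);  /* takes other->fiber_lock, suspends host */
    /* ... inspect state owned by `other' ... */
    pthread_np_resume(other);   /* releases the lock, then resumes */
}
#endif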
/* FIXME shouldn't be used. */
void pthread_np_request_interruption(pthread_t thread)
{
    if (thread->waiting_cond) {
        pthread_cond_broadcast(thread->waiting_cond);
    }
}
/* Thread identity, as far as pthreads are concerned, is determined by
   the pthread_t structure that is stored in a TLS slot
   (thread_self_tls_index). This slot is reassigned when fibers are
   switched with the pthread_np API.

   Two reasons for not using fiber-local storage for this purpose: (1)
   Fls is too young: everything else here works with Win2000, but it
   requires WinXP; (2) this implementation also works with threads that
   aren't fibers, and that's a good thing.

   There is one more case, besides fiber switching, when pthread_self
   identity migrates between system threads: for a non-main system
   thread that is not [pthread_create]d, thread-specific data
   destructors run in a thread from a system thread pool, after the
   original thread dies. In order to provide compatibility with classic
   pthread TSD, the system pool thread acquires the dead thread's
   identity for the duration of the destructor calls. */
pthread_t pthread_self()
{
    return (pthread_t)TlsGetValue(thread_self_tls_index);
}
const char * state_to_str(pthread_thread_state state)
{
    switch (state) {
      case pthread_state_running: return "running";
      case pthread_state_finished: return "finished";
      case pthread_state_joined: return "joined";
      default: return "unknown";
    }
}
/* Two kinds of threads (or fibers) are supported: (1) created by
   pthread_create, (2) created independently and noticed by
   pthread_np_notice_thread. The first kind runs a predefined thread
   function or fiber function; thread_or_fiber_function incorporates
   whatever they have in common. */
static void thread_or_fiber_function(pthread_t self)
{
    pthread_t prev = tls_impersonate(self);
    void* arg = self->arg;
    pthread_fn fn = self->start_routine;

    if (prev) {
        pthread_mutex_lock(&prev->fiber_lock);
        prev->fiber_group = NULL;
        /* The previous fiber, which started us, assigned our
           fiber_group. Now we clear its fiber_group. */
        pthread_mutex_unlock(&prev->fiber_lock);
    }
    self->retval = fn(arg);
    pthread_mutex_lock(&self->lock);
    self->state = pthread_state_finished;
    pthread_cond_broadcast(&self->cond);
    while (!self->detached && self->state != pthread_state_joined) {
        if (self->created_as_fiber) {
            pthread_mutex_unlock(&self->lock);
            pthread_np_switch_to_fiber(self->fiber_group);
            pthread_mutex_lock(&self->lock);
        } else {
            pthread_cond_wait(&self->cond, &self->lock);
        }
    }
    pthread_mutex_unlock(&self->lock);
    pthread_mutex_destroy(&self->lock);
    pthread_mutex_destroy(&self->fiber_lock);
    pthread_cond_destroy(&self->cond);
    tls_call_destructors();
}
/* Thread function for [pthread_create]d threads. A thread may become a
   fiber later, but (as stated above) it isn't supposed to be
   reattached to another system thread, even after that happens. */
DWORD WINAPI Thread_Function(LPVOID param)
{
    pthread_t self = (pthread_t) param;

    self->teb = NtCurrentTeb();
    thread_or_fiber_function(param);
    CloseHandle(self->handle);
    {
        void* fiber = self->fiber;
        free(self);
        if (fiber) {
            /* If the thread was converted to a fiber, deleting the
               fiber from itself exits the thread. There are some rumors
               of possible memory leaks if we just ExitThread or return
               here, hence the statement below. However, no memory leaks
               on bare ExitThread have been observed yet. */
            DeleteFiber(GetCurrentFiber());
        }
    }
    return 0;
}
/* A fiber can't delete itself without simultaneously exiting the
   current thread. We arrange for some other fiber to call
   fiber_destructor when a fiber dies but doesn't want to terminate its
   thread. */
static void fiber_destructor(void* fiber) { DeleteFiber(fiber); }

VOID CALLBACK Fiber_Function(LPVOID param)
{
    pthread_t self = (pthread_t) param;
    thread_or_fiber_function(param);
    {
        /* fiber_group is the main thread into which we are to call */
        pthread_t group = self->fiber_group;
        free(self);
        /* pthread_np_run_in_fiber (see below) normally switches back to
           the caller. Nullify our identity, so it knows there is
           nothing to switch to, and continues running instead. */
        tls_impersonate(NULL);
        if (group) {
            /* Every running [pthread_create]d fiber runs in some thread
               that has its own pthread_self identity (one created as a
               thread and later converted to a fiber). The `group' field
               of a running fiber always points to that other pthread.

               Now switch to our group ("current master fiber created as
               thread"), asking it to delete our (OS) fiber data with
               fiber_destructor. */
            pthread_np_run_in_fiber(group, fiber_destructor, GetCurrentFiber());
        }
        /* Within the current pthread API we never end up here.

           BTW, if fibers are ever pooled, to avoid stack space
           reallocation etc., jumping to the beginning of Fiber_Function
           would be the thing to do here. */
        DeleteFiber(GetCurrentFiber()); /* Exits. See Thread_Function for
                                           an explanation of why not
                                           ExitThread. */
    }
}
/* Signals */
struct sigaction signal_handlers[NSIG];

/* Never called for now */
int sigaction(int signum, const struct sigaction* act, struct sigaction* oldact)
{
    struct sigaction newact = *act;
    if (oldact)
        *oldact = signal_handlers[signum];
    if (!(newact.sa_flags & SA_SIGINFO)) {
        newact.sa_sigaction = (typeof(newact.sa_sigaction))newact.sa_handler;
    }
    signal_handlers[signum] = newact;
    return 0;
}
/* Create a thread or a fiber, depending on the current thread's "fiber
   factory mode". In the latter case, switch into the newly-created
   fiber immediately. */
int pthread_create(pthread_t *thread, const pthread_attr_t *attr,
                   void *(*start_routine) (void *), void *arg)
{
    pthread_t pth = (pthread_t)calloc(sizeof(pthread_thread), 1);
    pthread_t self = pthread_self();
    HANDLE createdThread = NULL;

    if (self && self->fiber_factory) {
        pth->fiber = CreateFiber(attr ? attr->stack_size : 0, Fiber_Function, pth);
        if (!pth->fiber) return 1;
        pth->created_as_fiber = 1;
        /* Has no fiber-group until someone enters it (we will) */
    } else {
        createdThread = CreateThread(NULL, attr ? attr->stack_size : 0,
                                     Thread_Function, pth, CREATE_SUSPENDED, NULL);
        if (!createdThread) return 1;
        /* FCAT is its own fiber-group [initially] */
        pth->fiber_group = pth;
        pth->handle = createdThread;
    }
    pth->start_routine = start_routine;
    pth->arg = arg;
    if (self) {
        pth->blocked_signal_set = self->blocked_signal_set;
    } else {
        sigemptyset(&pth->blocked_signal_set);
    }
    pth->state = pthread_state_running;
    pthread_mutex_init(&pth->lock, NULL);
    pthread_mutex_init(&pth->fiber_lock, NULL);
    pthread_cond_init(&pth->cond, NULL);
    pth->detached = 0;
    if (thread) *thread = pth;
    if (pth->fiber) {
        pthread_np_switch_to_fiber(pth);
    } else {
        /* Resume will unlock, so we lock here */
        pthread_mutex_lock(&pth->fiber_lock);
        pthread_np_resume(pth);
    }
    return 0;
}
int pthread_equal(pthread_t thread1, pthread_t thread2)
{
    return thread1 == thread2;
}

int pthread_detach(pthread_t thread)
{
    int retval = 0;
    pthread_mutex_lock(&thread->lock);
    thread->detached = 1;
    pthread_cond_broadcast(&thread->cond);
    pthread_mutex_unlock(&thread->lock);
    return retval;
}
int pthread_join(pthread_t thread, void **retval)
{
    int fiberp = thread->created_as_fiber;
    pthread_mutex_lock(&thread->lock);
    while (thread->state != pthread_state_finished) {
        if (fiberp) {
            /* just trying */
            pthread_mutex_unlock(&thread->lock);
            pthread_np_switch_to_fiber(thread);
            pthread_mutex_lock(&thread->lock);
        } else {
            pthread_cond_wait(&thread->cond, &thread->lock);
        }
    }
    thread->state = pthread_state_joined;
    pthread_cond_broadcast(&thread->cond);
    if (retval)
        *retval = thread->retval;
    pthread_mutex_unlock(&thread->lock);
    if (fiberp)
        pthread_np_switch_to_fiber(thread);
    return 0;
}
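
/* A minimal create/join sketch (illustrative only, compiled out). Note
   that this emulation returns 1 rather than an errno value on failure,
   and a pthread_t here is a pointer to a heap-allocated
   pthread_thread. `demo_worker' is a hypothetical function. */
#if 0
static void* demo_worker(void *arg)
{
    return (void*)((intptr_t)arg + 1);
}

static int demo_create_join()
{
    pthread_t th;
    void *result;
    if (pthread_create(&th, NULL, demo_worker, (void*)41))
        return 1;
    pthread_join(th, &result);    /* result == (void*)42 */
    return 0;
}
#endif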
/* We manage our own TSD instead of relying on system TLS for anything
   other than pthread identity itself. Reasons: (1) Windows NT TLS
   slots are expensive, (2) pthread identity migration requires only
   one TLS slot assignment, instead of massive copying. */
int pthread_key_create(pthread_key_t *key, void (*destructor)(void*))
{
    pthread_key_t index;
    boolean success = 0;
    pthread_mutex_lock(&thread_key_lock);
    for (index = 0; index < PTHREAD_KEYS_MAX; ++index) {
        if (!tls_used[index]) {
            if (tls_max_used_key < index)
                tls_max_used_key = index;
            tls_destructors[index] = destructor;
            tls_used[index] = 1;
            success = 1;
            break;
        }
    }
    pthread_mutex_unlock(&thread_key_lock);

    if (success) {
        *key = index;
        return 0;
    } else {
        return 1;
    }
}
int pthread_key_delete(pthread_key_t key)
{
    /* The tls_used flag is not a machine word. Let's lock, as there is
       no atomicity guarantee even on x86. */
    pthread_mutex_lock(&thread_key_lock);
    tls_destructors[key] = 0;
    /* No memory barrier here: the application is responsible for the
       proper call sequence, and using the key past this point is
       officially UB. */
    tls_used[key] = 0;
    pthread_mutex_unlock(&thread_key_lock);
    return 0;
}

void __attribute__((sysv_abi)) *pthread_getspecific(pthread_key_t key)
{
    return pthread_self()->specifics[key];
}
/* Internal function calling destructors for the current pthread */
static void tls_call_destructors()
{
    pthread_key_t key;
    int i;
    int called;

    for (i = 0; i < PTHREAD_DESTRUCTOR_ITERATIONS; ++i) {
        called = 0;
        for (key = 0; key <= tls_max_used_key; ++key) {
            void *cell = pthread_getspecific(key);
            pthread_setspecific(key, NULL);
            if (cell && tls_destructors[key]) {
                (tls_destructors[key])(cell);
                called = 1;
            }
        }
        if (!called)
            break;
    }
}
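
/* TSD usage sketch (illustrative only, compiled out): a destructor
   registered with pthread_key_create runs via tls_call_destructors
   when the owning pthread dies, with the slot already reset to NULL.
   `demo_key' and `demo_dtor' are hypothetical names. */
#if 0
static pthread_key_t demo_key;

static void demo_dtor(void *p) { free(p); }

static void demo_tsd()
{
    pthread_key_create(&demo_key, demo_dtor);
    pthread_setspecific(demo_key, malloc(16));
    /* demo_dtor is called with the stored pointer on thread exit */
}
#endif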
pthread_mutex_t once_mutex = PTHREAD_MUTEX_INITIALIZER;

int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
    if (PTHREAD_ONCE_INIT == *once_control) {
        pthread_mutex_lock(&once_mutex);
        if (PTHREAD_ONCE_INIT == *once_control) {
            init_routine();
            *once_control = 42;
        }
        pthread_mutex_unlock(&once_mutex);
    }
    return 0;
}
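
/* pthread_once sketch (illustrative only, compiled out): all callers
   funnel through the global once_mutex, and the control word is
   flipped away from PTHREAD_ONCE_INIT exactly once. The demo_* names
   are hypothetical. */
#if 0
static pthread_once_t demo_once = PTHREAD_ONCE_INIT;

static void demo_init(void) { /* runs exactly once */ }

static void demo_call_site()
{
    pthread_once(&demo_once, demo_init);  /* safe from any thread */
}
#endif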
/* TODO: call signal handlers */
int _sbcl_pthread_sigmask(int how, const sigset_t *set, sigset_t *oldset)
{
    pthread_t self = pthread_self();
    if (oldset)
        *oldset = self->blocked_signal_set;
    if (set) {
        switch (how) {
          case SIG_BLOCK:
            self->blocked_signal_set |= *set;
            break;
          case SIG_UNBLOCK:
            self->blocked_signal_set &= ~(*set);
            break;
          case SIG_SETMASK:
            self->blocked_signal_set = *set;
            break;
        }
    }
    return 0;
}
pthread_mutex_t mutex_init_lock;

int pthread_mutex_init(pthread_mutex_t * mutex, const pthread_mutexattr_t * attr)
{
    *mutex = (struct _pthread_mutex_info*)malloc(sizeof(struct _pthread_mutex_info));
    InitializeCriticalSection(&(*mutex)->cs);
    (*mutex)->file = " (free) ";
    return 0;
}

int pthread_mutexattr_init(pthread_mutexattr_t* attr)
{
    return 0;
}

int pthread_mutexattr_destroy(pthread_mutexattr_t* attr)
{
    return 0;
}

int pthread_mutexattr_settype(pthread_mutexattr_t* attr, int mutex_type)
{
    return 0;
}
int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
    if (*mutex != PTHREAD_MUTEX_INITIALIZER) {
        pthread_np_assert_live_mutex(mutex, "destroy");
        DeleteCriticalSection(&(*mutex)->cs);
        free(*mutex);
        *mutex = &DEAD_MUTEX;
    }
    return 0;
}
/* Add a pending signal to (another) thread */
void pthread_np_add_pending_signal(pthread_t thread, int signum)
{
    /* See __sync_fetch_and_or() for gcc 4.4, at least. As some people
       are still using gcc 3.x, I prefer to do this in asm.

       For win64 we'll HAVE to rewrite it. __sync_fetch_and_or() seems
       to be a rational choice -- there are plenty of GCCisms in SBCL
       anyway. */
    sigset_t to_add = 1 << signum;
    asm("lock orl %1,%0":"=m"(thread->pending_signal_set):"r"(to_add));
}
static void futex_interrupt(pthread_t thread);

/* This pthread_kill doesn't do anything to notify the target pthread
 * of a new pending signal.
 *
 * DFL: ... or so the original comment claimed, but that was before
 * futexes. Now that we wake up futexes, it's not entirely accurate
 * anymore, is it? */
int pthread_kill(pthread_t thread, int signum)
{
    pthread_np_add_pending_signal(thread, signum);
    futex_interrupt(thread);
    return 0;
}

void pthread_np_remove_pending_signal(pthread_t thread, int signum)
{
    sigset_t to_and = ~(1 << signum);
    asm("lock andl %1,%0":"=m"(thread->pending_signal_set):"r"(to_and));
}

sigset_t pthread_np_other_thread_sigpending(pthread_t thread)
{
    return
        InterlockedCompareExchange((volatile LONG*)&thread->pending_signal_set,
                                   0, 0);
}
/* The mutex implementation uses CRITICAL_SECTIONs. Some things to keep
   in mind: (1) uncontested locking is cheap; (2) a long wait on a busy
   lock causes an exception, so it should never be attempted; (3) these
   mutexes are recursive; (4) if one thread locks and another unlocks,
   the next locker hangs. */
int pthread_mutex_lock(pthread_mutex_t *mutex)
{
    pthread_np_assert_live_mutex(mutex, "lock");
    if (*mutex == PTHREAD_MUTEX_INITIALIZER) {
        pthread_mutex_lock(&mutex_init_lock);
        if (*mutex == PTHREAD_MUTEX_INITIALIZER) {
            pthread_mutex_init(mutex, NULL);
        }
        pthread_mutex_unlock(&mutex_init_lock);
    }
    EnterCriticalSection(&(*mutex)->cs);
    DEBUG_OWN(*mutex);
    return 0;
}
int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    pthread_np_assert_live_mutex(mutex, "trylock");
    if (*mutex == PTHREAD_MUTEX_INITIALIZER) {
        pthread_mutex_lock(&mutex_init_lock);
        if (*mutex == PTHREAD_MUTEX_INITIALIZER) {
            pthread_mutex_init(mutex, NULL);
        }
        pthread_mutex_unlock(&mutex_init_lock);
    }
    if (TryEnterCriticalSection(&(*mutex)->cs)) {
        DEBUG_OWN(*mutex);
        return 0;
    }
    else
        return EBUSY;
}
/* Versions of lock/trylock useful for debugging. Our header file
   conditionally redefines lock/trylock to call them. */

int pthread_mutex_lock_annotate_np(pthread_mutex_t *mutex, const char* file, int line)
{
    int contention = 0;
    pthread_np_assert_live_mutex(mutex, "lock");
    if (*mutex == PTHREAD_MUTEX_INITIALIZER) {
        pthread_mutex_lock(&mutex_init_lock);
        if (*mutex == PTHREAD_MUTEX_INITIALIZER) {
            pthread_mutex_init(mutex, NULL);
            pthshow("Mutex #x%p: automatic initialization; #x%p %s +%d",
                    mutex, *mutex,
                    file, line);
        }
        pthread_mutex_unlock(&mutex_init_lock);
    }
    if ((*mutex)->owner) {
        pthshow("Mutex #x%p -> #x%p: contention; owned by #x%p, wanted by #x%p",
                mutex, *mutex,
                (*mutex)->owner,
                pthread_self());
        pthshow("Mutex #x%p -> #x%p: contention notes: old %s +%d, new %s +%d",
                mutex, *mutex,
                (*mutex)->file, (*mutex)->line, file, line);
        contention = 1;
    }
    EnterCriticalSection(&(*mutex)->cs);
    if (contention) {
        pthshow("Mutex #x%p -> #x%p: contention end; left by #x%p, taken by #x%p",
                mutex, *mutex,
                (*mutex)->owner,
                pthread_self());
        pthshow("Mutex #x%p -> #x%p: contention notes: old %s +%d, new %s +%d",
                mutex, *mutex,
                (*mutex)->file, (*mutex)->line, file, line);
    }
    (*mutex)->owner = pthread_self();
    (*mutex)->file = file;
    (*mutex)->line = line;
    return 0;
}
int pthread_mutex_trylock_annotate_np(pthread_mutex_t *mutex, const char* file, int line)
{
    int contention = 0;
    pthread_np_assert_live_mutex(mutex, "trylock");
    if (*mutex == PTHREAD_MUTEX_INITIALIZER) {
        pthread_mutex_lock(&mutex_init_lock);
        if (*mutex == PTHREAD_MUTEX_INITIALIZER) {
            pthread_mutex_init(mutex, NULL);
        }
        pthread_mutex_unlock(&mutex_init_lock);
    }
    if ((*mutex)->owner) {
        pthshow("Mutex #x%p -> #x%p: tried contention; owned by #x%p, wanted by #x%p",
                mutex, *mutex,
                (*mutex)->owner,
                pthread_self());
        pthshow("Mutex #x%p -> #x%p: contention notes: old %s +%d, new %s +%d",
                mutex, *mutex,
                (*mutex)->file, (*mutex)->line, file, line);
        contention = 1;
    }
    if (TryEnterCriticalSection(&(*mutex)->cs)) {
        if (contention) {
            pthshow("Mutex #x%p -> #x%p: contention end; left by #x%p, taken by #x%p",
                    mutex, *mutex,
                    (*mutex)->owner,
                    pthread_self());
            pthshow("Mutex #x%p -> #x%p: contention notes: old %s +%d, new %s +%d",
                    mutex, *mutex,
                    (*mutex)->file, (*mutex)->line, file, line);
        }
        (*mutex)->owner = pthread_self();
        (*mutex)->file = file;
        (*mutex)->line = line;
        return 0;
    }
    else
        return EBUSY;
}
int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
    /* Owner is for debugging only; NB if the mutex is used recursively,
       the owner field will lie. */
    pthread_np_assert_live_mutex(mutex, "unlock");
    DEBUG_RELEASE(*mutex);
    LeaveCriticalSection(&(*mutex)->cs);
    return 0;
}
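
/* Mutex usage sketch (illustrative only, compiled out): statically
   initialized mutexes are lazily turned into CRITICAL_SECTIONs on
   first lock, under mutex_init_lock. Remember caveat (4) above: lock
   and unlock must happen on the same thread. The demo_* names are
   hypothetical. */
#if 0
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_counter;

static void demo_locked_increment()
{
    pthread_mutex_lock(&demo_lock);    /* first call allocates the CS */
    ++demo_counter;
    pthread_mutex_unlock(&demo_lock);
}
#endif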
/* Condition variables implemented with events and wakeup queues. */

/* Thread-local wakeup events are kept in TSD to avoid kernel object
   creation on each call to pthread_cond_[timed]wait */
static pthread_key_t cv_event_key;

/* The .info field in a wakeup record is an "opportunistic" indicator
   that the wakeup has happened. On timeout from WaitForSingleObject,
   the thread doesn't know (1) whether to reset the event, (2) whether
   to (try to) find and unlink the wakeup record. Let's let it know (of
   course, it will know for sure only under cv_wakeup_lock). */

#define WAKEUP_WAITING_NOTIMEOUT 0
#define WAKEUP_WAITING_TIMEOUT 4

#define WAKEUP_HAPPENED 1
#define WAKEUP_BY_INTERRUPT 2
static void* event_create()
{
    return (void*)CreateEvent(NULL, FALSE, FALSE, NULL);
}

static struct freelist event_freelist = FREELIST_INITIALIZER(event_create);

unsigned int pthread_free_event_pool_size()
{
    return event_freelist.count;
}

static HANDLE fe_get_event()
{
    return (HANDLE)freelist_get(&event_freelist);
}

static void fe_return_event(HANDLE handle)
{
    freelist_return(&event_freelist, (void*)handle);
}

static void cv_event_destroy(void* event)
{
    CloseHandle((HANDLE)event);
}
static HANDLE cv_default_event_get_fn()
{
    HANDLE event = pthread_getspecific(cv_event_key);
    if (!event) {
        event = CreateEvent(NULL, FALSE, FALSE, NULL);
        pthread_setspecific(cv_event_key, event);
    } else {
        /* ResetEvent(event); used to be here. Let's try without. It's
           safe in pthread_cond_wait: if WaitForSingleObjectEx ever
           returns, the event is reset automatically, and the wakeup
           queue item is removed by the signaller under wakeup_lock.

           pthread_cond_timedwait should reset the event if
           cv_wakeup_remove failed to find its wakeup record; otherwise
           it's safe too. */
    }
    return event;
}

static void cv_default_event_return_fn(HANDLE event)
{
    /* ResetEvent(event); could be here as well (and used to be).
       Avoiding syscalls makes sense, however. */
}

static pthread_condattr_t cv_default_attr = {
    0,                /* alertable */
    fe_get_event,     /* get_fn (cv_default_event_get_fn is the TSD alternative) */
    fe_return_event,  /* return_fn (cv_default_event_return_fn likewise) */
};
int pthread_cond_init(pthread_cond_t * cv, const pthread_condattr_t * attr)
{
    if (!attr)
        attr = &cv_default_attr;
    pthread_mutex_init(&cv->wakeup_lock, NULL);
    cv->first_wakeup = NULL;
    cv->last_wakeup = NULL;
    cv->alertable = attr->alertable;
    cv->get_fn = attr->get_fn;
    cv->return_fn = attr->return_fn;
    return 0;
}

int pthread_condattr_init(pthread_condattr_t *attr)
{
    *attr = cv_default_attr;
    return 0;
}

int pthread_condattr_destroy(pthread_condattr_t *attr)
{
    return 0;
}

int pthread_condattr_setevent_np(pthread_condattr_t *attr,
                                 cv_event_get_fn get_fn, cv_event_return_fn ret_fn)
{
    attr->get_fn = get_fn ? get_fn : fe_get_event;
    attr->return_fn = ret_fn ? ret_fn : fe_return_event;
    return 0;
}

int pthread_cond_destroy(pthread_cond_t *cv)
{
    pthread_mutex_destroy(&cv->wakeup_lock);
    return 0;
}
int pthread_cond_broadcast(pthread_cond_t *cv)
{
    int count = 0;

    HANDLE postponed[128];
    int npostponed = 0, i;

    /* No strict requirements on the memory visibility model, because
       of the mutex unlock around waiting. */
    if (!cv->first_wakeup)
        return 0;
    pthread_mutex_lock(&cv->wakeup_lock);
    while (cv->first_wakeup)
    {
        struct thread_wakeup * w = cv->first_wakeup;
        HANDLE waitevent = w->event;
        cv->first_wakeup = w->next;
        w->info = WAKEUP_HAPPENED;
        postponed[npostponed++] = waitevent;
        if (npostponed == sizeof(postponed)/sizeof(postponed[0])) {
            for (i = 0; i < npostponed; ++i)
                SetEvent(postponed[i]);
            npostponed = 0;
        }
        ++count;
    }
    cv->last_wakeup = NULL;
    pthread_mutex_unlock(&cv->wakeup_lock);
    for (i = 0; i < npostponed; ++i)
        SetEvent(postponed[i]);
    return 0;
}
int pthread_cond_signal(pthread_cond_t *cv)
{
    struct thread_wakeup * w;
    /* No strict requirements on the memory visibility model, because
       of the mutex unlock around waiting. */
    if (!cv->first_wakeup)
        return 0;
    pthread_mutex_lock(&cv->wakeup_lock);
    w = cv->first_wakeup;
    if (w) {
        HANDLE waitevent = w->event;
        cv->first_wakeup = w->next;
        if (!cv->first_wakeup)
            cv->last_wakeup = NULL;
        w->info = WAKEUP_HAPPENED;
        SetEvent(waitevent);
    }
    pthread_mutex_unlock(&cv->wakeup_lock);
    return 0;
}
/* The return value is used for futexes: 0 = ok, 1 on unexpected word
   change. */
int cv_wakeup_add(struct pthread_cond_t* cv, struct thread_wakeup* w)
{
    HANDLE event;
    w->next = NULL;
    pthread_mutex_lock(&cv->wakeup_lock);
    if (w->uaddr) {
        if (w->uval != *w->uaddr) {
            pthread_mutex_unlock(&cv->wakeup_lock);
            return 1;
        }
        pthread_self()->futex_wakeup = w;
    }
    event = cv->get_fn();
    w->event = event;
    if (cv->last_wakeup == w) {
        fprintf(stderr, "cv->last_wakeup == w\n");
        fflush(stderr);
        ExitProcess(0);
    }
    if (cv->last_wakeup != NULL)
    {
        cv->last_wakeup->next = w;
        cv->last_wakeup = w;
    }
    else
    {
        cv->first_wakeup = w;
        cv->last_wakeup = w;
    }
    pthread_mutex_unlock(&cv->wakeup_lock);
    return 0;
}
/* Return true if the wakeup was found, false if missing. */
int cv_wakeup_remove(struct pthread_cond_t* cv, struct thread_wakeup* w)
{
    int result = 0;
    if (w->info == WAKEUP_HAPPENED || w->info == WAKEUP_BY_INTERRUPT)
        goto finish;
    pthread_mutex_lock(&cv->wakeup_lock);

    if (w->info == WAKEUP_HAPPENED || w->info == WAKEUP_BY_INTERRUPT)
        goto unlock;
    if (cv->first_wakeup == w) {
        cv->first_wakeup = w->next;
        if (cv->last_wakeup == w)
            cv->last_wakeup = NULL;
        result = 1;
    } else {
        struct thread_wakeup * prev = cv->first_wakeup;
        while (prev && prev->next != w)
            prev = prev->next;
        if (!prev) {
            goto unlock;
        }
        prev->next = w->next;
        if (cv->last_wakeup == w)
            cv->last_wakeup = prev;
        result = 1;
    }
 unlock:
    pthread_mutex_unlock(&cv->wakeup_lock);
 finish:
    return result;
}
int pthread_cond_wait(pthread_cond_t * cv, pthread_mutex_t * cs)
{
    struct thread_wakeup w;
    w.uaddr = 0;
    w.info = WAKEUP_WAITING_NOTIMEOUT;
    cv_wakeup_add(cv, &w);
    if (cv->last_wakeup->next == cv->last_wakeup) {
        pthread_np_lose(5, "cv->last_wakeup->next == cv->last_wakeup\n");
    }
    if (cv->last_wakeup->next != NULL) {
        pthread_np_lose(5, "cv->last_wakeup->next != NULL\n");
    }
    pthread_self()->waiting_cond = cv;
    DEBUG_RELEASE(*cs);
    pthread_mutex_unlock(cs);
    do {
        if (cv->alertable) {
            while (WaitForSingleObjectEx(w.event, INFINITE, TRUE) == WAIT_IO_COMPLETION);
        } else {
            WaitForSingleObject(w.event, INFINITE);
        }
    } while (w.info == WAKEUP_WAITING_NOTIMEOUT);
    pthread_self()->waiting_cond = NULL;
    /* The event is signalled once; the wakeup is dequeued by the
       signaller. */
    cv->return_fn(w.event);
    pthread_mutex_lock(cs);
    DEBUG_OWN(*cs);
    return 0;
}
int pthread_cond_timedwait(pthread_cond_t * cv, pthread_mutex_t * cs,
                           const struct timespec * abstime)
{
    DWORD rv;
    struct thread_wakeup w;
    pthread_t self = pthread_self();

    w.info = WAKEUP_WAITING_TIMEOUT;
    w.uaddr = 0;
    cv_wakeup_add(cv, &w);
    if (cv->last_wakeup->next == cv->last_wakeup) {
        fprintf(stderr, "cv->last_wakeup->next == cv->last_wakeup\n");
        ExitProcess(0);
    }
    self->waiting_cond = cv;
    DEBUG_RELEASE(*cs);
    /* barrier (release); waiting_cond globally visible */
    pthread_mutex_unlock(cs);
    {
        struct timeval cur_tm;
        long sec, msec;
        gettimeofday(&cur_tm, NULL);
        sec = abstime->tv_sec - cur_tm.tv_sec;
        msec = sec * 1000 + abstime->tv_nsec / 1000000 - cur_tm.tv_usec / 1000;
        if (msec < 0)
            msec = 0;
        do {
            if (cv->alertable) {
                while ((rv = WaitForSingleObjectEx(w.event, msec, TRUE))
                       == WAIT_IO_COMPLETION);
            } else {
                rv = WaitForSingleObject(w.event, msec);
            }
        } while (rv == WAIT_OBJECT_0 && w.info == WAKEUP_WAITING_TIMEOUT);
    }
    self->waiting_cond = NULL;

    if (rv == WAIT_TIMEOUT) {
        if (!cv_wakeup_remove(cv, &w)) {
            /* Someone removed our wakeup record: though we got a
               timeout, the event was (or will be) signalled before we
               got here. Consume this wakeup. */
            WaitForSingleObject(w.event, INFINITE);
        }
    }
    cv->return_fn(w.event);
    pthread_mutex_lock(cs);
    DEBUG_OWN(*cs);
    if (rv == WAIT_TIMEOUT)
        return ETIMEDOUT;
    else
        return 0;
}
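
/* Condition variable sketch (illustrative only, compiled out): the
   standard wait loop. Each waiter queues a thread_wakeup record with
   its per-thread event; signal/broadcast dequeue records and SetEvent
   them, so the predicate must be rechecked after every wakeup. The
   demo_* names are hypothetical; demo_cv needs pthread_cond_init
   somewhere before use, as there is no static cv initializer here. */
#if 0
static pthread_mutex_t demo_mx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t demo_cv;      /* pthread_cond_init(&demo_cv, NULL) */
static int demo_ready;

static void demo_wait_for_ready()
{
    pthread_mutex_lock(&demo_mx);
    while (!demo_ready)
        pthread_cond_wait(&demo_cv, &demo_mx);
    pthread_mutex_unlock(&demo_mx);
}

static void demo_set_ready()
{
    pthread_mutex_lock(&demo_mx);
    demo_ready = 1;
    pthread_cond_signal(&demo_cv);
    pthread_mutex_unlock(&demo_mx);
}
#endif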
int sched_yield()
{
    /* http://stackoverflow.com/questions/1383943/switchtothread-vs-sleep1
       SwitchToThread(); was here. Unsure what's better for us, so just
       trying this. */
    if (!SwitchToThread())
        Sleep(0);
    return 0;
}
void pthread_lock_structures()
{
    pthread_mutex_lock(&mutex_init_lock);
}

void pthread_unlock_structures()
{
    pthread_mutex_unlock(&mutex_init_lock);
}

static int pthread_initialized = 0;

static pthread_cond_t futex_pseudo_cond;
void pthreads_win32_init()
{
    if (!pthread_initialized) {
        thread_self_tls_index = TlsAlloc();
        pthread_mutex_init(&mutex_init_lock, NULL);
        pthread_np_notice_thread();
        pthread_key_create(&cv_event_key, cv_event_destroy);
        pthread_cond_init(&futex_pseudo_cond, NULL);
        pthread_initialized = 1;
    }
}
static
VOID CALLBACK pthreads_win32_unnotice(void* parameter, BOOLEAN timerOrWait)
{
    pthread_t pth = parameter;
    pthread_t self = tls_impersonate(pth);

    tls_call_destructors();
    CloseHandle(pth->handle);
    /* if (pth->fiber && pth->own_fiber) {
           DeleteFiber(pth->fiber);
       } */
    UnregisterWait(pth->wait_handle);

    tls_impersonate(self);
    pthread_mutex_destroy(&pth->fiber_lock);
    pthread_mutex_destroy(&pth->lock);
    free(pth);
}
int pthread_np_notice_thread()
{
    if (!pthread_self()) {
        pthread_t pth = (pthread_t)calloc(sizeof(pthread_thread), 1);
        pth->teb = NtCurrentTeb();
        pthread_mutex_init(&pth->fiber_lock, NULL);
        pthread_mutex_init(&pth->lock, NULL);
        pth->state = pthread_state_running;
        pth->fiber_group = pth;

        sigemptyset(&pth->blocked_signal_set);

        DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                        GetCurrentProcess(), &pth->handle, 0, TRUE,
                        DUPLICATE_SAME_ACCESS);
        tls_impersonate(pth);

        if (pthread_initialized) {
            RegisterWaitForSingleObject(&pth->wait_handle,
                                        pth->handle,
                                        pthreads_win32_unnotice,
                                        pth,
                                        INFINITE,
                                        WT_EXECUTEONLYONCE);
        }
        return 1;
    } else {
        return 0;
    }
}
int pthread_np_convert_self_to_fiber()
{
    pthread_t pth = pthread_self();
    if (!pth)
        return 1;
    if (!pth->fiber) {
        void* fiber = GetCurrentFiber();
        /* Beware: the method below for checking whether the thread is
           already converted is undocumented (but widely used). */
        if (fiber != NULL && fiber != (void*)0x1E00) {
            pth->fiber = fiber;
            pth->own_fiber = 0;
        } else {
            pth->fiber = ConvertThreadToFiber(pth);
            pth->own_fiber = 1;
        }
        if (!pth->fiber)
            return 1;
    }
    return 0;
}
int pthread_np_set_fiber_factory_mode(int on)
{
    pthread_t pth = pthread_self();
    if (on && pthread_np_convert_self_to_fiber()) {
        return 1;
    }
    pth->fiber_factory = on;
    return 0;
}
int pthread_np_switch_to_fiber(pthread_t pth)
{
    pthread_t self = pthread_self();

 again:
    if (pth == self) {
        /* Switching to oneself is a successful no-op.
           NB. SwitchToFiber(GetCurrentFiber()) is not(!). */
        return 0;
    }
    if (!pth->fiber) {
        /* Switch to not-a-fiber-at-all */
        return -1;
    }

    if (!pth->created_as_fiber) {
        /* Switch to a main thread (group): fails if... */
        if (self && (self->fiber_group != pth)) {
            /* ...trying to switch from [under] one main thread into
               another */
            return -1;
        }
    }
    if (!self && pth->created_as_fiber) {
        /* Switch to a free fiber from a non-noticed thread */
        return -1;
    }

    if (self && pthread_np_convert_self_to_fiber()) {
        /* The current thread can't become a fiber (and run fibers) */
        return -1;
    }

    /* If the target fiber is suspended, we wait here. */
    pthread_mutex_lock(&pth->fiber_lock);
    if (pth->fiber_group) {
        /* Reentering a running fiber */
        pthread_mutex_unlock(&pth->fiber_lock);
        /* Don't wait for a running fiber here, just fail. If an
           application wants to wait, it should use some separate
           synchronization. */
        return -1;
    }
    if (self) {
        /* The target's fiber group becomes ours */
        pth->fiber_group = self->fiber_group;
    } else {
        /* Switch-from-null-self (always into a thread, usually from a
           terminating fiber) */
        pth->fiber_group = pth;
    }
    /* The target fiber is now marked as busy */
    pthread_mutex_unlock(&pth->fiber_lock);

    if (self) {
        pthread_save_context_hook();
    }
    /* NB we don't set pthread TLS; the target fiber does it by itself. */
    SwitchToFiber(pth->fiber);

    /* When we return here... */
    pth = tls_impersonate(self);

    /* ...pth contains the fiber that entered this one */
    pthread_restore_context_hook();
    if (pth) {
        pthread_mutex_lock(&pth->fiber_lock);
        if (pth->fiber_group == self->fiber_group) {
            pth->fiber_group = NULL;
        }
        pthread_mutex_unlock(&pth->fiber_lock);
    }
    /* Self surely is not NULL, or we'd never get here */

    /* Implement call-in-fiber */
    if (self->fiber_callback) {
        void (*cb)(void*) = self->fiber_callback;
        void *ctx = self->fiber_callback_context;

        /* Nested callbacks and fiber switches are possible, so clear
           the callback pointer here */
        self->fiber_callback = NULL;
        self->fiber_callback_context = NULL;
        cb(ctx);
        if (pth) {
            /* Return to the caller without a recursive
               pthread_np_switch_to_fiber. This way, a "utility fiber"
               serving multiple callbacks won't grow its stack to
               infinity. */
            goto again;
        }
        /* There is no `callback client' pretending to be returned
           into: this means the callback shouldn't yield to its
           caller. */
    }
    return 0; /* success */
}
int pthread_np_run_in_fiber(pthread_t pth, void (*callback)(void*),
                            void* context)
{
    pth->fiber_callback = callback;
    pth->fiber_callback_context = context;
    return pthread_np_switch_to_fiber(pth);
}

HANDLE pthread_np_get_handle(pthread_t pth)
{
    return pth->handle;
}

void* pthread_np_get_lowlevel_fiber(pthread_t pth)
{
    return pth->fiber;
}

int pthread_np_delete_lowlevel_fiber(void* fiber)
{
    DeleteFiber(fiber);
    return 0;
}
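
/* Call-in-fiber sketch (illustrative only, compiled out):
   pthread_np_run_in_fiber stores a callback on the target pthread and
   switches to it; the target runs the callback at the tail of
   pthread_np_switch_to_fiber and then switches back to us. The demo_*
   names are hypothetical. */
#if 0
static void demo_callback(void *ctx)
{
    /* runs on the stack of the target fiber */
    *(int*)ctx = 1;
}

static int demo_run_in_fiber(pthread_t target)
{
    int done = 0;
    if (pthread_np_run_in_fiber(target, demo_callback, &done))
        return -1;
    return done;              /* 1 if the callback ran */
}
#endif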
int sigemptyset(sigset_t *set)
{
    *set = 0;
    return 0;
}

int sigfillset(sigset_t *set)
{
    *set = 0xfffffffful;
    return 0;
}

int sigaddset(sigset_t *set, int signum)
{
    *set |= 1 << signum;
    return 0;
}

int sigdelset(sigset_t *set, int signum)
{
    *set &= ~(1 << signum);
    return 0;
}

int sigismember(const sigset_t *set, int signum)
{
    return (*set & (1 << signum)) != 0;
}

int sigpending(sigset_t *set)
{
    *set = InterlockedCompareExchange((volatile LONG*)&pthread_self()->pending_signal_set,
                                      0, 0);
    return 0;
}
#define FUTEX_EWOULDBLOCK 3
#define FUTEX_EINTR 2
#define FUTEX_ETIMEDOUT 1
int
futex_wait(volatile intptr_t *lock_word, intptr_t oldval, long sec, unsigned long usec)
{
    struct thread_wakeup w;
    pthread_t self = pthread_self();
    DWORD msec = sec < 0 ? INFINITE : (sec * 1000 + usec / 1000);
    DWORD wfso;
    int result;
    sigset_t pendset;
    int maybeINTR;
    int info = sec < 0 ? WAKEUP_WAITING_NOTIMEOUT : WAKEUP_WAITING_TIMEOUT;

    sigpending(&pendset);
    if (pendset & ~self->blocked_signal_set)
        return FUTEX_EINTR;
    w.uaddr = lock_word;
    w.uval = oldval;
    w.info = info;

    if (cv_wakeup_add(&futex_pseudo_cond, &w)) {
        return FUTEX_EWOULDBLOCK;
    }
    self->futex_wakeup = &w;
    do {
        wfso = WaitForSingleObject(w.event, msec);
    } while (wfso == WAIT_OBJECT_0 && w.info == info);
    self->futex_wakeup = NULL;
    sigpending(&pendset);
    maybeINTR = (pendset & ~self->blocked_signal_set) ? FUTEX_EINTR : 0;

    switch (wfso) {
      case WAIT_TIMEOUT:
        if (!cv_wakeup_remove(&futex_pseudo_cond, &w)) {
            /* Timeout, but someone else removed the wakeup. */
            result = maybeINTR;
            WaitForSingleObject(w.event, INFINITE);
        } else {
            result = FUTEX_ETIMEDOUT;
        }
        break;
      case WAIT_OBJECT_0:
        result = maybeINTR;
        break;
      default:
        result = -1;
        break;
    }
    futex_pseudo_cond.return_fn(w.event);
    return result;
}
int
futex_wake(volatile intptr_t *lock_word, int n)
{
    pthread_cond_t *cv = &futex_pseudo_cond;
    struct thread_wakeup *w, *prev;
    HANDLE postponed[128];
    int npostponed = 0, i;

    if (n == 0) return 0;

    pthread_mutex_lock(&cv->wakeup_lock);
    for (w = cv->first_wakeup, prev = NULL; w && n;) {
        if (w->uaddr == lock_word) {
            HANDLE event = w->event;
            w->info = WAKEUP_HAPPENED;
            if (cv->last_wakeup == w)
                cv->last_wakeup = prev;
            w = w->next;
            if (!prev) {
                cv->first_wakeup = w;
            } else {
                prev->next = w;
            }
            n--;
            postponed[npostponed++] = event;
            if (npostponed == sizeof(postponed)/sizeof(postponed[0])) {
                for (i = 0; i < npostponed; ++i)
                    SetEvent(postponed[i]);
                npostponed = 0;
            }
        } else {
            prev = w, w = w->next;
        }
    }
    pthread_mutex_unlock(&cv->wakeup_lock);
    for (i = 0; i < npostponed; ++i)
        SetEvent(postponed[i]);
    return 0;
}
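
/* Futex usage sketch (illustrative only, compiled out): the canonical
   pattern. The waiter rechecks the word before sleeping
   (cv_wakeup_add fails with FUTEX_EWOULDBLOCK on a changed word), so a
   waker that changes the word first and then calls futex_wake cannot
   be missed. `demo_word' and the functions are hypothetical names. */
#if 0
static volatile intptr_t demo_word;

static void demo_futex_block_while_zero()
{
    while (demo_word == 0) {
        /* sec < 0 means wait with no timeout */
        if (futex_wait(&demo_word, 0, -1, 0) == FUTEX_EINTR)
            break;            /* a pending unblocked signal arrived */
    }
}

static void demo_futex_release()
{
    demo_word = 1;            /* change the word first... */
    futex_wake(&demo_word, 1); /* ...then wake one waiter */
}
#endif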
static void futex_interrupt(pthread_t thread)
{
    if (thread->futex_wakeup) {
        pthread_cond_t *cv = &futex_pseudo_cond;
        struct thread_wakeup *w;
        HANDLE event;
        pthread_mutex_lock(&cv->wakeup_lock);
        if ((w = thread->futex_wakeup)) {
            /* We are taking wakeup_lock recursively - that's OK with
               CRITICAL_SECTIONs */
            if (cv_wakeup_remove(&futex_pseudo_cond, w)) {
                event = w->event;
                w->info = WAKEUP_BY_INTERRUPT;
                thread->futex_wakeup = NULL;
            } else {
                w = NULL;
            }
        }
        if (w) {
            SetEvent(event);
        }
        pthread_mutex_unlock(&cv->wakeup_lock);
    }
}
void pthread_np_lose(int trace_depth, const char* fmt, ...)
{
    va_list header;
    void* frame;
    int n = 0;
    void** lastseh;

    va_start(header, fmt);
    vfprintf(stderr, fmt, header);
    va_end(header);
    for (lastseh = *(void**)NtCurrentTeb();
         lastseh && (lastseh != (void*)0xFFFFFFFF);
         lastseh = *lastseh);

    /* NB: the original passed the va_list to %s here, which is
       undefined behavior; print just the pthread identity instead. */
    fprintf(stderr, "Backtrace (pthread %p):\n", pthread_self());
    for (frame = __builtin_frame_address(0); frame; frame = *(void**)frame)
    {
        if ((n++) > trace_depth)
            return;
        fprintf(stderr, "[#%02d]: ebp = %p, ret = %p\n", n,
                frame, ((void**)frame)[1]);
    }
    ExitProcess(0);
}
int
sem_init(sem_t *sem, int pshared_not_implemented, unsigned int value)
{
    sem_t semh = CreateSemaphore(NULL, value, SEM_VALUE_MAX, NULL);
    if (!semh)
        return -1;
    *sem = semh;
    return 0;
}

int
sem_post(sem_t *sem)
{
    return !ReleaseSemaphore(*sem, 1, NULL);
}

static int
sem_wait_timeout(sem_t *sem, DWORD ms)
{
    switch (WaitForSingleObject(*sem, ms)) {
      case WAIT_OBJECT_0:
        return 0;
      case WAIT_TIMEOUT:
        /* errno = EAGAIN; */
        return -1;
      default:
        /* errno = EINVAL; */
        return -1;
    }
}

int
sem_wait(sem_t *sem)
{
    return sem_wait_timeout(sem, INFINITE);
}

int
sem_trywait(sem_t *sem)
{
    return sem_wait_timeout(sem, 0);
}

int
sem_destroy(sem_t *sem)
{
    return !CloseHandle(*sem);
}
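
/* Semaphore sketch (illustrative only, compiled out): sem_t wraps a
   Windows semaphore handle; sem_wait/sem_trywait map to
   WaitForSingleObject with INFINITE and 0 timeouts respectively. */
#if 0
static void demo_semaphore()
{
    sem_t sem;
    sem_init(&sem, 0, 1);     /* pshared is not implemented */
    sem_wait(&sem);           /* count 1 -> 0 */
    sem_post(&sem);           /* count 0 -> 1 */
    sem_destroy(&sem);
}
#endif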
#endif