2 /* This code implemented by Dag.Gruneau@elsa.preseco.comm.se */
3 /* Fast NonRecursiveMutex support by Yakov Markovitch, markovitch@iso.ru */
4 /* Eliminated some memory leaks, gsw@agere.com */
12 typedef struct NRMUTEX
{
16 } NRMUTEX
, *PNRMUTEX
;
18 typedef PVOID WINAPI
interlocked_cmp_xchg_t(PVOID
*dest
, PVOID exc
, PVOID comperand
) ;
20 /* Sorry mate, but we haven't got InterlockedCompareExchange in Win95! */
22 interlocked_cmp_xchg(PVOID
*dest
, PVOID exc
, PVOID comperand
)
24 static LONG spinlock
= 0 ;
28 /* Acqire spinlock (yielding control to other threads if cant aquire for the moment) */
29 while(InterlockedExchange(&spinlock
, 1))
31 // Using Sleep(0) can cause a priority inversion.
32 // Sleep(0) only yields the processor if there's
33 // another thread of the same priority that's
34 // ready to run. If a high-priority thread is
35 // trying to acquire the lock, which is held by
36 // a low-priority thread, then the low-priority
37 // thread may never get scheduled and hence never
38 // free the lock. NT attempts to avoid priority
39 // inversions by temporarily boosting the priority
40 // of low-priority runnable threads, but the problem
41 // can still occur if there's a medium-priority
42 // thread that's always runnable. If Sleep(1) is used,
43 // then the thread unconditionally yields the CPU. We
44 // only do this for the second and subsequent even
45 // iterations, since a millisecond is a long time to wait
46 // if the thread can be scheduled in again sooner
47 // (~100,000 instructions).
48 // Avoid priority inversion: 0, 1, 0, 1,...
53 if (result
== comperand
)
55 /* Release spinlock */
60 static interlocked_cmp_xchg_t
*ixchg
;
63 InitializeNonRecursiveMutex(PNRMUTEX mutex
)
67 /* Sorely, Win95 has no InterlockedCompareExchange API (Win98 has), so we have to use emulation */
68 HANDLE kernel
= GetModuleHandle("kernel32.dll") ;
69 if (!kernel
|| (ixchg
= (interlocked_cmp_xchg_t
*)GetProcAddress(kernel
, "InterlockedCompareExchange")) == NULL
)
70 ixchg
= interlocked_cmp_xchg
;
73 mutex
->owned
= -1 ; /* No threads have entered NonRecursiveMutex */
74 mutex
->thread_id
= 0 ;
75 mutex
->hevent
= CreateEvent(NULL
, FALSE
, FALSE
, NULL
) ;
76 return mutex
->hevent
!= NULL
; /* TRUE if the mutex is created */
/* From here on, route InterlockedCompareExchange through the `ixchg`
   pointer so the Win95 emulation can stand in for the real API. */
#ifdef InterlockedCompareExchange
#undef InterlockedCompareExchange
#endif
#define InterlockedCompareExchange(dest,exchange,comperand) (ixchg((dest), (exchange), (comperand)))
85 DeleteNonRecursiveMutex(PNRMUTEX mutex
)
88 CloseHandle(mutex
->hevent
) ;
89 mutex
->hevent
= NULL
; /* Just in case */
93 EnterNonRecursiveMutex(PNRMUTEX mutex
, BOOL wait
)
95 /* Assume that the thread waits successfully */
98 /* InterlockedIncrement(&mutex->owned) == 0 means that no thread currently owns the mutex */
101 if (InterlockedCompareExchange((PVOID
*)&mutex
->owned
, (PVOID
)0, (PVOID
)-1) != (PVOID
)-1)
102 return WAIT_TIMEOUT
;
103 ret
= WAIT_OBJECT_0
;
106 ret
= InterlockedIncrement(&mutex
->owned
) ?
107 /* Some thread owns the mutex, let's wait... */
108 WaitForSingleObject(mutex
->hevent
, INFINITE
) : WAIT_OBJECT_0
;
110 mutex
->thread_id
= GetCurrentThreadId() ; /* We own it */
115 LeaveNonRecursiveMutex(PNRMUTEX mutex
)
117 /* We don't own the mutex */
118 mutex
->thread_id
= 0 ;
120 InterlockedDecrement(&mutex
->owned
) < 0 ||
121 SetEvent(mutex
->hevent
) ; /* Other threads are waiting, wake one on them up */
125 AllocNonRecursiveMutex(void)
127 PNRMUTEX mutex
= (PNRMUTEX
)malloc(sizeof(NRMUTEX
)) ;
128 if (mutex
&& !InitializeNonRecursiveMutex(mutex
))
137 FreeNonRecursiveMutex(PNRMUTEX mutex
)
141 DeleteNonRecursiveMutex(mutex
) ;
146 long PyThread_get_thread_ident(void);
/*
 * Initialization of the C package, should not be needed.
 */
static void
PyThread__init_thread(void)
{
    /* Nothing to do on NT: the platform needs no per-package setup. */
}
168 bootstrap(void *call
)
170 callobj
*obj
= (callobj
*)call
;
171 /* copy callobj since other thread might free it before we're done */
172 void (*func
)(void*) = obj
->func
;
173 void *arg
= obj
->arg
;
175 obj
->id
= PyThread_get_thread_ident();
176 ReleaseSemaphore(obj
->done
, 1, NULL
);
182 PyThread_start_new_thread(void (*func
)(void *), void *arg
)
187 dprintf(("%ld: PyThread_start_new_thread called\n",
188 PyThread_get_thread_ident()));
190 PyThread_init_thread();
192 obj
.id
= -1; /* guilty until proved innocent */
195 obj
.done
= CreateSemaphore(NULL
, 0, 1, NULL
);
196 if (obj
.done
== NULL
)
199 rv
= _beginthread(bootstrap
, _pythread_stacksize
, &obj
);
200 if (rv
== (Py_uintptr_t
)-1) {
201 /* I've seen errno == EAGAIN here, which means "there are
204 dprintf(("%ld: PyThread_start_new_thread failed: %p errno %d\n",
205 PyThread_get_thread_ident(), rv
, errno
));
209 dprintf(("%ld: PyThread_start_new_thread succeeded: %p\n",
210 PyThread_get_thread_ident(), rv
));
211 /* wait for thread to initialize, so we can get its id */
212 WaitForSingleObject(obj
.done
, INFINITE
);
213 assert(obj
.id
!= -1);
215 CloseHandle((HANDLE
)obj
.done
);
/*
 * Return the thread Id instead of a handle. The Id is said to uniquely
 * identify the thread in the system.
 */
long
PyThread_get_thread_ident(void)
{
    PyThread_init_thread();

    return GetCurrentThreadId();
}
233 do_PyThread_exit_thread(int no_cleanup
)
235 dprintf(("%ld: PyThread_exit_thread called\n", PyThread_get_thread_ident()));
/* Public entry: exit the current thread, running cleanup. */
void
PyThread_exit_thread(void)
{
    do_PyThread_exit_thread(0);
}
/* Public entry: exit the current thread, skipping cleanup. */
void
PyThread__exit_thread(void)
{
    do_PyThread_exit_thread(1);
}
258 do_PyThread_exit_prog(int status
, int no_cleanup
)
260 dprintf(("PyThread_exit_prog(%d) called\n", status
));
/* Public entry: exit the program with cleanup. */
void
PyThread_exit_prog(int status)
{
    do_PyThread_exit_prog(status, 0);
}
/* Public entry: exit the program, skipping cleanup. */
void
PyThread__exit_prog(int status)
{
    do_PyThread_exit_prog(status, 1);
}
279 #endif /* NO_EXIT_PROG */
 * Lock support. It has to be implemented as semaphores.
 * I [Dag] tried to implement it with mutex but I couldn't find a way to
 * tell whether a thread already owns the lock or not.
287 PyThread_allocate_lock(void)
291 dprintf(("PyThread_allocate_lock called\n"));
293 PyThread_init_thread();
295 aLock
= AllocNonRecursiveMutex() ;
297 dprintf(("%ld: PyThread_allocate_lock() -> %p\n", PyThread_get_thread_ident(), aLock
));
299 return (PyThread_type_lock
) aLock
;
303 PyThread_free_lock(PyThread_type_lock aLock
)
305 dprintf(("%ld: PyThread_free_lock(%p) called\n", PyThread_get_thread_ident(),aLock
));
307 FreeNonRecursiveMutex(aLock
) ;
311 * Return 1 on success if the lock was acquired
313 * and 0 if the lock was not acquired. This means a 0 is returned
314 * if the lock has already been acquired by this thread!
317 PyThread_acquire_lock(PyThread_type_lock aLock
, int waitflag
)
321 dprintf(("%ld: PyThread_acquire_lock(%p, %d) called\n", PyThread_get_thread_ident(),aLock
, waitflag
));
323 success
= aLock
&& EnterNonRecursiveMutex((PNRMUTEX
) aLock
, (waitflag
? INFINITE
: 0)) == WAIT_OBJECT_0
;
325 dprintf(("%ld: PyThread_acquire_lock(%p, %d) -> %d\n", PyThread_get_thread_ident(),aLock
, waitflag
, success
));
331 PyThread_release_lock(PyThread_type_lock aLock
)
333 dprintf(("%ld: PyThread_release_lock(%p) called\n", PyThread_get_thread_ident(),aLock
));
335 if (!(aLock
&& LeaveNonRecursiveMutex((PNRMUTEX
) aLock
)))
336 dprintf(("%ld: Could not PyThread_release_lock(%p) error: %l\n", PyThread_get_thread_ident(), aLock
, GetLastError()));
339 /* minimum/maximum thread stack sizes supported */
340 #define THREAD_MIN_STACKSIZE 0x8000 /* 32kB */
341 #define THREAD_MAX_STACKSIZE 0x10000000 /* 256MB */
343 /* set the thread stack size.
344 * Return 0 if size is valid, -1 otherwise.
347 _pythread_nt_set_stacksize(size_t size
)
351 _pythread_stacksize
= 0;
356 if (size
>= THREAD_MIN_STACKSIZE
&& size
< THREAD_MAX_STACKSIZE
) {
357 _pythread_stacksize
= size
;
364 #define THREAD_SET_STACKSIZE(x) _pythread_nt_set_stacksize(x)