/* Python/thread_nt.h -- Win32 thread support.
   Patch by Jeremy Katz (SF #1609407). */
/* This code implemented by Dag.Gruneau@elsa.preseco.comm.se */
/* Fast NonRecursiveMutex support by Yakov Markovitch, markovitch@iso.ru */
/* Eliminated some memory leaks, gsw@agere.com */

#include <windows.h>
#include <limits.h>
#ifdef HAVE_PROCESS_H
#include <process.h>
#endif
12 typedef struct NRMUTEX {
13 LONG owned ;
14 DWORD thread_id ;
15 HANDLE hevent ;
16 } NRMUTEX, *PNRMUTEX ;
18 typedef PVOID WINAPI interlocked_cmp_xchg_t(PVOID *dest, PVOID exc, PVOID comperand) ;
20 /* Sorry mate, but we haven't got InterlockedCompareExchange in Win95! */
21 static PVOID WINAPI
22 interlocked_cmp_xchg(PVOID *dest, PVOID exc, PVOID comperand)
24 static LONG spinlock = 0 ;
25 PVOID result ;
26 DWORD dwSleep = 0;
28 /* Acqire spinlock (yielding control to other threads if cant aquire for the moment) */
29 while(InterlockedExchange(&spinlock, 1))
31 // Using Sleep(0) can cause a priority inversion.
32 // Sleep(0) only yields the processor if there's
33 // another thread of the same priority that's
34 // ready to run. If a high-priority thread is
35 // trying to acquire the lock, which is held by
36 // a low-priority thread, then the low-priority
37 // thread may never get scheduled and hence never
38 // free the lock. NT attempts to avoid priority
39 // inversions by temporarily boosting the priority
40 // of low-priority runnable threads, but the problem
41 // can still occur if there's a medium-priority
42 // thread that's always runnable. If Sleep(1) is used,
43 // then the thread unconditionally yields the CPU. We
44 // only do this for the second and subsequent even
45 // iterations, since a millisecond is a long time to wait
46 // if the thread can be scheduled in again sooner
47 // (~100,000 instructions).
48 // Avoid priority inversion: 0, 1, 0, 1,...
49 Sleep(dwSleep);
50 dwSleep = !dwSleep;
52 result = *dest ;
53 if (result == comperand)
54 *dest = exc ;
55 /* Release spinlock */
56 spinlock = 0 ;
57 return result ;
58 } ;
60 static interlocked_cmp_xchg_t *ixchg;
62 BOOL
63 InitializeNonRecursiveMutex(PNRMUTEX mutex)
65 if (!ixchg)
67 /* Sorely, Win95 has no InterlockedCompareExchange API (Win98 has), so we have to use emulation */
68 HANDLE kernel = GetModuleHandle("kernel32.dll") ;
69 if (!kernel || (ixchg = (interlocked_cmp_xchg_t *)GetProcAddress(kernel, "InterlockedCompareExchange")) == NULL)
70 ixchg = interlocked_cmp_xchg ;
73 mutex->owned = -1 ; /* No threads have entered NonRecursiveMutex */
74 mutex->thread_id = 0 ;
75 mutex->hevent = CreateEvent(NULL, FALSE, FALSE, NULL) ;
76 return mutex->hevent != NULL ; /* TRUE if the mutex is created */
79 #ifdef InterlockedCompareExchange
80 #undef InterlockedCompareExchange
81 #endif
82 #define InterlockedCompareExchange(dest,exchange,comperand) (ixchg((dest), (exchange), (comperand)))
84 VOID
85 DeleteNonRecursiveMutex(PNRMUTEX mutex)
87 /* No in-use check */
88 CloseHandle(mutex->hevent) ;
89 mutex->hevent = NULL ; /* Just in case */
92 DWORD
93 EnterNonRecursiveMutex(PNRMUTEX mutex, BOOL wait)
95 /* Assume that the thread waits successfully */
96 DWORD ret ;
98 /* InterlockedIncrement(&mutex->owned) == 0 means that no thread currently owns the mutex */
99 if (!wait)
101 if (InterlockedCompareExchange((PVOID *)&mutex->owned, (PVOID)0, (PVOID)-1) != (PVOID)-1)
102 return WAIT_TIMEOUT ;
103 ret = WAIT_OBJECT_0 ;
105 else
106 ret = InterlockedIncrement(&mutex->owned) ?
107 /* Some thread owns the mutex, let's wait... */
108 WaitForSingleObject(mutex->hevent, INFINITE) : WAIT_OBJECT_0 ;
110 mutex->thread_id = GetCurrentThreadId() ; /* We own it */
111 return ret ;
114 BOOL
115 LeaveNonRecursiveMutex(PNRMUTEX mutex)
117 /* We don't own the mutex */
118 mutex->thread_id = 0 ;
119 return
120 InterlockedDecrement(&mutex->owned) < 0 ||
121 SetEvent(mutex->hevent) ; /* Other threads are waiting, wake one on them up */
124 PNRMUTEX
125 AllocNonRecursiveMutex(void)
127 PNRMUTEX mutex = (PNRMUTEX)malloc(sizeof(NRMUTEX)) ;
128 if (mutex && !InitializeNonRecursiveMutex(mutex))
130 free(mutex) ;
131 mutex = NULL ;
133 return mutex ;
136 void
137 FreeNonRecursiveMutex(PNRMUTEX mutex)
139 if (mutex)
141 DeleteNonRecursiveMutex(mutex) ;
142 free(mutex) ;
long PyThread_get_thread_ident(void);

/*
 * Initialization of the C package, should not be needed.
 */
static void
PyThread__init_thread(void)
{
}

/*
 * Thread support.
 */
160 typedef struct {
161 void (*func)(void*);
162 void *arg;
163 long id;
164 HANDLE done;
165 } callobj;
167 static int
168 bootstrap(void *call)
170 callobj *obj = (callobj*)call;
171 /* copy callobj since other thread might free it before we're done */
172 void (*func)(void*) = obj->func;
173 void *arg = obj->arg;
175 obj->id = PyThread_get_thread_ident();
176 ReleaseSemaphore(obj->done, 1, NULL);
177 func(arg);
178 return 0;
181 long
182 PyThread_start_new_thread(void (*func)(void *), void *arg)
184 Py_uintptr_t rv;
185 callobj obj;
187 dprintf(("%ld: PyThread_start_new_thread called\n",
188 PyThread_get_thread_ident()));
189 if (!initialized)
190 PyThread_init_thread();
192 obj.id = -1; /* guilty until proved innocent */
193 obj.func = func;
194 obj.arg = arg;
195 obj.done = CreateSemaphore(NULL, 0, 1, NULL);
196 if (obj.done == NULL)
197 return -1;
199 rv = _beginthread(bootstrap, _pythread_stacksize, &obj);
200 if (rv == (Py_uintptr_t)-1) {
201 /* I've seen errno == EAGAIN here, which means "there are
202 * too many threads".
204 dprintf(("%ld: PyThread_start_new_thread failed: %p errno %d\n",
205 PyThread_get_thread_ident(), rv, errno));
206 obj.id = -1;
208 else {
209 dprintf(("%ld: PyThread_start_new_thread succeeded: %p\n",
210 PyThread_get_thread_ident(), rv));
211 /* wait for thread to initialize, so we can get its id */
212 WaitForSingleObject(obj.done, INFINITE);
213 assert(obj.id != -1);
215 CloseHandle((HANDLE)obj.done);
216 return obj.id;
220 * Return the thread Id instead of an handle. The Id is said to uniquely identify the
221 * thread in the system
223 long
224 PyThread_get_thread_ident(void)
226 if (!initialized)
227 PyThread_init_thread();
229 return GetCurrentThreadId();
232 static void
233 do_PyThread_exit_thread(int no_cleanup)
235 dprintf(("%ld: PyThread_exit_thread called\n", PyThread_get_thread_ident()));
236 if (!initialized)
237 if (no_cleanup)
238 _exit(0);
239 else
240 exit(0);
241 _endthread();
/* Public wrapper: exit the current thread with cleanup. */
void
PyThread_exit_thread(void)
{
	do_PyThread_exit_thread(0);
}
/* Public wrapper: exit the current thread without cleanup. */
void
PyThread__exit_thread(void)
{
	do_PyThread_exit_thread(1);
}
256 #ifndef NO_EXIT_PROG
257 static void
258 do_PyThread_exit_prog(int status, int no_cleanup)
260 dprintf(("PyThread_exit_prog(%d) called\n", status));
261 if (!initialized)
262 if (no_cleanup)
263 _exit(status);
264 else
265 exit(status);
268 void
269 PyThread_exit_prog(int status)
271 do_PyThread_exit_prog(status, 0);
274 void
275 PyThread__exit_prog(int status)
277 do_PyThread_exit_prog(status, 1);
279 #endif /* NO_EXIT_PROG */
282 * Lock support. It has too be implemented as semaphores.
283 * I [Dag] tried to implement it with mutex but I could find a way to
284 * tell whether a thread already own the lock or not.
286 PyThread_type_lock
287 PyThread_allocate_lock(void)
289 PNRMUTEX aLock;
291 dprintf(("PyThread_allocate_lock called\n"));
292 if (!initialized)
293 PyThread_init_thread();
295 aLock = AllocNonRecursiveMutex() ;
297 dprintf(("%ld: PyThread_allocate_lock() -> %p\n", PyThread_get_thread_ident(), aLock));
299 return (PyThread_type_lock) aLock;
302 void
303 PyThread_free_lock(PyThread_type_lock aLock)
305 dprintf(("%ld: PyThread_free_lock(%p) called\n", PyThread_get_thread_ident(),aLock));
307 FreeNonRecursiveMutex(aLock) ;
311 * Return 1 on success if the lock was acquired
313 * and 0 if the lock was not acquired. This means a 0 is returned
314 * if the lock has already been acquired by this thread!
317 PyThread_acquire_lock(PyThread_type_lock aLock, int waitflag)
319 int success ;
321 dprintf(("%ld: PyThread_acquire_lock(%p, %d) called\n", PyThread_get_thread_ident(),aLock, waitflag));
323 success = aLock && EnterNonRecursiveMutex((PNRMUTEX) aLock, (waitflag ? INFINITE : 0)) == WAIT_OBJECT_0 ;
325 dprintf(("%ld: PyThread_acquire_lock(%p, %d) -> %d\n", PyThread_get_thread_ident(),aLock, waitflag, success));
327 return success;
330 void
331 PyThread_release_lock(PyThread_type_lock aLock)
333 dprintf(("%ld: PyThread_release_lock(%p) called\n", PyThread_get_thread_ident(),aLock));
335 if (!(aLock && LeaveNonRecursiveMutex((PNRMUTEX) aLock)))
336 dprintf(("%ld: Could not PyThread_release_lock(%p) error: %l\n", PyThread_get_thread_ident(), aLock, GetLastError()));
339 /* minimum/maximum thread stack sizes supported */
340 #define THREAD_MIN_STACKSIZE 0x8000 /* 32kB */
341 #define THREAD_MAX_STACKSIZE 0x10000000 /* 256MB */
343 /* set the thread stack size.
344 * Return 0 if size is valid, -1 otherwise.
346 static int
347 _pythread_nt_set_stacksize(size_t size)
349 /* set to default */
350 if (size == 0) {
351 _pythread_stacksize = 0;
352 return 0;
355 /* valid range? */
356 if (size >= THREAD_MIN_STACKSIZE && size < THREAD_MAX_STACKSIZE) {
357 _pythread_stacksize = size;
358 return 0;
361 return -1;
364 #define THREAD_SET_STACKSIZE(x) _pythread_nt_set_stacksize(x)