vcomp/tests: Add tests for atomic double functions.
[wine.git] / dlls / vcomp / main.c
blob 301370c1bb6c3476d5d88ae5cb2386d3e1114871
/*
 *
 * vcomp implementation
 *
 * Copyright 2011 Austin English
 * Copyright 2012 Dan Kegel
 * Copyright 2015 Sebastian Lackner
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <stdarg.h>
#include <assert.h>

#include "windef.h"
#include "winbase.h"
#include "wine/debug.h"
#include "wine/list.h"

WINE_DEFAULT_DEBUG_CHANNEL(vcomp);

static struct list vcomp_idle_threads = LIST_INIT(vcomp_idle_threads);
static DWORD vcomp_context_tls = TLS_OUT_OF_INDEXES;
static HMODULE vcomp_module;
static int vcomp_max_threads;
static int vcomp_num_threads;
static BOOL vcomp_nested_fork = FALSE;

static RTL_CRITICAL_SECTION vcomp_section;
static RTL_CRITICAL_SECTION_DEBUG critsect_debug =
{
    0, 0, &vcomp_section,
    { &critsect_debug.ProcessLocksList, &critsect_debug.ProcessLocksList },
      0, 0, { (DWORD_PTR)(__FILE__ ": vcomp_section") }
};
static RTL_CRITICAL_SECTION vcomp_section = { &critsect_debug, -1, 0, 0, 0, 0 };

struct vcomp_thread_data
{
    struct vcomp_team_data *team;
    struct vcomp_task_data *task;
    int thread_num;
    BOOL parallel;
    int fork_threads;

    /* only used for concurrent tasks */
    struct list entry;
    CONDITION_VARIABLE cond;

    /* section */
    unsigned int section;
};

struct vcomp_team_data
{
    CONDITION_VARIABLE cond;
    int num_threads;
    int finished_threads;

    /* callback arguments */
    int nargs;
    void *wrapper;
    __ms_va_list valist;

    /* barrier */
    unsigned int barrier;
    int barrier_count;
};

struct vcomp_task_data
{
    /* section */
    unsigned int section;
    int num_sections;
    int section_index;
};

#if defined(__i386__)

extern void CDECL _vcomp_fork_call_wrapper(void *wrapper, int nargs, __ms_va_list args);
__ASM_GLOBAL_FUNC( _vcomp_fork_call_wrapper,
                   "pushl %ebp\n\t"
                   __ASM_CFI(".cfi_adjust_cfa_offset 4\n\t")
                   __ASM_CFI(".cfi_rel_offset %ebp,0\n\t")
                   "movl %esp,%ebp\n\t"
                   __ASM_CFI(".cfi_def_cfa_register %ebp\n\t")
                   "pushl %esi\n\t"
                   __ASM_CFI(".cfi_rel_offset %esi,-4\n\t")
                   "pushl %edi\n\t"
                   __ASM_CFI(".cfi_rel_offset %edi,-8\n\t")
                   "movl 12(%ebp),%edx\n\t"
                   "movl %esp,%edi\n\t"
                   "shll $2,%edx\n\t"
                   "jz 1f\n\t"
                   "subl %edx,%edi\n\t"
                   "andl $~15,%edi\n\t"
                   "movl %edi,%esp\n\t"
                   "movl 12(%ebp),%ecx\n\t"
                   "movl 16(%ebp),%esi\n\t"
                   "cld\n\t"
                   "rep; movsl\n"
                   "1:\tcall *8(%ebp)\n\t"
                   "leal -8(%ebp),%esp\n\t"
                   "popl %edi\n\t"
                   __ASM_CFI(".cfi_same_value %edi\n\t")
                   "popl %esi\n\t"
                   __ASM_CFI(".cfi_same_value %esi\n\t")
                   "popl %ebp\n\t"
                   __ASM_CFI(".cfi_def_cfa %esp,4\n\t")
                   __ASM_CFI(".cfi_same_value %ebp\n\t")
                   "ret" )

#elif defined(__x86_64__)

extern void CDECL _vcomp_fork_call_wrapper(void *wrapper, int nargs, __ms_va_list args);
__ASM_GLOBAL_FUNC( _vcomp_fork_call_wrapper,
                   "pushq %rbp\n\t"
                   __ASM_CFI(".cfi_adjust_cfa_offset 8\n\t")
                   __ASM_CFI(".cfi_rel_offset %rbp,0\n\t")
                   "movq %rsp,%rbp\n\t"
                   __ASM_CFI(".cfi_def_cfa_register %rbp\n\t")
                   "pushq %rsi\n\t"
                   __ASM_CFI(".cfi_rel_offset %rsi,-8\n\t")
                   "pushq %rdi\n\t"
                   __ASM_CFI(".cfi_rel_offset %rdi,-16\n\t")
                   "movq %rcx,%rax\n\t"
                   "movq $4,%rcx\n\t"
                   "cmp %rcx,%rdx\n\t"
                   "cmovgq %rdx,%rcx\n\t"
                   "leaq 0(,%rcx,8),%rdx\n\t"
                   "subq %rdx,%rsp\n\t"
                   "andq $~15,%rsp\n\t"
                   "movq %rsp,%rdi\n\t"
                   "movq %r8,%rsi\n\t"
                   "rep; movsq\n\t"
                   "movq 0(%rsp),%rcx\n\t"
                   "movq 8(%rsp),%rdx\n\t"
                   "movq 16(%rsp),%r8\n\t"
                   "movq 24(%rsp),%r9\n\t"
                   "callq *%rax\n\t"
                   "leaq -16(%rbp),%rsp\n\t"
                   "popq %rdi\n\t"
                   __ASM_CFI(".cfi_same_value %rdi\n\t")
                   "popq %rsi\n\t"
                   __ASM_CFI(".cfi_same_value %rsi\n\t")
                   __ASM_CFI(".cfi_def_cfa_register %rsp\n\t")
                   "popq %rbp\n\t"
                   __ASM_CFI(".cfi_adjust_cfa_offset -8\n\t")
                   __ASM_CFI(".cfi_same_value %rbp\n\t")
                   "ret")

#else

static void CDECL _vcomp_fork_call_wrapper(void *wrapper, int nargs, __ms_va_list args)
{
    ERR("Not implemented for this architecture\n");
}

#endif

static inline struct vcomp_thread_data *vcomp_get_thread_data(void)
{
    return (struct vcomp_thread_data *)TlsGetValue(vcomp_context_tls);
}

static inline void vcomp_set_thread_data(struct vcomp_thread_data *thread_data)
{
    TlsSetValue(vcomp_context_tls, thread_data);
}

static struct vcomp_thread_data *vcomp_init_thread_data(void)
{
    struct vcomp_thread_data *thread_data = vcomp_get_thread_data();
    struct
    {
        struct vcomp_thread_data thread;
        struct vcomp_task_data task;
    } *data;

    if (thread_data) return thread_data;
    if (!(data = HeapAlloc(GetProcessHeap(), 0, sizeof(*data))))
    {
        ERR("could not create thread data\n");
        ExitProcess(1);
    }

    data->task.section = 0;

    thread_data = &data->thread;
    thread_data->team = NULL;
    thread_data->task = &data->task;
    thread_data->thread_num = 0;
    thread_data->parallel = FALSE;
    thread_data->fork_threads = 0;
    thread_data->section = 1;

    vcomp_set_thread_data(thread_data);
    return thread_data;
}

static void vcomp_free_thread_data(void)
{
    struct vcomp_thread_data *thread_data = vcomp_get_thread_data();
    if (!thread_data) return;

    HeapFree(GetProcessHeap(), 0, thread_data);
    vcomp_set_thread_data(NULL);
}

void CDECL _vcomp_atomic_add_i4(int *dest, int val)
{
    interlocked_xchg_add(dest, val);
}

void CDECL _vcomp_atomic_and_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old & val, old) != old);
}

void CDECL _vcomp_atomic_div_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old / val, old) != old);
}

void CDECL _vcomp_atomic_div_ui4(unsigned int *dest, unsigned int val)
{
    unsigned int old;
    do old = *dest; while (interlocked_cmpxchg((int *)dest, old / val, old) != old);
}

void CDECL _vcomp_atomic_mul_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old * val, old) != old);
}

void CDECL _vcomp_atomic_or_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old | val, old) != old);
}

void CDECL _vcomp_atomic_shl_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old << val, old) != old);
}

void CDECL _vcomp_atomic_shr_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old >> val, old) != old);
}

void CDECL _vcomp_atomic_shr_ui4(unsigned int *dest, unsigned int val)
{
    unsigned int old;
    do old = *dest; while (interlocked_cmpxchg((int *)dest, old >> val, old) != old);
}

void CDECL _vcomp_atomic_sub_i4(int *dest, int val)
{
    interlocked_xchg_add(dest, -val);
}

void CDECL _vcomp_atomic_xor_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old ^ val, old) != old);
}

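/*
 * Note (illustration, not part of the original source): code built with
 * MSVC's /openmp switch is assumed to lower "#pragma omp atomic" updates into
 * calls to the helpers above, e.g. an atomic "counter += 5" on a 32-bit int
 * becoming roughly _vcomp_atomic_add_i4(&counter, 5).  Operations without a
 * dedicated interlocked primitive fall back to the compare-exchange retry
 * loops shown above.  The exact lowering is compiler-generated and may differ.
 */
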
void CDECL _vcomp_atomic_add_r4(float *dest, float val)
{
    int old, new;
    do
    {
        old = *(int *)dest;
        *(float *)&new = *(float *)&old + val;
    }
    while (interlocked_cmpxchg((int *)dest, new, old) != old);
}

void CDECL _vcomp_atomic_div_r4(float *dest, float val)
{
    int old, new;
    do
    {
        old = *(int *)dest;
        *(float *)&new = *(float *)&old / val;
    }
    while (interlocked_cmpxchg((int *)dest, new, old) != old);
}

void CDECL _vcomp_atomic_mul_r4(float *dest, float val)
{
    int old, new;
    do
    {
        old = *(int *)dest;
        *(float *)&new = *(float *)&old * val;
    }
    while (interlocked_cmpxchg((int *)dest, new, old) != old);
}

void CDECL _vcomp_atomic_sub_r4(float *dest, float val)
{
    int old, new;
    do
    {
        old = *(int *)dest;
        *(float *)&new = *(float *)&old - val;
    }
    while (interlocked_cmpxchg((int *)dest, new, old) != old);
}

void CDECL _vcomp_atomic_add_r8(double *dest, double val)
{
    LONG64 old, new;
    do
    {
        old = *(LONG64 *)dest;
        *(double *)&new = *(double *)&old + val;
    }
    while (interlocked_cmpxchg64((LONG64 *)dest, new, old) != old);
}

void CDECL _vcomp_atomic_div_r8(double *dest, double val)
{
    LONG64 old, new;
    do
    {
        old = *(LONG64 *)dest;
        *(double *)&new = *(double *)&old / val;
    }
    while (interlocked_cmpxchg64((LONG64 *)dest, new, old) != old);
}

void CDECL _vcomp_atomic_mul_r8(double *dest, double val)
{
    LONG64 old, new;
    do
    {
        old = *(LONG64 *)dest;
        *(double *)&new = *(double *)&old * val;
    }
    while (interlocked_cmpxchg64((LONG64 *)dest, new, old) != old);
}

void CDECL _vcomp_atomic_sub_r8(double *dest, double val)
{
    LONG64 old, new;
    do
    {
        old = *(LONG64 *)dest;
        *(double *)&new = *(double *)&old - val;
    }
    while (interlocked_cmpxchg64((LONG64 *)dest, new, old) != old);
}

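/*
 * Illustration (not part of the original source): the commit this blob
 * belongs to adds tests for the atomic double helpers above to vcomp/tests.
 * A minimal sketch of such a test, assuming the entry points are resolved
 * dynamically from vcomp.dll, could look like:
 *
 *     static void (CDECL *p_vcomp_atomic_add_r8)(double *, double);
 *
 *     HMODULE vcomp = LoadLibraryA("vcomp.dll");
 *     p_vcomp_atomic_add_r8 = (void *)GetProcAddress(vcomp, "_vcomp_atomic_add_r8");
 *
 *     double val = 42.0;
 *     p_vcomp_atomic_add_r8(&val, 17.0);
 *     ok(val == 59.0, "expected 59.0, got %f\n", val);
 *
 * The real tests may be structured differently; this is only a sketch.
 */
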
int CDECL omp_get_dynamic(void)
{
    TRACE("stub\n");
    return 0;
}

int CDECL omp_get_max_threads(void)
{
    TRACE("()\n");
    return vcomp_max_threads;
}

int CDECL omp_get_nested(void)
{
    TRACE("stub\n");
    return vcomp_nested_fork;
}

int CDECL omp_get_num_procs(void)
{
    TRACE("stub\n");
    return 1;
}

int CDECL omp_get_num_threads(void)
{
    struct vcomp_team_data *team_data = vcomp_init_thread_data()->team;
    TRACE("()\n");
    return team_data ? team_data->num_threads : 1;
}

int CDECL omp_get_thread_num(void)
{
    TRACE("()\n");
    return vcomp_init_thread_data()->thread_num;
}

/* Time in seconds since "some time in the past" */
double CDECL omp_get_wtime(void)
{
    return GetTickCount() / 1000.0;
}

void CDECL omp_set_dynamic(int val)
{
    TRACE("(%d): stub\n", val);
}

void CDECL omp_set_nested(int nested)
{
    TRACE("(%d)\n", nested);
    vcomp_nested_fork = (nested != 0);
}

void CDECL omp_set_num_threads(int num_threads)
{
    TRACE("(%d)\n", num_threads);
    if (num_threads >= 1)
        vcomp_num_threads = num_threads;
}

void CDECL _vcomp_barrier(void)
{
    struct vcomp_team_data *team_data = vcomp_init_thread_data()->team;

    TRACE("()\n");

    if (!team_data)
        return;

    EnterCriticalSection(&vcomp_section);
    if (++team_data->barrier_count >= team_data->num_threads)
    {
        team_data->barrier++;
        team_data->barrier_count = 0;
        WakeAllConditionVariable(&team_data->cond);
    }
    else
    {
        unsigned int barrier = team_data->barrier;
        while (team_data->barrier == barrier)
            SleepConditionVariableCS(&team_data->cond, &vcomp_section, INFINITE);
    }
    LeaveCriticalSection(&vcomp_section);
}

void CDECL _vcomp_set_num_threads(int num_threads)
{
    TRACE("(%d)\n", num_threads);
    if (num_threads >= 1)
        vcomp_init_thread_data()->fork_threads = num_threads;
}

int CDECL _vcomp_single_begin(int flags)
{
    TRACE("(%x): stub\n", flags);
    return TRUE;
}

void CDECL _vcomp_single_end(void)
{
    TRACE("stub\n");
}

void CDECL _vcomp_sections_init(int n)
{
    struct vcomp_thread_data *thread_data = vcomp_init_thread_data();
    struct vcomp_task_data *task_data = thread_data->task;

    TRACE("(%d)\n", n);

    EnterCriticalSection(&vcomp_section);
    thread_data->section++;
    if ((int)(thread_data->section - task_data->section) > 0)
    {
        task_data->section = thread_data->section;
        task_data->num_sections = n;
        task_data->section_index = 0;
    }
    LeaveCriticalSection(&vcomp_section);
}

int CDECL _vcomp_sections_next(void)
{
    struct vcomp_thread_data *thread_data = vcomp_init_thread_data();
    struct vcomp_task_data *task_data = thread_data->task;
    int i = -1;

    TRACE("()\n");

    EnterCriticalSection(&vcomp_section);
    if (thread_data->section == task_data->section &&
        task_data->section_index != task_data->num_sections)
    {
        i = task_data->section_index++;
    }
    LeaveCriticalSection(&vcomp_section);
    return i;
}

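/*
 * Illustration (not part of the original source): compiler-generated code
 * for "#pragma omp sections" is assumed to drive the two helpers above in a
 * loop of roughly this shape, with each team thread claiming section indices
 * until _vcomp_sections_next() returns -1:
 *
 *     int i;
 *     _vcomp_sections_init(3);
 *     while ((i = _vcomp_sections_next()) != -1)
 *     {
 *         switch (i)
 *         {
 *             case 0: do_section_a(); break;
 *             case 1: do_section_b(); break;
 *             case 2: do_section_c(); break;
 *         }
 *     }
 *
 * do_section_a/b/c are hypothetical placeholders for the section bodies.
 */
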
void CDECL _vcomp_for_static_simple_init(unsigned int first, unsigned int last, int step,
                                         BOOL increment, unsigned int *begin, unsigned int *end)
{
    unsigned int iterations, per_thread, remaining;
    struct vcomp_thread_data *thread_data = vcomp_init_thread_data();
    struct vcomp_team_data *team_data = thread_data->team;
    int num_threads = team_data ? team_data->num_threads : 1;
    int thread_num = thread_data->thread_num;

    TRACE("(%u, %u, %d, %u, %p, %p)\n", first, last, step, increment, begin, end);

    if (num_threads == 1)
    {
        *begin = first;
        *end = last;
        return;
    }

    if (step <= 0)
    {
        *begin = 0;
        *end = increment ? -1 : 1;
        return;
    }

    if (increment)
        iterations = 1 + (last - first) / step;
    else
    {
        iterations = 1 + (first - last) / step;
        step *= -1;
    }

    per_thread = iterations / num_threads;
    remaining = iterations - per_thread * num_threads;

    if (thread_num < remaining)
        per_thread++;
    else if (per_thread)
        first += remaining * step;
    else
    {
        *begin = first;
        *end = first - step;
        return;
    }

    *begin = first + per_thread * thread_num * step;
    *end = *begin + (per_thread - 1) * step;
}

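/*
 * Worked example (added for illustration, derived from the code above):
 * for first=1, last=10, step=1, increment=TRUE and a team of 3 threads,
 * iterations = 10, per_thread = 3 and remaining = 1, so the iteration space
 * is split as
 *
 *     thread 0: *begin = 1, *end = 4   (4 iterations)
 *     thread 1: *begin = 5, *end = 7   (3 iterations)
 *     thread 2: *begin = 8, *end = 10  (3 iterations)
 *
 * i.e. the first "remaining" threads each take one extra iteration.
 */
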
void CDECL _vcomp_for_static_init(int first, int last, int step, int chunksize, unsigned int *loops,
                                  int *begin, int *end, int *next, int *lastchunk)
{
    unsigned int iterations, num_chunks, per_thread, remaining;
    struct vcomp_thread_data *thread_data = vcomp_init_thread_data();
    struct vcomp_team_data *team_data = thread_data->team;
    int num_threads = team_data ? team_data->num_threads : 1;
    int thread_num = thread_data->thread_num;

    TRACE("(%d, %d, %d, %d, %p, %p, %p, %p, %p)\n",
          first, last, step, chunksize, loops, begin, end, next, lastchunk);

    if (num_threads == 1 && chunksize != 1)
    {
        *loops = 1;
        *begin = first;
        *end = last;
        *next = 0;
        *lastchunk = first;
        return;
    }

    if (first == last)
    {
        *loops = !thread_num;
        if (!thread_num)
        {
            *begin = first;
            *end = last;
            *next = 0;
            *lastchunk = first;
        }
        return;
    }

    if (step <= 0)
    {
        *loops = 0;
        return;
    }

    if (first < last)
        iterations = 1 + (last - first) / step;
    else
    {
        iterations = 1 + (first - last) / step;
        step *= -1;
    }

    if (chunksize < 1)
        chunksize = 1;

    num_chunks = ((DWORD64)iterations + chunksize - 1) / chunksize;
    per_thread = num_chunks / num_threads;
    remaining = num_chunks - per_thread * num_threads;

    *loops = per_thread + (thread_num < remaining);
    *begin = first + thread_num * chunksize * step;
    *end = *begin + (chunksize - 1) * step;
    *next = chunksize * num_threads * step;
    *lastchunk = first + (num_chunks - 1) * chunksize * step;
}

void CDECL _vcomp_for_static_end(void)
{
    TRACE("()\n");
    /* nothing to do here */
}

int CDECL omp_in_parallel(void)
{
    TRACE("()\n");
    return vcomp_init_thread_data()->parallel;
}

static DWORD WINAPI _vcomp_fork_worker(void *param)
{
    struct vcomp_thread_data *thread_data = param;
    vcomp_set_thread_data(thread_data);

    TRACE("starting worker thread for %p\n", thread_data);

    EnterCriticalSection(&vcomp_section);
    for (;;)
    {
        struct vcomp_team_data *team = thread_data->team;
        if (team != NULL)
        {
            LeaveCriticalSection(&vcomp_section);
            _vcomp_fork_call_wrapper(team->wrapper, team->nargs, team->valist);
            EnterCriticalSection(&vcomp_section);

            thread_data->team = NULL;
            list_remove(&thread_data->entry);
            list_add_tail(&vcomp_idle_threads, &thread_data->entry);
            if (++team->finished_threads >= team->num_threads)
                WakeAllConditionVariable(&team->cond);
        }

        if (!SleepConditionVariableCS(&thread_data->cond, &vcomp_section, 5000) &&
            GetLastError() == ERROR_TIMEOUT && !thread_data->team)
        {
            break;
        }
    }
    list_remove(&thread_data->entry);
    LeaveCriticalSection(&vcomp_section);

    TRACE("terminating worker thread for %p\n", thread_data);

    HeapFree(GetProcessHeap(), 0, thread_data);
    vcomp_set_thread_data(NULL);
    FreeLibraryAndExitThread(vcomp_module, 0);
    return 0;
}

void WINAPIV _vcomp_fork(BOOL ifval, int nargs, void *wrapper, ...)
{
    struct vcomp_thread_data *prev_thread_data = vcomp_init_thread_data();
    struct vcomp_thread_data thread_data;
    struct vcomp_team_data team_data;
    struct vcomp_task_data task_data;
    int num_threads;

    TRACE("(%d, %d, %p, ...)\n", ifval, nargs, wrapper);

    if (prev_thread_data->parallel && !vcomp_nested_fork)
        ifval = FALSE;

    if (!ifval)
        num_threads = 1;
    else if (prev_thread_data->fork_threads)
        num_threads = prev_thread_data->fork_threads;
    else
        num_threads = vcomp_num_threads;

    InitializeConditionVariable(&team_data.cond);
    team_data.num_threads = 1;
    team_data.finished_threads = 0;
    team_data.nargs = nargs;
    team_data.wrapper = wrapper;
    __ms_va_start(team_data.valist, wrapper);
    team_data.barrier = 0;
    team_data.barrier_count = 0;

    task_data.section = 0;

    thread_data.team = &team_data;
    thread_data.task = &task_data;
    thread_data.thread_num = 0;
    thread_data.parallel = ifval || prev_thread_data->parallel;
    thread_data.fork_threads = 0;
    thread_data.section = 1;
    list_init(&thread_data.entry);
    InitializeConditionVariable(&thread_data.cond);

    if (num_threads > 1)
    {
        struct list *ptr;
        EnterCriticalSection(&vcomp_section);

        /* reuse existing threads (if any) */
        while (team_data.num_threads < num_threads && (ptr = list_head(&vcomp_idle_threads)))
        {
            struct vcomp_thread_data *data = LIST_ENTRY(ptr, struct vcomp_thread_data, entry);
            data->team = &team_data;
            data->task = &task_data;
            data->thread_num = team_data.num_threads++;
            data->parallel = thread_data.parallel;
            data->fork_threads = 0;
            data->section = 1;
            list_remove(&data->entry);
            list_add_tail(&thread_data.entry, &data->entry);
            WakeAllConditionVariable(&data->cond);
        }

        /* spawn additional threads */
        while (team_data.num_threads < num_threads)
        {
            struct vcomp_thread_data *data;
            HMODULE module;
            HANDLE thread;

            data = HeapAlloc(GetProcessHeap(), 0, sizeof(*data));
            if (!data) break;

            data->team = &team_data;
            data->task = &task_data;
            data->thread_num = team_data.num_threads;
            data->parallel = thread_data.parallel;
            data->fork_threads = 0;
            data->section = 1;
            InitializeConditionVariable(&data->cond);

            thread = CreateThread(NULL, 0, _vcomp_fork_worker, data, 0, NULL);
            if (!thread)
            {
                HeapFree(GetProcessHeap(), 0, data);
                break;
            }

            GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS,
                               (const WCHAR *)vcomp_module, &module);
            team_data.num_threads++;
            list_add_tail(&thread_data.entry, &data->entry);
            CloseHandle(thread);
        }

        LeaveCriticalSection(&vcomp_section);
    }

    vcomp_set_thread_data(&thread_data);
    _vcomp_fork_call_wrapper(team_data.wrapper, team_data.nargs, team_data.valist);
    vcomp_set_thread_data(prev_thread_data);
    prev_thread_data->fork_threads = 0;

    if (team_data.num_threads > 1)
    {
        EnterCriticalSection(&vcomp_section);

        team_data.finished_threads++;
        while (team_data.finished_threads < team_data.num_threads)
            SleepConditionVariableCS(&team_data.cond, &vcomp_section, INFINITE);

        LeaveCriticalSection(&vcomp_section);
        assert(list_empty(&thread_data.entry));
    }

    __ms_va_end(team_data.valist);
}

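/*
 * Illustration (not part of the original source): a parallel region such as
 *
 *     #pragma omp parallel
 *     {
 *         #pragma omp atomic
 *         count++;
 *     }
 *
 * is assumed to be outlined by the compiler into a helper that receives the
 * addresses of the shared variables as pointer-sized arguments, turning the
 * region entry into a call of roughly this form:
 *
 *     static void CDECL parallel_body(int *count)
 *     {
 *         _vcomp_atomic_add_i4(count, 1);
 *     }
 *
 *     _vcomp_fork(TRUE, 1, parallel_body, &count);
 *
 * parallel_body is a hypothetical name; the exact outlining is
 * compiler-specific.
 */
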
BOOL WINAPI DllMain(HINSTANCE instance, DWORD reason, LPVOID reserved)
{
    TRACE("(%p, %d, %p)\n", instance, reason, reserved);

    switch (reason)
    {
        case DLL_WINE_PREATTACH:
            return FALSE;    /* prefer native version */

        case DLL_PROCESS_ATTACH:
        {
            SYSTEM_INFO sysinfo;

            if ((vcomp_context_tls = TlsAlloc()) == TLS_OUT_OF_INDEXES)
            {
                ERR("Failed to allocate TLS index\n");
                return FALSE;
            }

            GetSystemInfo(&sysinfo);
            vcomp_module = instance;
            vcomp_max_threads = sysinfo.dwNumberOfProcessors;
            vcomp_num_threads = sysinfo.dwNumberOfProcessors;
            break;
        }

        case DLL_PROCESS_DETACH:
        {
            if (reserved) break;
            if (vcomp_context_tls != TLS_OUT_OF_INDEXES)
            {
                vcomp_free_thread_data();
                TlsFree(vcomp_context_tls);
            }
            break;
        }

        case DLL_THREAD_DETACH:
        {
            vcomp_free_thread_data();
            break;
        }
    }

    return TRUE;
}