 * Copyright 2011 Austin English
 * Copyright 2012 Dan Kegel
 * Copyright 2015-2016 Sebastian Lackner
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
25 #include "wine/port.h"
33 #include "wine/debug.h"
34 #include "wine/list.h"
36 WINE_DEFAULT_DEBUG_CHANNEL(vcomp
);

typedef CRITICAL_SECTION *omp_lock_t;
typedef CRITICAL_SECTION *omp_nest_lock_t;

static struct list vcomp_idle_threads = LIST_INIT(vcomp_idle_threads);
static DWORD   vcomp_context_tls = TLS_OUT_OF_INDEXES;
static HMODULE vcomp_module;
static int     vcomp_max_threads;
static int     vcomp_num_threads;
static BOOL    vcomp_nested_fork = FALSE;

static RTL_CRITICAL_SECTION vcomp_section;
static RTL_CRITICAL_SECTION_DEBUG critsect_debug =
{
    0, 0, &vcomp_section,
    { &critsect_debug.ProcessLocksList, &critsect_debug.ProcessLocksList },
      0, 0, { (DWORD_PTR)(__FILE__ ": vcomp_section") }
};
static RTL_CRITICAL_SECTION vcomp_section = { &critsect_debug, -1, 0, 0, 0, 0 };

#define VCOMP_DYNAMIC_FLAGS_STATIC    0x01
#define VCOMP_DYNAMIC_FLAGS_CHUNKED   0x02
#define VCOMP_DYNAMIC_FLAGS_GUIDED    0x03
#define VCOMP_DYNAMIC_FLAGS_INCREMENT 0x40
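
/* Scheduling flags passed by compiler-generated code to _vcomp_for_dynamic_init:
 * the low bits select static, chunked or guided distribution, and
 * VCOMP_DYNAMIC_FLAGS_INCREMENT marks a loop whose counter increases. */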

struct vcomp_thread_data
{
    struct vcomp_team_data  *team;
    struct vcomp_task_data  *task;
    int                     thread_num;
    BOOL                    parallel;
    int                     fork_threads;

    /* only used for concurrent tasks */
    struct list             entry;
    CONDITION_VARIABLE      cond;

    /* single */
    unsigned int            single;

    /* section */
    unsigned int            section;

    /* dynamic */
    unsigned int            dynamic;
    unsigned int            dynamic_type;
    unsigned int            dynamic_begin;
    unsigned int            dynamic_end;
};

struct vcomp_team_data
{
    CONDITION_VARIABLE      cond;
    int                     num_threads;
    int                     finished_threads;

    /* callback arguments */
    int                     nargs;
    void                    *wrapper;
    __ms_va_list            valist;

    /* barrier */
    unsigned int            barrier;
    int                     barrier_count;
};

struct vcomp_task_data
{
    /* single */
    unsigned int            single;

    /* section */
    unsigned int            section;
    int                     num_sections;
    int                     section_index;

    /* dynamic */
    unsigned int            dynamic;
    unsigned int            dynamic_first;
    unsigned int            dynamic_last;
    unsigned int            dynamic_iterations;
    int                     dynamic_step;
    unsigned int            dynamic_chunksize;
};

#if defined(__i386__)

extern void CDECL _vcomp_fork_call_wrapper(void *wrapper, int nargs, __ms_va_list args);
__ASM_GLOBAL_FUNC( _vcomp_fork_call_wrapper,
                   __ASM_CFI(".cfi_adjust_cfa_offset 4\n\t")
                   __ASM_CFI(".cfi_rel_offset %ebp,0\n\t")
                   __ASM_CFI(".cfi_def_cfa_register %ebp\n\t")
                   __ASM_CFI(".cfi_rel_offset %esi,-4\n\t")
                   __ASM_CFI(".cfi_rel_offset %edi,-8\n\t")
                   "movl 12(%ebp),%edx\n\t"
                   "movl 12(%ebp),%ecx\n\t"
                   "movl 16(%ebp),%esi\n\t"
                   "1:\tcall *8(%ebp)\n\t"
                   "leal -8(%ebp),%esp\n\t"
                   __ASM_CFI(".cfi_same_value %edi\n\t")
                   __ASM_CFI(".cfi_same_value %esi\n\t")
                   __ASM_CFI(".cfi_def_cfa %esp,4\n\t")
                   __ASM_CFI(".cfi_same_value %ebp\n\t")

#elif defined(__x86_64__)

extern void CDECL _vcomp_fork_call_wrapper(void *wrapper, int nargs, __ms_va_list args);
__ASM_GLOBAL_FUNC( _vcomp_fork_call_wrapper,
                   __ASM_CFI(".cfi_adjust_cfa_offset 8\n\t")
                   __ASM_CFI(".cfi_rel_offset %rbp,0\n\t")
                   __ASM_CFI(".cfi_def_cfa_register %rbp\n\t")
                   __ASM_CFI(".cfi_rel_offset %rsi,-8\n\t")
                   __ASM_CFI(".cfi_rel_offset %rdi,-16\n\t")
                   "cmovgq %rdx,%rcx\n\t"
                   "leaq 0(,%rcx,8),%rdx\n\t"
                   "movq 0(%rsp),%rcx\n\t"
                   "movq 8(%rsp),%rdx\n\t"
                   "movq 16(%rsp),%r8\n\t"
                   "movq 24(%rsp),%r9\n\t"
                   "leaq -16(%rbp),%rsp\n\t"
                   __ASM_CFI(".cfi_same_value %rdi\n\t")
                   __ASM_CFI(".cfi_same_value %rsi\n\t")
                   __ASM_CFI(".cfi_def_cfa_register %rsp\n\t")
                   __ASM_CFI(".cfi_adjust_cfa_offset -8\n\t")
                   __ASM_CFI(".cfi_same_value %rbp\n\t")

#elif defined(__arm__)

extern void CDECL _vcomp_fork_call_wrapper(void *wrapper, int nargs, __ms_va_list args);
__ASM_GLOBAL_FUNC( _vcomp_fork_call_wrapper,
                   "push {r4, r5, LR}\n\t"
                   "subeq SP, SP, #4\n\t"
                   "1:\tsub r3, r3, #4\n\t"
                   "ldr r0, [r2, r3]\n\t"
                   "str r0, [SP, r3]\n\t"
                   "4:\tpop {r0-r3}\n\t"

#else

static void CDECL _vcomp_fork_call_wrapper(void *wrapper, int nargs, __ms_va_list args)
{
    ERR("Not implemented for this architecture\n");
}

#endif

#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))

static inline char interlocked_cmpxchg8(char *dest, char xchg, char compare)
{
    char ret;
    __asm__ __volatile__( "lock; cmpxchgb %2,(%1)"
                          : "=a" (ret) : "r" (dest), "q" (xchg), "0" (compare) : "memory" );
    return ret;
}

static inline short interlocked_cmpxchg16(short *dest, short xchg, short compare)
{
    short ret;
    __asm__ __volatile__( "lock; cmpxchgw %2,(%1)"
                          : "=a" (ret) : "r" (dest), "r" (xchg), "0" (compare) : "memory" );
    return ret;
}

static inline char interlocked_xchg_add8(char *dest, char incr)
{
    char ret;
    __asm__ __volatile__( "lock; xaddb %0,(%1)"
                          : "=q" (ret) : "r" (dest), "0" (incr) : "memory" );
    return ret;
}

static inline short interlocked_xchg_add16(short *dest, short incr)
{
    short ret;
    __asm__ __volatile__( "lock; xaddw %0,(%1)"
                          : "=r" (ret) : "r" (dest), "0" (incr) : "memory" );
    return ret;
}

#else  /* __GNUC__ */

#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_1
static inline char interlocked_cmpxchg8(char *dest, char xchg, char compare)
{
    return __sync_val_compare_and_swap(dest, compare, xchg);
}
static inline char interlocked_xchg_add8(char *dest, char incr)
{
    return __sync_fetch_and_add(dest, incr);
}
#else
static char interlocked_cmpxchg8(char *dest, char xchg, char compare)
{
    EnterCriticalSection(&vcomp_section);
    if (*dest == compare) *dest = xchg; else compare = *dest;
    LeaveCriticalSection(&vcomp_section);
    return compare;
}
static char interlocked_xchg_add8(char *dest, char incr)
{
    char ret;
    EnterCriticalSection(&vcomp_section);
    ret = *dest; *dest += incr;
    LeaveCriticalSection(&vcomp_section);
    return ret;
}
#endif

#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_2
static inline short interlocked_cmpxchg16(short *dest, short xchg, short compare)
{
    return __sync_val_compare_and_swap(dest, compare, xchg);
}
static inline short interlocked_xchg_add16(short *dest, short incr)
{
    return __sync_fetch_and_add(dest, incr);
}
#else
static short interlocked_cmpxchg16(short *dest, short xchg, short compare)
{
    EnterCriticalSection(&vcomp_section);
    if (*dest == compare) *dest = xchg; else compare = *dest;
    LeaveCriticalSection(&vcomp_section);
    return compare;
}
static short interlocked_xchg_add16(short *dest, short incr)
{
    short ret;
    EnterCriticalSection(&vcomp_section);
    ret = *dest; *dest += incr;
    LeaveCriticalSection(&vcomp_section);
    return ret;
}
#endif

#endif  /* __GNUC__ */
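
/* The 8- and 16-bit interlocked helpers above have three fallback tiers: GCC
 * inline assembly on x86/x86_64, the __sync_* builtins where the compiler
 * provides them, and finally an emulation serialized through vcomp_section. */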

static inline struct vcomp_thread_data *vcomp_get_thread_data(void)
{
    return (struct vcomp_thread_data *)TlsGetValue(vcomp_context_tls);
}

static inline void vcomp_set_thread_data(struct vcomp_thread_data *thread_data)
{
    TlsSetValue(vcomp_context_tls, thread_data);
}

static struct vcomp_thread_data *vcomp_init_thread_data(void)
{
    struct vcomp_thread_data *thread_data = vcomp_get_thread_data();
    struct
    {
        struct vcomp_thread_data thread;
        struct vcomp_task_data task;
    } *data;

    if (thread_data) return thread_data;
    if (!(data = HeapAlloc(GetProcessHeap(), 0, sizeof(*data))))
    {
        ERR("could not create thread data\n");
        ExitProcess(1);
    }

    data->task.single   = 0;
    data->task.section  = 0;
    data->task.dynamic  = 0;

    thread_data = &data->thread;
    thread_data->team           = NULL;
    thread_data->task           = &data->task;
    thread_data->thread_num     = 0;
    thread_data->parallel       = FALSE;
    thread_data->fork_threads   = 0;
    thread_data->single         = 1;
    thread_data->section        = 1;
    thread_data->dynamic        = 1;
    thread_data->dynamic_type   = 0;

    vcomp_set_thread_data(thread_data);
    return thread_data;
}

static void vcomp_free_thread_data(void)
{
    struct vcomp_thread_data *thread_data = vcomp_get_thread_data();
    if (!thread_data) return;

    HeapFree(GetProcessHeap(), 0, thread_data);
    vcomp_set_thread_data(NULL);
}

void CDECL _vcomp_atomic_add_i1(char *dest, char val)
{
    interlocked_xchg_add8(dest, val);
}

void CDECL _vcomp_atomic_and_i1(char *dest, char val)
{
    char old;
    do old = *dest; while (interlocked_cmpxchg8(dest, old & val, old) != old);
}

void CDECL _vcomp_atomic_div_i1(char *dest, char val)
{
    char old;
    do old = *dest; while (interlocked_cmpxchg8(dest, old / val, old) != old);
}

void CDECL _vcomp_atomic_div_ui1(unsigned char *dest, unsigned char val)
{
    unsigned char old;
    do old = *dest; while ((unsigned char)interlocked_cmpxchg8((char *)dest, old / val, old) != old);
}

void CDECL _vcomp_atomic_mul_i1(char *dest, char val)
{
    char old;
    do old = *dest; while (interlocked_cmpxchg8(dest, old * val, old) != old);
}

void CDECL _vcomp_atomic_or_i1(char *dest, char val)
{
    char old;
    do old = *dest; while (interlocked_cmpxchg8(dest, old | val, old) != old);
}

void CDECL _vcomp_atomic_shl_i1(char *dest, unsigned int val)
{
    char old;
    do old = *dest; while (interlocked_cmpxchg8(dest, old << val, old) != old);
}

void CDECL _vcomp_atomic_shr_i1(char *dest, unsigned int val)
{
    char old;
    do old = *dest; while (interlocked_cmpxchg8(dest, old >> val, old) != old);
}

void CDECL _vcomp_atomic_shr_ui1(unsigned char *dest, unsigned int val)
{
    unsigned char old;
    do old = *dest; while ((unsigned char)interlocked_cmpxchg8((char *)dest, old >> val, old) != old);
}

void CDECL _vcomp_atomic_sub_i1(char *dest, char val)
{
    interlocked_xchg_add8(dest, -val);
}

void CDECL _vcomp_atomic_xor_i1(char *dest, char val)
{
    char old;
    do old = *dest; while (interlocked_cmpxchg8(dest, old ^ val, old) != old);
}

static void CDECL _vcomp_atomic_bool_and_i1(char *dest, char val)
{
    char old;
    do old = *dest; while (interlocked_cmpxchg8(dest, old && val, old) != old);
}

static void CDECL _vcomp_atomic_bool_or_i1(char *dest, char val)
{
    char old;
    do old = *dest; while (interlocked_cmpxchg8(dest, old ? old : (val != 0), old) != old);
}

void CDECL _vcomp_reduction_i1(unsigned int flags, char *dest, char val)
{
    static void (CDECL * const funcs[])(char *, char) =
    {
        _vcomp_atomic_add_i1,
        _vcomp_atomic_add_i1,
        _vcomp_atomic_mul_i1,
        _vcomp_atomic_and_i1,
        _vcomp_atomic_or_i1,
        _vcomp_atomic_xor_i1,
        _vcomp_atomic_bool_and_i1,
        _vcomp_atomic_bool_or_i1,
    };
    unsigned int op = (flags >> 8) & 0xf;
    op = min(op, sizeof(funcs)/sizeof(funcs[0]) - 1);
    funcs[op](dest, val);
}
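
/* Reduction helpers: bits 8-11 of the flags argument select the combining
 * operation and index the per-type function table above (clamped to the last
 * entry). The same table layout is repeated for each operand type below. */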

void CDECL _vcomp_atomic_add_i2(short *dest, short val)
{
    interlocked_xchg_add16(dest, val);
}

void CDECL _vcomp_atomic_and_i2(short *dest, short val)
{
    short old;
    do old = *dest; while (interlocked_cmpxchg16(dest, old & val, old) != old);
}

void CDECL _vcomp_atomic_div_i2(short *dest, short val)
{
    short old;
    do old = *dest; while (interlocked_cmpxchg16(dest, old / val, old) != old);
}

void CDECL _vcomp_atomic_div_ui2(unsigned short *dest, unsigned short val)
{
    unsigned short old;
    do old = *dest; while ((unsigned short)interlocked_cmpxchg16((short *)dest, old / val, old) != old);
}

void CDECL _vcomp_atomic_mul_i2(short *dest, short val)
{
    short old;
    do old = *dest; while (interlocked_cmpxchg16(dest, old * val, old) != old);
}

void CDECL _vcomp_atomic_or_i2(short *dest, short val)
{
    short old;
    do old = *dest; while (interlocked_cmpxchg16(dest, old | val, old) != old);
}

void CDECL _vcomp_atomic_shl_i2(short *dest, unsigned int val)
{
    short old;
    do old = *dest; while (interlocked_cmpxchg16(dest, old << val, old) != old);
}

void CDECL _vcomp_atomic_shr_i2(short *dest, unsigned int val)
{
    short old;
    do old = *dest; while (interlocked_cmpxchg16(dest, old >> val, old) != old);
}

void CDECL _vcomp_atomic_shr_ui2(unsigned short *dest, unsigned int val)
{
    unsigned short old;
    do old = *dest; while ((unsigned short)interlocked_cmpxchg16((short *)dest, old >> val, old) != old);
}

void CDECL _vcomp_atomic_sub_i2(short *dest, short val)
{
    interlocked_xchg_add16(dest, -val);
}

void CDECL _vcomp_atomic_xor_i2(short *dest, short val)
{
    short old;
    do old = *dest; while (interlocked_cmpxchg16(dest, old ^ val, old) != old);
}

static void CDECL _vcomp_atomic_bool_and_i2(short *dest, short val)
{
    short old;
    do old = *dest; while (interlocked_cmpxchg16(dest, old && val, old) != old);
}

static void CDECL _vcomp_atomic_bool_or_i2(short *dest, short val)
{
    short old;
    do old = *dest; while (interlocked_cmpxchg16(dest, old ? old : (val != 0), old) != old);
}

void CDECL _vcomp_reduction_i2(unsigned int flags, short *dest, short val)
{
    static void (CDECL * const funcs[])(short *, short) =
    {
        _vcomp_atomic_add_i2,
        _vcomp_atomic_add_i2,
        _vcomp_atomic_mul_i2,
        _vcomp_atomic_and_i2,
        _vcomp_atomic_or_i2,
        _vcomp_atomic_xor_i2,
        _vcomp_atomic_bool_and_i2,
        _vcomp_atomic_bool_or_i2,
    };
    unsigned int op = (flags >> 8) & 0xf;
    op = min(op, sizeof(funcs)/sizeof(funcs[0]) - 1);
    funcs[op](dest, val);
}

void CDECL _vcomp_atomic_add_i4(int *dest, int val)
{
    interlocked_xchg_add(dest, val);
}

void CDECL _vcomp_atomic_and_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old & val, old) != old);
}

void CDECL _vcomp_atomic_div_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old / val, old) != old);
}

void CDECL _vcomp_atomic_div_ui4(unsigned int *dest, unsigned int val)
{
    unsigned int old;
    do old = *dest; while (interlocked_cmpxchg((int *)dest, old / val, old) != old);
}

void CDECL _vcomp_atomic_mul_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old * val, old) != old);
}

void CDECL _vcomp_atomic_or_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old | val, old) != old);
}

void CDECL _vcomp_atomic_shl_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old << val, old) != old);
}

void CDECL _vcomp_atomic_shr_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old >> val, old) != old);
}

void CDECL _vcomp_atomic_shr_ui4(unsigned int *dest, unsigned int val)
{
    unsigned int old;
    do old = *dest; while (interlocked_cmpxchg((int *)dest, old >> val, old) != old);
}

void CDECL _vcomp_atomic_sub_i4(int *dest, int val)
{
    interlocked_xchg_add(dest, -val);
}

void CDECL _vcomp_atomic_xor_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old ^ val, old) != old);
}

static void CDECL _vcomp_atomic_bool_and_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old && val, old) != old);
}

static void CDECL _vcomp_atomic_bool_or_i4(int *dest, int val)
{
    int old;
    do old = *dest; while (interlocked_cmpxchg(dest, old ? old : (val != 0), old) != old);
}

void CDECL _vcomp_reduction_i4(unsigned int flags, int *dest, int val)
{
    static void (CDECL * const funcs[])(int *, int) =
    {
        _vcomp_atomic_add_i4,
        _vcomp_atomic_add_i4,
        _vcomp_atomic_mul_i4,
        _vcomp_atomic_and_i4,
        _vcomp_atomic_or_i4,
        _vcomp_atomic_xor_i4,
        _vcomp_atomic_bool_and_i4,
        _vcomp_atomic_bool_or_i4,
    };
    unsigned int op = (flags >> 8) & 0xf;
    op = min(op, sizeof(funcs)/sizeof(funcs[0]) - 1);
    funcs[op](dest, val);
}

void CDECL _vcomp_atomic_add_i8(LONG64 *dest, LONG64 val)
{
    LONG64 old;
    do old = *dest; while (interlocked_cmpxchg64(dest, old + val, old) != old);
}

void CDECL _vcomp_atomic_and_i8(LONG64 *dest, LONG64 val)
{
    LONG64 old;
    do old = *dest; while (interlocked_cmpxchg64(dest, old & val, old) != old);
}

void CDECL _vcomp_atomic_div_i8(LONG64 *dest, LONG64 val)
{
    LONG64 old;
    do old = *dest; while (interlocked_cmpxchg64(dest, old / val, old) != old);
}

void CDECL _vcomp_atomic_div_ui8(ULONG64 *dest, ULONG64 val)
{
    ULONG64 old;
    do old = *dest; while (interlocked_cmpxchg64((LONG64 *)dest, old / val, old) != old);
}

void CDECL _vcomp_atomic_mul_i8(LONG64 *dest, LONG64 val)
{
    LONG64 old;
    do old = *dest; while (interlocked_cmpxchg64(dest, old * val, old) != old);
}

void CDECL _vcomp_atomic_or_i8(LONG64 *dest, LONG64 val)
{
    LONG64 old;
    do old = *dest; while (interlocked_cmpxchg64(dest, old | val, old) != old);
}

void CDECL _vcomp_atomic_shl_i8(LONG64 *dest, unsigned int val)
{
    LONG64 old;
    do old = *dest; while (interlocked_cmpxchg64(dest, old << val, old) != old);
}

void CDECL _vcomp_atomic_shr_i8(LONG64 *dest, unsigned int val)
{
    LONG64 old;
    do old = *dest; while (interlocked_cmpxchg64(dest, old >> val, old) != old);
}

void CDECL _vcomp_atomic_shr_ui8(ULONG64 *dest, unsigned int val)
{
    ULONG64 old;
    do old = *dest; while (interlocked_cmpxchg64((LONG64 *)dest, old >> val, old) != old);
}

void CDECL _vcomp_atomic_sub_i8(LONG64 *dest, LONG64 val)
{
    LONG64 old;
    do old = *dest; while (interlocked_cmpxchg64(dest, old - val, old) != old);
}

void CDECL _vcomp_atomic_xor_i8(LONG64 *dest, LONG64 val)
{
    LONG64 old;
    do old = *dest; while (interlocked_cmpxchg64(dest, old ^ val, old) != old);
}

static void CDECL _vcomp_atomic_bool_and_i8(LONG64 *dest, LONG64 val)
{
    LONG64 old;
    do old = *dest; while (interlocked_cmpxchg64(dest, old && val, old) != old);
}

static void CDECL _vcomp_atomic_bool_or_i8(LONG64 *dest, LONG64 val)
{
    LONG64 old;
    do old = *dest; while (interlocked_cmpxchg64(dest, old ? old : (val != 0), old) != old);
}

void CDECL _vcomp_reduction_i8(unsigned int flags, LONG64 *dest, LONG64 val)
{
    static void (CDECL * const funcs[])(LONG64 *, LONG64) =
    {
        _vcomp_atomic_add_i8,
        _vcomp_atomic_add_i8,
        _vcomp_atomic_mul_i8,
        _vcomp_atomic_and_i8,
        _vcomp_atomic_or_i8,
        _vcomp_atomic_xor_i8,
        _vcomp_atomic_bool_and_i8,
        _vcomp_atomic_bool_or_i8,
    };
    unsigned int op = (flags >> 8) & 0xf;
    op = min(op, sizeof(funcs)/sizeof(funcs[0]) - 1);
    funcs[op](dest, val);
}

void CDECL _vcomp_atomic_add_r4(float *dest, float val)
{
    int old, new;
    do
    {
        old = *(int *)dest;
        *(float *)&new = *(float *)&old + val;
    }
    while (interlocked_cmpxchg((int *)dest, new, old) != old);
}

void CDECL _vcomp_atomic_div_r4(float *dest, float val)
{
    int old, new;
    do
    {
        old = *(int *)dest;
        *(float *)&new = *(float *)&old / val;
    }
    while (interlocked_cmpxchg((int *)dest, new, old) != old);
}

void CDECL _vcomp_atomic_mul_r4(float *dest, float val)
{
    int old, new;
    do
    {
        old = *(int *)dest;
        *(float *)&new = *(float *)&old * val;
    }
    while (interlocked_cmpxchg((int *)dest, new, old) != old);
}

void CDECL _vcomp_atomic_sub_r4(float *dest, float val)
{
    int old, new;
    do
    {
        old = *(int *)dest;
        *(float *)&new = *(float *)&old - val;
    }
    while (interlocked_cmpxchg((int *)dest, new, old) != old);
}

static void CDECL _vcomp_atomic_bool_and_r4(float *dest, float val)
{
    int old, new;
    do
    {
        old = *(int *)dest;
        *(float *)&new = (*(float *)&old != 0.0) ? (val != 0.0) : 0.0;
    }
    while (interlocked_cmpxchg((int *)dest, new, old) != old);
}

static void CDECL _vcomp_atomic_bool_or_r4(float *dest, float val)
{
    int old, new;
    do
    {
        old = *(int *)dest;
        *(float *)&new = (*(float *)&old != 0.0) ? *(float *)&old : (val != 0.0);
    }
    while (interlocked_cmpxchg((int *)dest, new, old) != old);
}

void CDECL _vcomp_reduction_r4(unsigned int flags, float *dest, float val)
{
    static void (CDECL * const funcs[])(float *, float) =
    {
        _vcomp_atomic_add_r4,
        _vcomp_atomic_add_r4,
        _vcomp_atomic_mul_r4,
        _vcomp_atomic_bool_or_r4,
        _vcomp_atomic_bool_or_r4,
        _vcomp_atomic_bool_or_r4,
        _vcomp_atomic_bool_and_r4,
        _vcomp_atomic_bool_or_r4,
    };
    unsigned int op = (flags >> 8) & 0xf;
    op = min(op, sizeof(funcs)/sizeof(funcs[0]) - 1);
    funcs[op](dest, val);
}

void CDECL _vcomp_atomic_add_r8(double *dest, double val)
{
    LONG64 old, new;
    do
    {
        old = *(LONG64 *)dest;
        *(double *)&new = *(double *)&old + val;
    }
    while (interlocked_cmpxchg64((LONG64 *)dest, new, old) != old);
}

void CDECL _vcomp_atomic_div_r8(double *dest, double val)
{
    LONG64 old, new;
    do
    {
        old = *(LONG64 *)dest;
        *(double *)&new = *(double *)&old / val;
    }
    while (interlocked_cmpxchg64((LONG64 *)dest, new, old) != old);
}

void CDECL _vcomp_atomic_mul_r8(double *dest, double val)
{
    LONG64 old, new;
    do
    {
        old = *(LONG64 *)dest;
        *(double *)&new = *(double *)&old * val;
    }
    while (interlocked_cmpxchg64((LONG64 *)dest, new, old) != old);
}

void CDECL _vcomp_atomic_sub_r8(double *dest, double val)
{
    LONG64 old, new;
    do
    {
        old = *(LONG64 *)dest;
        *(double *)&new = *(double *)&old - val;
    }
    while (interlocked_cmpxchg64((LONG64 *)dest, new, old) != old);
}

static void CDECL _vcomp_atomic_bool_and_r8(double *dest, double val)
{
    LONG64 old, new;
    do
    {
        old = *(LONG64 *)dest;
        *(double *)&new = (*(double *)&old != 0.0) ? (val != 0.0) : 0.0;
    }
    while (interlocked_cmpxchg64((LONG64 *)dest, new, old) != old);
}

static void CDECL _vcomp_atomic_bool_or_r8(double *dest, double val)
{
    LONG64 old, new;
    do
    {
        old = *(LONG64 *)dest;
        *(double *)&new = (*(double *)&old != 0.0) ? *(double *)&old : (val != 0.0);
    }
    while (interlocked_cmpxchg64((LONG64 *)dest, new, old) != old);
}

void CDECL _vcomp_reduction_r8(unsigned int flags, double *dest, double val)
{
    static void (CDECL * const funcs[])(double *, double) =
    {
        _vcomp_atomic_add_r8,
        _vcomp_atomic_add_r8,
        _vcomp_atomic_mul_r8,
        _vcomp_atomic_bool_or_r8,
        _vcomp_atomic_bool_or_r8,
        _vcomp_atomic_bool_or_r8,
        _vcomp_atomic_bool_and_r8,
        _vcomp_atomic_bool_or_r8,
    };
    unsigned int op = (flags >> 8) & 0xf;
    op = min(op, sizeof(funcs)/sizeof(funcs[0]) - 1);
    funcs[op](dest, val);
}

int CDECL omp_get_dynamic(void)
{
    TRACE("stub\n");
    return 0;
}

int CDECL omp_get_max_threads(void)
{
    return vcomp_max_threads;
}

int CDECL omp_get_nested(void)
{
    return vcomp_nested_fork;
}

int CDECL omp_get_num_procs(void)
{
    TRACE("stub\n");
    return 1;
}

int CDECL omp_get_num_threads(void)
{
    struct vcomp_team_data *team_data = vcomp_init_thread_data()->team;
    return team_data ? team_data->num_threads : 1;
}

int CDECL omp_get_thread_num(void)
{
    return vcomp_init_thread_data()->thread_num;
}

int CDECL _vcomp_get_thread_num(void)
{
    return vcomp_init_thread_data()->thread_num;
}

/* Time in seconds since "some time in the past" */
double CDECL omp_get_wtime(void)
{
    return GetTickCount() / 1000.0;
}
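
/* GetTickCount counts milliseconds but typically advances in ~10-16 ms steps and
 * wraps after about 49.7 days, so this wall clock is fairly coarse. */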

void CDECL omp_set_dynamic(int val)
{
    TRACE("(%d): stub\n", val);
}

void CDECL omp_set_nested(int nested)
{
    TRACE("(%d)\n", nested);
    vcomp_nested_fork = (nested != 0);
}

void CDECL omp_set_num_threads(int num_threads)
{
    TRACE("(%d)\n", num_threads);
    if (num_threads >= 1)
        vcomp_num_threads = num_threads;
}

void CDECL _vcomp_flush(void)
{
    TRACE("(): stub\n");
}

void CDECL _vcomp_barrier(void)
{
    struct vcomp_team_data *team_data = vcomp_init_thread_data()->team;

    if (!team_data) return;

    EnterCriticalSection(&vcomp_section);
    if (++team_data->barrier_count >= team_data->num_threads)
    {
        team_data->barrier++;
        team_data->barrier_count = 0;
        WakeAllConditionVariable(&team_data->cond);
    }
    else
    {
        unsigned int barrier = team_data->barrier;
        while (team_data->barrier == barrier)
            SleepConditionVariableCS(&team_data->cond, &vcomp_section, INFINITE);
    }
    LeaveCriticalSection(&vcomp_section);
}
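
/* The barrier is generational: the last thread to arrive increments
 * team_data->barrier and wakes the team, while earlier arrivals sleep until the
 * generation counter changes, so the same barrier can be reused repeatedly. */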

void CDECL _vcomp_set_num_threads(int num_threads)
{
    TRACE("(%d)\n", num_threads);
    if (num_threads >= 1)
        vcomp_init_thread_data()->fork_threads = num_threads;
}

int CDECL _vcomp_master_begin(void)
{
    return !vcomp_init_thread_data()->thread_num;
}

void CDECL _vcomp_master_end(void)
{
    /* nothing to do here */
}

int CDECL _vcomp_single_begin(int flags)
{
    struct vcomp_thread_data *thread_data = vcomp_init_thread_data();
    struct vcomp_task_data *task_data = thread_data->task;
    int ret = FALSE;

    TRACE("(%x): semi-stub\n", flags);

    EnterCriticalSection(&vcomp_section);
    thread_data->single++;
    if ((int)(thread_data->single - task_data->single) > 0)
    {
        task_data->single = thread_data->single;
        ret = TRUE;
    }
    LeaveCriticalSection(&vcomp_section);

    return ret;
}

void CDECL _vcomp_single_end(void)
{
    /* nothing to do here */
}
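
/* A "single" block is elected by racing the per-thread counter against the
 * shared task counter under vcomp_section: the thread that advances the shared
 * counter first returns TRUE and runs the block. Sections use the same idea. */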

void CDECL _vcomp_sections_init(int n)
{
    struct vcomp_thread_data *thread_data = vcomp_init_thread_data();
    struct vcomp_task_data *task_data = thread_data->task;

    EnterCriticalSection(&vcomp_section);
    thread_data->section++;
    if ((int)(thread_data->section - task_data->section) > 0)
    {
        task_data->section       = thread_data->section;
        task_data->num_sections  = n;
        task_data->section_index = 0;
    }
    LeaveCriticalSection(&vcomp_section);
}

int CDECL _vcomp_sections_next(void)
{
    struct vcomp_thread_data *thread_data = vcomp_init_thread_data();
    struct vcomp_task_data *task_data = thread_data->task;
    int i = -1;

    EnterCriticalSection(&vcomp_section);
    if (thread_data->section == task_data->section &&
        task_data->section_index != task_data->num_sections)
    {
        i = task_data->section_index++;
    }
    LeaveCriticalSection(&vcomp_section);
    return i;
}
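
/* Team members call _vcomp_sections_next repeatedly; the shared section_index is
 * advanced under vcomp_section so every section body is handed out exactly once,
 * and a negative result signals that no sections remain. */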

void CDECL _vcomp_for_static_simple_init(unsigned int first, unsigned int last, int step,
                                         BOOL increment, unsigned int *begin, unsigned int *end)
{
    unsigned int iterations, per_thread, remaining;
    struct vcomp_thread_data *thread_data = vcomp_init_thread_data();
    struct vcomp_team_data *team_data = thread_data->team;
    int num_threads = team_data ? team_data->num_threads : 1;
    int thread_num = thread_data->thread_num;

    TRACE("(%u, %u, %d, %u, %p, %p)\n", first, last, step, increment, begin, end);

    if (num_threads == 1)
    {
        *begin = first;
        *end   = last;
        return;
    }

    if (step <= 0)
    {
        *begin = 0;
        *end   = increment ? -1 : 1;
        return;
    }

    if (increment)
        iterations = 1 + (last - first) / step;
    else
    {
        iterations = 1 + (first - last) / step;
        step *= -1;
    }

    per_thread = iterations / num_threads;
    remaining  = iterations - per_thread * num_threads;

    if (thread_num < remaining)
        per_thread++;
    else if (per_thread)
        first += remaining * step;
    else
    {
        *begin = first;
        *end   = first - step;
        return;
    }

    *begin = first + per_thread * thread_num * step;
    *end   = *begin + (per_thread - 1) * step;
}
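
/* Static scheduling: the iteration count is split evenly, the first "remaining"
 * threads take one extra iteration each, and a thread with no work receives an
 * empty range whose *end lies before *begin. */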

void CDECL _vcomp_for_static_init(int first, int last, int step, int chunksize, unsigned int *loops,
                                  int *begin, int *end, int *next, int *lastchunk)
{
    unsigned int iterations, num_chunks, per_thread, remaining;
    struct vcomp_thread_data *thread_data = vcomp_init_thread_data();
    struct vcomp_team_data *team_data = thread_data->team;
    int num_threads = team_data ? team_data->num_threads : 1;
    int thread_num = thread_data->thread_num;
    int no_begin, no_lastchunk;

    TRACE("(%d, %d, %d, %d, %p, %p, %p, %p, %p)\n",
          first, last, step, chunksize, loops, begin, end, next, lastchunk);

    if (!begin)
    {
        begin = &no_begin;
        lastchunk = &no_lastchunk;
    }

    if (num_threads == 1 && chunksize != 1)
    {
        *loops      = 1;
        *begin      = first;
        *end        = last;
        *next       = 0;
        *lastchunk  = first;
        return;
    }

    if (first == last)
    {
        *loops = !thread_num;
        if (!thread_num)
        {
            *begin      = first;
            *end        = last;
            *next       = 0;
            *lastchunk  = first;
        }
        return;
    }

    if (step <= 0)
    {
        *loops = 0;
        return;
    }

    if (first < last)
        iterations = 1 + (last - first) / step;
    else
    {
        iterations = 1 + (first - last) / step;
        step *= -1;
    }

    if (chunksize < 1)
        chunksize = 1;

    num_chunks  = ((DWORD64)iterations + chunksize - 1) / chunksize;
    per_thread  = num_chunks / num_threads;
    remaining   = num_chunks - per_thread * num_threads;

    *loops      = per_thread + (thread_num < remaining);
    *begin      = first + thread_num * chunksize * step;
    *end        = *begin + (chunksize - 1) * step;
    *next       = chunksize * num_threads * step;
    *lastchunk  = first + (num_chunks - 1) * chunksize * step;
}
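
/* Chunked static scheduling: iterations are grouped into chunks of "chunksize",
 * chunks are dealt to threads round-robin, the caller advances by *next between
 * its *loops chunks, and *lastchunk marks the start of the final (possibly
 * shorter) chunk. */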

void CDECL _vcomp_for_static_end(void)
{
    /* nothing to do here */
}

void CDECL _vcomp_for_dynamic_init(unsigned int flags, unsigned int first, unsigned int last,
                                   int step, unsigned int chunksize)
{
    unsigned int iterations, per_thread, remaining;
    struct vcomp_thread_data *thread_data = vcomp_init_thread_data();
    struct vcomp_team_data *team_data = thread_data->team;
    struct vcomp_task_data *task_data = thread_data->task;
    int num_threads = team_data ? team_data->num_threads : 1;
    int thread_num = thread_data->thread_num;
    unsigned int type = flags & ~VCOMP_DYNAMIC_FLAGS_INCREMENT;

    TRACE("(%u, %u, %u, %d, %u)\n", flags, first, last, step, chunksize);

    if (step <= 0)
    {
        thread_data->dynamic_type = 0;
        return;
    }

    if (flags & VCOMP_DYNAMIC_FLAGS_INCREMENT)
        iterations = 1 + (last - first) / step;
    else
    {
        iterations = 1 + (first - last) / step;
        step *= -1;
    }

    if (type == VCOMP_DYNAMIC_FLAGS_STATIC)
    {
        per_thread = iterations / num_threads;
        remaining  = iterations - per_thread * num_threads;

        if (thread_num < remaining)
            per_thread++;
        else if (per_thread)
            first += remaining * step;
        else
        {
            thread_data->dynamic_type = 0;
            return;
        }

        thread_data->dynamic_type   = VCOMP_DYNAMIC_FLAGS_STATIC;
        thread_data->dynamic_begin  = first + per_thread * thread_num * step;
        thread_data->dynamic_end    = thread_data->dynamic_begin + (per_thread - 1) * step;
    }
    else
    {
        if (type != VCOMP_DYNAMIC_FLAGS_CHUNKED &&
            type != VCOMP_DYNAMIC_FLAGS_GUIDED)
        {
            FIXME("unsupported flags %u\n", flags);
            type = VCOMP_DYNAMIC_FLAGS_GUIDED;
        }

        EnterCriticalSection(&vcomp_section);
        thread_data->dynamic++;
        thread_data->dynamic_type = type;
        if ((int)(thread_data->dynamic - task_data->dynamic) > 0)
        {
            task_data->dynamic              = thread_data->dynamic;
            task_data->dynamic_first        = first;
            task_data->dynamic_last         = last;
            task_data->dynamic_iterations   = iterations;
            task_data->dynamic_step         = step;
            task_data->dynamic_chunksize    = chunksize;
        }
        LeaveCriticalSection(&vcomp_section);
    }
}
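
/* For chunked and guided loops the remaining iteration range is kept in the
 * shared task data and handed out piecewise by _vcomp_for_dynamic_next; the
 * static variant instead precomputes this thread's whole range right here. */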

int CDECL _vcomp_for_dynamic_next(unsigned int *begin, unsigned int *end)
{
    struct vcomp_thread_data *thread_data = vcomp_init_thread_data();
    struct vcomp_task_data *task_data = thread_data->task;
    struct vcomp_team_data *team_data = thread_data->team;
    int num_threads = team_data ? team_data->num_threads : 1;

    TRACE("(%p, %p)\n", begin, end);

    if (thread_data->dynamic_type == VCOMP_DYNAMIC_FLAGS_STATIC)
    {
        *begin = thread_data->dynamic_begin;
        *end   = thread_data->dynamic_end;
        thread_data->dynamic_type = 0;
        return 1;
    }
    else if (thread_data->dynamic_type == VCOMP_DYNAMIC_FLAGS_CHUNKED ||
             thread_data->dynamic_type == VCOMP_DYNAMIC_FLAGS_GUIDED)
    {
        unsigned int iterations = 0;
        EnterCriticalSection(&vcomp_section);
        if (thread_data->dynamic == task_data->dynamic &&
            task_data->dynamic_iterations != 0)
        {
            iterations = min(task_data->dynamic_iterations, task_data->dynamic_chunksize);
            if (thread_data->dynamic_type == VCOMP_DYNAMIC_FLAGS_GUIDED &&
                task_data->dynamic_iterations > num_threads * task_data->dynamic_chunksize)
            {
                iterations = (task_data->dynamic_iterations + num_threads - 1) / num_threads;
            }
            *begin = task_data->dynamic_first;
            *end   = task_data->dynamic_first + (iterations - 1) * task_data->dynamic_step;
            task_data->dynamic_iterations -= iterations;
            task_data->dynamic_first      += iterations * task_data->dynamic_step;
            if (!task_data->dynamic_iterations)
                *end = task_data->dynamic_last;
        }
        LeaveCriticalSection(&vcomp_section);
        return iterations != 0;
    }

    return 0;
}
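
/* Chunked scheduling grabs at most dynamic_chunksize iterations per call, while
 * guided scheduling takes roughly iterations/num_threads while plenty of work
 * remains, so chunks shrink as the loop drains; the final chunk is clamped to
 * dynamic_last. */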

int CDECL omp_in_parallel(void)
{
    return vcomp_init_thread_data()->parallel;
}

static DWORD WINAPI _vcomp_fork_worker(void *param)
{
    struct vcomp_thread_data *thread_data = param;
    vcomp_set_thread_data(thread_data);

    TRACE("starting worker thread for %p\n", thread_data);

    EnterCriticalSection(&vcomp_section);
    for (;;)
    {
        struct vcomp_team_data *team = thread_data->team;
        if (team != NULL)
        {
            LeaveCriticalSection(&vcomp_section);
            _vcomp_fork_call_wrapper(team->wrapper, team->nargs, team->valist);
            EnterCriticalSection(&vcomp_section);

            thread_data->team = NULL;
            list_remove(&thread_data->entry);
            list_add_tail(&vcomp_idle_threads, &thread_data->entry);
            if (++team->finished_threads >= team->num_threads)
                WakeAllConditionVariable(&team->cond);
        }

        if (!SleepConditionVariableCS(&thread_data->cond, &vcomp_section, 5000) &&
            GetLastError() == ERROR_TIMEOUT && !thread_data->team)
        {
            break;
        }
    }
    list_remove(&thread_data->entry);
    LeaveCriticalSection(&vcomp_section);

    TRACE("terminating worker thread for %p\n", thread_data);

    HeapFree(GetProcessHeap(), 0, thread_data);
    vcomp_set_thread_data(NULL);
    FreeLibraryAndExitThread(vcomp_module, 0);
    return 0;
}
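
/* Workers park on vcomp_idle_threads between parallel regions and are woken via
 * their condition variable; a worker that stays idle for 5 seconds cleans itself
 * up and releases the module reference it holds on vcomp. */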

void WINAPIV _vcomp_fork(BOOL ifval, int nargs, void *wrapper, ...)
{
    struct vcomp_thread_data *prev_thread_data = vcomp_init_thread_data();
    struct vcomp_thread_data thread_data;
    struct vcomp_team_data team_data;
    struct vcomp_task_data task_data;
    int num_threads;

    TRACE("(%d, %d, %p, ...)\n", ifval, nargs, wrapper);

    if (prev_thread_data->parallel && !vcomp_nested_fork)
        ifval = FALSE;

    if (!ifval)
        num_threads = 1;
    else if (prev_thread_data->fork_threads)
        num_threads = prev_thread_data->fork_threads;
    else
        num_threads = vcomp_num_threads;

    InitializeConditionVariable(&team_data.cond);
    team_data.num_threads       = 1;
    team_data.finished_threads  = 0;
    team_data.nargs             = nargs;
    team_data.wrapper           = wrapper;
    __ms_va_start(team_data.valist, wrapper);
    team_data.barrier           = 0;
    team_data.barrier_count     = 0;

    task_data.single            = 0;
    task_data.section           = 0;
    task_data.dynamic           = 0;

    thread_data.team            = &team_data;
    thread_data.task            = &task_data;
    thread_data.thread_num      = 0;
    thread_data.parallel        = ifval || prev_thread_data->parallel;
    thread_data.fork_threads    = 0;
    thread_data.single          = 1;
    thread_data.section         = 1;
    thread_data.dynamic         = 1;
    thread_data.dynamic_type    = 0;
    list_init(&thread_data.entry);
    InitializeConditionVariable(&thread_data.cond);

    if (num_threads > 1)
    {
        struct list *ptr;
        EnterCriticalSection(&vcomp_section);

        /* reuse existing threads (if any) */
        while (team_data.num_threads < num_threads && (ptr = list_head(&vcomp_idle_threads)))
        {
            struct vcomp_thread_data *data = LIST_ENTRY(ptr, struct vcomp_thread_data, entry);
            data->team          = &team_data;
            data->task          = &task_data;
            data->thread_num    = team_data.num_threads++;
            data->parallel      = thread_data.parallel;
            data->fork_threads  = 0;
            data->single        = 1;
            data->section       = 1;
            data->dynamic       = 1;
            data->dynamic_type  = 0;
            list_remove(&data->entry);
            list_add_tail(&thread_data.entry, &data->entry);
            WakeAllConditionVariable(&data->cond);
        }

        /* spawn additional threads */
        while (team_data.num_threads < num_threads)
        {
            struct vcomp_thread_data *data;
            HANDLE thread;
            HMODULE module;

            data = HeapAlloc(GetProcessHeap(), 0, sizeof(*data));
            if (!data) break;

            data->team          = &team_data;
            data->task          = &task_data;
            data->thread_num    = team_data.num_threads;
            data->parallel      = thread_data.parallel;
            data->fork_threads  = 0;
            data->single        = 1;
            data->section       = 1;
            data->dynamic       = 1;
            data->dynamic_type  = 0;
            InitializeConditionVariable(&data->cond);

            thread = CreateThread(NULL, 0, _vcomp_fork_worker, data, 0, NULL);
            if (!thread)
            {
                HeapFree(GetProcessHeap(), 0, data);
                break;
            }

            GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS,
                               (const WCHAR *)vcomp_module, &module);
            team_data.num_threads++;
            list_add_tail(&thread_data.entry, &data->entry);
            CloseHandle(thread);
        }

        LeaveCriticalSection(&vcomp_section);
    }

    vcomp_set_thread_data(&thread_data);
    _vcomp_fork_call_wrapper(team_data.wrapper, team_data.nargs, team_data.valist);
    vcomp_set_thread_data(prev_thread_data);
    prev_thread_data->fork_threads = 0;

    if (team_data.num_threads > 1)
    {
        EnterCriticalSection(&vcomp_section);

        team_data.finished_threads++;
        while (team_data.finished_threads < team_data.num_threads)
            SleepConditionVariableCS(&team_data.cond, &vcomp_section, INFINITE);

        LeaveCriticalSection(&vcomp_section);
        assert(list_empty(&thread_data.entry));
    }

    __ms_va_end(team_data.valist);
}
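
/* The forking thread always takes part as thread 0. Idle workers are reused
 * before new ones are spawned, each spawned worker pins vcomp_module via
 * GetModuleHandleExW so the DLL cannot be unloaded beneath it, and the master
 * waits until every team member has finished before returning. */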

static CRITICAL_SECTION *alloc_critsect(void)
{
    CRITICAL_SECTION *critsect;
    if (!(critsect = HeapAlloc(GetProcessHeap(), 0, sizeof(*critsect))))
    {
        ERR("could not allocate critical section\n");
        ExitProcess(1);
    }

    InitializeCriticalSection(critsect);
    critsect->DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": critsect");
    return critsect;
}

static void destroy_critsect(CRITICAL_SECTION *critsect)
{
    if (!critsect) return;

    critsect->DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(critsect);
    HeapFree(GetProcessHeap(), 0, critsect);
}
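
/* OpenMP locks and named critical sections are backed by ordinary Win32 critical
 * sections, allocated on demand and tagged with a debug name. */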

void CDECL omp_init_lock(omp_lock_t *lock)
{
    TRACE("(%p)\n", lock);
    *lock = alloc_critsect();
}

void CDECL omp_destroy_lock(omp_lock_t *lock)
{
    TRACE("(%p)\n", lock);
    destroy_critsect(*lock);
}

void CDECL omp_set_lock(omp_lock_t *lock)
{
    TRACE("(%p)\n", lock);

    if (RtlIsCriticalSectionLockedByThread(*lock))
    {
        ERR("omp_set_lock called while holding lock %p\n", *lock);
        ExitProcess(1);
    }

    EnterCriticalSection(*lock);
}

void CDECL omp_unset_lock(omp_lock_t *lock)
{
    TRACE("(%p)\n", lock);
    LeaveCriticalSection(*lock);
}

int CDECL omp_test_lock(omp_lock_t *lock)
{
    TRACE("(%p)\n", lock);

    if (RtlIsCriticalSectionLockedByThread(*lock))
        return FALSE;

    return TryEnterCriticalSection(*lock);
}

void CDECL omp_set_nest_lock(omp_nest_lock_t *lock)
{
    TRACE("(%p)\n", lock);
    EnterCriticalSection(*lock);
}

void CDECL omp_unset_nest_lock(omp_nest_lock_t *lock)
{
    TRACE("(%p)\n", lock);
    LeaveCriticalSection(*lock);
}

int CDECL omp_test_nest_lock(omp_nest_lock_t *lock)
{
    TRACE("(%p)\n", lock);
    return TryEnterCriticalSection(*lock) ? (*lock)->RecursionCount : 0;
}

void CDECL _vcomp_enter_critsect(CRITICAL_SECTION **critsect)
{
    TRACE("(%p)\n", critsect);

    if (!*critsect)
    {
        CRITICAL_SECTION *new_critsect = alloc_critsect();
        if (interlocked_cmpxchg_ptr((void **)critsect, new_critsect, NULL) != NULL)
            destroy_critsect(new_critsect);  /* someone beat us to it */
    }

    EnterCriticalSection(*critsect);
}

void CDECL _vcomp_leave_critsect(CRITICAL_SECTION *critsect)
{
    TRACE("(%p)\n", critsect);
    LeaveCriticalSection(critsect);
}
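
/* Nestable locks rely on the recursion support built into CRITICAL_SECTION;
 * omp_test_nest_lock reports the new recursion depth on success, as the OpenMP
 * specification requires for nestable locks. */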

BOOL WINAPI DllMain(HINSTANCE instance, DWORD reason, LPVOID reserved)
{
    TRACE("(%p, %d, %p)\n", instance, reason, reserved);

    switch (reason)
    {
        case DLL_PROCESS_ATTACH:
        {
            SYSTEM_INFO sysinfo;

            if ((vcomp_context_tls = TlsAlloc()) == TLS_OUT_OF_INDEXES)
            {
                ERR("Failed to allocate TLS index\n");
                return FALSE;
            }

            GetSystemInfo(&sysinfo);
            vcomp_module      = instance;
            vcomp_max_threads = sysinfo.dwNumberOfProcessors;
            vcomp_num_threads = sysinfo.dwNumberOfProcessors;
            break;
        }

        case DLL_PROCESS_DETACH:
        {
            if (reserved) break;
            if (vcomp_context_tls != TLS_OUT_OF_INDEXES)
            {
                vcomp_free_thread_data();
                TlsFree(vcomp_context_tls);
            }
            break;
        }

        case DLL_THREAD_DETACH:
        {
            vcomp_free_thread_data();
            break;
        }
    }

    return TRUE;
}