/*
 * Concurrency namespace implementation
 *
 * Copyright 2017 Piotr Caban
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
26 #include "wine/debug.h"
32 WINE_DEFAULT_DEBUG_CHANNEL(msvcrt
);
34 typedef exception cexception
;
35 CREATE_EXCEPTION_OBJECT(cexception
)
37 static LONG context_id
= -1;
38 static LONG scheduler_id
= -1;
44 TargetOversubscriptionFactor
,
45 LocalContextCacheSize
,
49 DynamicProgressFeedback
,
55 struct _policy_container
{
56 unsigned int policies
[last_policy_id
];
61 const vtable_ptr
*vtable
;
63 #define call_Context_GetId(this) CALL_VTBL_FUNC(this, 0, \
64 unsigned int, (const Context*), (this))
65 #define call_Context_GetVirtualProcessorId(this) CALL_VTBL_FUNC(this, 4, \
66 unsigned int, (const Context*), (this))
67 #define call_Context_GetScheduleGroupId(this) CALL_VTBL_FUNC(this, 8, \
68 unsigned int, (const Context*), (this))
69 #define call_Context_dtor(this, flags) CALL_VTBL_FUNC(this, 20, \
70 Context*, (Context*, unsigned int), (this, flags))
76 union allocator_cache_entry
{
79 union allocator_cache_entry
*next
;
87 struct scheduler_list
{
88 struct Scheduler
*scheduler
;
89 struct scheduler_list
*next
;
94 struct scheduler_list scheduler
;
96 union allocator_cache_entry
*allocator_cache
[8];
97 } ExternalContextBase
;
98 extern const vtable_ptr ExternalContextBase_vtable
;
99 static void ExternalContextBase_ctor(ExternalContextBase
*);
101 typedef struct Scheduler
{
102 const vtable_ptr
*vtable
;
104 #define call_Scheduler_Id(this) CALL_VTBL_FUNC(this, 4, unsigned int, (const Scheduler*), (this))
105 #define call_Scheduler_GetNumberOfVirtualProcessors(this) CALL_VTBL_FUNC(this, 8, unsigned int, (const Scheduler*), (this))
106 #define call_Scheduler_GetPolicy(this,policy) CALL_VTBL_FUNC(this, 12, \
107 SchedulerPolicy*, (Scheduler*,SchedulerPolicy*), (this,policy))
108 #define call_Scheduler_Reference(this) CALL_VTBL_FUNC(this, 16, unsigned int, (Scheduler*), (this))
109 #define call_Scheduler_Release(this) CALL_VTBL_FUNC(this, 20, unsigned int, (Scheduler*), (this))
110 #define call_Scheduler_RegisterShutdownEvent(this,event) CALL_VTBL_FUNC(this, 24, void, (Scheduler*,HANDLE), (this,event))
111 #define call_Scheduler_Attach(this) CALL_VTBL_FUNC(this, 28, void, (Scheduler*), (this))
113 #define call_Scheduler_CreateScheduleGroup_loc(this,placement) CALL_VTBL_FUNC(this, 32, \
114 /*ScheduleGroup*/void*, (Scheduler*,/*location*/void*), (this,placement))
115 #define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 36, /*ScheduleGroup*/void*, (Scheduler*), (this))
116 #define call_Scheduler_ScheduleTask_loc(this,proc,data,placement) CALL_VTBL_FUNC(this, 40, \
117 void, (Scheduler*,void (__cdecl*)(void*),void*,/*location*/void*), (this,proc,data,placement))
118 #define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 44, \
119 void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
120 #define call_Scheduler_IsAvailableLocation(this,placement) CALL_VTBL_FUNC(this, 48, \
121 bool, (Scheduler*,const /*location*/void*), (this,placement))
123 #define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 32, /*ScheduleGroup*/void*, (Scheduler*), (this))
124 #define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 36, \
125 void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
132 unsigned int virt_proc_no
;
133 SchedulerPolicy policy
;
136 HANDLE
*shutdown_events
;
139 extern const vtable_ptr ThreadScheduler_vtable
;
142 Scheduler
*scheduler
;
157 typedef void (__cdecl
*yield_func
)(void);
163 SpinWait_state state
;
164 yield_func yield_func
;
167 /* keep in sync with msvcp90/msvcp90.h */
168 typedef struct cs_queue
170 struct cs_queue
*next
;
171 #if _MSVCR_VER >= 110
179 ULONG_PTR unk_thread_id
;
181 #if _MSVCR_VER >= 110
192 critical_section
*cs
;
200 } critical_section_scoped_lock
;
205 } _NonReentrantPPLLock
;
209 _NonReentrantPPLLock
*lock
;
217 } _NonReentrantPPLLock__Scoped_lock
;
228 _ReentrantPPLLock
*lock
;
236 } _ReentrantPPLLock__Scoped_lock
;
238 #define EVT_RUNNING (void*)1
239 #define EVT_WAITING NULL
242 typedef struct thread_wait_entry
244 struct thread_wait
*wait
;
245 struct thread_wait_entry
*next
;
246 struct thread_wait_entry
*prev
;
249 typedef struct thread_wait
253 thread_wait_entry entries
[1];
258 thread_wait_entry
*waiters
;
263 #if _MSVCR_VER >= 110
264 typedef struct cv_queue
{
265 struct cv_queue
*next
;
270 /* cv_queue structure is not binary compatible */
272 critical_section lock
;
273 } _Condition_variable
;
276 typedef struct rwl_queue
278 struct rwl_queue
*next
;
281 #define WRITER_WAITING 0x80000000
282 /* FIXME: reader_writer_lock structure is not binary compatible
283 * it can't exceed 28/56 bytes */
289 rwl_queue
*writer_head
;
290 rwl_queue
*writer_tail
;
291 rwl_queue
*reader_head
;
292 } reader_writer_lock
;
295 reader_writer_lock
*lock
;
296 } reader_writer_lock_scoped_lock
;
300 } _ReentrantBlockingLock
;
302 #define TICKSPERMSEC 10000
304 const vtable_ptr
*vtable
;
309 extern const vtable_ptr _Timer_vtable
;
310 #define call__Timer_callback(this) CALL_VTBL_FUNC(this, 4, void, (_Timer*), (this))
312 typedef exception improper_lock
;
313 extern const vtable_ptr improper_lock_vtable
;
315 typedef exception improper_scheduler_attach
;
316 extern const vtable_ptr improper_scheduler_attach_vtable
;
318 typedef exception improper_scheduler_detach
;
319 extern const vtable_ptr improper_scheduler_detach_vtable
;
321 typedef exception invalid_scheduler_policy_key
;
322 extern const vtable_ptr invalid_scheduler_policy_key_vtable
;
324 typedef exception invalid_scheduler_policy_thread_specification
;
325 extern const vtable_ptr invalid_scheduler_policy_thread_specification_vtable
;
327 typedef exception invalid_scheduler_policy_value
;
328 extern const vtable_ptr invalid_scheduler_policy_value_vtable
;
333 } scheduler_resource_allocation_error
;
334 extern const vtable_ptr scheduler_resource_allocation_error_vtable
;
336 enum ConcRT_EventType
338 CONCRT_EVENT_GENERIC
,
342 CONCRT_EVENT_UNBLOCK
,
348 static DWORD context_tls_index
= TLS_OUT_OF_INDEXES
;
350 static CRITICAL_SECTION default_scheduler_cs
;
351 static CRITICAL_SECTION_DEBUG default_scheduler_cs_debug
=
353 0, 0, &default_scheduler_cs
,
354 { &default_scheduler_cs_debug
.ProcessLocksList
, &default_scheduler_cs_debug
.ProcessLocksList
},
355 0, 0, { (DWORD_PTR
)(__FILE__
": default_scheduler_cs") }
357 static CRITICAL_SECTION default_scheduler_cs
= { &default_scheduler_cs_debug
, -1, 0, 0, 0, 0 };
358 static SchedulerPolicy default_scheduler_policy
;
359 static ThreadScheduler
*default_scheduler
;
361 static HANDLE keyed_event
;
363 static void create_default_scheduler(void);
365 /* ??0improper_lock@Concurrency@@QAE@PBD@Z */
366 /* ??0improper_lock@Concurrency@@QEAA@PEBD@Z */
367 DEFINE_THISCALL_WRAPPER(improper_lock_ctor_str
, 8)
368 improper_lock
* __thiscall
improper_lock_ctor_str(improper_lock
*this, const char *str
)
370 TRACE("(%p %p)\n", this, str
);
371 return __exception_ctor(this, str
, &improper_lock_vtable
);
374 /* ??0improper_lock@Concurrency@@QAE@XZ */
375 /* ??0improper_lock@Concurrency@@QEAA@XZ */
376 DEFINE_THISCALL_WRAPPER(improper_lock_ctor
, 4)
377 improper_lock
* __thiscall
improper_lock_ctor(improper_lock
*this)
379 return improper_lock_ctor_str(this, NULL
);
382 DEFINE_THISCALL_WRAPPER(improper_lock_copy_ctor
,8)
383 improper_lock
* __thiscall
improper_lock_copy_ctor(improper_lock
*this, const improper_lock
*rhs
)
385 TRACE("(%p %p)\n", this, rhs
);
386 return __exception_copy_ctor(this, rhs
, &improper_lock_vtable
);
389 /* ??0improper_scheduler_attach@Concurrency@@QAE@PBD@Z */
390 /* ??0improper_scheduler_attach@Concurrency@@QEAA@PEBD@Z */
391 DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor_str
, 8)
392 improper_scheduler_attach
* __thiscall
improper_scheduler_attach_ctor_str(
393 improper_scheduler_attach
*this, const char *str
)
395 TRACE("(%p %p)\n", this, str
);
396 return __exception_ctor(this, str
, &improper_scheduler_attach_vtable
);
399 /* ??0improper_scheduler_attach@Concurrency@@QAE@XZ */
400 /* ??0improper_scheduler_attach@Concurrency@@QEAA@XZ */
401 DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor
, 4)
402 improper_scheduler_attach
* __thiscall
improper_scheduler_attach_ctor(
403 improper_scheduler_attach
*this)
405 return improper_scheduler_attach_ctor_str(this, NULL
);
408 DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_copy_ctor
,8)
409 improper_scheduler_attach
* __thiscall
improper_scheduler_attach_copy_ctor(
410 improper_scheduler_attach
* _this
, const improper_scheduler_attach
* rhs
)
412 TRACE("(%p %p)\n", _this
, rhs
);
413 return __exception_copy_ctor(_this
, rhs
, &improper_scheduler_attach_vtable
);
416 /* ??0improper_scheduler_detach@Concurrency@@QAE@PBD@Z */
417 /* ??0improper_scheduler_detach@Concurrency@@QEAA@PEBD@Z */
418 DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor_str
, 8)
419 improper_scheduler_detach
* __thiscall
improper_scheduler_detach_ctor_str(
420 improper_scheduler_detach
*this, const char *str
)
422 TRACE("(%p %p)\n", this, str
);
423 return __exception_ctor(this, str
, &improper_scheduler_detach_vtable
);
426 /* ??0improper_scheduler_detach@Concurrency@@QAE@XZ */
427 /* ??0improper_scheduler_detach@Concurrency@@QEAA@XZ */
428 DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor
, 4)
429 improper_scheduler_detach
* __thiscall
improper_scheduler_detach_ctor(
430 improper_scheduler_detach
*this)
432 return improper_scheduler_detach_ctor_str(this, NULL
);
435 DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_copy_ctor
,8)
436 improper_scheduler_detach
* __thiscall
improper_scheduler_detach_copy_ctor(
437 improper_scheduler_detach
* _this
, const improper_scheduler_detach
* rhs
)
439 TRACE("(%p %p)\n", _this
, rhs
);
440 return __exception_copy_ctor(_this
, rhs
, &improper_scheduler_detach_vtable
);
443 /* ??0invalid_scheduler_policy_key@Concurrency@@QAE@PBD@Z */
444 /* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@PEBD@Z */
445 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor_str
, 8)
446 invalid_scheduler_policy_key
* __thiscall
invalid_scheduler_policy_key_ctor_str(
447 invalid_scheduler_policy_key
*this, const char *str
)
449 TRACE("(%p %p)\n", this, str
);
450 return __exception_ctor(this, str
, &invalid_scheduler_policy_key_vtable
);
453 /* ??0invalid_scheduler_policy_key@Concurrency@@QAE@XZ */
454 /* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@XZ */
455 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor
, 4)
456 invalid_scheduler_policy_key
* __thiscall
invalid_scheduler_policy_key_ctor(
457 invalid_scheduler_policy_key
*this)
459 return invalid_scheduler_policy_key_ctor_str(this, NULL
);
462 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_copy_ctor
,8)
463 invalid_scheduler_policy_key
* __thiscall
invalid_scheduler_policy_key_copy_ctor(
464 invalid_scheduler_policy_key
* _this
, const invalid_scheduler_policy_key
* rhs
)
466 TRACE("(%p %p)\n", _this
, rhs
);
467 return __exception_copy_ctor(_this
, rhs
, &invalid_scheduler_policy_key_vtable
);
470 /* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@PBD@Z */
471 /* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@PEBD@Z */
472 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor_str
, 8)
473 invalid_scheduler_policy_thread_specification
* __thiscall
invalid_scheduler_policy_thread_specification_ctor_str(
474 invalid_scheduler_policy_thread_specification
*this, const char *str
)
476 TRACE("(%p %p)\n", this, str
);
477 return __exception_ctor(this, str
, &invalid_scheduler_policy_thread_specification_vtable
);
480 /* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@XZ */
481 /* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@XZ */
482 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor
, 4)
483 invalid_scheduler_policy_thread_specification
* __thiscall
invalid_scheduler_policy_thread_specification_ctor(
484 invalid_scheduler_policy_thread_specification
*this)
486 return invalid_scheduler_policy_thread_specification_ctor_str(this, NULL
);
489 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_copy_ctor
,8)
490 invalid_scheduler_policy_thread_specification
* __thiscall
invalid_scheduler_policy_thread_specification_copy_ctor(
491 invalid_scheduler_policy_thread_specification
* _this
, const invalid_scheduler_policy_thread_specification
* rhs
)
493 TRACE("(%p %p)\n", _this
, rhs
);
494 return __exception_copy_ctor(_this
, rhs
, &invalid_scheduler_policy_thread_specification_vtable
);
497 /* ??0invalid_scheduler_policy_value@Concurrency@@QAE@PBD@Z */
498 /* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@PEBD@Z */
499 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor_str
, 8)
500 invalid_scheduler_policy_value
* __thiscall
invalid_scheduler_policy_value_ctor_str(
501 invalid_scheduler_policy_value
*this, const char *str
)
503 TRACE("(%p %p)\n", this, str
);
504 return __exception_ctor(this, str
, &invalid_scheduler_policy_value_vtable
);
507 /* ??0invalid_scheduler_policy_value@Concurrency@@QAE@XZ */
508 /* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@XZ */
509 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor
, 4)
510 invalid_scheduler_policy_value
* __thiscall
invalid_scheduler_policy_value_ctor(
511 invalid_scheduler_policy_value
*this)
513 return invalid_scheduler_policy_value_ctor_str(this, NULL
);
516 DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_copy_ctor
,8)
517 invalid_scheduler_policy_value
* __thiscall
invalid_scheduler_policy_value_copy_ctor(
518 invalid_scheduler_policy_value
* _this
, const invalid_scheduler_policy_value
* rhs
)
520 TRACE("(%p %p)\n", _this
, rhs
);
521 return __exception_copy_ctor(_this
, rhs
, &invalid_scheduler_policy_value_vtable
);
524 /* ??0scheduler_resource_allocation_error@Concurrency@@QAE@PBDJ@Z */
525 /* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@PEBDJ@Z */
526 DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor_name
, 12)
527 scheduler_resource_allocation_error
* __thiscall
scheduler_resource_allocation_error_ctor_name(
528 scheduler_resource_allocation_error
*this, const char *name
, HRESULT hr
)
530 TRACE("(%p %s %lx)\n", this, wine_dbgstr_a(name
), hr
);
531 __exception_ctor(&this->e
, name
, &scheduler_resource_allocation_error_vtable
);
536 /* ??0scheduler_resource_allocation_error@Concurrency@@QAE@J@Z */
537 /* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@J@Z */
538 DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor
, 8)
539 scheduler_resource_allocation_error
* __thiscall
scheduler_resource_allocation_error_ctor(
540 scheduler_resource_allocation_error
*this, HRESULT hr
)
542 return scheduler_resource_allocation_error_ctor_name(this, NULL
, hr
);
545 DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_copy_ctor
,8)
546 scheduler_resource_allocation_error
* __thiscall
scheduler_resource_allocation_error_copy_ctor(
547 scheduler_resource_allocation_error
*this,
548 const scheduler_resource_allocation_error
*rhs
)
550 TRACE("(%p,%p)\n", this, rhs
);
553 memcpy(this, rhs
, sizeof(*this));
555 scheduler_resource_allocation_error_ctor_name(this, rhs
->e
.name
, rhs
->hr
);
559 /* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QBEJXZ */
560 /* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QEBAJXZ */
561 DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_get_error_code
, 4)
562 HRESULT __thiscall
scheduler_resource_allocation_error_get_error_code(
563 const scheduler_resource_allocation_error
*this)
565 TRACE("(%p)\n", this);
569 DEFINE_RTTI_DATA1(improper_lock
, 0, &cexception_rtti_base_descriptor
,
570 ".?AVimproper_lock@Concurrency@@")
571 DEFINE_RTTI_DATA1(improper_scheduler_attach
, 0, &cexception_rtti_base_descriptor
,
572 ".?AVimproper_scheduler_attach@Concurrency@@")
573 DEFINE_RTTI_DATA1(improper_scheduler_detach
, 0, &cexception_rtti_base_descriptor
,
574 ".?AVimproper_scheduler_detach@Concurrency@@")
575 DEFINE_RTTI_DATA1(invalid_scheduler_policy_key
, 0, &cexception_rtti_base_descriptor
,
576 ".?AVinvalid_scheduler_policy_key@Concurrency@@")
577 DEFINE_RTTI_DATA1(invalid_scheduler_policy_thread_specification
, 0, &cexception_rtti_base_descriptor
,
578 ".?AVinvalid_scheduler_policy_thread_specification@Concurrency@@")
579 DEFINE_RTTI_DATA1(invalid_scheduler_policy_value
, 0, &cexception_rtti_base_descriptor
,
580 ".?AVinvalid_scheduler_policy_value@Concurrency@@")
581 DEFINE_RTTI_DATA1(scheduler_resource_allocation_error
, 0, &cexception_rtti_base_descriptor
,
582 ".?AVscheduler_resource_allocation_error@Concurrency@@")
584 DEFINE_CXX_DATA1(improper_lock
, &cexception_cxx_type_info
, cexception_dtor
)
585 DEFINE_CXX_DATA1(improper_scheduler_attach
, &cexception_cxx_type_info
, cexception_dtor
)
586 DEFINE_CXX_DATA1(improper_scheduler_detach
, &cexception_cxx_type_info
, cexception_dtor
)
587 DEFINE_CXX_DATA1(invalid_scheduler_policy_key
, &cexception_cxx_type_info
, cexception_dtor
)
588 DEFINE_CXX_DATA1(invalid_scheduler_policy_thread_specification
, &cexception_cxx_type_info
, cexception_dtor
)
589 DEFINE_CXX_DATA1(invalid_scheduler_policy_value
, &cexception_cxx_type_info
, cexception_dtor
)
590 DEFINE_CXX_DATA1(scheduler_resource_allocation_error
, &cexception_cxx_type_info
, cexception_dtor
)
592 __ASM_BLOCK_BEGIN(concurrency_exception_vtables
)
593 __ASM_VTABLE(improper_lock
,
594 VTABLE_ADD_FUNC(cexception_vector_dtor
)
595 VTABLE_ADD_FUNC(cexception_what
));
596 __ASM_VTABLE(improper_scheduler_attach
,
597 VTABLE_ADD_FUNC(cexception_vector_dtor
)
598 VTABLE_ADD_FUNC(cexception_what
));
599 __ASM_VTABLE(improper_scheduler_detach
,
600 VTABLE_ADD_FUNC(cexception_vector_dtor
)
601 VTABLE_ADD_FUNC(cexception_what
));
602 __ASM_VTABLE(invalid_scheduler_policy_key
,
603 VTABLE_ADD_FUNC(cexception_vector_dtor
)
604 VTABLE_ADD_FUNC(cexception_what
));
605 __ASM_VTABLE(invalid_scheduler_policy_thread_specification
,
606 VTABLE_ADD_FUNC(cexception_vector_dtor
)
607 VTABLE_ADD_FUNC(cexception_what
));
608 __ASM_VTABLE(invalid_scheduler_policy_value
,
609 VTABLE_ADD_FUNC(cexception_vector_dtor
)
610 VTABLE_ADD_FUNC(cexception_what
));
611 __ASM_VTABLE(scheduler_resource_allocation_error
,
612 VTABLE_ADD_FUNC(cexception_vector_dtor
)
613 VTABLE_ADD_FUNC(cexception_what
));
616 static Context
* try_get_current_context(void)
618 if (context_tls_index
== TLS_OUT_OF_INDEXES
)
620 return TlsGetValue(context_tls_index
);
623 static BOOL WINAPI
init_context_tls_index(INIT_ONCE
*once
, void *param
, void **context
)
625 context_tls_index
= TlsAlloc();
626 return context_tls_index
!= TLS_OUT_OF_INDEXES
;
629 static Context
* get_current_context(void)
631 static INIT_ONCE init_once
= INIT_ONCE_STATIC_INIT
;
634 if(!InitOnceExecuteOnce(&init_once
, init_context_tls_index
, NULL
, NULL
))
636 scheduler_resource_allocation_error e
;
637 scheduler_resource_allocation_error_ctor_name(&e
, NULL
,
638 HRESULT_FROM_WIN32(GetLastError()));
639 _CxxThrowException(&e
, &scheduler_resource_allocation_error_exception_type
);
642 ret
= TlsGetValue(context_tls_index
);
644 ExternalContextBase
*context
= operator_new(sizeof(ExternalContextBase
));
645 ExternalContextBase_ctor(context
);
646 TlsSetValue(context_tls_index
, context
);
647 ret
= &context
->context
;
652 static Scheduler
* try_get_current_scheduler(void)
654 ExternalContextBase
*context
= (ExternalContextBase
*)try_get_current_context();
659 if (context
->context
.vtable
!= &ExternalContextBase_vtable
) {
660 ERR("unknown context set\n");
663 return context
->scheduler
.scheduler
;
666 static Scheduler
* get_current_scheduler(void)
668 ExternalContextBase
*context
= (ExternalContextBase
*)get_current_context();
670 if (context
->context
.vtable
!= &ExternalContextBase_vtable
) {
671 ERR("unknown context set\n");
674 return context
->scheduler
.scheduler
;
677 /* ?CurrentContext@Context@Concurrency@@SAPAV12@XZ */
678 /* ?CurrentContext@Context@Concurrency@@SAPEAV12@XZ */
679 Context
* __cdecl
Context_CurrentContext(void)
682 return get_current_context();
685 /* ?Id@Context@Concurrency@@SAIXZ */
686 unsigned int __cdecl
Context_Id(void)
688 Context
*ctx
= try_get_current_context();
690 return ctx
? call_Context_GetId(ctx
) : -1;
693 /* ?Block@Context@Concurrency@@SAXXZ */
694 void __cdecl
Context_Block(void)
699 /* ?Yield@Context@Concurrency@@SAXXZ */
700 /* ?_Yield@_Context@details@Concurrency@@SAXXZ */
701 void __cdecl
Context_Yield(void)
706 /* ?_SpinYield@Context@Concurrency@@SAXXZ */
707 void __cdecl
Context__SpinYield(void)
712 /* ?IsCurrentTaskCollectionCanceling@Context@Concurrency@@SA_NXZ */
713 bool __cdecl
Context_IsCurrentTaskCollectionCanceling(void)
719 /* ?Oversubscribe@Context@Concurrency@@SAX_N@Z */
720 void __cdecl
Context_Oversubscribe(bool begin
)
722 FIXME("(%x)\n", begin
);
725 /* ?ScheduleGroupId@Context@Concurrency@@SAIXZ */
726 unsigned int __cdecl
Context_ScheduleGroupId(void)
728 Context
*ctx
= try_get_current_context();
730 return ctx
? call_Context_GetScheduleGroupId(ctx
) : -1;
733 /* ?VirtualProcessorId@Context@Concurrency@@SAIXZ */
734 unsigned int __cdecl
Context_VirtualProcessorId(void)
736 Context
*ctx
= try_get_current_context();
738 return ctx
? call_Context_GetVirtualProcessorId(ctx
) : -1;
742 /* ?_CurrentContext@_Context@details@Concurrency@@SA?AV123@XZ */
743 _Context
*__cdecl
_Context__CurrentContext(_Context
*ret
)
745 TRACE("(%p)\n", ret
);
746 ret
->context
= Context_CurrentContext();
751 DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetId
, 4)
752 unsigned int __thiscall
ExternalContextBase_GetId(const ExternalContextBase
*this)
754 TRACE("(%p)->()\n", this);
758 DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetVirtualProcessorId
, 4)
759 unsigned int __thiscall
ExternalContextBase_GetVirtualProcessorId(const ExternalContextBase
*this)
761 FIXME("(%p)->() stub\n", this);
765 DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetScheduleGroupId
, 4)
766 unsigned int __thiscall
ExternalContextBase_GetScheduleGroupId(const ExternalContextBase
*this)
768 FIXME("(%p)->() stub\n", this);
772 DEFINE_THISCALL_WRAPPER(ExternalContextBase_Unblock
, 4)
773 void __thiscall
ExternalContextBase_Unblock(ExternalContextBase
*this)
775 FIXME("(%p)->() stub\n", this);
778 DEFINE_THISCALL_WRAPPER(ExternalContextBase_IsSynchronouslyBlocked
, 4)
779 bool __thiscall
ExternalContextBase_IsSynchronouslyBlocked(const ExternalContextBase
*this)
781 FIXME("(%p)->() stub\n", this);
785 static void ExternalContextBase_dtor(ExternalContextBase
*this)
787 struct scheduler_list
*scheduler_cur
, *scheduler_next
;
788 union allocator_cache_entry
*next
, *cur
;
791 /* TODO: move the allocator cache to scheduler so it can be reused */
792 for(i
=0; i
<ARRAY_SIZE(this->allocator_cache
); i
++) {
793 for(cur
= this->allocator_cache
[i
]; cur
; cur
=next
) {
794 next
= cur
->free
.next
;
795 operator_delete(cur
);
799 if (this->scheduler
.scheduler
) {
800 call_Scheduler_Release(this->scheduler
.scheduler
);
802 for(scheduler_cur
=this->scheduler
.next
; scheduler_cur
; scheduler_cur
=scheduler_next
) {
803 scheduler_next
= scheduler_cur
->next
;
804 call_Scheduler_Release(scheduler_cur
->scheduler
);
805 operator_delete(scheduler_cur
);
810 DEFINE_THISCALL_WRAPPER(ExternalContextBase_vector_dtor
, 8)
811 Context
* __thiscall
ExternalContextBase_vector_dtor(ExternalContextBase
*this, unsigned int flags
)
813 TRACE("(%p %x)\n", this, flags
);
815 /* we have an array, with the number of elements stored before the first object */
816 INT_PTR i
, *ptr
= (INT_PTR
*)this-1;
818 for(i
=*ptr
-1; i
>=0; i
--)
819 ExternalContextBase_dtor(this+i
);
820 operator_delete(ptr
);
822 ExternalContextBase_dtor(this);
824 operator_delete(this);
827 return &this->context
;
830 static void ExternalContextBase_ctor(ExternalContextBase
*this)
832 TRACE("(%p)->()\n", this);
834 memset(this, 0, sizeof(*this));
835 this->context
.vtable
= &ExternalContextBase_vtable
;
836 this->id
= InterlockedIncrement(&context_id
);
838 create_default_scheduler();
839 this->scheduler
.scheduler
= &default_scheduler
->scheduler
;
840 call_Scheduler_Reference(&default_scheduler
->scheduler
);
843 /* ?Alloc@Concurrency@@YAPAXI@Z */
844 /* ?Alloc@Concurrency@@YAPEAX_K@Z */
845 void * CDECL
Concurrency_Alloc(size_t size
)
847 ExternalContextBase
*context
= (ExternalContextBase
*)get_current_context();
848 union allocator_cache_entry
*p
;
850 size
+= FIELD_OFFSET(union allocator_cache_entry
, alloc
.mem
);
851 if (size
< sizeof(*p
))
854 if (context
->context
.vtable
!= &ExternalContextBase_vtable
) {
855 p
= operator_new(size
);
856 p
->alloc
.bucket
= -1;
860 C_ASSERT(sizeof(union allocator_cache_entry
) <= 1 << 4);
861 for(i
=0; i
<ARRAY_SIZE(context
->allocator_cache
); i
++)
862 if (1 << (i
+4) >= size
) break;
864 if(i
==ARRAY_SIZE(context
->allocator_cache
)) {
865 p
= operator_new(size
);
866 p
->alloc
.bucket
= -1;
867 }else if (context
->allocator_cache
[i
]) {
868 p
= context
->allocator_cache
[i
];
869 context
->allocator_cache
[i
] = p
->free
.next
;
872 p
= operator_new(1 << (i
+4));
877 TRACE("(%Iu) returning %p\n", size
, p
->alloc
.mem
);
881 /* ?Free@Concurrency@@YAXPAX@Z */
882 /* ?Free@Concurrency@@YAXPEAX@Z */
883 void CDECL
Concurrency_Free(void* mem
)
885 union allocator_cache_entry
*p
= (union allocator_cache_entry
*)((char*)mem
-FIELD_OFFSET(union allocator_cache_entry
, alloc
.mem
));
886 ExternalContextBase
*context
= (ExternalContextBase
*)get_current_context();
887 int bucket
= p
->alloc
.bucket
;
889 TRACE("(%p)\n", mem
);
891 if (context
->context
.vtable
!= &ExternalContextBase_vtable
) {
894 if(bucket
>= 0 && bucket
< ARRAY_SIZE(context
->allocator_cache
) &&
895 (!context
->allocator_cache
[bucket
] || context
->allocator_cache
[bucket
]->free
.depth
< 20)) {
896 p
->free
.next
= context
->allocator_cache
[bucket
];
897 p
->free
.depth
= p
->free
.next
? p
->free
.next
->free
.depth
+1 : 0;
898 context
->allocator_cache
[bucket
] = p
;
905 /* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QAEIW4PolicyElementKey@2@I@Z */
906 /* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QEAAIW4PolicyElementKey@2@I@Z */
907 DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetPolicyValue
, 12)
908 unsigned int __thiscall
SchedulerPolicy_SetPolicyValue(SchedulerPolicy
*this,
909 PolicyElementKey policy
, unsigned int val
)
913 TRACE("(%p %d %d)\n", this, policy
, val
);
915 if (policy
== MinConcurrency
) {
916 invalid_scheduler_policy_key e
;
917 invalid_scheduler_policy_key_ctor_str(&e
, "MinConcurrency");
918 _CxxThrowException(&e
, &invalid_scheduler_policy_key_exception_type
);
920 if (policy
== MaxConcurrency
) {
921 invalid_scheduler_policy_key e
;
922 invalid_scheduler_policy_key_ctor_str(&e
, "MaxConcurrency");
923 _CxxThrowException(&e
, &invalid_scheduler_policy_key_exception_type
);
925 if (policy
>= last_policy_id
) {
926 invalid_scheduler_policy_key e
;
927 invalid_scheduler_policy_key_ctor_str(&e
, "Invalid policy");
928 _CxxThrowException(&e
, &invalid_scheduler_policy_key_exception_type
);
934 invalid_scheduler_policy_value e
;
935 invalid_scheduler_policy_value_ctor_str(&e
, "SchedulerKind");
936 _CxxThrowException(&e
, &invalid_scheduler_policy_value_exception_type
);
939 case TargetOversubscriptionFactor
:
941 invalid_scheduler_policy_value e
;
942 invalid_scheduler_policy_value_ctor_str(&e
, "TargetOversubscriptionFactor");
943 _CxxThrowException(&e
, &invalid_scheduler_policy_value_exception_type
);
946 case ContextPriority
:
947 if (((int)val
< -7 /* THREAD_PRIORITY_REALTIME_LOWEST */
948 || val
> 6 /* THREAD_PRIORITY_REALTIME_HIGHEST */)
949 && val
!= THREAD_PRIORITY_IDLE
&& val
!= THREAD_PRIORITY_TIME_CRITICAL
950 && val
!= INHERIT_THREAD_PRIORITY
) {
951 invalid_scheduler_policy_value e
;
952 invalid_scheduler_policy_value_ctor_str(&e
, "ContextPriority");
953 _CxxThrowException(&e
, &invalid_scheduler_policy_value_exception_type
);
956 case SchedulingProtocol
:
957 case DynamicProgressFeedback
:
958 case WinRTInitialization
:
959 if (val
!= 0 && val
!= 1) {
960 invalid_scheduler_policy_value e
;
961 invalid_scheduler_policy_value_ctor_str(&e
, "SchedulingProtocol");
962 _CxxThrowException(&e
, &invalid_scheduler_policy_value_exception_type
);
969 ret
= this->policy_container
->policies
[policy
];
970 this->policy_container
->policies
[policy
] = val
;
974 /* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QAEXII@Z */
975 /* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QEAAXII@Z */
976 DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetConcurrencyLimits
, 12)
977 void __thiscall
SchedulerPolicy_SetConcurrencyLimits(SchedulerPolicy
*this,
978 unsigned int min_concurrency
, unsigned int max_concurrency
)
980 TRACE("(%p %d %d)\n", this, min_concurrency
, max_concurrency
);
982 if (min_concurrency
> max_concurrency
) {
983 invalid_scheduler_policy_thread_specification e
;
984 invalid_scheduler_policy_thread_specification_ctor_str(&e
, NULL
);
985 _CxxThrowException(&e
, &invalid_scheduler_policy_thread_specification_exception_type
);
987 if (!max_concurrency
) {
988 invalid_scheduler_policy_value e
;
989 invalid_scheduler_policy_value_ctor_str(&e
, "MaxConcurrency");
990 _CxxThrowException(&e
, &invalid_scheduler_policy_value_exception_type
);
993 this->policy_container
->policies
[MinConcurrency
] = min_concurrency
;
994 this->policy_container
->policies
[MaxConcurrency
] = max_concurrency
;
997 /* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QBEIW4PolicyElementKey@2@@Z */
998 /* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QEBAIW4PolicyElementKey@2@@Z */
999 DEFINE_THISCALL_WRAPPER(SchedulerPolicy_GetPolicyValue
, 8)
1000 unsigned int __thiscall
SchedulerPolicy_GetPolicyValue(
1001 const SchedulerPolicy
*this, PolicyElementKey policy
)
1003 TRACE("(%p %d)\n", this, policy
);
1005 if (policy
>= last_policy_id
) {
1006 invalid_scheduler_policy_key e
;
1007 invalid_scheduler_policy_key_ctor_str(&e
, "Invalid policy");
1008 _CxxThrowException(&e
, &invalid_scheduler_policy_key_exception_type
);
1010 return this->policy_container
->policies
[policy
];
1013 /* ??0SchedulerPolicy@Concurrency@@QAE@XZ */
1014 /* ??0SchedulerPolicy@Concurrency@@QEAA@XZ */
1015 DEFINE_THISCALL_WRAPPER(SchedulerPolicy_ctor
, 4)
1016 SchedulerPolicy
* __thiscall
SchedulerPolicy_ctor(SchedulerPolicy
*this)
1018 TRACE("(%p)\n", this);
1020 this->policy_container
= operator_new(sizeof(*this->policy_container
));
1021 /* TODO: default values can probably be affected by CurrentScheduler */
1022 this->policy_container
->policies
[SchedulerKind
] = 0;
1023 this->policy_container
->policies
[MaxConcurrency
] = -1;
1024 this->policy_container
->policies
[MinConcurrency
] = 1;
1025 this->policy_container
->policies
[TargetOversubscriptionFactor
] = 1;
1026 this->policy_container
->policies
[LocalContextCacheSize
] = 8;
1027 this->policy_container
->policies
[ContextStackSize
] = 0;
1028 this->policy_container
->policies
[ContextPriority
] = THREAD_PRIORITY_NORMAL
;
1029 this->policy_container
->policies
[SchedulingProtocol
] = 0;
1030 this->policy_container
->policies
[DynamicProgressFeedback
] = 1;
/* ??0SchedulerPolicy@Concurrency@@QAA@IZZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@_KZZ */
/* TODO: don't leak policy_container on exception */
/* Varargs constructor: starts from the defaults, then applies n
 * (key, value) pairs. Min/MaxConcurrency are collected and applied
 * last through SetConcurrencyLimits so their mutual consistency check
 * sees both final values. */
SchedulerPolicy* WINAPIV SchedulerPolicy_ctor_policies(
        SchedulerPolicy *this, size_t n, ...)
{
    unsigned int min_concurrency, max_concurrency;
    va_list valist;
    size_t i;

    TRACE("(%p %Iu)\n", this, n);

    SchedulerPolicy_ctor(this);
    min_concurrency = this->policy_container->policies[MinConcurrency];
    max_concurrency = this->policy_container->policies[MaxConcurrency];

    va_start(valist, n);
    for(i=0; i<n; i++) {
        PolicyElementKey policy = va_arg(valist, PolicyElementKey);
        unsigned int val = va_arg(valist, unsigned int);

        if(policy == MinConcurrency)
            min_concurrency = val;
        else if(policy == MaxConcurrency)
            max_concurrency = val;
        else
            SchedulerPolicy_SetPolicyValue(this, policy, val);
    }
    va_end(valist);

    SchedulerPolicy_SetConcurrencyLimits(this, min_concurrency, max_concurrency);
    return this;
}
/* ??4SchedulerPolicy@Concurrency@@QAEAAV01@ABV01@@Z */
/* ??4SchedulerPolicy@Concurrency@@QEAAAEAV01@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_op_assign, 8)
/* Assignment: copies the full policy table; both containers must
 * already exist. */
SchedulerPolicy* __thiscall SchedulerPolicy_op_assign(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    memcpy(this->policy_container->policies, rhs->policy_container->policies,
            sizeof(this->policy_container->policies));
    return this;
}

/* ??0SchedulerPolicy@Concurrency@@QAE@ABV01@@Z */
/* ??0SchedulerPolicy@Concurrency@@QEAA@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_copy_ctor, 8)
/* Copy constructor: default-construct (allocates the container),
 * then copy the values over. */
SchedulerPolicy* __thiscall SchedulerPolicy_copy_ctor(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    SchedulerPolicy_ctor(this);
    return SchedulerPolicy_op_assign(this, rhs);
}

/* ??1SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??1SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_dtor, 4)
/* Destructor: frees the heap-allocated policy table. */
void __thiscall SchedulerPolicy_dtor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);
    operator_delete(this->policy_container);
}
/* Tears down a ThreadScheduler: signals any registered shutdown events,
 * releases the policy copy and the internal critical section.
 * A non-zero refcount at this point indicates a caller bug (leaked refs). */
static void ThreadScheduler_dtor(ThreadScheduler *this)
{
    int i;

    if(this->ref != 0) WARN("ref = %ld\n", this->ref);
    SchedulerPolicy_dtor(&this->policy);

    for(i=0; i<this->shutdown_count; i++)
        SetEvent(this->shutdown_events[i]);
    operator_delete(this->shutdown_events);

    /* Clear the debug tag set in the ctor before deleting the CS. */
    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);
}
DEFINE_THISCALL_WRAPPER(ThreadScheduler_Id, 4)
/* Returns the scheduler id assigned at construction. */
unsigned int __thiscall ThreadScheduler_Id(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetNumberOfVirtualProcessors, 4)
/* Returns the virtual processor count computed at construction
 * (MaxConcurrency capped by the machine's processor count). */
unsigned int __thiscall ThreadScheduler_GetNumberOfVirtualProcessors(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->virt_proc_no;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetPolicy, 8)
/* Copy-constructs this scheduler's policy into the caller-provided
 * return slot (MSVC returns the SchedulerPolicy by value). */
SchedulerPolicy* __thiscall ThreadScheduler_GetPolicy(
        const ThreadScheduler *this, SchedulerPolicy *ret)
{
    TRACE("(%p %p)\n", this, ret);
    return SchedulerPolicy_copy_ctor(ret, &this->policy);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Reference, 4)
/* Adds a reference; returns the new count. */
unsigned int __thiscall ThreadScheduler_Reference(ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return InterlockedIncrement(&this->ref);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Release, 4)
/* Drops a reference; destroys and frees the scheduler when it hits 0. */
unsigned int __thiscall ThreadScheduler_Release(ThreadScheduler *this)
{
    unsigned int ret = InterlockedDecrement(&this->ref);

    TRACE("(%p)\n", this);

    if(!ret) {
        ThreadScheduler_dtor(this);
        operator_delete(this);
    }
    return ret;
}
1158 DEFINE_THISCALL_WRAPPER(ThreadScheduler_RegisterShutdownEvent
, 8)
1159 void __thiscall
ThreadScheduler_RegisterShutdownEvent(ThreadScheduler
*this, HANDLE event
)
1161 HANDLE
*shutdown_events
;
1164 TRACE("(%p %p)\n", this, event
);
1166 EnterCriticalSection(&this->cs
);
1168 size
= this->shutdown_size
? this->shutdown_size
* 2 : 1;
1169 shutdown_events
= operator_new(size
* sizeof(*shutdown_events
));
1170 memcpy(shutdown_events
, this->shutdown_events
,
1171 this->shutdown_count
* sizeof(*shutdown_events
));
1172 operator_delete(this->shutdown_events
);
1173 this->shutdown_size
= size
;
1174 this->shutdown_events
= shutdown_events
;
1175 this->shutdown_events
[this->shutdown_count
++] = event
;
1177 LeaveCriticalSection(&this->cs
);
DEFINE_THISCALL_WRAPPER(ThreadScheduler_Attach, 4)
/* Attaches this scheduler to the calling thread's context, pushing any
 * previously attached scheduler onto the context's scheduler stack.
 * Throws improper_scheduler_attach if it is already the current one. */
void __thiscall ThreadScheduler_Attach(ThreadScheduler *this)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    TRACE("(%p)\n", this);

    if(context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(context->scheduler.scheduler == &this->scheduler) {
        improper_scheduler_attach e;

        improper_scheduler_attach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_attach_exception_type);
    }

    if(context->scheduler.scheduler) {
        /* Save the current head so Detach can restore it later. */
        struct scheduler_list *l = operator_new(sizeof(*l));
        *l = context->scheduler;
        context->scheduler.next = l;
    }

    context->scheduler.scheduler = &this->scheduler;
    ThreadScheduler_Reference(this);
}
DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup_loc, 8)
/* Stub: schedule groups are not implemented. */
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup_loc(
        ThreadScheduler *this, /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup, 4)
/* Stub: schedule groups are not implemented. */
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup(ThreadScheduler *this)
{
    FIXME("(%p) stub\n", this);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask_loc, 16)
/* Stub: task scheduling with a location hint is not implemented. */
void __thiscall ThreadScheduler_ScheduleTask_loc(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data, /*location*/void *placement)
{
    FIXME("(%p %p %p %p) stub\n", this, proc, data, placement);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask, 12)
/* Stub: task scheduling is not implemented. */
void __thiscall ThreadScheduler_ScheduleTask(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data)
{
    FIXME("(%p %p %p) stub\n", this, proc, data);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_IsAvailableLocation, 8)
/* Stub: location queries are not implemented.
 * NOTE(review): return value reconstructed — confirm against upstream. */
bool __thiscall ThreadScheduler_IsAvailableLocation(
        const ThreadScheduler *this, const /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return TRUE;
}
DEFINE_THISCALL_WRAPPER(ThreadScheduler_vector_dtor, 8)
/* MSVC vector-deleting destructor: flags bit 1 requests the memory be
 * freed, bit 2 means `this` points at an array of objects. */
Scheduler* __thiscall ThreadScheduler_vector_dtor(ThreadScheduler *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);

    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            ThreadScheduler_dtor(this+i);
        operator_delete(ptr);
    } else {
        ThreadScheduler_dtor(this);
        if(flags & 1)
            operator_delete(this);
    }

    return &this->scheduler;
}
/* Initializes a ThreadScheduler: copies the policy, assigns a fresh id,
 * and derives the virtual processor count from MaxConcurrency capped by
 * the machine's processor count. */
static ThreadScheduler* ThreadScheduler_ctor(ThreadScheduler *this,
        const SchedulerPolicy *policy)
{
    SYSTEM_INFO si;

    TRACE("(%p)->()\n", this);

    this->scheduler.vtable = &ThreadScheduler_vtable;
    this->ref = 1;
    this->id = InterlockedIncrement(&scheduler_id);
    SchedulerPolicy_copy_ctor(&this->policy, policy);

    GetSystemInfo(&si);
    this->virt_proc_no = SchedulerPolicy_GetPolicyValue(&this->policy, MaxConcurrency);
    if(this->virt_proc_no > si.dwNumberOfProcessors)
        this->virt_proc_no = si.dwNumberOfProcessors;

    this->shutdown_count = this->shutdown_size = 0;
    this->shutdown_events = NULL;

    InitializeCriticalSection(&this->cs);
    /* Tag the CS so debuggers can identify its owner. */
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__": ThreadScheduler");
    return this;
}
1289 /* ?Create@Scheduler@Concurrency@@SAPAV12@ABVSchedulerPolicy@2@@Z */
1290 /* ?Create@Scheduler@Concurrency@@SAPEAV12@AEBVSchedulerPolicy@2@@Z */
1291 Scheduler
* __cdecl
Scheduler_Create(const SchedulerPolicy
*policy
)
1293 ThreadScheduler
*ret
;
1295 TRACE("(%p)\n", policy
);
1297 ret
= operator_new(sizeof(*ret
));
1298 return &ThreadScheduler_ctor(ret
, policy
)->scheduler
;
/* ?ResetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXXZ */
/* Restores the process-wide default scheduler policy to the built-in
 * defaults, destroying any previously set custom policy. */
void __cdecl Scheduler_ResetDefaultSchedulerPolicy(void)
{
    TRACE("()\n");

    EnterCriticalSection(&default_scheduler_cs);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    SchedulerPolicy_ctor(&default_scheduler_policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
/* Replaces the process-wide default scheduler policy with a copy of
 * the caller's policy; constructs the global on first use. */
void __cdecl Scheduler_SetDefaultSchedulerPolicy(const SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler_policy.policy_container)
        SchedulerPolicy_copy_ctor(&default_scheduler_policy, policy);
    else
        SchedulerPolicy_op_assign(&default_scheduler_policy, policy);
    LeaveCriticalSection(&default_scheduler_cs);
}
/* ?Create@CurrentScheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?Create@CurrentScheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
/* Creates a scheduler from the policy and attaches it to the calling
 * thread's context. */
void __cdecl CurrentScheduler_Create(const SchedulerPolicy *policy)
{
    Scheduler *scheduler;

    TRACE("(%p)\n", policy);

    scheduler = Scheduler_Create(policy);
    call_Scheduler_Attach(scheduler);
}
/* ?Detach@CurrentScheduler@Concurrency@@SAXXZ */
/* Detaches the calling thread from its current scheduler, restoring the
 * previously attached scheduler (if any) from the context's scheduler
 * stack. Throws improper_scheduler_detach when there is no context or
 * nothing was explicitly attached. */
void __cdecl CurrentScheduler_Detach(void)
{
    ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();

    TRACE("()\n");

    if(!context) {
        improper_scheduler_detach e;

        improper_scheduler_detach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
    }

    if(context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    /* No saved entry means nothing was attached on top of the default. */
    if(!context->scheduler.next) {
        improper_scheduler_detach e;

        improper_scheduler_detach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
    }

    call_Scheduler_Release(context->scheduler.scheduler);
    if(!context->scheduler.next) {
        context->scheduler.scheduler = NULL;
    } else {
        /* Pop the saved scheduler back into the context head. */
        struct scheduler_list *entry = context->scheduler.next;
        context->scheduler.scheduler = entry->scheduler;
        context->scheduler.next = entry->next;
        operator_delete(entry);
    }
}
/* Lazily creates the process-wide default scheduler; double-checked
 * under default_scheduler_cs so concurrent callers create it once. */
static void create_default_scheduler(void)
{
    if(default_scheduler)
        return;

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler) {
        ThreadScheduler *scheduler;

        if(!default_scheduler_policy.policy_container)
            SchedulerPolicy_ctor(&default_scheduler_policy);

        scheduler = operator_new(sizeof(*scheduler));
        ThreadScheduler_ctor(scheduler, &default_scheduler_policy);
        default_scheduler = scheduler;
    }
    LeaveCriticalSection(&default_scheduler_cs);
}
/* ?Get@CurrentScheduler@Concurrency@@SAPAVScheduler@2@XZ */
/* ?Get@CurrentScheduler@Concurrency@@SAPEAVScheduler@2@XZ */
/* Returns the calling thread's scheduler, creating the default one
 * on demand. */
Scheduler* __cdecl CurrentScheduler_Get(void)
{
    TRACE("()\n");
    return get_current_scheduler();
}
#if _MSVCR_VER > 100
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@AAVlocation@2@@Z */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@AEAVlocation@2@@Z */
/* Forwards to the current scheduler's CreateScheduleGroup(location). */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup_loc(/*location*/void *placement)
{
    TRACE("(%p)\n", placement);
    return call_Scheduler_CreateScheduleGroup_loc(get_current_scheduler(), placement);
}
#endif
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@XZ */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@XZ */
/* Forwards to the current scheduler's CreateScheduleGroup(). */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup(void)
{
    TRACE("()\n");
    return call_Scheduler_CreateScheduleGroup(get_current_scheduler());
}

/* ?GetNumberOfVirtualProcessors@CurrentScheduler@Concurrency@@SAIXZ */
/* Virtual processor count of the current scheduler, or -1 when no
 * scheduler is attached yet (does not create the default one). */
unsigned int __cdecl CurrentScheduler_GetNumberOfVirtualProcessors(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_GetNumberOfVirtualProcessors(scheduler);
}

/* ?GetPolicy@CurrentScheduler@Concurrency@@SA?AVSchedulerPolicy@2@XZ */
/* Copies the current scheduler's policy into the caller's return slot. */
SchedulerPolicy* __cdecl CurrentScheduler_GetPolicy(SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);
    return call_Scheduler_GetPolicy(get_current_scheduler(), policy);
}

/* ?Id@CurrentScheduler@Concurrency@@SAIXZ */
/* Id of the current scheduler, or -1 when none is attached yet. */
unsigned int __cdecl CurrentScheduler_Id(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_Id(scheduler);
}
#if _MSVCR_VER > 100
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NABVlocation@2@@Z */
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NAEBVlocation@2@@Z */
/* Forwards to the current scheduler; FALSE when none is attached. */
bool __cdecl CurrentScheduler_IsAvailableLocation(const /*location*/void *placement)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("(%p)\n", placement);

    if(!scheduler)
        return FALSE;
    return call_Scheduler_IsAvailableLocation(scheduler, placement);
}
#endif
/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPAX@Z */
/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPEAX@Z */
/* Registers an event to be signaled when the current scheduler shuts
 * down; creates the default scheduler if needed. */
void __cdecl CurrentScheduler_RegisterShutdownEvent(HANDLE event)
{
    TRACE("(%p)\n", event);
    call_Scheduler_RegisterShutdownEvent(get_current_scheduler(), event);
}
#if _MSVCR_VER > 100
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0AAVlocation@2@@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0AEAVlocation@2@@Z */
/* Forwards a (proc, data, location) task to the current scheduler. */
void __cdecl CurrentScheduler_ScheduleTask_loc(void (__cdecl *proc)(void*),
        void *data, /*location*/void *placement)
{
    TRACE("(%p %p %p)\n", proc, data, placement);
    call_Scheduler_ScheduleTask_loc(get_current_scheduler(), proc, data, placement);
}
#endif
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0@Z */
/* Forwards a (proc, data) task to the current scheduler. */
void __cdecl CurrentScheduler_ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    call_Scheduler_ScheduleTask(get_current_scheduler(), proc, data);
}
/* ??0_Scheduler@details@Concurrency@@QAE@PAVScheduler@2@@Z */
/* ??0_Scheduler@details@Concurrency@@QEAA@PEAVScheduler@2@@Z */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor_sched, 8)
/* Thin wrapper around a Scheduler pointer (details::_Scheduler). */
_Scheduler* __thiscall _Scheduler_ctor_sched(_Scheduler *this, Scheduler *scheduler)
{
    TRACE("(%p %p)\n", this, scheduler);

    this->scheduler = scheduler;
    return this;
}

/* ??_F_Scheduler@details@Concurrency@@QAEXXZ */
/* ??_F_Scheduler@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler_ctor, 4)
/* Default constructor: wraps a NULL scheduler. */
_Scheduler* __thiscall _Scheduler_ctor(_Scheduler *this)
{
    return _Scheduler_ctor_sched(this, NULL);
}

/* ?_GetScheduler@_Scheduler@details@Concurrency@@QAEPAVScheduler@3@XZ */
/* ?_GetScheduler@_Scheduler@details@Concurrency@@QEAAPEAVScheduler@3@XZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__GetScheduler, 4)
/* Returns the wrapped Scheduler pointer. */
Scheduler* __thiscall _Scheduler__GetScheduler(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return this->scheduler;
}

/* ?_Reference@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Reference@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Reference, 4)
/* Forwards Reference() to the wrapped scheduler. */
unsigned int __thiscall _Scheduler__Reference(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Reference(this->scheduler);
}

/* ?_Release@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Release@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Release, 4)
/* Forwards Release() to the wrapped scheduler. */
unsigned int __thiscall _Scheduler__Release(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Release(this->scheduler);
}

/* ?_Get@_CurrentScheduler@details@Concurrency@@SA?AV_Scheduler@23@XZ */
/* Returns the current scheduler wrapped in a _Scheduler value. */
_Scheduler* __cdecl _CurrentScheduler__Get(_Scheduler *ret)
{
    TRACE("()\n");
    return _Scheduler_ctor_sched(ret, get_current_scheduler());
}

/* ?_GetNumberOfVirtualProcessors@_CurrentScheduler@details@Concurrency@@SAIXZ */
/* Ensures a scheduler exists, then queries its VP count. */
unsigned int __cdecl _CurrentScheduler__GetNumberOfVirtualProcessors(void)
{
    TRACE("()\n");
    get_current_scheduler();
    return CurrentScheduler_GetNumberOfVirtualProcessors();
}

/* ?_Id@_CurrentScheduler@details@Concurrency@@SAIXZ */
/* Ensures a scheduler exists, then queries its id. */
unsigned int __cdecl _CurrentScheduler__Id(void)
{
    TRACE("()\n");
    get_current_scheduler();
    return CurrentScheduler_Id();
}

/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPEAX@Z0@Z */
/* Forwards a (proc, data) task to the current scheduler. */
void __cdecl _CurrentScheduler__ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    CurrentScheduler_ScheduleTask(proc, data);
}
/* ?_Value@_SpinCount@details@Concurrency@@SAIXZ */
/* Default spin count: 4000 iterations on multiprocessor machines,
 * 0 (yield immediately) on uniprocessor. Computed once and cached. */
unsigned int __cdecl SpinCount__Value(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors>1 ? 4000 : 0;
    }

    return val;
}
/* ??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$00@details@Concurrency@@QEAA@P6AXXZ@Z */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor_yield, 8)
/* _SpinWait<1> constructor: spinner that yields (unknown=1) once the
 * spin budget is exhausted. */
SpinWait* __thiscall SpinWait_ctor_yield(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 1;
    this->yield_func = yf;
    return this;
}

/* ??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$0A@@details@Concurrency@@QEAA@P6AXXZ@Z */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor, 8)
/* _SpinWait<0> constructor: spinner that finishes without yielding
 * (unknown=0). */
SpinWait* __thiscall SpinWait_ctor(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 0;
    this->yield_func = yf;
    return this;
}

/* ??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$00@details@Concurrency@@QEAAXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait_dtor, 4)
/* Destructor: nothing to release. */
void __thiscall SpinWait_dtor(SpinWait *this)
{
    TRACE("(%p)\n", this);
}

/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__DoYield, 4)
/* Invokes the yield callback when this spinner is a yielding one. */
void __thiscall SpinWait__DoYield(SpinWait *this)
{
    TRACE("(%p)\n", this);

    if(this->unknown)
        this->yield_func();
}

/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IEAAKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IEAAKXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__NumberOfSpins, 4)
/* Number of busy iterations per _SpinOnce round. */
ULONG __thiscall SpinWait__NumberOfSpins(SpinWait *this)
{
    TRACE("(%p)\n", this);
    return 1;
}

/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QEAAXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QEAAXI@Z */
DEFINE_THISCALL_WRAPPER(SpinWait__SetSpinCount, 8)
/* Sets the spin budget; zero skips straight to the yield phase. */
void __thiscall SpinWait__SetSpinCount(SpinWait *this, unsigned int spin)
{
    TRACE("(%p %d)\n", this, spin);

    this->spin = spin;
    this->state = spin ? SPINWAIT_SPIN : SPINWAIT_YIELD;
}

/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__Reset, 4)
/* Re-arms the spinner with the machine-default spin count. */
void __thiscall SpinWait__Reset(SpinWait *this)
{
    SpinWait__SetSpinCount(this, SpinCount__Value());
}

/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IEAA_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IEAA_NXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__ShouldSpinAgain, 4)
/* TRUE while spin budget remains. */
bool __thiscall SpinWait__ShouldSpinAgain(SpinWait *this)
{
    TRACE("(%p)\n", this);

    return this->spin > 0;
}

/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ */
/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QEAA_NXZ */
/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ */
/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__SpinOnce, 4)
/* One step of the INIT -> SPIN -> (YIELD) -> DONE state machine.
 * Returns TRUE while the caller should keep spinning. */
bool __thiscall SpinWait__SpinOnce(SpinWait *this)
{
    switch(this->state) {
    case SPINWAIT_INIT:
        SpinWait__Reset(this);
        /* fall through */
    case SPINWAIT_SPIN:
        InterlockedDecrement((LONG*)&this->spin);
        if(!this->spin)
            /* Budget exhausted: yield once (if yielding spinner) then stop. */
            this->state = this->unknown ? SPINWAIT_YIELD : SPINWAIT_DONE;
        return TRUE;
    case SPINWAIT_YIELD:
        this->state = SPINWAIT_DONE;
        this->yield_func();
        return TRUE;
    default:
        SpinWait__Reset(this);
        return FALSE;
    }
}
/* ??0critical_section@Concurrency@@QAE@XZ */
/* ??0critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_ctor, 4)
/* Constructor: lazily creates the process-wide keyed event used for
 * blocking, racing creators resolve via interlocked CAS (loser closes
 * its handle). The lock itself starts unowned with an empty queue. */
critical_section* __thiscall critical_section_ctor(critical_section *this)
{
    TRACE("(%p)\n", this);

    if(!keyed_event) {
        HANDLE event;

        NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
        if(InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
            NtClose(event);
    }

    this->unk_thread_id = 0;
    this->head = this->tail = NULL;
    return this;
}

/* ??1critical_section@Concurrency@@QAE@XZ */
/* ??1critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_dtor, 4)
/* Destructor: nothing to free (keyed_event is process-global). */
void __thiscall critical_section_dtor(critical_section *this)
{
    TRACE("(%p)\n", this);
}
/* Yield callback used while spinning on queue links.
 * NOTE(review): body reconstructed — confirm against upstream. */
static void __cdecl spin_wait_yield(void)
{
    Sleep(0);
}

/* Spins until q->next is published by the enqueuing thread; the tail
 * swap and the next-pointer store are not atomic together, so a short
 * window exists where next is still NULL. */
static inline void spin_wait_for_next_cs(cs_queue *q)
{
    SpinWait sw;

    if(q->next) return;

    SpinWait_ctor(&sw, &spin_wait_yield);
    SpinWait__Reset(&sw);
    while(!q->next)
        SpinWait__SpinOnce(&sw);
    SpinWait_dtor(&sw);
}

/* Marks the calling thread as owner and installs the embedded
 * unk_active node as the queue head in place of q. */
static inline void cs_set_head(critical_section *cs, cs_queue *q)
{
    cs->unk_thread_id = GetCurrentThreadId();
    cs->unk_active.next = q->next;
    cs->head = &cs->unk_active;
}
/* Core blocking acquire. q is a caller-provided (stack) queue node.
 * The lock is an MCS-style queue: swap q into tail; if there was a
 * predecessor, link after it and block on the keyed event until the
 * unlocker releases us. Throws improper_lock on recursive acquisition. */
static inline void cs_lock(critical_section *cs, cs_queue *q)
{
    cs_queue *last;

    if(cs->unk_thread_id == GetCurrentThreadId()) {
        improper_lock e;

        improper_lock_ctor_str(&e, "Already locked");
        _CxxThrowException(&e, &improper_lock_exception_type);
    }

    memset(q, 0, sizeof(*q));
    last = InterlockedExchangePointer(&cs->tail, q);
    if(last) {
        last->next = q;
        NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
    }

    cs_set_head(cs, q);
    /* Swap the stack node out for the embedded unk_active node; if a
     * new waiter already enqueued behind q, wait for its link first. */
    if(InterlockedCompareExchangePointer(&cs->tail, &cs->unk_active, q) != q) {
        spin_wait_for_next_cs(q);
        cs->unk_active.next = q->next;
    }
}
/* ?lock@critical_section@Concurrency@@QAEXXZ */
/* ?lock@critical_section@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(critical_section_lock, 4)
/* Blocking acquire using a stack-allocated queue node. */
void __thiscall critical_section_lock(critical_section *this)
{
    cs_queue q;

    TRACE("(%p)\n", this);
    cs_lock(this, &q);
}

/* ?try_lock@critical_section@Concurrency@@QAE_NXZ */
/* ?try_lock@critical_section@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock, 4)
/* Non-blocking acquire: succeeds only when the queue is empty (CAS of
 * tail from NULL). Recursive attempts fail rather than throw. */
bool __thiscall critical_section_try_lock(critical_section *this)
{
    cs_queue q;

    TRACE("(%p)\n", this);

    if(this->unk_thread_id == GetCurrentThreadId())
        return FALSE;

    memset(&q, 0, sizeof(q));
    if(!InterlockedCompareExchangePointer(&this->tail, &q, NULL)) {
        cs_set_head(this, &q);
        if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
            spin_wait_for_next_cs(&q);
            this->unk_active.next = q.next;
        }
        return TRUE;
    }
    return FALSE;
}
/* ?unlock@critical_section@Concurrency@@QAEXXZ */
/* ?unlock@critical_section@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(critical_section_unlock, 4)
/* Release: clears ownership, then either empties the queue (CAS of
 * tail back to NULL) or hands the lock to the next waiter via the
 * keyed event. The >=110 path also reaps heap-allocated nodes left by
 * timed-out try_lock_for waiters (free flag set). */
void __thiscall critical_section_unlock(critical_section *this)
{
    TRACE("(%p)\n", this);

    this->unk_thread_id = 0;
    this->head = NULL;
    if(InterlockedCompareExchangePointer(&this->tail, NULL, &this->unk_active)
            == &this->unk_active) return;
    spin_wait_for_next_cs(&this->unk_active);

#if _MSVCR_VER >= 110
    while(1) {
        cs_queue *next;

        /* If the next waiter already marked itself free (timed out),
         * it will never wait on the keyed event — reclaim its node. */
        if(!InterlockedExchange(&this->unk_active.next->free, TRUE))
            break;

        next = this->unk_active.next;
        if(InterlockedCompareExchangePointer(&this->tail, NULL, next) == next) {
            HeapFree(GetProcessHeap(), 0, next);
            return;
        }
        spin_wait_for_next_cs(next);

        this->unk_active.next = next->next;
        HeapFree(GetProcessHeap(), 0, next);
    }
#endif

    /* Wake the next queued waiter. */
    NtReleaseKeyedEvent(keyed_event, this->unk_active.next, 0, NULL);
}
/* ?native_handle@critical_section@Concurrency@@QAEAAV12@XZ */
/* ?native_handle@critical_section@Concurrency@@QEAAAEAV12@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_native_handle, 4)
/* The native handle is the critical_section object itself. */
critical_section* __thiscall critical_section_native_handle(critical_section *this)
{
    TRACE("(%p)\n", this);
    return this;
}
#if _MSVCR_VER >= 110
/* ?try_lock_for@critical_section@Concurrency@@QAE_NI@Z */
/* ?try_lock_for@critical_section@Concurrency@@QEAA_NI@Z */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock_for, 8)
/* Timed acquire. Uses a HEAP-allocated queue node because on timeout
 * the node may outlive this call (unlock reaps it via the free flag).
 * Throws improper_lock on recursive acquisition. */
bool __thiscall critical_section_try_lock_for(
        critical_section *this, unsigned int timeout)
{
    LARGE_INTEGER to;
    NTSTATUS status;
    FILETIME ft;
    cs_queue *q, *last;

    TRACE("(%p %d)\n", this, timeout);

    if(this->unk_thread_id == GetCurrentThreadId()) {
        improper_lock e;

        improper_lock_ctor_str(&e, "Already locked");
        _CxxThrowException(&e, &improper_lock_exception_type);
    }

    /* On allocation failure fall back to a plain non-blocking attempt. */
    if(!(q = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*q))))
        return critical_section_try_lock(this);

    last = InterlockedExchangePointer(&this->tail, q);
    if(last) {
        last->next = q;

        /* Absolute NT deadline: now + timeout in 100ns units. */
        GetSystemTimeAsFileTime(&ft);
        to.QuadPart = ((LONGLONG)ft.dwHighDateTime<<32) +
            ft.dwLowDateTime + (LONGLONG)timeout*10000;
        status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
        if(status == STATUS_TIMEOUT) {
            if(!InterlockedExchange(&q->free, TRUE))
                return FALSE;
            /* A thread has signaled the event and is block waiting. */
            /* We need to catch the event to wake the thread. */
            NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
        }
    }

    cs_set_head(this, q);
    if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, q) != q) {
        spin_wait_for_next_cs(q);
        this->unk_active.next = q->next;
    }

    HeapFree(GetProcessHeap(), 0, q);
    return TRUE;
}
#endif
/* ??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock@critical_section@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_ctor, 8)
/* RAII acquire: locks cs using the queue node embedded in the
 * scoped_lock object itself. */
critical_section_scoped_lock* __thiscall critical_section_scoped_lock_ctor(
        critical_section_scoped_lock *this, critical_section *cs)
{
    TRACE("(%p %p)\n", this, cs);

    this->cs = cs;
    cs_lock(this->cs, &this->lock.q);
    return this;
}

/* ??1scoped_lock@critical_section@Concurrency@@QAE@XZ */
/* ??1scoped_lock@critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_dtor, 4)
/* RAII release. */
void __thiscall critical_section_scoped_lock_dtor(critical_section_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    critical_section_unlock(this->cs);
}
/* ??0_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??0_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock_ctor, 4)
/* Non-reentrant PPL lock: a thin wrapper over critical_section. */
_NonReentrantPPLLock* __thiscall _NonReentrantPPLLock_ctor(_NonReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    critical_section_ctor(&this->cs);
    return this;
}

/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Acquire, 8)
/* Acquires the lock using the caller-supplied queue node q. */
void __thiscall _NonReentrantPPLLock__Acquire(_NonReentrantPPLLock *this, cs_queue *q)
{
    TRACE("(%p %p)\n", this, q);
    cs_lock(&this->cs, q);
}

/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Release, 4)
/* Releases the underlying critical_section. */
void __thiscall _NonReentrantPPLLock__Release(_NonReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);
    critical_section_unlock(&this->cs);
}

/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_ctor, 8)
/* RAII acquire using the queue node embedded in the scoped lock. */
_NonReentrantPPLLock__Scoped_lock* __thiscall _NonReentrantPPLLock__Scoped_lock_ctor(
        _NonReentrantPPLLock__Scoped_lock *this, _NonReentrantPPLLock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    _NonReentrantPPLLock__Acquire(this->lock, &this->wait.q);
    return this;
}

/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_dtor, 4)
/* RAII release. */
void __thiscall _NonReentrantPPLLock__Scoped_lock_dtor(_NonReentrantPPLLock__Scoped_lock *this)
{
    TRACE("(%p)\n", this);

    _NonReentrantPPLLock__Release(this->lock);
}
/* ??0_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??0_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock_ctor, 4)
/* Reentrant PPL lock: critical_section plus owner thread id and a
 * recursion count.
 * NOTE(review): initial owner/count values reconstructed — confirm. */
_ReentrantPPLLock* __thiscall _ReentrantPPLLock_ctor(_ReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    critical_section_ctor(&this->cs);
    this->count = 0;
    this->owner = -1;
    return this;
}

/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Acquire, 8)
/* Acquire: a recursive acquire just bumps the count; otherwise take
 * the underlying critical_section and record ownership. */
void __thiscall _ReentrantPPLLock__Acquire(_ReentrantPPLLock *this, cs_queue *q)
{
    TRACE("(%p %p)\n", this, q);

    if(this->owner == GetCurrentThreadId()) {
        this->count++;
        return;
    }

    cs_lock(&this->cs, q);
    this->count++;
    this->owner = GetCurrentThreadId();
}

/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Release, 4)
/* Release: unlocks the critical_section only when the outermost
 * recursion level is released. */
void __thiscall _ReentrantPPLLock__Release(_ReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    this->count--;
    if(this->count)
        return;

    this->owner = -1;
    critical_section_unlock(&this->cs);
}

/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_ctor, 8)
/* RAII acquire using the queue node embedded in the scoped lock. */
_ReentrantPPLLock__Scoped_lock* __thiscall _ReentrantPPLLock__Scoped_lock_ctor(
        _ReentrantPPLLock__Scoped_lock *this, _ReentrantPPLLock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    _ReentrantPPLLock__Acquire(this->lock, &this->wait.q);
    return this;
}

/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_dtor, 4)
/* RAII release. */
void __thiscall _ReentrantPPLLock__Scoped_lock_dtor(_ReentrantPPLLock__Scoped_lock *this)
{
    TRACE("(%p)\n", this);

    _ReentrantPPLLock__Release(this->lock);
}
/* ?_GetConcurrency@details@Concurrency@@YAIXZ */
/* Returns the machine's processor count, computed once and cached. */
unsigned int __cdecl _GetConcurrency(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors;
    }

    return val;
}
2073 static inline PLARGE_INTEGER
evt_timeout(PLARGE_INTEGER pTime
, unsigned int timeout
)
2075 if(timeout
== COOPERATIVE_TIMEOUT_INFINITE
) return NULL
;
2076 pTime
->QuadPart
= (ULONGLONG
)timeout
* -10000;
2080 static void evt_add_queue(thread_wait_entry
**head
, thread_wait_entry
*entry
)
2082 entry
->next
= *head
;
2084 if(*head
) (*head
)->prev
= entry
;
2088 static void evt_remove_queue(thread_wait_entry
**head
, thread_wait_entry
*entry
)
2091 *head
= entry
->next
;
2092 else if(entry
->prev
)
2093 entry
->prev
->next
= entry
->next
;
2094 if(entry
->next
) entry
->next
->prev
= entry
->prev
;
2097 static size_t evt_end_wait(thread_wait
*wait
, event
**events
, int count
)
2099 size_t i
, ret
= COOPERATIVE_WAIT_TIMEOUT
;
2101 for(i
= 0; i
< count
; i
++) {
2102 critical_section_lock(&events
[i
]->cs
);
2103 if(events
[i
] == wait
->signaled
) ret
= i
;
2104 evt_remove_queue(&events
[i
]->waiters
, &wait
->entries
[i
]);
2105 critical_section_unlock(&events
[i
]->cs
);
/* Atomically moves *state from from to to; returns non-zero on success.
 * Used to flip a thread_wait between EVT_RUNNING and EVT_WAITING. */
static inline int evt_transition(void **state, void *from, void *to)
{
    return InterlockedCompareExchangePointer(state, to, from) == from;
}
2116 static size_t evt_wait(thread_wait
*wait
, event
**events
, int count
, bool wait_all
, unsigned int timeout
)
2122 wait
->signaled
= EVT_RUNNING
;
2123 wait
->pending_waits
= wait_all
? count
: 1;
2124 for(i
= 0; i
< count
; i
++) {
2125 wait
->entries
[i
].wait
= wait
;
2127 critical_section_lock(&events
[i
]->cs
);
2128 evt_add_queue(&events
[i
]->waiters
, &wait
->entries
[i
]);
2129 if(events
[i
]->signaled
) {
2130 if(!InterlockedDecrement(&wait
->pending_waits
)) {
2131 wait
->signaled
= events
[i
];
2132 critical_section_unlock(&events
[i
]->cs
);
2134 return evt_end_wait(wait
, events
, i
+1);
2137 critical_section_unlock(&events
[i
]->cs
);
2141 return evt_end_wait(wait
, events
, count
);
2143 if(!evt_transition(&wait
->signaled
, EVT_RUNNING
, EVT_WAITING
))
2144 return evt_end_wait(wait
, events
, count
);
2146 status
= NtWaitForKeyedEvent(keyed_event
, wait
, 0, evt_timeout(&ntto
, timeout
));
2148 if(status
&& !evt_transition(&wait
->signaled
, EVT_WAITING
, EVT_RUNNING
))
2149 NtWaitForKeyedEvent(keyed_event
, wait
, 0, NULL
);
2151 return evt_end_wait(wait
, events
, count
);
2154 /* ??0event@Concurrency@@QAE@XZ */
2155 /* ??0event@Concurrency@@QEAA@XZ */
2156 DEFINE_THISCALL_WRAPPER(event_ctor
, 4)
2157 event
* __thiscall
event_ctor(event
*this)
2159 TRACE("(%p)\n", this);
2161 this->waiters
= NULL
;
2162 this->signaled
= FALSE
;
2163 critical_section_ctor(&this->cs
);
2168 /* ??1event@Concurrency@@QAE@XZ */
2169 /* ??1event@Concurrency@@QEAA@XZ */
2170 DEFINE_THISCALL_WRAPPER(event_dtor
, 4)
2171 void __thiscall
event_dtor(event
*this)
2173 TRACE("(%p)\n", this);
2174 critical_section_dtor(&this->cs
);
2176 ERR("there's a wait on destroyed event\n");
2179 /* ?reset@event@Concurrency@@QAEXXZ */
2180 /* ?reset@event@Concurrency@@QEAAXXZ */
2181 DEFINE_THISCALL_WRAPPER(event_reset
, 4)
2182 void __thiscall
event_reset(event
*this)
2184 thread_wait_entry
*entry
;
2186 TRACE("(%p)\n", this);
2188 critical_section_lock(&this->cs
);
2189 if(this->signaled
) {
2190 this->signaled
= FALSE
;
2191 for(entry
=this->waiters
; entry
; entry
= entry
->next
)
2192 InterlockedIncrement(&entry
->wait
->pending_waits
);
2194 critical_section_unlock(&this->cs
);
2197 /* ?set@event@Concurrency@@QAEXXZ */
2198 /* ?set@event@Concurrency@@QEAAXXZ */
2199 DEFINE_THISCALL_WRAPPER(event_set
, 4)
2200 void __thiscall
event_set(event
*this)
2202 thread_wait_entry
*wakeup
= NULL
;
2203 thread_wait_entry
*entry
, *next
;
2205 TRACE("(%p)\n", this);
2207 critical_section_lock(&this->cs
);
2208 if(!this->signaled
) {
2209 this->signaled
= TRUE
;
2210 for(entry
=this->waiters
; entry
; entry
=next
) {
2212 if(!InterlockedDecrement(&entry
->wait
->pending_waits
)) {
2213 if(InterlockedExchangePointer(&entry
->wait
->signaled
, this) == EVT_WAITING
) {
2214 evt_remove_queue(&this->waiters
, entry
);
2215 evt_add_queue(&wakeup
, entry
);
2220 critical_section_unlock(&this->cs
);
2222 for(entry
=wakeup
; entry
; entry
=next
) {
2224 entry
->next
= entry
->prev
= NULL
;
2225 NtReleaseKeyedEvent(keyed_event
, entry
->wait
, 0, NULL
);
2229 /* ?wait@event@Concurrency@@QAEII@Z */
2230 /* ?wait@event@Concurrency@@QEAA_KI@Z */
2231 DEFINE_THISCALL_WRAPPER(event_wait
, 8)
2232 size_t __thiscall
event_wait(event
*this, unsigned int timeout
)
2237 TRACE("(%p %u)\n", this, timeout
);
2239 critical_section_lock(&this->cs
);
2240 signaled
= this->signaled
;
2241 critical_section_unlock(&this->cs
);
2243 if(!timeout
) return signaled
? 0 : COOPERATIVE_WAIT_TIMEOUT
;
2244 return signaled
? 0 : evt_wait(&wait
, &this, 1, FALSE
, timeout
);
2247 /* ?wait_for_multiple@event@Concurrency@@SAIPAPAV12@I_NI@Z */
2248 /* ?wait_for_multiple@event@Concurrency@@SA_KPEAPEAV12@_K_NI@Z */
2249 int __cdecl
event_wait_for_multiple(event
**events
, size_t count
, bool wait_all
, unsigned int timeout
)
2254 TRACE("(%p %Iu %d %u)\n", events
, count
, wait_all
, timeout
);
2259 wait
= operator_new(FIELD_OFFSET(thread_wait
, entries
[count
]));
2260 ret
= evt_wait(wait
, events
, count
, wait_all
, timeout
);
2261 operator_delete(wait
);
2266 #if _MSVCR_VER >= 110
2268 /* ??0_Condition_variable@details@Concurrency@@QAE@XZ */
2269 /* ??0_Condition_variable@details@Concurrency@@QEAA@XZ */
2270 DEFINE_THISCALL_WRAPPER(_Condition_variable_ctor
, 4)
2271 _Condition_variable
* __thiscall
_Condition_variable_ctor(_Condition_variable
*this)
2273 TRACE("(%p)\n", this);
2276 critical_section_ctor(&this->lock
);
2280 /* ??1_Condition_variable@details@Concurrency@@QAE@XZ */
2281 /* ??1_Condition_variable@details@Concurrency@@QEAA@XZ */
2282 DEFINE_THISCALL_WRAPPER(_Condition_variable_dtor
, 4)
2283 void __thiscall
_Condition_variable_dtor(_Condition_variable
*this)
2285 TRACE("(%p)\n", this);
2287 while(this->queue
) {
2288 cv_queue
*next
= this->queue
->next
;
2289 if(!this->queue
->expired
)
2290 ERR("there's an active wait\n");
2291 HeapFree(GetProcessHeap(), 0, this->queue
);
2294 critical_section_dtor(&this->lock
);
2297 /* ?wait@_Condition_variable@details@Concurrency@@QAEXAAVcritical_section@3@@Z */
2298 /* ?wait@_Condition_variable@details@Concurrency@@QEAAXAEAVcritical_section@3@@Z */
2299 DEFINE_THISCALL_WRAPPER(_Condition_variable_wait
, 8)
2300 void __thiscall
_Condition_variable_wait(_Condition_variable
*this, critical_section
*cs
)
2304 TRACE("(%p, %p)\n", this, cs
);
2306 critical_section_lock(&this->lock
);
2307 q
.next
= this->queue
;
2310 critical_section_unlock(&this->lock
);
2312 critical_section_unlock(cs
);
2313 NtWaitForKeyedEvent(keyed_event
, &q
, 0, NULL
);
2314 critical_section_lock(cs
);
2317 /* ?wait_for@_Condition_variable@details@Concurrency@@QAE_NAAVcritical_section@3@I@Z */
2318 /* ?wait_for@_Condition_variable@details@Concurrency@@QEAA_NAEAVcritical_section@3@I@Z */
2319 DEFINE_THISCALL_WRAPPER(_Condition_variable_wait_for
, 12)
2320 bool __thiscall
_Condition_variable_wait_for(_Condition_variable
*this,
2321 critical_section
*cs
, unsigned int timeout
)
2328 TRACE("(%p %p %d)\n", this, cs
, timeout
);
2330 q
= operator_new(sizeof(cv_queue
));
2331 critical_section_lock(&this->lock
);
2332 q
->next
= this->queue
;
2335 critical_section_unlock(&this->lock
);
2337 critical_section_unlock(cs
);
2339 GetSystemTimeAsFileTime(&ft
);
2340 to
.QuadPart
= ((LONGLONG
)ft
.dwHighDateTime
<< 32) +
2341 ft
.dwLowDateTime
+ (LONGLONG
)timeout
* 10000;
2342 status
= NtWaitForKeyedEvent(keyed_event
, q
, 0, &to
);
2343 if(status
== STATUS_TIMEOUT
) {
2344 if(!InterlockedExchange(&q
->expired
, TRUE
)) {
2345 critical_section_lock(cs
);
2349 NtWaitForKeyedEvent(keyed_event
, q
, 0, 0);
2353 critical_section_lock(cs
);
2357 /* ?notify_one@_Condition_variable@details@Concurrency@@QAEXXZ */
2358 /* ?notify_one@_Condition_variable@details@Concurrency@@QEAAXXZ */
2359 DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_one
, 4)
2360 void __thiscall
_Condition_variable_notify_one(_Condition_variable
*this)
2364 TRACE("(%p)\n", this);
2370 critical_section_lock(&this->lock
);
2373 critical_section_unlock(&this->lock
);
2376 this->queue
= node
->next
;
2377 critical_section_unlock(&this->lock
);
2379 if(!InterlockedExchange(&node
->expired
, TRUE
)) {
2380 NtReleaseKeyedEvent(keyed_event
, node
, 0, NULL
);
2383 HeapFree(GetProcessHeap(), 0, node
);
2388 /* ?notify_all@_Condition_variable@details@Concurrency@@QAEXXZ */
2389 /* ?notify_all@_Condition_variable@details@Concurrency@@QEAAXXZ */
2390 DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_all
, 4)
2391 void __thiscall
_Condition_variable_notify_all(_Condition_variable
*this)
2395 TRACE("(%p)\n", this);
2400 critical_section_lock(&this->lock
);
2403 critical_section_unlock(&this->lock
);
2406 cv_queue
*next
= ptr
->next
;
2408 if(!InterlockedExchange(&ptr
->expired
, TRUE
))
2409 NtReleaseKeyedEvent(keyed_event
, ptr
, 0, NULL
);
2411 HeapFree(GetProcessHeap(), 0, ptr
);
2417 /* ??0reader_writer_lock@Concurrency@@QAE@XZ */
2418 /* ??0reader_writer_lock@Concurrency@@QEAA@XZ */
2419 DEFINE_THISCALL_WRAPPER(reader_writer_lock_ctor
, 4)
2420 reader_writer_lock
* __thiscall
reader_writer_lock_ctor(reader_writer_lock
*this)
2422 TRACE("(%p)\n", this);
2427 NtCreateKeyedEvent(&event
, GENERIC_READ
|GENERIC_WRITE
, NULL
, 0);
2428 if (InterlockedCompareExchangePointer(&keyed_event
, event
, NULL
) != NULL
)
2432 memset(this, 0, sizeof(*this));
2436 /* ??1reader_writer_lock@Concurrency@@QAE@XZ */
2437 /* ??1reader_writer_lock@Concurrency@@QEAA@XZ */
2438 DEFINE_THISCALL_WRAPPER(reader_writer_lock_dtor
, 4)
2439 void __thiscall
reader_writer_lock_dtor(reader_writer_lock
*this)
2441 TRACE("(%p)\n", this);
2443 if (this->thread_id
!= 0 || this->count
)
2444 WARN("destroying locked reader_writer_lock\n");
2447 static inline void spin_wait_for_next_rwl(rwl_queue
*q
)
2453 SpinWait_ctor(&sw
, &spin_wait_yield
);
2454 SpinWait__Reset(&sw
);
2456 SpinWait__SpinOnce(&sw
);
2460 /* ?lock@reader_writer_lock@Concurrency@@QAEXXZ */
2461 /* ?lock@reader_writer_lock@Concurrency@@QEAAXXZ */
2462 DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock
, 4)
2463 void __thiscall
reader_writer_lock_lock(reader_writer_lock
*this)
2465 rwl_queue q
= { NULL
}, *last
;
2467 TRACE("(%p)\n", this);
2469 if (this->thread_id
== GetCurrentThreadId()) {
2471 improper_lock_ctor_str(&e
, "Already locked");
2472 _CxxThrowException(&e
, &improper_lock_exception_type
);
2475 last
= InterlockedExchangePointer((void**)&this->writer_tail
, &q
);
2478 NtWaitForKeyedEvent(keyed_event
, &q
, 0, NULL
);
2480 this->writer_head
= &q
;
2481 if (InterlockedOr(&this->count
, WRITER_WAITING
))
2482 NtWaitForKeyedEvent(keyed_event
, &q
, 0, NULL
);
2485 this->thread_id
= GetCurrentThreadId();
2486 this->writer_head
= &this->active
;
2487 this->active
.next
= NULL
;
2488 if (InterlockedCompareExchangePointer((void**)&this->writer_tail
, &this->active
, &q
) != &q
) {
2489 spin_wait_for_next_rwl(&q
);
2490 this->active
.next
= q
.next
;
2494 /* ?lock_read@reader_writer_lock@Concurrency@@QAEXXZ */
2495 /* ?lock_read@reader_writer_lock@Concurrency@@QEAAXXZ */
2496 DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock_read
, 4)
2497 void __thiscall
reader_writer_lock_lock_read(reader_writer_lock
*this)
2501 TRACE("(%p)\n", this);
2503 if (this->thread_id
== GetCurrentThreadId()) {
2505 improper_lock_ctor_str(&e
, "Already locked as writer");
2506 _CxxThrowException(&e
, &improper_lock_exception_type
);
2510 q
.next
= this->reader_head
;
2511 } while(InterlockedCompareExchangePointer((void**)&this->reader_head
, &q
, q
.next
) != q
.next
);
2517 while (!((count
= this->count
) & WRITER_WAITING
))
2518 if (InterlockedCompareExchange(&this->count
, count
+1, count
) == count
) break;
2520 if (count
& WRITER_WAITING
)
2521 NtWaitForKeyedEvent(keyed_event
, &q
, 0, NULL
);
2523 head
= InterlockedExchangePointer((void**)&this->reader_head
, NULL
);
2524 while(head
&& head
!= &q
) {
2525 rwl_queue
*next
= head
->next
;
2526 InterlockedIncrement(&this->count
);
2527 NtReleaseKeyedEvent(keyed_event
, head
, 0, NULL
);
2531 NtWaitForKeyedEvent(keyed_event
, &q
, 0, NULL
);
2535 /* ?try_lock@reader_writer_lock@Concurrency@@QAE_NXZ */
2536 /* ?try_lock@reader_writer_lock@Concurrency@@QEAA_NXZ */
2537 DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock
, 4)
2538 bool __thiscall
reader_writer_lock_try_lock(reader_writer_lock
*this)
2540 rwl_queue q
= { NULL
};
2542 TRACE("(%p)\n", this);
2544 if (this->thread_id
== GetCurrentThreadId())
2547 if (InterlockedCompareExchangePointer((void**)&this->writer_tail
, &q
, NULL
))
2549 this->writer_head
= &q
;
2550 if (!InterlockedCompareExchange(&this->count
, WRITER_WAITING
, 0)) {
2551 this->thread_id
= GetCurrentThreadId();
2552 this->writer_head
= &this->active
;
2553 this->active
.next
= NULL
;
2554 if (InterlockedCompareExchangePointer((void**)&this->writer_tail
, &this->active
, &q
) != &q
) {
2555 spin_wait_for_next_rwl(&q
);
2556 this->active
.next
= q
.next
;
2561 if (InterlockedCompareExchangePointer((void**)&this->writer_tail
, NULL
, &q
) == &q
)
2563 spin_wait_for_next_rwl(&q
);
2564 this->writer_head
= q
.next
;
2565 if (!InterlockedOr(&this->count
, WRITER_WAITING
)) {
2566 this->thread_id
= GetCurrentThreadId();
2567 this->writer_head
= &this->active
;
2568 this->active
.next
= q
.next
;
2574 /* ?try_lock_read@reader_writer_lock@Concurrency@@QAE_NXZ */
2575 /* ?try_lock_read@reader_writer_lock@Concurrency@@QEAA_NXZ */
2576 DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock_read
, 4)
2577 bool __thiscall
reader_writer_lock_try_lock_read(reader_writer_lock
*this)
2581 TRACE("(%p)\n", this);
2583 while (!((count
= this->count
) & WRITER_WAITING
))
2584 if (InterlockedCompareExchange(&this->count
, count
+1, count
) == count
) return TRUE
;
2588 /* ?unlock@reader_writer_lock@Concurrency@@QAEXXZ */
2589 /* ?unlock@reader_writer_lock@Concurrency@@QEAAXXZ */
2590 DEFINE_THISCALL_WRAPPER(reader_writer_lock_unlock
, 4)
2591 void __thiscall
reader_writer_lock_unlock(reader_writer_lock
*this)
2594 rwl_queue
*head
, *next
;
2596 TRACE("(%p)\n", this);
2598 if ((count
= this->count
) & ~WRITER_WAITING
) {
2599 count
= InterlockedDecrement(&this->count
);
2600 if (count
!= WRITER_WAITING
)
2602 NtReleaseKeyedEvent(keyed_event
, this->writer_head
, 0, NULL
);
2606 this->thread_id
= 0;
2607 next
= this->writer_head
->next
;
2609 NtReleaseKeyedEvent(keyed_event
, next
, 0, NULL
);
2612 InterlockedAnd(&this->count
, ~WRITER_WAITING
);
2613 head
= InterlockedExchangePointer((void**)&this->reader_head
, NULL
);
2616 InterlockedIncrement(&this->count
);
2617 NtReleaseKeyedEvent(keyed_event
, head
, 0, NULL
);
2621 if (InterlockedCompareExchangePointer((void**)&this->writer_tail
, NULL
, this->writer_head
) == this->writer_head
)
2623 InterlockedOr(&this->count
, WRITER_WAITING
);
2626 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
2627 /* ??0scoped_lock@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
2628 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_ctor
, 8)
2629 reader_writer_lock_scoped_lock
* __thiscall
reader_writer_lock_scoped_lock_ctor(
2630 reader_writer_lock_scoped_lock
*this, reader_writer_lock
*lock
)
2632 TRACE("(%p %p)\n", this, lock
);
2635 reader_writer_lock_lock(lock
);
2639 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QAE@XZ */
2640 /* ??1scoped_lock@reader_writer_lock@Concurrency@@QEAA@XZ */
2641 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_dtor
, 4)
2642 void __thiscall
reader_writer_lock_scoped_lock_dtor(reader_writer_lock_scoped_lock
*this)
2644 TRACE("(%p)\n", this);
2645 reader_writer_lock_unlock(this->lock
);
2648 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
2649 /* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
2650 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_ctor
, 8)
2651 reader_writer_lock_scoped_lock
* __thiscall
reader_writer_lock_scoped_lock_read_ctor(
2652 reader_writer_lock_scoped_lock
*this, reader_writer_lock
*lock
)
2654 TRACE("(%p %p)\n", this, lock
);
2657 reader_writer_lock_lock_read(lock
);
2661 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QAE@XZ */
2662 /* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@XZ */
2663 DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_dtor
, 4)
2664 void __thiscall
reader_writer_lock_scoped_lock_read_dtor(reader_writer_lock_scoped_lock
*this)
2666 TRACE("(%p)\n", this);
2667 reader_writer_lock_unlock(this->lock
);
2670 /* ??0_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
2671 /* ??0_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
2672 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_ctor
, 4)
2673 _ReentrantBlockingLock
* __thiscall
_ReentrantBlockingLock_ctor(_ReentrantBlockingLock
*this)
2675 TRACE("(%p)\n", this);
2677 InitializeCriticalSection(&this->cs
);
2678 this->cs
.DebugInfo
->Spare
[0] = (DWORD_PTR
)(__FILE__
": _ReentrantBlockingLock");
2682 /* ??1_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
2683 /* ??1_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
2684 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_dtor
, 4)
2685 void __thiscall
_ReentrantBlockingLock_dtor(_ReentrantBlockingLock
*this)
2687 TRACE("(%p)\n", this);
2689 this->cs
.DebugInfo
->Spare
[0] = 0;
2690 DeleteCriticalSection(&this->cs
);
2693 /* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
2694 /* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
2695 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Acquire
, 4)
2696 void __thiscall
_ReentrantBlockingLock__Acquire(_ReentrantBlockingLock
*this)
2698 TRACE("(%p)\n", this);
2699 EnterCriticalSection(&this->cs
);
2702 /* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
2703 /* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
2704 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Release
, 4)
2705 void __thiscall
_ReentrantBlockingLock__Release(_ReentrantBlockingLock
*this)
2707 TRACE("(%p)\n", this);
2708 LeaveCriticalSection(&this->cs
);
2711 /* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QAE_NXZ */
2712 /* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QEAA_NXZ */
2713 DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__TryAcquire
, 4)
2714 bool __thiscall
_ReentrantBlockingLock__TryAcquire(_ReentrantBlockingLock
*this)
2716 TRACE("(%p)\n", this);
2717 return TryEnterCriticalSection(&this->cs
);
2720 /* ?wait@Concurrency@@YAXI@Z */
2721 void __cdecl
Concurrency_wait(unsigned int time
)
2725 if (!once
++) FIXME("(%d) stub!\n", time
);
2731 /* ?_Trace_agents@Concurrency@@YAXW4Agents_EventType@1@_JZZ */
2732 void WINAPIV
_Trace_agents(/*enum Concurrency::Agents_EventType*/int type
, __int64 id
, ...)
2734 FIXME("(%d %#I64x)\n", type
, id
);
2738 /* ?_Trace_ppl_function@Concurrency@@YAXABU_GUID@@EW4ConcRT_EventType@1@@Z */
2739 /* ?_Trace_ppl_function@Concurrency@@YAXAEBU_GUID@@EW4ConcRT_EventType@1@@Z */
2740 void __cdecl
_Trace_ppl_function(const GUID
*guid
, unsigned char level
, enum ConcRT_EventType type
)
2742 FIXME("(%s %u %i) stub\n", debugstr_guid(guid
), level
, type
);
2745 /* ??0_Timer@details@Concurrency@@IAE@I_N@Z */
2746 /* ??0_Timer@details@Concurrency@@IEAA@I_N@Z */
2747 DEFINE_THISCALL_WRAPPER(_Timer_ctor
, 12)
2748 _Timer
* __thiscall
_Timer_ctor(_Timer
*this, unsigned int elapse
, bool repeat
)
2750 TRACE("(%p %u %x)\n", this, elapse
, repeat
);
2752 this->vtable
= &_Timer_vtable
;
2754 this->elapse
= elapse
;
2755 this->repeat
= repeat
;
2759 static void WINAPI
timer_callback(TP_CALLBACK_INSTANCE
*instance
, void *ctx
, TP_TIMER
*timer
)
2762 TRACE("calling _Timer(%p) callback\n", this);
2763 call__Timer_callback(this);
2766 /* ?_Start@_Timer@details@Concurrency@@IAEXXZ */
2767 /* ?_Start@_Timer@details@Concurrency@@IEAAXXZ */
2768 DEFINE_THISCALL_WRAPPER(_Timer__Start
, 4)
2769 void __thiscall
_Timer__Start(_Timer
*this)
2774 TRACE("(%p)\n", this);
2776 this->timer
= CreateThreadpoolTimer(timer_callback
, this, NULL
);
2779 FIXME("throw exception?\n");
2783 ll
= -(LONGLONG
)this->elapse
* TICKSPERMSEC
;
2784 ft
.dwLowDateTime
= ll
& 0xffffffff;
2785 ft
.dwHighDateTime
= ll
>> 32;
2786 SetThreadpoolTimer(this->timer
, &ft
, this->repeat
? this->elapse
: 0, 0);
2789 /* ?_Stop@_Timer@details@Concurrency@@IAEXXZ */
2790 /* ?_Stop@_Timer@details@Concurrency@@IEAAXXZ */
2791 DEFINE_THISCALL_WRAPPER(_Timer__Stop
, 4)
2792 void __thiscall
_Timer__Stop(_Timer
*this)
2794 TRACE("(%p)\n", this);
2796 SetThreadpoolTimer(this->timer
, NULL
, 0, 0);
2797 WaitForThreadpoolTimerCallbacks(this->timer
, TRUE
);
2798 CloseThreadpoolTimer(this->timer
);
2802 /* ??1_Timer@details@Concurrency@@MAE@XZ */
2803 /* ??1_Timer@details@Concurrency@@MEAA@XZ */
2804 DEFINE_THISCALL_WRAPPER(_Timer_dtor
, 4)
2805 void __thiscall
_Timer_dtor(_Timer
*this)
2807 TRACE("(%p)\n", this);
2813 DEFINE_THISCALL_WRAPPER(_Timer_vector_dtor
, 8)
2814 _Timer
* __thiscall
_Timer_vector_dtor(_Timer
*this, unsigned int flags
)
2816 TRACE("(%p %x)\n", this, flags
);
2818 /* we have an array, with the number of elements stored before the first object */
2819 INT_PTR i
, *ptr
= (INT_PTR
*)this-1;
2821 for (i
=*ptr
-1; i
>=0; i
--)
2822 _Timer_dtor(this+i
);
2823 operator_delete(ptr
);
2827 operator_delete(this);
#ifdef __ASM_USE_THISCALL_WRAPPER

/* i386 thiscall adapters: move the return address aside, pop `this` from
 * the stack into %ecx, and tail-jump through the vtable slot at `off`. */
#define DEFINE_VTBL_WRAPPER(off)            \
    __ASM_GLOBAL_FUNC(vtbl_wrapper_ ## off, \
        "popl %eax\n\t"                     \
        "popl %ecx\n\t"                     \
        "pushl %eax\n\t"                    \
        "movl 0(%ecx), %eax\n\t"            \
        "jmp *" #off "(%eax)\n\t")

DEFINE_VTBL_WRAPPER(0);
DEFINE_VTBL_WRAPPER(4);
DEFINE_VTBL_WRAPPER(8);
DEFINE_VTBL_WRAPPER(12);
DEFINE_VTBL_WRAPPER(16);
DEFINE_VTBL_WRAPPER(20);
DEFINE_VTBL_WRAPPER(24);
DEFINE_VTBL_WRAPPER(28);
DEFINE_VTBL_WRAPPER(32);
DEFINE_VTBL_WRAPPER(36);
DEFINE_VTBL_WRAPPER(40);
DEFINE_VTBL_WRAPPER(44);
DEFINE_VTBL_WRAPPER(48);

#endif
2859 DEFINE_RTTI_DATA0(Context
, 0, ".?AVContext@Concurrency@@")
2860 DEFINE_RTTI_DATA1(ContextBase
, 0, &Context_rtti_base_descriptor
, ".?AVContextBase@details@Concurrency@@")
2861 DEFINE_RTTI_DATA2(ExternalContextBase
, 0, &ContextBase_rtti_base_descriptor
,
2862 &Context_rtti_base_descriptor
, ".?AVExternalContextBase@details@Concurrency@@")
2863 DEFINE_RTTI_DATA0(Scheduler
, 0, ".?AVScheduler@Concurrency@@")
2864 DEFINE_RTTI_DATA1(SchedulerBase
, 0, &Scheduler_rtti_base_descriptor
, ".?AVSchedulerBase@details@Concurrency@@")
2865 DEFINE_RTTI_DATA2(ThreadScheduler
, 0, &SchedulerBase_rtti_base_descriptor
,
2866 &Scheduler_rtti_base_descriptor
, ".?AVThreadScheduler@details@Concurrency@@")
2867 DEFINE_RTTI_DATA0(_Timer
, 0, ".?AV_Timer@details@Concurrency@@");
2869 __ASM_BLOCK_BEGIN(concurrency_vtables
)
2870 __ASM_VTABLE(ExternalContextBase
,
2871 VTABLE_ADD_FUNC(ExternalContextBase_GetId
)
2872 VTABLE_ADD_FUNC(ExternalContextBase_GetVirtualProcessorId
)
2873 VTABLE_ADD_FUNC(ExternalContextBase_GetScheduleGroupId
)
2874 VTABLE_ADD_FUNC(ExternalContextBase_Unblock
)
2875 VTABLE_ADD_FUNC(ExternalContextBase_IsSynchronouslyBlocked
)
2876 VTABLE_ADD_FUNC(ExternalContextBase_vector_dtor
));
2877 __ASM_VTABLE(ThreadScheduler
,
2878 VTABLE_ADD_FUNC(ThreadScheduler_vector_dtor
)
2879 VTABLE_ADD_FUNC(ThreadScheduler_Id
)
2880 VTABLE_ADD_FUNC(ThreadScheduler_GetNumberOfVirtualProcessors
)
2881 VTABLE_ADD_FUNC(ThreadScheduler_GetPolicy
)
2882 VTABLE_ADD_FUNC(ThreadScheduler_Reference
)
2883 VTABLE_ADD_FUNC(ThreadScheduler_Release
)
2884 VTABLE_ADD_FUNC(ThreadScheduler_RegisterShutdownEvent
)
2885 VTABLE_ADD_FUNC(ThreadScheduler_Attach
)
2886 #if _MSVCR_VER > 100
2887 VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup_loc
)
2889 VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup
)
2890 #if _MSVCR_VER > 100
2891 VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask_loc
)
2893 VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask
)
2894 #if _MSVCR_VER > 100
2895 VTABLE_ADD_FUNC(ThreadScheduler_IsAvailableLocation
)
2898 __ASM_VTABLE(_Timer
,
2899 VTABLE_ADD_FUNC(_Timer_vector_dtor
));
/* DLL init: relocate RTTI and C++ exception type info against the module
 * base.  Only needed on 64-bit, where the descriptors hold rva offsets. */
void msvcrt_init_concurrency(void *base)
{
#ifdef __x86_64__
    init_cexception_rtti(base);
    init_improper_lock_rtti(base);
    init_improper_scheduler_attach_rtti(base);
    init_improper_scheduler_detach_rtti(base);
    init_invalid_scheduler_policy_key_rtti(base);
    init_invalid_scheduler_policy_thread_specification_rtti(base);
    init_invalid_scheduler_policy_value_rtti(base);
    init_scheduler_resource_allocation_error_rtti(base);
    init_Context_rtti(base);
    init_ContextBase_rtti(base);
    init_ExternalContextBase_rtti(base);
    init_Scheduler_rtti(base);
    init_SchedulerBase_rtti(base);
    init_ThreadScheduler_rtti(base);
    init__Timer_rtti(base);

    init_cexception_cxx_type_info(base);
    init_improper_lock_cxx(base);
    init_improper_scheduler_attach_cxx(base);
    init_improper_scheduler_detach_cxx(base);
    init_invalid_scheduler_policy_key_cxx(base);
    init_invalid_scheduler_policy_thread_specification_cxx(base);
    init_invalid_scheduler_policy_value_cxx(base);
    init_scheduler_resource_allocation_error_cxx(base);
#endif
}
2932 void msvcrt_free_concurrency(void)
2934 if (context_tls_index
!= TLS_OUT_OF_INDEXES
)
2935 TlsFree(context_tls_index
);
2936 if(default_scheduler_policy
.policy_container
)
2937 SchedulerPolicy_dtor(&default_scheduler_policy
);
2938 if(default_scheduler
) {
2939 ThreadScheduler_dtor(default_scheduler
);
2940 operator_delete(default_scheduler
);
2944 NtClose(keyed_event
);
2947 void msvcrt_free_scheduler_thread(void)
2949 Context
*context
= try_get_current_context();
2950 if (!context
) return;
2951 call_Context_dtor(context
, 1);
2954 #endif /* _MSVCR_VER >= 100 */