/*
 * Concurrency namespace implementation
 *
 * Copyright 2017 Piotr Caban
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#include "wine/debug.h"
#include "wine/exception.h"
#include "wine/list.h"
WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);

typedef exception cexception;
CREATE_EXCEPTION_OBJECT(cexception)

static LONG context_id = -1;
static LONG scheduler_id = -1;
typedef enum {
    SchedulerKind,
    MaxConcurrency,
    MinConcurrency,
    TargetOversubscriptionFactor,
    LocalContextCacheSize,
    ContextStackSize,
    ContextPriority,
    SchedulingProtocol,
    DynamicProgressFeedback,
    WinRTInitialization,
    last_policy_id
} PolicyElementKey;

typedef struct {
    struct _policy_container {
        unsigned int policies[last_policy_id];
    } *policy_container;
} SchedulerPolicy;
typedef struct {
    const vtable_ptr *vtable;
} Context;
#define call_Context_GetId(this) CALL_VTBL_FUNC(this, 0, \
        unsigned int, (const Context*), (this))
#define call_Context_GetVirtualProcessorId(this) CALL_VTBL_FUNC(this, 4, \
        unsigned int, (const Context*), (this))
#define call_Context_GetScheduleGroupId(this) CALL_VTBL_FUNC(this, 8, \
        unsigned int, (const Context*), (this))
#define call_Context_Unblock(this) CALL_VTBL_FUNC(this, 12, \
        void, (Context*), (this))
#define call_Context_IsSynchronouslyBlocked(this) CALL_VTBL_FUNC(this, 16, \
        bool, (const Context*), (this))
#define call_Context_dtor(this, flags) CALL_VTBL_FUNC(this, 20, \
        Context*, (Context*, unsigned int), (this, flags))
#define call_Context_Block(this) CALL_VTBL_FUNC(this, 24, \
        void, (Context*), (this))
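/*
 * Note: Context dispatch goes through a raw MSVC-style vtable.  Each
 * call_Context_* macro above picks a slot by its byte offset in the
 * 32-bit object layout (0, 4, 8, ...), which is what keeps these structs
 * call-compatible with contexts created by native msvcr100+ code.
 */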
union allocator_cache_entry {
    struct _free {
        int depth;
        union allocator_cache_entry *next;
    } free;
    struct _alloc {
        int bucket;
        char mem[1];
    } alloc;
};
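/*
 * One block, two views: while a block is handed out it starts with the
 * 'alloc' header (bucket index, user memory at alloc.mem); once returned
 * to a context's cache the same memory is reused as a 'free' list node
 * whose depth counter caps how many blocks a bucket may hold.
 */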
struct scheduler_list {
    struct Scheduler *scheduler;
    struct scheduler_list *next;
};

typedef struct {
    Context context;
    struct scheduler_list scheduler;
    unsigned int id;
    union allocator_cache_entry *allocator_cache[8];
    LONG blocked;
} ExternalContextBase;
extern const vtable_ptr ExternalContextBase_vtable;
static void ExternalContextBase_ctor(ExternalContextBase*);
typedef struct Scheduler {
    const vtable_ptr *vtable;
} Scheduler;
#define call_Scheduler_Id(this) CALL_VTBL_FUNC(this, 4, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetNumberOfVirtualProcessors(this) CALL_VTBL_FUNC(this, 8, unsigned int, (const Scheduler*), (this))
#define call_Scheduler_GetPolicy(this,policy) CALL_VTBL_FUNC(this, 12, \
        SchedulerPolicy*, (Scheduler*,SchedulerPolicy*), (this,policy))
#define call_Scheduler_Reference(this) CALL_VTBL_FUNC(this, 16, unsigned int, (Scheduler*), (this))
#define call_Scheduler_Release(this) CALL_VTBL_FUNC(this, 20, unsigned int, (Scheduler*), (this))
#define call_Scheduler_RegisterShutdownEvent(this,event) CALL_VTBL_FUNC(this, 24, void, (Scheduler*,HANDLE), (this,event))
#define call_Scheduler_Attach(this) CALL_VTBL_FUNC(this, 28, void, (Scheduler*), (this))
#if _MSVCR_VER > 100
#define call_Scheduler_CreateScheduleGroup_loc(this,placement) CALL_VTBL_FUNC(this, 32, \
        /*ScheduleGroup*/void*, (Scheduler*,/*location*/void*), (this,placement))
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 36, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask_loc(this,proc,data,placement) CALL_VTBL_FUNC(this, 40, \
        void, (Scheduler*,void (__cdecl*)(void*),void*,/*location*/void*), (this,proc,data,placement))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 44, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#define call_Scheduler_IsAvailableLocation(this,placement) CALL_VTBL_FUNC(this, 48, \
        bool, (Scheduler*,const /*location*/void*), (this,placement))
#else
#define call_Scheduler_CreateScheduleGroup(this) CALL_VTBL_FUNC(this, 32, /*ScheduleGroup*/void*, (Scheduler*), (this))
#define call_Scheduler_ScheduleTask(this,proc,data) CALL_VTBL_FUNC(this, 36, \
        void, (Scheduler*,void (__cdecl*)(void*),void*), (this,proc,data))
#endif
typedef struct {
    Scheduler scheduler;
    LONG ref;
    unsigned int id;
    unsigned int virt_proc_no;
    SchedulerPolicy policy;
    int shutdown_count;
    int shutdown_size;
    HANDLE *shutdown_events;
    CRITICAL_SECTION cs;
    struct list scheduled_chores;
} ThreadScheduler;
extern const vtable_ptr ThreadScheduler_vtable;

typedef struct {
    Scheduler *scheduler;
} _Scheduler;
typedef enum {
    SPINWAIT_INIT,
    SPINWAIT_SPIN,
    SPINWAIT_YIELD,
    SPINWAIT_DONE
} SpinWait_state;

typedef void (__cdecl *yield_func)(void);

typedef struct {
    ULONG spin;
    ULONG unknown;
    SpinWait_state state;
    yield_func yield_func;
} SpinWait;

#define FINISHED_INITIAL 0x80000000

typedef struct _StructuredTaskCollection {
    void *unk1;
    unsigned int unk2;
    void *unk3;
    Context *context;
    volatile LONG count;
    volatile LONG finished;
    void *exception;
    Context *event;
} _StructuredTaskCollection;
typedef enum {
    TASK_COLLECTION_SUCCESS = 1,
    TASK_COLLECTION_CANCELLED
} _TaskCollectionStatus;

typedef enum {
    STRUCTURED_TASK_COLLECTION_CANCELLED = 0x2,
    STRUCTURED_TASK_COLLECTION_STATUS_MASK = 0x7
} _StructuredTaskCollectionStatusBits;
typedef struct _UnrealizedChore {
    const vtable_ptr *vtable;
    void (__cdecl *chore_proc)(struct _UnrealizedChore*);
    _StructuredTaskCollection *task_collection;
    void (__cdecl *chore_wrapper)(struct _UnrealizedChore*);
} _UnrealizedChore;

struct scheduled_chore {
    struct list entry;
    _UnrealizedChore *chore;
};
/* keep in sync with msvcp90/msvcp90.h */
typedef struct cs_queue
{
    Context *ctx;
    struct cs_queue *next;
#if _MSVCR_VER >= 110
    LONG free;
    int unknown;
#endif
} cs_queue;

typedef struct
{
    ULONG_PTR unk_thread_id;
    cs_queue unk_active;
#if _MSVCR_VER >= 110
    void *unknown[2];
#else
    void *unknown[1];
#endif
    cs_queue *head;
    void *tail;
} critical_section;

typedef struct
{
    critical_section *cs;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } lock;
} critical_section_scoped_lock;
typedef struct
{
    critical_section cs;
} _NonReentrantPPLLock;

typedef struct
{
    _NonReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _NonReentrantPPLLock__Scoped_lock;

typedef struct
{
    critical_section cs;
    LONG count;
    LONG owner;
} _ReentrantPPLLock;

typedef struct
{
    _ReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _ReentrantPPLLock__Scoped_lock;
#define EVT_RUNNING     (void*)1
#define EVT_WAITING     NULL

struct thread_wait;
typedef struct thread_wait_entry
{
    struct thread_wait *wait;
    struct thread_wait_entry *next;
    struct thread_wait_entry *prev;
} thread_wait_entry;

typedef struct thread_wait
{
    Context *ctx;
    void *signaled;
    LONG pending_waits;
    thread_wait_entry entries[1];
} thread_wait;
typedef struct
{
    thread_wait_entry *waiters;
    INT_PTR signaled;
    critical_section mutex;
} event;

#if _MSVCR_VER >= 110
#define CV_WAKE (void*)1
typedef struct cv_queue {
    Context *ctx;
    struct cv_queue *next;
    LONG expired;
} cv_queue;

typedef struct {
    /* cv_queue structure is not binary compatible */
    cv_queue *queue;
    critical_section lock;
} _Condition_variable;
#endif
typedef struct rwl_queue
{
    struct rwl_queue *next;
    Context *ctx;
} rwl_queue;

#define WRITER_WAITING 0x80000000
/* FIXME: reader_writer_lock structure is not binary compatible
 * it can't exceed 28/56 bytes */
typedef struct
{
    LONG count;
    LONG thread_id;
    rwl_queue active;
    rwl_queue *writer_head;
    rwl_queue *writer_tail;
    rwl_queue *reader_head;
} reader_writer_lock;

typedef struct {
    reader_writer_lock *lock;
} reader_writer_lock_scoped_lock;

typedef struct {
    CRITICAL_SECTION cs;
} _ReentrantBlockingLock;
#define TICKSPERMSEC 10000
typedef struct {
    const vtable_ptr *vtable;
    TP_TIMER *timer;
    unsigned int elapse;
    bool repeat;
} _Timer;
extern const vtable_ptr _Timer_vtable;
#define call__Timer_callback(this) CALL_VTBL_FUNC(this, 4, void, (_Timer*), (this))
typedef exception improper_lock;
extern const vtable_ptr improper_lock_vtable;

typedef exception improper_scheduler_attach;
extern const vtable_ptr improper_scheduler_attach_vtable;

typedef exception improper_scheduler_detach;
extern const vtable_ptr improper_scheduler_detach_vtable;

typedef exception invalid_multiple_scheduling;
extern const vtable_ptr invalid_multiple_scheduling_vtable;

typedef exception invalid_scheduler_policy_key;
extern const vtable_ptr invalid_scheduler_policy_key_vtable;

typedef exception invalid_scheduler_policy_thread_specification;
extern const vtable_ptr invalid_scheduler_policy_thread_specification_vtable;

typedef exception invalid_scheduler_policy_value;
extern const vtable_ptr invalid_scheduler_policy_value_vtable;

typedef exception missing_wait;
extern const vtable_ptr missing_wait_vtable;

typedef struct {
    exception e;
    HRESULT hr;
} scheduler_resource_allocation_error;
extern const vtable_ptr scheduler_resource_allocation_error_vtable;
enum ConcRT_EventType
{
    CONCRT_EVENT_GENERIC,
    CONCRT_EVENT_START,
    CONCRT_EVENT_END,
    CONCRT_EVENT_BLOCK,
    CONCRT_EVENT_UNBLOCK,
    CONCRT_EVENT_YIELD,
    CONCRT_EVENT_ATTACH,
    CONCRT_EVENT_DETACH
};
static DWORD context_tls_index = TLS_OUT_OF_INDEXES;

static CRITICAL_SECTION default_scheduler_cs;
static CRITICAL_SECTION_DEBUG default_scheduler_cs_debug =
{
    0, 0, &default_scheduler_cs,
    { &default_scheduler_cs_debug.ProcessLocksList, &default_scheduler_cs_debug.ProcessLocksList },
      0, 0, { (DWORD_PTR)(__FILE__ ": default_scheduler_cs") }
};
static CRITICAL_SECTION default_scheduler_cs = { &default_scheduler_cs_debug, -1, 0, 0, 0, 0 };
static SchedulerPolicy default_scheduler_policy;
static ThreadScheduler *default_scheduler;

static void create_default_scheduler(void);
/* ??0improper_lock@Concurrency@@QAE@PBD@Z */
/* ??0improper_lock@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_lock_ctor_str, 8)
improper_lock* __thiscall improper_lock_ctor_str(improper_lock *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &improper_lock_vtable);
}

/* ??0improper_lock@Concurrency@@QAE@XZ */
/* ??0improper_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_lock_ctor, 4)
improper_lock* __thiscall improper_lock_ctor(improper_lock *this)
{
    return improper_lock_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_lock_copy_ctor,8)
improper_lock* __thiscall improper_lock_copy_ctor(improper_lock *this, const improper_lock *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    return __exception_copy_ctor(this, rhs, &improper_lock_vtable);
}

/* ??0improper_scheduler_attach@Concurrency@@QAE@PBD@Z */
/* ??0improper_scheduler_attach@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor_str, 8)
improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor_str(
        improper_scheduler_attach *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &improper_scheduler_attach_vtable);
}

/* ??0improper_scheduler_attach@Concurrency@@QAE@XZ */
/* ??0improper_scheduler_attach@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_ctor, 4)
improper_scheduler_attach* __thiscall improper_scheduler_attach_ctor(
        improper_scheduler_attach *this)
{
    return improper_scheduler_attach_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_scheduler_attach_copy_ctor,8)
improper_scheduler_attach* __thiscall improper_scheduler_attach_copy_ctor(
        improper_scheduler_attach * _this, const improper_scheduler_attach * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &improper_scheduler_attach_vtable);
}

/* ??0improper_scheduler_detach@Concurrency@@QAE@PBD@Z */
/* ??0improper_scheduler_detach@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor_str, 8)
improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor_str(
        improper_scheduler_detach *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &improper_scheduler_detach_vtable);
}

/* ??0improper_scheduler_detach@Concurrency@@QAE@XZ */
/* ??0improper_scheduler_detach@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_ctor, 4)
improper_scheduler_detach* __thiscall improper_scheduler_detach_ctor(
        improper_scheduler_detach *this)
{
    return improper_scheduler_detach_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(improper_scheduler_detach_copy_ctor,8)
improper_scheduler_detach* __thiscall improper_scheduler_detach_copy_ctor(
        improper_scheduler_detach * _this, const improper_scheduler_detach * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &improper_scheduler_detach_vtable);
}
/* ??0invalid_multiple_scheduling@Concurrency@@QAA@PBD@Z */
/* ??0invalid_multiple_scheduling@Concurrency@@QAE@PBD@Z */
/* ??0invalid_multiple_scheduling@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_ctor_str, 8)
invalid_multiple_scheduling* __thiscall invalid_multiple_scheduling_ctor_str(
        invalid_multiple_scheduling *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_multiple_scheduling_vtable);
}

/* ??0invalid_multiple_scheduling@Concurrency@@QAA@XZ */
/* ??0invalid_multiple_scheduling@Concurrency@@QAE@XZ */
/* ??0invalid_multiple_scheduling@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_ctor, 4)
invalid_multiple_scheduling* __thiscall invalid_multiple_scheduling_ctor(
        invalid_multiple_scheduling *this)
{
    return invalid_multiple_scheduling_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_multiple_scheduling_copy_ctor,8)
invalid_multiple_scheduling* __thiscall invalid_multiple_scheduling_copy_ctor(
        invalid_multiple_scheduling * _this, const invalid_multiple_scheduling * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_multiple_scheduling_vtable);
}

/* ??0invalid_scheduler_policy_key@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor_str, 8)
invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor_str(
        invalid_scheduler_policy_key *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_key_vtable);
}

/* ??0invalid_scheduler_policy_key@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_key@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_ctor, 4)
invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_ctor(
        invalid_scheduler_policy_key *this)
{
    return invalid_scheduler_policy_key_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_key_copy_ctor,8)
invalid_scheduler_policy_key* __thiscall invalid_scheduler_policy_key_copy_ctor(
        invalid_scheduler_policy_key * _this, const invalid_scheduler_policy_key * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_key_vtable);
}

/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor_str, 8)
invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor_str(
        invalid_scheduler_policy_thread_specification *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_thread_specification_vtable);
}

/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_thread_specification@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_ctor, 4)
invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_ctor(
        invalid_scheduler_policy_thread_specification *this)
{
    return invalid_scheduler_policy_thread_specification_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_thread_specification_copy_ctor,8)
invalid_scheduler_policy_thread_specification* __thiscall invalid_scheduler_policy_thread_specification_copy_ctor(
        invalid_scheduler_policy_thread_specification * _this, const invalid_scheduler_policy_thread_specification * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_thread_specification_vtable);
}
/* ??0invalid_scheduler_policy_value@Concurrency@@QAE@PBD@Z */
/* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor_str, 8)
invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor_str(
        invalid_scheduler_policy_value *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &invalid_scheduler_policy_value_vtable);
}

/* ??0invalid_scheduler_policy_value@Concurrency@@QAE@XZ */
/* ??0invalid_scheduler_policy_value@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_ctor, 4)
invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_ctor(
        invalid_scheduler_policy_value *this)
{
    return invalid_scheduler_policy_value_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(invalid_scheduler_policy_value_copy_ctor,8)
invalid_scheduler_policy_value* __thiscall invalid_scheduler_policy_value_copy_ctor(
        invalid_scheduler_policy_value * _this, const invalid_scheduler_policy_value * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &invalid_scheduler_policy_value_vtable);
}

/* ??0missing_wait@Concurrency@@QAA@PBD@Z */
/* ??0missing_wait@Concurrency@@QAE@PBD@Z */
/* ??0missing_wait@Concurrency@@QEAA@PEBD@Z */
DEFINE_THISCALL_WRAPPER(missing_wait_ctor_str, 8)
missing_wait* __thiscall missing_wait_ctor_str(
        missing_wait *this, const char *str)
{
    TRACE("(%p %s)\n", this, str);
    return __exception_ctor(this, str, &missing_wait_vtable);
}

/* ??0missing_wait@Concurrency@@QAA@XZ */
/* ??0missing_wait@Concurrency@@QAE@XZ */
/* ??0missing_wait@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(missing_wait_ctor, 4)
missing_wait* __thiscall missing_wait_ctor(missing_wait *this)
{
    return missing_wait_ctor_str(this, NULL);
}

DEFINE_THISCALL_WRAPPER(missing_wait_copy_ctor,8)
missing_wait* __thiscall missing_wait_copy_ctor(
        missing_wait * _this, const missing_wait * rhs)
{
    TRACE("(%p %p)\n", _this, rhs);
    return __exception_copy_ctor(_this, rhs, &missing_wait_vtable);
}
/* ??0scheduler_resource_allocation_error@Concurrency@@QAE@PBDJ@Z */
/* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@PEBDJ@Z */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor_name, 12)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor_name(
        scheduler_resource_allocation_error *this, const char *name, HRESULT hr)
{
    TRACE("(%p %s %lx)\n", this, wine_dbgstr_a(name), hr);
    __exception_ctor(&this->e, name, &scheduler_resource_allocation_error_vtable);
    this->hr = hr;
    return this;
}

/* ??0scheduler_resource_allocation_error@Concurrency@@QAE@J@Z */
/* ??0scheduler_resource_allocation_error@Concurrency@@QEAA@J@Z */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_ctor, 8)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_ctor(
        scheduler_resource_allocation_error *this, HRESULT hr)
{
    return scheduler_resource_allocation_error_ctor_name(this, NULL, hr);
}

DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_copy_ctor,8)
scheduler_resource_allocation_error* __thiscall scheduler_resource_allocation_error_copy_ctor(
        scheduler_resource_allocation_error *this,
        const scheduler_resource_allocation_error *rhs)
{
    TRACE("(%p,%p)\n", this, rhs);

    if (!rhs->e.do_free)
        memcpy(this, rhs, sizeof(*this));
    else
        scheduler_resource_allocation_error_ctor_name(this, rhs->e.name, rhs->hr);
    return this;
}

/* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QBEJXZ */
/* ?get_error_code@scheduler_resource_allocation_error@Concurrency@@QEBAJXZ */
DEFINE_THISCALL_WRAPPER(scheduler_resource_allocation_error_get_error_code, 4)
HRESULT __thiscall scheduler_resource_allocation_error_get_error_code(
        const scheduler_resource_allocation_error *this)
{
    TRACE("(%p)\n", this);
    return this->hr;
}
DEFINE_RTTI_DATA1(improper_lock, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_lock@Concurrency@@")
DEFINE_RTTI_DATA1(improper_scheduler_attach, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_scheduler_attach@Concurrency@@")
DEFINE_RTTI_DATA1(improper_scheduler_detach, 0, &cexception_rtti_base_descriptor,
        ".?AVimproper_scheduler_detach@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_multiple_scheduling, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_multiple_scheduling@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_key, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_key@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_thread_specification, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_thread_specification@Concurrency@@")
DEFINE_RTTI_DATA1(invalid_scheduler_policy_value, 0, &cexception_rtti_base_descriptor,
        ".?AVinvalid_scheduler_policy_value@Concurrency@@")
DEFINE_RTTI_DATA1(missing_wait, 0, &cexception_rtti_base_descriptor,
        ".?AVmissing_wait@Concurrency@@")
DEFINE_RTTI_DATA1(scheduler_resource_allocation_error, 0, &cexception_rtti_base_descriptor,
        ".?AVscheduler_resource_allocation_error@Concurrency@@")
DEFINE_CXX_DATA1(improper_lock, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(improper_scheduler_attach, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(improper_scheduler_detach, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_multiple_scheduling, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_key, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_thread_specification, &cexception_cxx_type_info, cexception_dtor)
DEFINE_CXX_DATA1(invalid_scheduler_policy_value, &cexception_cxx_type_info, cexception_dtor)
#if _MSVCR_VER >= 120
DEFINE_CXX_DATA1(missing_wait, &cexception_cxx_type_info, cexception_dtor)
#endif
DEFINE_CXX_DATA1(scheduler_resource_allocation_error, &cexception_cxx_type_info, cexception_dtor)
__ASM_BLOCK_BEGIN(concurrency_exception_vtables)
    __ASM_VTABLE(improper_lock,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(improper_scheduler_attach,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(improper_scheduler_detach,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_multiple_scheduling,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_key,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_thread_specification,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(invalid_scheduler_policy_value,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(missing_wait,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
    __ASM_VTABLE(scheduler_resource_allocation_error,
            VTABLE_ADD_FUNC(cexception_vector_dtor)
            VTABLE_ADD_FUNC(cexception_what));
__ASM_BLOCK_END
static Context* try_get_current_context(void)
{
    if (context_tls_index == TLS_OUT_OF_INDEXES)
        return NULL;
    return TlsGetValue(context_tls_index);
}

static BOOL WINAPI init_context_tls_index(INIT_ONCE *once, void *param, void **context)
{
    context_tls_index = TlsAlloc();
    return context_tls_index != TLS_OUT_OF_INDEXES;
}
static Context* get_current_context(void)
{
    static INIT_ONCE init_once = INIT_ONCE_STATIC_INIT;
    Context *ret;

    if(!InitOnceExecuteOnce(&init_once, init_context_tls_index, NULL, NULL))
    {
        scheduler_resource_allocation_error e;
        scheduler_resource_allocation_error_ctor_name(&e, NULL,
                HRESULT_FROM_WIN32(GetLastError()));
        _CxxThrowException(&e, &scheduler_resource_allocation_error_exception_type);
    }

    ret = TlsGetValue(context_tls_index);
    if (!ret) {
        ExternalContextBase *context = operator_new(sizeof(ExternalContextBase));
        ExternalContextBase_ctor(context);
        TlsSetValue(context_tls_index, context);
        ret = &context->context;
    }
    return ret;
}
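/*
 * Contexts are created lazily: the first get_current_context() call on a
 * thread allocates an ExternalContextBase, stores it in TLS and returns
 * it, so plain OS threads that never touched ConcRT still get a valid
 * context as soon as they enter the API.
 */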
static Scheduler* get_scheduler_from_context(Context *ctx)
{
    ExternalContextBase *context = (ExternalContextBase*)ctx;

    if (context->context.vtable != &ExternalContextBase_vtable)
        return NULL;
    return context->scheduler.scheduler;
}

static Scheduler* try_get_current_scheduler(void)
{
    Context *context = try_get_current_context();
    Scheduler *ret;

    if (!context)
        return NULL;

    ret = get_scheduler_from_context(context);
    if (!ret)
        ERR("unknown context set\n");
    return ret;
}

static Scheduler* get_current_scheduler(void)
{
    Context *context = get_current_context();
    Scheduler *ret;

    ret = get_scheduler_from_context(context);
    if (!ret)
        ERR("unknown context set\n");
    return ret;
}
/* ?CurrentContext@Context@Concurrency@@SAPAV12@XZ */
/* ?CurrentContext@Context@Concurrency@@SAPEAV12@XZ */
Context* __cdecl Context_CurrentContext(void)
{
    TRACE("()\n");
    return get_current_context();
}

/* ?Id@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_Id(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetId(ctx) : -1;
}

/* ?Block@Context@Concurrency@@SAXXZ */
void __cdecl Context_Block(void)
{
    Context *ctx = get_current_context();
    TRACE("()\n");
    call_Context_Block(ctx);
}

/* ?Yield@Context@Concurrency@@SAXXZ */
/* ?_Yield@_Context@details@Concurrency@@SAXXZ */
void __cdecl Context_Yield(void)
{
    FIXME("()\n");
}

/* ?_SpinYield@Context@Concurrency@@SAXXZ */
void __cdecl Context__SpinYield(void)
{
    FIXME("()\n");
}

/* ?IsCurrentTaskCollectionCanceling@Context@Concurrency@@SA_NXZ */
bool __cdecl Context_IsCurrentTaskCollectionCanceling(void)
{
    FIXME("()\n");
    return FALSE;
}

/* ?Oversubscribe@Context@Concurrency@@SAX_N@Z */
void __cdecl Context_Oversubscribe(bool begin)
{
    FIXME("(%x)\n", begin);
}

/* ?ScheduleGroupId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_ScheduleGroupId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetScheduleGroupId(ctx) : -1;
}

/* ?VirtualProcessorId@Context@Concurrency@@SAIXZ */
unsigned int __cdecl Context_VirtualProcessorId(void)
{
    Context *ctx = try_get_current_context();
    TRACE("()\n");
    return ctx ? call_Context_GetVirtualProcessorId(ctx) : -1;
}
/* ?_CurrentContext@_Context@details@Concurrency@@SA?AV123@XZ */
_Context *__cdecl _Context__CurrentContext(_Context *ret)
{
    TRACE("(%p)\n", ret);
    ret->context = Context_CurrentContext();
    return ret;
}

DEFINE_THISCALL_WRAPPER(_Context_IsSynchronouslyBlocked, 4)
BOOL __thiscall _Context_IsSynchronouslyBlocked(const _Context *this)
{
    TRACE("(%p)\n", this);
    return call_Context_IsSynchronouslyBlocked(this->context);
}
DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetId, 4)
unsigned int __thiscall ExternalContextBase_GetId(const ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetVirtualProcessorId, 4)
unsigned int __thiscall ExternalContextBase_GetVirtualProcessorId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetScheduleGroupId, 4)
unsigned int __thiscall ExternalContextBase_GetScheduleGroupId(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return -1;
}
DEFINE_THISCALL_WRAPPER(ExternalContextBase_Unblock, 4)
void __thiscall ExternalContextBase_Unblock(ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);

    /* TODO: throw context_unblock_unbalanced if this->blocked goes below -1 */
    if (!InterlockedDecrement(&this->blocked))
        RtlWakeAddressSingle(&this->blocked);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_IsSynchronouslyBlocked, 4)
bool __thiscall ExternalContextBase_IsSynchronouslyBlocked(const ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);
    return this->blocked >= 1;
}
DEFINE_THISCALL_WRAPPER(ExternalContextBase_Block, 4)
void __thiscall ExternalContextBase_Block(ExternalContextBase *this)
{
    LONG blocked;

    TRACE("(%p)->()\n", this);

    blocked = InterlockedIncrement(&this->blocked);
    while (blocked >= 1)
    {
        RtlWaitOnAddress(&this->blocked, &blocked, sizeof(LONG), NULL);
        blocked = this->blocked;
    }
}
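/*
 * Block/Unblock pair up through the 'blocked' counter: Block() increments
 * it and sleeps on the address while the value stays >= 1, Unblock()
 * decrements and wakes one waiter when it reaches zero.  An Unblock() that
 * arrives before the matching Block() starts waiting therefore cancels it.
 */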
DEFINE_THISCALL_WRAPPER(ExternalContextBase_Yield, 4)
void __thiscall ExternalContextBase_Yield(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_SpinYield, 4)
void __thiscall ExternalContextBase_SpinYield(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Oversubscribe, 8)
void __thiscall ExternalContextBase_Oversubscribe(
        ExternalContextBase *this, bool oversubscribe)
{
    FIXME("(%p)->(%x) stub\n", this, oversubscribe);
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Alloc, 8)
void* __thiscall ExternalContextBase_Alloc(ExternalContextBase *this, size_t size)
{
    FIXME("(%p)->(%Iu) stub\n", this, size);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_Free, 8)
void __thiscall ExternalContextBase_Free(ExternalContextBase *this, void *addr)
{
    FIXME("(%p)->(%p) stub\n", this, addr);
}
DEFINE_THISCALL_WRAPPER(ExternalContextBase_EnterCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_EnterCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_EnterHyperCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_EnterHyperCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_ExitCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_ExitCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_ExitHyperCriticalRegionHelper, 4)
int __thiscall ExternalContextBase_ExitHyperCriticalRegionHelper(ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetCriticalRegionType, 4)
int __thiscall ExternalContextBase_GetCriticalRegionType(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}

DEFINE_THISCALL_WRAPPER(ExternalContextBase_GetContextKind, 4)
int __thiscall ExternalContextBase_GetContextKind(const ExternalContextBase *this)
{
    FIXME("(%p)->() stub\n", this);
    return 0;
}
static void remove_scheduled_chores(Scheduler *scheduler, const ExternalContextBase *context)
{
    ThreadScheduler *tscheduler = (ThreadScheduler*)scheduler;
    struct scheduled_chore *sc, *next;

    if (tscheduler->scheduler.vtable != &ThreadScheduler_vtable)
        return;

    EnterCriticalSection(&tscheduler->cs);
    LIST_FOR_EACH_ENTRY_SAFE(sc, next, &tscheduler->scheduled_chores,
            struct scheduled_chore, entry) {
        if (sc->chore->task_collection->context == &context->context) {
            list_remove(&sc->entry);
            operator_delete(sc);
        }
    }
    LeaveCriticalSection(&tscheduler->cs);
}
static void ExternalContextBase_dtor(ExternalContextBase *this)
{
    struct scheduler_list *scheduler_cur, *scheduler_next;
    union allocator_cache_entry *next, *cur;
    int i;

    /* TODO: move the allocator cache to scheduler so it can be reused */
    for(i=0; i<ARRAY_SIZE(this->allocator_cache); i++) {
        for(cur = this->allocator_cache[i]; cur; cur=next) {
            next = cur->free.next;
            operator_delete(cur);
        }
    }

    if (this->scheduler.scheduler) {
        remove_scheduled_chores(this->scheduler.scheduler, this);
        call_Scheduler_Release(this->scheduler.scheduler);

        for(scheduler_cur=this->scheduler.next; scheduler_cur; scheduler_cur=scheduler_next) {
            scheduler_next = scheduler_cur->next;
            remove_scheduled_chores(scheduler_cur->scheduler, this);
            call_Scheduler_Release(scheduler_cur->scheduler);
            operator_delete(scheduler_cur);
        }
    }
}
DEFINE_THISCALL_WRAPPER(ExternalContextBase_vector_dtor, 8)
Context* __thiscall ExternalContextBase_vector_dtor(ExternalContextBase *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if (flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR*)this-1;

        for (i=*ptr-1; i>=0; i--)
            ExternalContextBase_dtor(this+i);
        operator_delete(ptr);
    } else {
        ExternalContextBase_dtor(this);
        if (flags & 1)
            operator_delete(this);
    }

    return &this->context;
}

static void ExternalContextBase_ctor(ExternalContextBase *this)
{
    TRACE("(%p)->()\n", this);

    memset(this, 0, sizeof(*this));
    this->context.vtable = &ExternalContextBase_vtable;
    this->id = InterlockedIncrement(&context_id);

    create_default_scheduler();
    this->scheduler.scheduler = &default_scheduler->scheduler;
    call_Scheduler_Reference(&default_scheduler->scheduler);
}
/* ?Alloc@Concurrency@@YAPAXI@Z */
/* ?Alloc@Concurrency@@YAPEAX_K@Z */
void * CDECL Concurrency_Alloc(size_t size)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    union allocator_cache_entry *p;

    size += FIELD_OFFSET(union allocator_cache_entry, alloc.mem);
    if (size < sizeof(*p))
        size = sizeof(*p);

    if (context->context.vtable != &ExternalContextBase_vtable) {
        p = operator_new(size);
        p->alloc.bucket = -1;
    }else {
        int i;

        C_ASSERT(sizeof(union allocator_cache_entry) <= 1 << 4);
        for(i=0; i<ARRAY_SIZE(context->allocator_cache); i++)
            if (1 << (i+4) >= size) break;

        if(i==ARRAY_SIZE(context->allocator_cache)) {
            p = operator_new(size);
            p->alloc.bucket = -1;
        }else if (context->allocator_cache[i]) {
            p = context->allocator_cache[i];
            context->allocator_cache[i] = p->free.next;
            p->alloc.bucket = i;
        }else {
            p = operator_new(1 << (i+4));
            p->alloc.bucket = i;
        }
    }

    TRACE("(%Iu) returning %p\n", size, p->alloc.mem);
    return p->alloc.mem;
}
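/*
 * Bucket i caches blocks of 1 << (i + 4) bytes, i.e. 16 bytes up to 2048
 * bytes across the 8 cache slots.  For example, a 24-byte request grows by
 * FIELD_OFFSET(union allocator_cache_entry, alloc.mem) and is served from
 * the first bucket large enough to hold it; oversized requests fall back
 * to plain operator_new and are marked with bucket -1 so Concurrency_Free
 * can tell cached and uncached blocks apart.
 */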
/* ?Free@Concurrency@@YAXPAX@Z */
/* ?Free@Concurrency@@YAXPEAX@Z */
void CDECL Concurrency_Free(void* mem)
{
    union allocator_cache_entry *p = (union allocator_cache_entry*)((char*)mem-FIELD_OFFSET(union allocator_cache_entry, alloc.mem));
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();
    int bucket = p->alloc.bucket;

    TRACE("(%p)\n", mem);

    if (context->context.vtable != &ExternalContextBase_vtable) {
        operator_delete(p);
    }else {
        if(bucket >= 0 && bucket < ARRAY_SIZE(context->allocator_cache) &&
            (!context->allocator_cache[bucket] || context->allocator_cache[bucket]->free.depth < 20)) {
            p->free.next = context->allocator_cache[bucket];
            p->free.depth = p->free.next ? p->free.next->free.depth+1 : 0;
            context->allocator_cache[bucket] = p;
        }else {
            operator_delete(p);
        }
    }
}
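/*
 * Only blocks carrying a valid bucket index are pushed back onto the
 * current context's free list, and only while that list holds fewer than
 * 20 entries; everything else goes straight back to operator_delete.
 */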
/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QAEIW4PolicyElementKey@2@I@Z */
/* ?SetPolicyValue@SchedulerPolicy@Concurrency@@QEAAIW4PolicyElementKey@2@I@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetPolicyValue, 12)
unsigned int __thiscall SchedulerPolicy_SetPolicyValue(SchedulerPolicy *this,
        PolicyElementKey policy, unsigned int val)
{
    unsigned int ret;

    TRACE("(%p %d %d)\n", this, policy, val);

    if (policy == MinConcurrency) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "MinConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    if (policy == MaxConcurrency) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "MaxConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    if (policy >= last_policy_id) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }

    switch(policy) {
    case SchedulerKind:
        if (val) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "SchedulerKind");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case TargetOversubscriptionFactor:
        if (!val) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "TargetOversubscriptionFactor");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case ContextPriority:
        if (((int)val < -7 /* THREAD_PRIORITY_REALTIME_LOWEST */
            || val > 6 /* THREAD_PRIORITY_REALTIME_HIGHEST */)
            && val != THREAD_PRIORITY_IDLE && val != THREAD_PRIORITY_TIME_CRITICAL
            && val != INHERIT_THREAD_PRIORITY) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "ContextPriority");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    case SchedulingProtocol:
    case DynamicProgressFeedback:
    case WinRTInitialization:
        if (val != 0 && val != 1) {
            invalid_scheduler_policy_value e;
            invalid_scheduler_policy_value_ctor_str(&e, "SchedulingProtocol");
            _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
        }
        break;
    default:
        break;
    }

    ret = this->policy_container->policies[policy];
    this->policy_container->policies[policy] = val;
    return ret;
}
/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QAEXII@Z */
/* ?SetConcurrencyLimits@SchedulerPolicy@Concurrency@@QEAAXII@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_SetConcurrencyLimits, 12)
void __thiscall SchedulerPolicy_SetConcurrencyLimits(SchedulerPolicy *this,
        unsigned int min_concurrency, unsigned int max_concurrency)
{
    TRACE("(%p %d %d)\n", this, min_concurrency, max_concurrency);

    if (min_concurrency > max_concurrency) {
        invalid_scheduler_policy_thread_specification e;
        invalid_scheduler_policy_thread_specification_ctor_str(&e, NULL);
        _CxxThrowException(&e, &invalid_scheduler_policy_thread_specification_exception_type);
    }
    if (!max_concurrency) {
        invalid_scheduler_policy_value e;
        invalid_scheduler_policy_value_ctor_str(&e, "MaxConcurrency");
        _CxxThrowException(&e, &invalid_scheduler_policy_value_exception_type);
    }

    this->policy_container->policies[MinConcurrency] = min_concurrency;
    this->policy_container->policies[MaxConcurrency] = max_concurrency;
}

/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QBEIW4PolicyElementKey@2@@Z */
/* ?GetPolicyValue@SchedulerPolicy@Concurrency@@QEBAIW4PolicyElementKey@2@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_GetPolicyValue, 8)
unsigned int __thiscall SchedulerPolicy_GetPolicyValue(
        const SchedulerPolicy *this, PolicyElementKey policy)
{
    TRACE("(%p %d)\n", this, policy);

    if (policy >= last_policy_id) {
        invalid_scheduler_policy_key e;
        invalid_scheduler_policy_key_ctor_str(&e, "Invalid policy");
        _CxxThrowException(&e, &invalid_scheduler_policy_key_exception_type);
    }
    return this->policy_container->policies[policy];
}
/* ??0SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_ctor, 4)
SchedulerPolicy* __thiscall SchedulerPolicy_ctor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);

    this->policy_container = operator_new(sizeof(*this->policy_container));
    /* TODO: default values can probably be affected by CurrentScheduler */
    this->policy_container->policies[SchedulerKind] = 0;
    this->policy_container->policies[MaxConcurrency] = -1;
    this->policy_container->policies[MinConcurrency] = 1;
    this->policy_container->policies[TargetOversubscriptionFactor] = 1;
    this->policy_container->policies[LocalContextCacheSize] = 8;
    this->policy_container->policies[ContextStackSize] = 0;
    this->policy_container->policies[ContextPriority] = THREAD_PRIORITY_NORMAL;
    this->policy_container->policies[SchedulingProtocol] = 0;
    this->policy_container->policies[DynamicProgressFeedback] = 1;
    this->policy_container->policies[WinRTInitialization] = 0;
    return this;
}
/* ??0SchedulerPolicy@Concurrency@@QAA@IZZ */
/* ??0SchedulerPolicy@Concurrency@@QEAA@_KZZ */
/* TODO: don't leak policy_container on exception */
SchedulerPolicy* WINAPIV SchedulerPolicy_ctor_policies(
        SchedulerPolicy *this, size_t n, ...)
{
    unsigned int min_concurrency, max_concurrency;
    va_list valist;
    size_t i;

    TRACE("(%p %Iu)\n", this, n);

    SchedulerPolicy_ctor(this);
    min_concurrency = this->policy_container->policies[MinConcurrency];
    max_concurrency = this->policy_container->policies[MaxConcurrency];

    va_start(valist, n);
    for(i=0; i<n; i++) {
        PolicyElementKey policy = va_arg(valist, PolicyElementKey);
        unsigned int val = va_arg(valist, unsigned int);

        if(policy == MinConcurrency)
            min_concurrency = val;
        else if(policy == MaxConcurrency)
            max_concurrency = val;
        else
            SchedulerPolicy_SetPolicyValue(this, policy, val);
    }
    va_end(valist);

    SchedulerPolicy_SetConcurrencyLimits(this, min_concurrency, max_concurrency);
    return this;
}
/* ??4SchedulerPolicy@Concurrency@@QAEAAV01@ABV01@@Z */
/* ??4SchedulerPolicy@Concurrency@@QEAAAEAV01@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_op_assign, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_op_assign(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    memcpy(this->policy_container->policies, rhs->policy_container->policies,
            sizeof(this->policy_container->policies));
    return this;
}

/* ??0SchedulerPolicy@Concurrency@@QAE@ABV01@@Z */
/* ??0SchedulerPolicy@Concurrency@@QEAA@AEBV01@@Z */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_copy_ctor, 8)
SchedulerPolicy* __thiscall SchedulerPolicy_copy_ctor(
        SchedulerPolicy *this, const SchedulerPolicy *rhs)
{
    TRACE("(%p %p)\n", this, rhs);
    SchedulerPolicy_ctor(this);
    return SchedulerPolicy_op_assign(this, rhs);
}

/* ??1SchedulerPolicy@Concurrency@@QAE@XZ */
/* ??1SchedulerPolicy@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(SchedulerPolicy_dtor, 4)
void __thiscall SchedulerPolicy_dtor(SchedulerPolicy *this)
{
    TRACE("(%p)\n", this);
    operator_delete(this->policy_container);
}
static void ThreadScheduler_dtor(ThreadScheduler *this)
{
    int i;
    struct scheduled_chore *sc, *next;

    if(this->ref != 0) WARN("ref = %ld\n", this->ref);
    SchedulerPolicy_dtor(&this->policy);

    for(i=0; i<this->shutdown_count; i++)
        SetEvent(this->shutdown_events[i]);
    operator_delete(this->shutdown_events);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);

    if (!list_empty(&this->scheduled_chores))
        ERR("scheduled chore list is not empty\n");
    LIST_FOR_EACH_ENTRY_SAFE(sc, next, &this->scheduled_chores,
            struct scheduled_chore, entry)
        operator_delete(sc);
}
DEFINE_THISCALL_WRAPPER(ThreadScheduler_Id, 4)
unsigned int __thiscall ThreadScheduler_Id(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->id;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetNumberOfVirtualProcessors, 4)
unsigned int __thiscall ThreadScheduler_GetNumberOfVirtualProcessors(const ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return this->virt_proc_no;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_GetPolicy, 8)
SchedulerPolicy* __thiscall ThreadScheduler_GetPolicy(
        const ThreadScheduler *this, SchedulerPolicy *ret)
{
    TRACE("(%p %p)\n", this, ret);
    return SchedulerPolicy_copy_ctor(ret, &this->policy);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Reference, 4)
unsigned int __thiscall ThreadScheduler_Reference(ThreadScheduler *this)
{
    TRACE("(%p)\n", this);
    return InterlockedIncrement(&this->ref);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_Release, 4)
unsigned int __thiscall ThreadScheduler_Release(ThreadScheduler *this)
{
    unsigned int ret = InterlockedDecrement(&this->ref);

    TRACE("(%p)\n", this);

    if (!ret) {
        ThreadScheduler_dtor(this);
        operator_delete(this);
    }
    return ret;
}
DEFINE_THISCALL_WRAPPER(ThreadScheduler_RegisterShutdownEvent, 8)
void __thiscall ThreadScheduler_RegisterShutdownEvent(ThreadScheduler *this, HANDLE event)
{
    HANDLE *shutdown_events;
    int size;

    TRACE("(%p %p)\n", this, event);

    EnterCriticalSection(&this->cs);

    if (this->shutdown_count >= this->shutdown_size) {
        size = this->shutdown_size ? this->shutdown_size * 2 : 1;
        shutdown_events = operator_new(size * sizeof(*shutdown_events));
        memcpy(shutdown_events, this->shutdown_events,
                this->shutdown_count * sizeof(*shutdown_events));
        operator_delete(this->shutdown_events);
        this->shutdown_size = size;
        this->shutdown_events = shutdown_events;
    }
    this->shutdown_events[this->shutdown_count++] = event;

    LeaveCriticalSection(&this->cs);
}
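/*
 * The shutdown event array grows geometrically (1, 2, 4, ...), keeping
 * RegisterShutdownEvent amortized O(1) per registration; every registered
 * event is signalled from ThreadScheduler_dtor.
 */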
DEFINE_THISCALL_WRAPPER(ThreadScheduler_Attach, 4)
void __thiscall ThreadScheduler_Attach(ThreadScheduler *this)
{
    ExternalContextBase *context = (ExternalContextBase*)get_current_context();

    TRACE("(%p)\n", this);

    if(context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(context->scheduler.scheduler == &this->scheduler) {
        improper_scheduler_attach e;
        improper_scheduler_attach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_attach_exception_type);
    }

    if(context->scheduler.scheduler) {
        struct scheduler_list *l = operator_new(sizeof(*l));
        *l = context->scheduler;
        context->scheduler.next = l;
    }
    context->scheduler.scheduler = &this->scheduler;
    ThreadScheduler_Reference(this);
}
DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup_loc, 8)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup_loc(
        ThreadScheduler *this, /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return NULL;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_CreateScheduleGroup, 4)
/*ScheduleGroup*/void* __thiscall ThreadScheduler_CreateScheduleGroup(ThreadScheduler *this)
{
    FIXME("(%p) stub\n", this);
    return NULL;
}

typedef struct
{
    void (__cdecl *proc)(void*);
    void *data;
    ThreadScheduler *scheduler;
} schedule_task_arg;

void __cdecl CurrentScheduler_Detach(void);
static void WINAPI schedule_task_proc(PTP_CALLBACK_INSTANCE instance, void *context, PTP_WORK work)
{
    schedule_task_arg arg;
    BOOL detach = FALSE;

    arg = *(schedule_task_arg*)context;
    operator_delete(context);

    if(&arg.scheduler->scheduler != get_current_scheduler()) {
        ThreadScheduler_Attach(arg.scheduler);
        detach = TRUE;
    }
    ThreadScheduler_Release(arg.scheduler);

    arg.proc(arg.data);

    if(detach)
        CurrentScheduler_Detach();
}
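/*
 * schedule_task_proc is the threadpool trampoline: it copies and frees the
 * heap-allocated argument block, attaches the target scheduler if it is
 * not current on this worker thread, drops the reference taken by
 * ScheduleTask_loc, runs the user proc, and detaches again if it attached.
 */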
DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask_loc, 16)
void __thiscall ThreadScheduler_ScheduleTask_loc(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data, /*location*/void *placement)
{
    schedule_task_arg *arg;
    TP_WORK *work;

    FIXME("(%p %p %p %p) stub\n", this, proc, data, placement);

    arg = operator_new(sizeof(*arg));
    arg->proc = proc;
    arg->data = data;
    arg->scheduler = this;
    ThreadScheduler_Reference(this);

    work = CreateThreadpoolWork(schedule_task_proc, arg, NULL);
    if(!work) {
        scheduler_resource_allocation_error e;

        ThreadScheduler_Release(this);
        operator_delete(arg);
        scheduler_resource_allocation_error_ctor_name(&e, NULL,
                HRESULT_FROM_WIN32(GetLastError()));
        _CxxThrowException(&e, &scheduler_resource_allocation_error_exception_type);
    }
    SubmitThreadpoolWork(work);
    CloseThreadpoolWork(work);
}
DEFINE_THISCALL_WRAPPER(ThreadScheduler_ScheduleTask, 12)
void __thiscall ThreadScheduler_ScheduleTask(ThreadScheduler *this,
        void (__cdecl *proc)(void*), void* data)
{
    FIXME("(%p %p %p) stub\n", this, proc, data);
    ThreadScheduler_ScheduleTask_loc(this, proc, data, NULL);
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_IsAvailableLocation, 8)
bool __thiscall ThreadScheduler_IsAvailableLocation(
        const ThreadScheduler *this, const /*location*/void *placement)
{
    FIXME("(%p %p) stub\n", this, placement);
    return FALSE;
}

DEFINE_THISCALL_WRAPPER(ThreadScheduler_vector_dtor, 8)
Scheduler* __thiscall ThreadScheduler_vector_dtor(ThreadScheduler *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if (flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR*)this-1;

        for (i=*ptr-1; i>=0; i--)
            ThreadScheduler_dtor(this+i);
        operator_delete(ptr);
    } else {
        ThreadScheduler_dtor(this);
        if (flags & 1)
            operator_delete(this);
    }

    return &this->scheduler;
}
static ThreadScheduler* ThreadScheduler_ctor(ThreadScheduler *this,
        const SchedulerPolicy *policy)
{
    SYSTEM_INFO si;

    TRACE("(%p)->()\n", this);

    this->scheduler.vtable = &ThreadScheduler_vtable;
    this->ref = 1;
    this->id = InterlockedIncrement(&scheduler_id);
    SchedulerPolicy_copy_ctor(&this->policy, policy);

    GetSystemInfo(&si);
    this->virt_proc_no = SchedulerPolicy_GetPolicyValue(&this->policy, MaxConcurrency);
    if(this->virt_proc_no > si.dwNumberOfProcessors)
        this->virt_proc_no = si.dwNumberOfProcessors;

    this->shutdown_count = this->shutdown_size = 0;
    this->shutdown_events = NULL;

    InitializeCriticalSection(&this->cs);
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": ThreadScheduler");

    list_init(&this->scheduled_chores);
    return this;
}
/* ?Create@Scheduler@Concurrency@@SAPAV12@ABVSchedulerPolicy@2@@Z */
/* ?Create@Scheduler@Concurrency@@SAPEAV12@AEBVSchedulerPolicy@2@@Z */
Scheduler* __cdecl Scheduler_Create(const SchedulerPolicy *policy)
{
    ThreadScheduler *ret;

    TRACE("(%p)\n", policy);

    ret = operator_new(sizeof(*ret));
    return &ThreadScheduler_ctor(ret, policy)->scheduler;
}

/* ?ResetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXXZ */
void __cdecl Scheduler_ResetDefaultSchedulerPolicy(void)
{
    TRACE("()\n");

    EnterCriticalSection(&default_scheduler_cs);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    SchedulerPolicy_ctor(&default_scheduler_policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?SetDefaultSchedulerPolicy@Scheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl Scheduler_SetDefaultSchedulerPolicy(const SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler_policy.policy_container)
        SchedulerPolicy_copy_ctor(&default_scheduler_policy, policy);
    else
        SchedulerPolicy_op_assign(&default_scheduler_policy, policy);
    LeaveCriticalSection(&default_scheduler_cs);
}

/* ?Create@CurrentScheduler@Concurrency@@SAXABVSchedulerPolicy@2@@Z */
/* ?Create@CurrentScheduler@Concurrency@@SAXAEBVSchedulerPolicy@2@@Z */
void __cdecl CurrentScheduler_Create(const SchedulerPolicy *policy)
{
    Scheduler *scheduler;

    TRACE("(%p)\n", policy);

    scheduler = Scheduler_Create(policy);
    call_Scheduler_Attach(scheduler);
}
/* ?Detach@CurrentScheduler@Concurrency@@SAXXZ */
void __cdecl CurrentScheduler_Detach(void)
{
    ExternalContextBase *context = (ExternalContextBase*)try_get_current_context();

    TRACE("()\n");

    if(!context) {
        improper_scheduler_detach e;
        improper_scheduler_detach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
    }

    if(context->context.vtable != &ExternalContextBase_vtable) {
        ERR("unknown context set\n");
        return;
    }

    if(!context->scheduler.next) {
        improper_scheduler_detach e;
        improper_scheduler_detach_ctor_str(&e, NULL);
        _CxxThrowException(&e, &improper_scheduler_detach_exception_type);
    }

    call_Scheduler_Release(context->scheduler.scheduler);
    if(!context->scheduler.next) {
        context->scheduler.scheduler = NULL;
    }else {
        struct scheduler_list *entry = context->scheduler.next;
        context->scheduler.scheduler = entry->scheduler;
        context->scheduler.next = entry->next;
        operator_delete(entry);
    }
}
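/*
 * Attach/Detach behave like a stack: ThreadScheduler_Attach pushes the
 * previously current scheduler onto context->scheduler.next, and
 * CurrentScheduler_Detach pops it back, throwing improper_scheduler_detach
 * when there is nothing left to pop.
 */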
static void create_default_scheduler(void)
{
    if(default_scheduler)
        return;

    EnterCriticalSection(&default_scheduler_cs);
    if(!default_scheduler) {
        ThreadScheduler *scheduler;

        if(!default_scheduler_policy.policy_container)
            SchedulerPolicy_ctor(&default_scheduler_policy);

        scheduler = operator_new(sizeof(*scheduler));
        ThreadScheduler_ctor(scheduler, &default_scheduler_policy);
        default_scheduler = scheduler;
    }
    LeaveCriticalSection(&default_scheduler_cs);
}
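/*
 * Classic double-checked creation: the unlocked default_scheduler test is
 * only a fast path; the decision to allocate is repeated under
 * default_scheduler_cs, so racing first callers cannot end up creating
 * two default schedulers.
 */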
/* ?Get@CurrentScheduler@Concurrency@@SAPAVScheduler@2@XZ */
/* ?Get@CurrentScheduler@Concurrency@@SAPEAVScheduler@2@XZ */
Scheduler* __cdecl CurrentScheduler_Get(void)
{
    TRACE("()\n");
    return get_current_scheduler();
}

#if _MSVCR_VER > 100
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@AAVlocation@2@@Z */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@AEAVlocation@2@@Z */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup_loc(/*location*/void *placement)
{
    TRACE("(%p)\n", placement);
    return call_Scheduler_CreateScheduleGroup_loc(get_current_scheduler(), placement);
}
#endif

/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPAVScheduleGroup@2@XZ */
/* ?CreateScheduleGroup@CurrentScheduler@Concurrency@@SAPEAVScheduleGroup@2@XZ */
/*ScheduleGroup*/void* __cdecl CurrentScheduler_CreateScheduleGroup(void)
{
    TRACE("()\n");
    return call_Scheduler_CreateScheduleGroup(get_current_scheduler());
}
/* ?GetNumberOfVirtualProcessors@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_GetNumberOfVirtualProcessors(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_GetNumberOfVirtualProcessors(scheduler);
}

/* ?GetPolicy@CurrentScheduler@Concurrency@@SA?AVSchedulerPolicy@2@XZ */
SchedulerPolicy* __cdecl CurrentScheduler_GetPolicy(SchedulerPolicy *policy)
{
    TRACE("(%p)\n", policy);
    return call_Scheduler_GetPolicy(get_current_scheduler(), policy);
}

/* ?Id@CurrentScheduler@Concurrency@@SAIXZ */
unsigned int __cdecl CurrentScheduler_Id(void)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("()\n");

    if(!scheduler)
        return -1;
    return call_Scheduler_Id(scheduler);
}
#if _MSVCR_VER > 100
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NABVlocation@2@@Z */
/* ?IsAvailableLocation@CurrentScheduler@Concurrency@@SA_NAEBVlocation@2@@Z */
bool __cdecl CurrentScheduler_IsAvailableLocation(const /*location*/void *placement)
{
    Scheduler *scheduler = try_get_current_scheduler();

    TRACE("(%p)\n", placement);

    if(!scheduler)
        return FALSE;
    return call_Scheduler_IsAvailableLocation(scheduler, placement);
}
#endif

/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPAX@Z */
/* ?RegisterShutdownEvent@CurrentScheduler@Concurrency@@SAXPEAX@Z */
void __cdecl CurrentScheduler_RegisterShutdownEvent(HANDLE event)
{
    TRACE("(%p)\n", event);
    call_Scheduler_RegisterShutdownEvent(get_current_scheduler(), event);
}
#if _MSVCR_VER > 100
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0AAVlocation@2@@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0AEAVlocation@2@@Z */
void __cdecl CurrentScheduler_ScheduleTask_loc(void (__cdecl *proc)(void*),
        void *data, /*location*/void *placement)
{
    TRACE("(%p %p %p)\n", proc, data, placement);
    call_Scheduler_ScheduleTask_loc(get_current_scheduler(), proc, data, placement);
}
#endif

/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?ScheduleTask@CurrentScheduler@Concurrency@@SAXP6AXPEAX@Z0@Z */
void __cdecl CurrentScheduler_ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    call_Scheduler_ScheduleTask(get_current_scheduler(), proc, data);
}
1813 /* ??0_Scheduler@details@Concurrency@@QAE@PAVScheduler@2@@Z */
1814 /* ??0_Scheduler@details@Concurrency@@QEAA@PEAVScheduler@2@@Z */
1815 DEFINE_THISCALL_WRAPPER(_Scheduler_ctor_sched
, 8)
1816 _Scheduler
* __thiscall
_Scheduler_ctor_sched(_Scheduler
*this, Scheduler
*scheduler
)
1818 TRACE("(%p %p)\n", this, scheduler
);
1820 this->scheduler
= scheduler
;
1824 /* ??_F_Scheduler@details@Concurrency@@QAEXXZ */
1825 /* ??_F_Scheduler@details@Concurrency@@QEAAXXZ */
1826 DEFINE_THISCALL_WRAPPER(_Scheduler_ctor
, 4)
1827 _Scheduler
* __thiscall
_Scheduler_ctor(_Scheduler
*this)
1829 return _Scheduler_ctor_sched(this, NULL
);
/* ?_GetScheduler@_Scheduler@details@Concurrency@@QAEPAVScheduler@3@XZ */
/* ?_GetScheduler@_Scheduler@details@Concurrency@@QEAAPEAVScheduler@3@XZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__GetScheduler, 4)
Scheduler* __thiscall _Scheduler__GetScheduler(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return this->scheduler;
}
/* ?_Reference@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Reference@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Reference, 4)
unsigned int __thiscall _Scheduler__Reference(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Reference(this->scheduler);
}
/* ?_Release@_Scheduler@details@Concurrency@@QAEIXZ */
/* ?_Release@_Scheduler@details@Concurrency@@QEAAIXZ */
DEFINE_THISCALL_WRAPPER(_Scheduler__Release, 4)
unsigned int __thiscall _Scheduler__Release(_Scheduler *this)
{
    TRACE("(%p)\n", this);
    return call_Scheduler_Release(this->scheduler);
}
/* ?_Get@_CurrentScheduler@details@Concurrency@@SA?AV_Scheduler@23@XZ */
_Scheduler* __cdecl _CurrentScheduler__Get(_Scheduler *ret)
{
    TRACE("()\n");
    return _Scheduler_ctor_sched(ret, get_current_scheduler());
}
/* ?_GetNumberOfVirtualProcessors@_CurrentScheduler@details@Concurrency@@SAIXZ */
unsigned int __cdecl _CurrentScheduler__GetNumberOfVirtualProcessors(void)
{
    TRACE("()\n");
    get_current_scheduler();
    return CurrentScheduler_GetNumberOfVirtualProcessors();
}
/* ?_Id@_CurrentScheduler@details@Concurrency@@SAIXZ */
unsigned int __cdecl _CurrentScheduler__Id(void)
{
    TRACE("()\n");
    get_current_scheduler();
    return CurrentScheduler_Id();
}
/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPAX@Z0@Z */
/* ?_ScheduleTask@_CurrentScheduler@details@Concurrency@@SAXP6AXPEAX@Z0@Z */
void __cdecl _CurrentScheduler__ScheduleTask(void (__cdecl *proc)(void*), void *data)
{
    TRACE("(%p %p)\n", proc, data);
    CurrentScheduler_ScheduleTask(proc, data);
}
/* ?_Value@_SpinCount@details@Concurrency@@SAIXZ */
unsigned int __cdecl SpinCount__Value(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors>1 ? 4000 : 0;
    }

    return val;
}
/* ??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$00@details@Concurrency@@QEAA@P6AXXZ@Z */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor_yield, 8)
SpinWait* __thiscall SpinWait_ctor_yield(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 1;
    this->yield_func = yf;
    return this;
}
/* ??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$0A@@details@Concurrency@@QEAA@P6AXXZ@Z */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor, 8)
SpinWait* __thiscall SpinWait_ctor(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 0;
    this->yield_func = yf;
    return this;
}
/* ??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$00@details@Concurrency@@QEAAXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait_dtor, 4)
void __thiscall SpinWait_dtor(SpinWait *this)
{
    TRACE("(%p)\n", this);
}
/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__DoYield, 4)
void __thiscall SpinWait__DoYield(SpinWait *this)
{
    TRACE("(%p)\n", this);

    if(this->unknown)
        this->yield_func();
}
/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IEAAKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IEAAKXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__NumberOfSpins, 4)
ULONG __thiscall SpinWait__NumberOfSpins(SpinWait *this)
{
    TRACE("(%p)\n", this);
    return 1;
}
/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QEAAXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QEAAXI@Z */
DEFINE_THISCALL_WRAPPER(SpinWait__SetSpinCount, 8)
void __thiscall SpinWait__SetSpinCount(SpinWait *this, unsigned int spin)
{
    TRACE("(%p %d)\n", this, spin);

    this->spin = spin;
    this->state = spin ? SPINWAIT_SPIN : SPINWAIT_YIELD;
}
/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__Reset, 4)
void __thiscall SpinWait__Reset(SpinWait *this)
{
    SpinWait__SetSpinCount(this, SpinCount__Value());
}
/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IEAA_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IEAA_NXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__ShouldSpinAgain, 4)
bool __thiscall SpinWait__ShouldSpinAgain(SpinWait *this)
{
    TRACE("(%p)\n", this);

    this->spin--;
    return this->spin > 0;
}
/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ */
/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QEAA_NXZ */
/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ */
/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__SpinOnce, 4)
bool __thiscall SpinWait__SpinOnce(SpinWait *this)
{
    switch(this->state) {
    case SPINWAIT_INIT:
        SpinWait__Reset(this);
        /* fall through */
    case SPINWAIT_SPIN:
        InterlockedDecrement((LONG*)&this->spin);
        if(!this->spin)
            this->state = this->unknown ? SPINWAIT_YIELD : SPINWAIT_DONE;
        return TRUE;
    case SPINWAIT_YIELD:
        this->state = SPINWAIT_DONE;
        this->yield_func();
        return TRUE;
    default:
        SpinWait__Reset(this);
        return FALSE;
    }
}
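/* Illustrative sketch (not part of the build): how a caller typically drives
 * the _SpinWait state machine above. The SPINWAIT_INIT/SPIN/YIELD/DONE
 * transitions happen inside SpinWait__SpinOnce(); the loop below just keeps
 * calling it until the waited-for condition holds. The yield callback
 * "my_yield" is a hypothetical placeholder. */
#if 0
static void my_yield(void) { Sleep(0); }

static void spin_until(volatile LONG *flag)
{
    SpinWait sw;

    SpinWait_ctor(&sw, &my_yield);   /* non-yielding template instantiation */
    SpinWait__Reset(&sw);            /* loads spin count from SpinCount__Value() */
    while (!*flag)
        SpinWait__SpinOnce(&sw);     /* spins, then yields, then reports DONE */
    SpinWait_dtor(&sw);
}
#endif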
#if _MSVCR_VER >= 110

/* ??0_StructuredTaskCollection@details@Concurrency@@QAE@PAV_CancellationTokenState@12@@Z */
/* ??0_StructuredTaskCollection@details@Concurrency@@QEAA@PEAV_CancellationTokenState@12@@Z */
DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection_ctor, 8)
_StructuredTaskCollection* __thiscall _StructuredTaskCollection_ctor(
        _StructuredTaskCollection *this, /*_CancellationTokenState*/void *token)
{
    TRACE("(%p)\n", this);

    if (token)
        FIXME("_StructuredTaskCollection with cancellation token not implemented!\n");

    memset(this, 0, sizeof(*this));
    this->finished = FINISHED_INITIAL;
    return this;
}

#endif /* _MSVCR_VER >= 110 */
#if _MSVCR_VER >= 120

/* ??1_StructuredTaskCollection@details@Concurrency@@QAA@XZ */
/* ??1_StructuredTaskCollection@details@Concurrency@@QAE@XZ */
/* ??1_StructuredTaskCollection@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection_dtor, 4)
void __thiscall _StructuredTaskCollection_dtor(_StructuredTaskCollection *this)
{
    FIXME("(%p): stub!\n", this);

    if (this->count && !__uncaught_exception()) {
        missing_wait e;
        missing_wait_ctor_str(&e, "Missing call to _RunAndWait");
        _CxxThrowException(&e, &missing_wait_exception_type);
    }
}

#endif /* _MSVCR_VER >= 120 */
static ThreadScheduler *get_thread_scheduler_from_context(Context *context)
{
    Scheduler *scheduler = get_scheduler_from_context(context);
    if (scheduler && scheduler->vtable == &ThreadScheduler_vtable)
        return (ThreadScheduler*)scheduler;
    return NULL;
}

struct execute_chore_data
{
    _UnrealizedChore *chore;
    _StructuredTaskCollection *task_collection;
};
/* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QAAXXZ */
/* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QAEXXZ */
/* ?_Cancel@_StructuredTaskCollection@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Cancel, 4)
void __thiscall _StructuredTaskCollection__Cancel(
        _StructuredTaskCollection *this)
{
    ThreadScheduler *scheduler;
    void *prev_exception, *new_exception;
    struct scheduled_chore *sc, *next;
    LONG removed = 0;
    LONG prev_finished, new_finished;

    TRACE("(%p)\n", this);

    if (!this->context)
        this->context = get_current_context();
    scheduler = get_thread_scheduler_from_context(this->context);
    if (!scheduler)
        return;

    new_exception = this->exception;
    do {
        prev_exception = new_exception;
        if ((ULONG_PTR)prev_exception & STRUCTURED_TASK_COLLECTION_CANCELLED)
            return;
        new_exception = (void*)((ULONG_PTR)prev_exception |
                STRUCTURED_TASK_COLLECTION_CANCELLED);
    } while ((new_exception = InterlockedCompareExchangePointer(
                    &this->exception, new_exception, prev_exception))
             != prev_exception);

    EnterCriticalSection(&scheduler->cs);
    LIST_FOR_EACH_ENTRY_SAFE(sc, next, &scheduler->scheduled_chores,
                             struct scheduled_chore, entry) {
        if (sc->chore->task_collection != this)
            continue;
        sc->chore->task_collection = NULL;
        list_remove(&sc->entry);
        removed++;
        operator_delete(sc);
    }
    LeaveCriticalSection(&scheduler->cs);
    if (!removed)
        return;

    new_finished = this->finished;
    do {
        prev_finished = new_finished;
        if (prev_finished == FINISHED_INITIAL)
            new_finished = removed;
        else
            new_finished = prev_finished + removed;
    } while ((new_finished = InterlockedCompareExchange(&this->finished,
                    new_finished, prev_finished)) != prev_finished);
    RtlWakeAddressAll((LONG*)&this->finished);
}
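/* Illustrative sketch (not part of the build): the "exception" field above is
 * a tagged pointer. The low bits covered by
 * STRUCTURED_TASK_COLLECTION_STATUS_MASK carry status flags such as
 * STRUCTURED_TASK_COLLECTION_CANCELLED, while the remaining bits hold an
 * exception_ptr* recorded when a chore throws. Splitting the word looks like: */
#if 0
static void inspect_status(_StructuredTaskCollection *col)
{
    ULONG_PTR word = (ULONG_PTR)col->exception;
    exception_ptr *ep = (exception_ptr*)(word & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK);
    BOOL cancelled = !!(word & STRUCTURED_TASK_COLLECTION_CANCELLED);

    /* ep is non-NULL only after a chore raised a C++ exception;
     * cancelled is set by _StructuredTaskCollection__Cancel() above */
    (void)ep; (void)cancelled;
}
#endif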
static LONG CALLBACK execute_chore_except(EXCEPTION_POINTERS *pexc, void *_data)
{
    struct execute_chore_data *data = _data;
    void *prev_exception, *new_exception;
    exception_ptr *ptr;

    TRACE("(%p %p)\n", pexc, data);

    if (pexc->ExceptionRecord->ExceptionCode != CXX_EXCEPTION)
        return EXCEPTION_CONTINUE_SEARCH;

    _StructuredTaskCollection__Cancel(data->task_collection);

    ptr = operator_new(sizeof(*ptr));
    __ExceptionPtrCreate(ptr);
    exception_ptr_from_record(ptr, pexc->ExceptionRecord);

    new_exception = data->task_collection->exception;
    do {
        if ((ULONG_PTR)new_exception & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK) {
            __ExceptionPtrDestroy(ptr);
            operator_delete(ptr);
            break;
        }
        prev_exception = new_exception;
        new_exception = (void*)((ULONG_PTR)new_exception | (ULONG_PTR)ptr);
    } while ((new_exception = InterlockedCompareExchangePointer(
                    &data->task_collection->exception, new_exception,
                    prev_exception)) != prev_exception);
    data->task_collection->event = 0;
    return EXCEPTION_EXECUTE_HANDLER;
}
static void execute_chore(_UnrealizedChore *chore,
        _StructuredTaskCollection *task_collection)
{
    struct execute_chore_data data = { chore, task_collection };

    TRACE("(%p %p)\n", chore, task_collection);

    __TRY
    {
        if (!((ULONG_PTR)task_collection->exception & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK) &&
                chore->chore_proc)
            chore->chore_proc(chore);
    }
    __EXCEPT_CTX(execute_chore_except, &data)
    {
    }
    __ENDTRY
}
static void CALLBACK chore_wrapper_finally(BOOL normal, void *data)
{
    _UnrealizedChore *chore = data;
    LONG count, prev_finished, new_finished;
    volatile LONG *ptr;

    TRACE("(%u %p)\n", normal, data);

    if (!chore->task_collection)
        return;
    ptr = &chore->task_collection->finished;
    count = chore->task_collection->count;
    chore->task_collection = NULL;

    do {
        prev_finished = *ptr;
        if (prev_finished == FINISHED_INITIAL)
            new_finished = 1;
        else
            new_finished = prev_finished + 1;
    } while (InterlockedCompareExchange(ptr, new_finished, prev_finished)
             != prev_finished);
    if (new_finished >= count)
        RtlWakeAddressSingle((LONG*)ptr);
}
static void __cdecl chore_wrapper(_UnrealizedChore *chore)
{
    __TRY
    {
        execute_chore(chore, chore->task_collection);
    }
    __FINALLY_CTX(chore_wrapper_finally, chore)
}
static BOOL pick_and_execute_chore(ThreadScheduler *scheduler)
{
    struct list *entry;
    struct scheduled_chore *sc;
    _UnrealizedChore *chore;

    TRACE("(%p)\n", scheduler);

    if (scheduler->scheduler.vtable != &ThreadScheduler_vtable)
    {
        ERR("unknown scheduler set\n");
        return FALSE;
    }

    EnterCriticalSection(&scheduler->cs);
    entry = list_head(&scheduler->scheduled_chores);
    if (entry)
        list_remove(entry);
    LeaveCriticalSection(&scheduler->cs);
    if (!entry)
        return FALSE;

    sc = LIST_ENTRY(entry, struct scheduled_chore, entry);
    chore = sc->chore;
    operator_delete(sc);

    chore->chore_wrapper(chore);
    return TRUE;
}

static void __cdecl _StructuredTaskCollection_scheduler_cb(void *data)
{
    pick_and_execute_chore((ThreadScheduler*)get_current_scheduler());
}
static bool schedule_chore(_StructuredTaskCollection *this,
        _UnrealizedChore *chore, Scheduler **pscheduler)
{
    struct scheduled_chore *sc;
    ThreadScheduler *scheduler;

    if (chore->task_collection) {
        invalid_multiple_scheduling e;
        invalid_multiple_scheduling_ctor_str(&e, "Chore scheduled multiple times");
        _CxxThrowException(&e, &invalid_multiple_scheduling_exception_type);
    }

    if (!this->context)
        this->context = get_current_context();
    scheduler = get_thread_scheduler_from_context(this->context);
    if (!scheduler) {
        ERR("unknown context or scheduler set\n");
        return FALSE;
    }

    sc = operator_new(sizeof(*sc));
    sc->chore = chore;

    chore->task_collection = this;
    chore->chore_wrapper = chore_wrapper;
    InterlockedIncrement(&this->count);

    EnterCriticalSection(&scheduler->cs);
    list_add_head(&scheduler->scheduled_chores, &sc->entry);
    LeaveCriticalSection(&scheduler->cs);
    *pscheduler = &scheduler->scheduler;
    return TRUE;
}
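/* Illustrative sketch (not part of the build): the queue handoff implemented
 * above. The producer (schedule_chore) and the consumer
 * (pick_and_execute_chore) both serialize on scheduler->cs, so the chore list
 * itself needs no interlocked operations. */
#if 0
static void queue_handoff_shape(ThreadScheduler *scheduler, struct scheduled_chore *sc)
{
    EnterCriticalSection(&scheduler->cs);
    list_add_head(&scheduler->scheduled_chores, &sc->entry);  /* producer side */
    LeaveCriticalSection(&scheduler->cs);

    while (pick_and_execute_chore(scheduler)) ;               /* consumer drains */
}
#endif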
#if _MSVCR_VER >= 110

/* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAAXPAV_UnrealizedChore@23@PAVlocation@3@@Z */
/* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@PAVlocation@3@@Z */
/* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QEAAXPEAV_UnrealizedChore@23@PEAVlocation@3@@Z */
DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Schedule_loc, 12)
void __thiscall _StructuredTaskCollection__Schedule_loc(
        _StructuredTaskCollection *this, _UnrealizedChore *chore,
        /*location*/void *placement)
{
    Scheduler *scheduler;

    TRACE("(%p %p %p)\n", this, chore, placement);

    if (schedule_chore(this, chore, &scheduler))
        call_Scheduler_ScheduleTask_loc(scheduler,
                _StructuredTaskCollection_scheduler_cb, NULL, placement);
}

#endif /* _MSVCR_VER >= 110 */
/* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAAXPAV_UnrealizedChore@23@@Z */
/* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QAEXPAV_UnrealizedChore@23@@Z */
/* ?_Schedule@_StructuredTaskCollection@details@Concurrency@@QEAAXPEAV_UnrealizedChore@23@@Z */
DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__Schedule, 8)
void __thiscall _StructuredTaskCollection__Schedule(
        _StructuredTaskCollection *this, _UnrealizedChore *chore)
{
    Scheduler *scheduler;

    TRACE("(%p %p)\n", this, chore);

    if (schedule_chore(this, chore, &scheduler))
        call_Scheduler_ScheduleTask(scheduler,
                _StructuredTaskCollection_scheduler_cb, NULL);
}
static void CALLBACK exception_ptr_rethrow_finally(BOOL normal, void *data)
{
    exception_ptr *ep = data;

    TRACE("(%u %p)\n", normal, data);

    __ExceptionPtrDestroy(ep);
    operator_delete(ep);
}
/* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QAA?AW4_TaskCollectionStatus@23@PAV_UnrealizedChore@23@@Z */
/* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QAG?AW4_TaskCollectionStatus@23@PAV_UnrealizedChore@23@@Z */
/* ?_RunAndWait@_StructuredTaskCollection@details@Concurrency@@QEAA?AW4_TaskCollectionStatus@23@PEAV_UnrealizedChore@23@@Z */
_TaskCollectionStatus __stdcall _StructuredTaskCollection__RunAndWait(
        _StructuredTaskCollection *this, _UnrealizedChore *chore)
{
    LONG expected, val;
    ULONG_PTR exception;
    exception_ptr *ep;

    TRACE("(%p %p)\n", this, chore);

    if (chore) {
        if (chore->task_collection) {
            invalid_multiple_scheduling e;
            invalid_multiple_scheduling_ctor_str(&e, "Chore scheduled multiple times");
            _CxxThrowException(&e, &invalid_multiple_scheduling_exception_type);
        }
        execute_chore(chore, this);
    }

    if (this->context) {
        ThreadScheduler *scheduler = get_thread_scheduler_from_context(this->context);
        if (scheduler)
            while (pick_and_execute_chore(scheduler)) ;
    }

    expected = this->count ? this->count : FINISHED_INITIAL;
    while ((val = this->finished) != expected)
        RtlWaitOnAddress((LONG*)&this->finished, &val, sizeof(val), NULL);

    this->finished = 0;
    this->count = 0;

    exception = (ULONG_PTR)this->exception;
    ep = (exception_ptr*)(exception & ~STRUCTURED_TASK_COLLECTION_STATUS_MASK);
    if (ep) {
        this->exception = 0;
        __TRY
        {
            __ExceptionPtrRethrow(ep);
        }
        __FINALLY_CTX(exception_ptr_rethrow_finally, ep)
    }
    if (exception & STRUCTURED_TASK_COLLECTION_CANCELLED)
        return TASK_COLLECTION_CANCELLED;
    return TASK_COLLECTION_SUCCESS;
}
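/* Illustrative sketch (not part of the build): the calling convention the
 * functions above implement. Code built against ConcRT does roughly the
 * following; "my_chore_proc" and the stack chore are hypothetical. */
#if 0
static void __cdecl my_chore_proc(_UnrealizedChore *chore) { /* do work */ }

static void run_one_chore(void)
{
    _StructuredTaskCollection col;
    _UnrealizedChore chore = { 0 };

    chore.chore_proc = my_chore_proc;

    _StructuredTaskCollection_ctor(&col, NULL);
    _StructuredTaskCollection__Schedule(&col, &chore);
    /* _RunAndWait() helps drain the scheduler queue, then blocks on
     * "finished" until every scheduled chore completed (or rethrows). */
    _StructuredTaskCollection__RunAndWait(&col, NULL);
}
#endif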
/* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QAA_NXZ */
/* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QAE_NXZ */
/* ?_IsCanceling@_StructuredTaskCollection@details@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(_StructuredTaskCollection__IsCanceling, 4)
bool __thiscall _StructuredTaskCollection__IsCanceling(
        _StructuredTaskCollection *this)
{
    TRACE("(%p)\n", this);
    return !!((ULONG_PTR)this->exception & STRUCTURED_TASK_COLLECTION_CANCELLED);
}
/* ?_CheckTaskCollection@_UnrealizedChore@details@Concurrency@@IAEXXZ */
/* ?_CheckTaskCollection@_UnrealizedChore@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(_UnrealizedChore__CheckTaskCollection, 4)
void __thiscall _UnrealizedChore__CheckTaskCollection(_UnrealizedChore *this)
{
    FIXME("() stub\n");
}
/* ??0critical_section@Concurrency@@QAE@XZ */
/* ??0critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_ctor, 4)
critical_section* __thiscall critical_section_ctor(critical_section *this)
{
    TRACE("(%p)\n", this);

    this->unk_active.ctx = NULL;
    this->head = this->tail = NULL;
    return this;
}
/* ??1critical_section@Concurrency@@QAE@XZ */
/* ??1critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_dtor, 4)
void __thiscall critical_section_dtor(critical_section *this)
{
    TRACE("(%p)\n", this);
}

static void __cdecl spin_wait_yield(void)
{
    Sleep(0);
}
static inline void spin_wait_for_next_cs(cs_queue *q)
{
    SpinWait sw;

    if(q->next) return;

    SpinWait_ctor(&sw, &spin_wait_yield);
    SpinWait__Reset(&sw);
    while(!q->next)
        SpinWait__SpinOnce(&sw);
    SpinWait_dtor(&sw);
}

static inline void cs_set_head(critical_section *cs, cs_queue *q)
{
    cs->unk_active.ctx = get_current_context();
    cs->unk_active.next = q->next;
    cs->head = &cs->unk_active;
}
static inline void cs_lock(critical_section *cs, cs_queue *q)
{
    cs_queue *last;

    if(cs->unk_active.ctx == get_current_context()) {
        improper_lock e;
        improper_lock_ctor_str(&e, "Already locked");
        _CxxThrowException(&e, &improper_lock_exception_type);
    }

    memset(q, 0, sizeof(*q));
    q->ctx = get_current_context();
    last = InterlockedExchangePointer(&cs->tail, q);
    if(last) {
        last->next = q;
        call_Context_Block(q->ctx);
    }

    cs_set_head(cs, q);
    if(InterlockedCompareExchangePointer(&cs->tail, &cs->unk_active, q) != q) {
        spin_wait_for_next_cs(q);
        cs->unk_active.next = q->next;
    }
}
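/* Illustrative sketch (not part of the build): the cs_queue protocol above is
 * an MCS-style queue lock. Each waiter publishes itself by swapping the tail
 * pointer, then blocks; the unlocking context later wakes the next node. The
 * essential shape, with hypothetical naming: */
#if 0
static void queue_lock_shape(critical_section *cs, cs_queue *q)
{
    cs_queue *last = InterlockedExchangePointer(&cs->tail, q);
    if (last) {                      /* someone holds the lock... */
        last->next = q;              /* ...publish ourselves to the predecessor */
        call_Context_Block(q->ctx);  /* sleep until unlock wakes this context */
    }
    /* lock owned; cs_set_head() moves ownership into cs->unk_active */
}
#endif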
/* ?lock@critical_section@Concurrency@@QAEXXZ */
/* ?lock@critical_section@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(critical_section_lock, 4)
void __thiscall critical_section_lock(critical_section *this)
{
    cs_queue q;

    TRACE("(%p)\n", this);
    cs_lock(this, &q);
}
/* ?try_lock@critical_section@Concurrency@@QAE_NXZ */
/* ?try_lock@critical_section@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock, 4)
bool __thiscall critical_section_try_lock(critical_section *this)
{
    cs_queue q;

    TRACE("(%p)\n", this);

    if(this->unk_active.ctx == get_current_context())
        return FALSE;

    memset(&q, 0, sizeof(q));
    if(!InterlockedCompareExchangePointer(&this->tail, &q, NULL)) {
        cs_set_head(this, &q);
        if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
            spin_wait_for_next_cs(&q);
            this->unk_active.next = q.next;
        }
        return TRUE;
    }
    return FALSE;
}
/* ?unlock@critical_section@Concurrency@@QAEXXZ */
/* ?unlock@critical_section@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(critical_section_unlock, 4)
void __thiscall critical_section_unlock(critical_section *this)
{
    TRACE("(%p)\n", this);

    this->unk_active.ctx = NULL;
    this->head = NULL;
    if(InterlockedCompareExchangePointer(&this->tail, NULL, &this->unk_active)
            == &this->unk_active) return;
    spin_wait_for_next_cs(&this->unk_active);

#if _MSVCR_VER >= 110
    while(1) {
        cs_queue *next;

        if(!InterlockedExchange(&this->unk_active.next->free, TRUE))
            break;

        next = this->unk_active.next;
        if(InterlockedCompareExchangePointer(&this->tail, NULL, next) == next) {
            HeapFree(GetProcessHeap(), 0, next);
            return;
        }
        spin_wait_for_next_cs(next);

        this->unk_active.next = next->next;
        HeapFree(GetProcessHeap(), 0, next);
    }
#endif

    call_Context_Unblock(this->unk_active.next->ctx);
}
/* ?native_handle@critical_section@Concurrency@@QAEAAV12@XZ */
/* ?native_handle@critical_section@Concurrency@@QEAAAEAV12@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_native_handle, 4)
critical_section* __thiscall critical_section_native_handle(critical_section *this)
{
    TRACE("(%p)\n", this);
    return this;
}
static void set_timeout(FILETIME *ft, unsigned int timeout)
{
    LARGE_INTEGER to;

    GetSystemTimeAsFileTime(ft);
    to.QuadPart = ((LONGLONG)ft->dwHighDateTime << 32) +
        ft->dwLowDateTime + (LONGLONG)timeout * TICKSPERMSEC;
    ft->dwHighDateTime = to.QuadPart >> 32;
    ft->dwLowDateTime = to.QuadPart;
}

struct timeout_unlock
{
    Context *ctx;
    BOOL timed_out;
};

static void WINAPI timeout_unlock(TP_CALLBACK_INSTANCE *instance, void *ctx, TP_TIMER *timer)
{
    struct timeout_unlock *tu = ctx;
    tu->timed_out = TRUE;
    call_Context_Unblock(tu->ctx);
}
/* returns TRUE if wait has timed out */
static BOOL block_context_for(Context *ctx, unsigned int timeout)
{
    struct timeout_unlock tu = { ctx };
    TP_TIMER *tp_timer;
    FILETIME ft;

    if(timeout == COOPERATIVE_TIMEOUT_INFINITE) {
        call_Context_Block(ctx);
        return FALSE;
    }

    tp_timer = CreateThreadpoolTimer(timeout_unlock, &tu, NULL);
    if(!tp_timer) {
        FIXME("throw exception?\n");
        return TRUE;
    }
    set_timeout(&ft, timeout);
    SetThreadpoolTimer(tp_timer, &ft, 0, 0);

    call_Context_Block(ctx);

    SetThreadpoolTimer(tp_timer, NULL, 0, 0);
    WaitForThreadpoolTimerCallbacks(tp_timer, TRUE);
    CloseThreadpoolTimer(tp_timer);
    return tu.timed_out;
}
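/* Illustrative sketch (not part of the build): block_context_for() is the
 * building block for every timed wait in this file. The typical pattern, as
 * used by critical_section_try_lock_for() below, with a hypothetical 500ms
 * timeout: */
#if 0
static BOOL timed_wait_shape(Context *ctx)
{
    if (block_context_for(ctx, 500)) {
        /* timed out: the thread-pool timer callback unblocked us */
        return FALSE;
    }
    /* unblocked by the real wake-up path before the timeout fired */
    return TRUE;
}
#endif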
#if _MSVCR_VER >= 110
/* ?try_lock_for@critical_section@Concurrency@@QAE_NI@Z */
/* ?try_lock_for@critical_section@Concurrency@@QEAA_NI@Z */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock_for, 8)
bool __thiscall critical_section_try_lock_for(
        critical_section *this, unsigned int timeout)
{
    Context *ctx = get_current_context();
    cs_queue *q, *last;

    TRACE("(%p %d)\n", this, timeout);

    if(this->unk_active.ctx == ctx) {
        improper_lock e;
        improper_lock_ctor_str(&e, "Already locked");
        _CxxThrowException(&e, &improper_lock_exception_type);
    }

    if(!(q = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*q))))
        return critical_section_try_lock(this);
    q->ctx = ctx;

    last = InterlockedExchangePointer(&this->tail, q);
    if(last) {
        last->next = q;

        if(block_context_for(q->ctx, timeout))
        {
            if(!InterlockedExchange(&q->free, TRUE))
                return FALSE;
            /* Context was unblocked because of timeout and unlock operation */
            call_Context_Block(ctx);
        }
    }

    cs_set_head(this, q);
    if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, q) != q) {
        spin_wait_for_next_cs(q);
        this->unk_active.next = q->next;
    }

    HeapFree(GetProcessHeap(), 0, q);
    return TRUE;
}
#endif
/* ??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock@critical_section@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_ctor, 8)
critical_section_scoped_lock* __thiscall critical_section_scoped_lock_ctor(
        critical_section_scoped_lock *this, critical_section *cs)
{
    TRACE("(%p %p)\n", this, cs);

    this->cs = cs;
    cs_lock(this->cs, &this->lock.q);
    return this;
}

/* ??1scoped_lock@critical_section@Concurrency@@QAE@XZ */
/* ??1scoped_lock@critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_dtor, 4)
void __thiscall critical_section_scoped_lock_dtor(critical_section_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    critical_section_unlock(this->cs);
}
/* ??0_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??0_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock_ctor, 4)
_NonReentrantPPLLock* __thiscall _NonReentrantPPLLock_ctor(_NonReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    critical_section_ctor(&this->cs);
    return this;
}

/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Acquire, 8)
void __thiscall _NonReentrantPPLLock__Acquire(_NonReentrantPPLLock *this, cs_queue *q)
{
    TRACE("(%p %p)\n", this, q);
    cs_lock(&this->cs, q);
}

/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Release, 4)
void __thiscall _NonReentrantPPLLock__Release(_NonReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);
    critical_section_unlock(&this->cs);
}
/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_ctor, 8)
_NonReentrantPPLLock__Scoped_lock* __thiscall _NonReentrantPPLLock__Scoped_lock_ctor(
        _NonReentrantPPLLock__Scoped_lock *this, _NonReentrantPPLLock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    _NonReentrantPPLLock__Acquire(this->lock, &this->wait.q);
    return this;
}

/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_dtor, 4)
void __thiscall _NonReentrantPPLLock__Scoped_lock_dtor(_NonReentrantPPLLock__Scoped_lock *this)
{
    TRACE("(%p)\n", this);

    _NonReentrantPPLLock__Release(this->lock);
}
/* ??0_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??0_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock_ctor, 4)
_ReentrantPPLLock* __thiscall _ReentrantPPLLock_ctor(_ReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    critical_section_ctor(&this->cs);
    this->count = 0;
    this->owner = -1;
    return this;
}

/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Acquire, 8)
void __thiscall _ReentrantPPLLock__Acquire(_ReentrantPPLLock *this, cs_queue *q)
{
    TRACE("(%p %p)\n", this, q);

    if(this->owner == GetCurrentThreadId()) {
        this->count++;
        return;
    }

    cs_lock(&this->cs, q);
    this->count++;
    this->owner = GetCurrentThreadId();
}

/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Release, 4)
void __thiscall _ReentrantPPLLock__Release(_ReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    this->count--;
    if(this->count)
        return;

    this->owner = -1;
    critical_section_unlock(&this->cs);
}
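/* Illustrative sketch (not part of the build): the owner/count pair above
 * gives recursive-acquire semantics; only the release that drops the count to
 * zero unlocks the underlying critical_section. */
#if 0
static void recursion_shape(_ReentrantPPLLock *lock, cs_queue *q1, cs_queue *q2)
{
    _ReentrantPPLLock__Acquire(lock, q1);  /* takes cs, count = 1 */
    _ReentrantPPLLock__Acquire(lock, q2);  /* same thread: count = 2, no block */
    _ReentrantPPLLock__Release(lock);      /* count = 1, cs still held */
    _ReentrantPPLLock__Release(lock);      /* count = 0, cs released */
}
#endif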
/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_ctor, 8)
_ReentrantPPLLock__Scoped_lock* __thiscall _ReentrantPPLLock__Scoped_lock_ctor(
        _ReentrantPPLLock__Scoped_lock *this, _ReentrantPPLLock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    _ReentrantPPLLock__Acquire(this->lock, &this->wait.q);
    return this;
}

/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_dtor, 4)
void __thiscall _ReentrantPPLLock__Scoped_lock_dtor(_ReentrantPPLLock__Scoped_lock *this)
{
    TRACE("(%p)\n", this);

    _ReentrantPPLLock__Release(this->lock);
}
/* ?_GetConcurrency@details@Concurrency@@YAIXZ */
unsigned int __cdecl _GetConcurrency(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors;
    }

    return val;
}
static void evt_add_queue(thread_wait_entry **head, thread_wait_entry *entry)
{
    entry->next = *head;
    entry->prev = NULL;
    if(*head) (*head)->prev = entry;
    *head = entry;
}

static void evt_remove_queue(thread_wait_entry **head, thread_wait_entry *entry)
{
    if(entry == *head)
        *head = entry->next;
    else if(entry->prev)
        entry->prev->next = entry->next;
    if(entry->next) entry->next->prev = entry->prev;
}
static size_t evt_end_wait(thread_wait *wait, event **events, int count)
{
    size_t i, ret = COOPERATIVE_WAIT_TIMEOUT;

    for(i = 0; i < count; i++) {
        critical_section_lock(&events[i]->cs);
        if(events[i] == wait->signaled) ret = i;
        evt_remove_queue(&events[i]->waiters, &wait->entries[i]);
        critical_section_unlock(&events[i]->cs);
    }

    return ret;
}

static inline int evt_transition(void **state, void *from, void *to)
{
    return InterlockedCompareExchangePointer(state, to, from) == from;
}
static size_t evt_wait(thread_wait *wait, event **events, int count, bool wait_all, unsigned int timeout)
{
    int i;

    wait->signaled = EVT_RUNNING;
    wait->pending_waits = wait_all ? count : 1;
    for(i = 0; i < count; i++) {
        wait->entries[i].wait = wait;

        critical_section_lock(&events[i]->cs);
        evt_add_queue(&events[i]->waiters, &wait->entries[i]);
        if(events[i]->signaled) {
            if(!InterlockedDecrement(&wait->pending_waits)) {
                wait->signaled = events[i];
                critical_section_unlock(&events[i]->cs);

                return evt_end_wait(wait, events, i+1);
            }
        }
        critical_section_unlock(&events[i]->cs);
    }

    if(!timeout)
        return evt_end_wait(wait, events, count);

    if(!evt_transition(&wait->signaled, EVT_RUNNING, EVT_WAITING))
        return evt_end_wait(wait, events, count);

    if(block_context_for(wait->ctx, timeout) &&
            !evt_transition(&wait->signaled, EVT_WAITING, EVT_RUNNING))
        call_Context_Block(wait->ctx);

    return evt_end_wait(wait, events, count);
}
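/* Illustrative sketch (not part of the build): pending_waits above encodes
 * the wait-any/wait-all difference. It starts at 1 for wait-any (the first
 * signaled event wins) and at "count" for wait-all (every event must
 * decrement it once before the waiter is released). */
#if 0
static void wait_semantics(thread_wait *w, event **events, int count)
{
    size_t idx;

    w->ctx = get_current_context();
    idx = evt_wait(w, events, count, FALSE, 1000);  /* wait-any, 1s timeout */
    if (idx == COOPERATIVE_WAIT_TIMEOUT) {
        /* no event fired within the timeout */
    } else {
        /* events[idx] was the first to be signaled */
    }
}
#endif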
/* ??0event@Concurrency@@QAE@XZ */
/* ??0event@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(event_ctor, 4)
event* __thiscall event_ctor(event *this)
{
    TRACE("(%p)\n", this);

    this->waiters = NULL;
    this->signaled = FALSE;
    critical_section_ctor(&this->cs);

    return this;
}
/* ??1event@Concurrency@@QAE@XZ */
/* ??1event@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(event_dtor, 4)
void __thiscall event_dtor(event *this)
{
    TRACE("(%p)\n", this);
    critical_section_dtor(&this->cs);
    if(this->waiters)
        ERR("there's a wait on destroyed event\n");
}
/* ?reset@event@Concurrency@@QAEXXZ */
/* ?reset@event@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(event_reset, 4)
void __thiscall event_reset(event *this)
{
    thread_wait_entry *entry;

    TRACE("(%p)\n", this);

    critical_section_lock(&this->cs);
    if(this->signaled) {
        this->signaled = FALSE;
        for(entry=this->waiters; entry; entry = entry->next)
            InterlockedIncrement(&entry->wait->pending_waits);
    }
    critical_section_unlock(&this->cs);
}
/* ?set@event@Concurrency@@QAEXXZ */
/* ?set@event@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(event_set, 4)
void __thiscall event_set(event *this)
{
    thread_wait_entry *wakeup = NULL;
    thread_wait_entry *entry, *next;

    TRACE("(%p)\n", this);

    critical_section_lock(&this->cs);
    if(!this->signaled) {
        this->signaled = TRUE;
        for(entry=this->waiters; entry; entry=next) {
            next = entry->next;
            if(!InterlockedDecrement(&entry->wait->pending_waits)) {
                if(InterlockedExchangePointer(&entry->wait->signaled, this) == EVT_WAITING) {
                    evt_remove_queue(&this->waiters, entry);
                    evt_add_queue(&wakeup, entry);
                }
            }
        }
    }
    critical_section_unlock(&this->cs);

    for(entry=wakeup; entry; entry=next) {
        next = entry->next;
        entry->next = entry->prev = NULL;
        call_Context_Unblock(entry->wait->ctx);
    }
}
/* ?wait@event@Concurrency@@QAEII@Z */
/* ?wait@event@Concurrency@@QEAA_KI@Z */
DEFINE_THISCALL_WRAPPER(event_wait, 8)
size_t __thiscall event_wait(event *this, unsigned int timeout)
{
    thread_wait wait;
    size_t signaled;

    TRACE("(%p %u)\n", this, timeout);

    critical_section_lock(&this->cs);
    signaled = this->signaled;
    critical_section_unlock(&this->cs);

    if(!timeout) return signaled ? 0 : COOPERATIVE_WAIT_TIMEOUT;
    wait.ctx = get_current_context();
    return signaled ? 0 : evt_wait(&wait, &this, 1, FALSE, timeout);
}
/* ?wait_for_multiple@event@Concurrency@@SAIPAPAV12@I_NI@Z */
/* ?wait_for_multiple@event@Concurrency@@SA_KPEAPEAV12@_K_NI@Z */
int __cdecl event_wait_for_multiple(event **events, size_t count, bool wait_all, unsigned int timeout)
{
    thread_wait *wait;
    size_t ret;

    TRACE("(%p %Iu %d %u)\n", events, count, wait_all, timeout);

    if(count == 0)
        return 0;

    wait = operator_new(FIELD_OFFSET(thread_wait, entries[count]));
    wait->ctx = get_current_context();
    ret = evt_wait(wait, events, count, wait_all, timeout);
    operator_delete(wait);

    return ret;
}
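/* Illustrative sketch (not part of the build): thread_wait ends in a flexible
 * array of entries, so one allocation sized with FIELD_OFFSET covers every
 * waited-on event, exactly as event_wait_for_multiple() does above. */
#if 0
static thread_wait *alloc_wait(size_t count)
{
    /* entries[count] is the trailing member; FIELD_OFFSET sizes the block
     * without overallocating for the declared array bound */
    return operator_new(FIELD_OFFSET(thread_wait, entries[count]));
}
#endif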
#if _MSVCR_VER >= 110

/* ??0_Condition_variable@details@Concurrency@@QAE@XZ */
/* ??0_Condition_variable@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_ctor, 4)
_Condition_variable* __thiscall _Condition_variable_ctor(_Condition_variable *this)
{
    TRACE("(%p)\n", this);

    this->queue = NULL;
    critical_section_ctor(&this->lock);
    return this;
}
/* ??1_Condition_variable@details@Concurrency@@QAE@XZ */
/* ??1_Condition_variable@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_dtor, 4)
void __thiscall _Condition_variable_dtor(_Condition_variable *this)
{
    TRACE("(%p)\n", this);

    while(this->queue) {
        cv_queue *next = this->queue->next;
        if(!this->queue->expired)
            ERR("there's an active wait\n");
        operator_delete(this->queue);
        this->queue = next;
    }
    critical_section_dtor(&this->lock);
}
/* ?wait@_Condition_variable@details@Concurrency@@QAEXAAVcritical_section@3@@Z */
/* ?wait@_Condition_variable@details@Concurrency@@QEAAXAEAVcritical_section@3@@Z */
DEFINE_THISCALL_WRAPPER(_Condition_variable_wait, 8)
void __thiscall _Condition_variable_wait(_Condition_variable *this, critical_section *cs)
{
    cv_queue q;

    TRACE("(%p, %p)\n", this, cs);

    q.ctx = get_current_context();
    q.expired = FALSE;
    critical_section_lock(&this->lock);
    q.next = this->queue;
    this->queue = &q;
    critical_section_unlock(&this->lock);

    critical_section_unlock(cs);
    call_Context_Block(q.ctx);
    critical_section_lock(cs);
}
/* ?wait_for@_Condition_variable@details@Concurrency@@QAE_NAAVcritical_section@3@I@Z */
/* ?wait_for@_Condition_variable@details@Concurrency@@QEAA_NAEAVcritical_section@3@I@Z */
DEFINE_THISCALL_WRAPPER(_Condition_variable_wait_for, 12)
bool __thiscall _Condition_variable_wait_for(_Condition_variable *this,
        critical_section *cs, unsigned int timeout)
{
    cv_queue *q;

    TRACE("(%p %p %d)\n", this, cs, timeout);

    q = operator_new(sizeof(cv_queue));
    q->ctx = get_current_context();
    q->expired = FALSE;
    critical_section_lock(&this->lock);
    q->next = this->queue;
    this->queue = q;
    critical_section_unlock(&this->lock);

    critical_section_unlock(cs);

    if(block_context_for(q->ctx, timeout)) {
        if(!InterlockedExchange(&q->expired, TRUE)) {
            critical_section_lock(cs);
            return FALSE;
        }
        call_Context_Block(q->ctx);
    }

    operator_delete(q);
    critical_section_lock(cs);
    return TRUE;
}
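/* Illustrative sketch (not part of the build): like any condition variable,
 * _Condition_variable is meant to be used in a predicate loop, since a waiter
 * can wake while the guarded state is stale; "ready" is hypothetical. */
#if 0
static void cv_usage(_Condition_variable *cv, critical_section *cs, BOOL *ready)
{
    critical_section_lock(cs);
    while (!*ready)
        _Condition_variable_wait(cv, cs);  /* atomically unlocks cs and blocks */
    /* cs is re-acquired here; *ready is observed under the lock */
    critical_section_unlock(cs);
}
#endif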
/* ?notify_one@_Condition_variable@details@Concurrency@@QAEXXZ */
/* ?notify_one@_Condition_variable@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_one, 4)
void __thiscall _Condition_variable_notify_one(_Condition_variable *this)
{
    cv_queue *node;

    TRACE("(%p)\n", this);

    if(!this->queue)
        return;

    while(1) {
        critical_section_lock(&this->lock);
        node = this->queue;
        if(!node) {
            critical_section_unlock(&this->lock);
            return;
        }
        this->queue = node->next;
        critical_section_unlock(&this->lock);

        node->next = CV_WAKE;
        if(!InterlockedExchange(&node->expired, TRUE)) {
            call_Context_Unblock(node->ctx);
            return;
        }
        operator_delete(node);
    }
}
/* ?notify_all@_Condition_variable@details@Concurrency@@QAEXXZ */
/* ?notify_all@_Condition_variable@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_all, 4)
void __thiscall _Condition_variable_notify_all(_Condition_variable *this)
{
    cv_queue *ptr;

    TRACE("(%p)\n", this);

    if(!this->queue)
        return;

    critical_section_lock(&this->lock);
    ptr = this->queue;
    this->queue = NULL;
    critical_section_unlock(&this->lock);

    while(ptr) {
        cv_queue *next = ptr->next;

        ptr->next = CV_WAKE;
        if(!InterlockedExchange(&ptr->expired, TRUE))
            call_Context_Unblock(ptr->ctx);
        else
            operator_delete(ptr);
        ptr = next;
    }
}

#endif /* _MSVCR_VER >= 110 */
/* ??0reader_writer_lock@Concurrency@@QAE@XZ */
/* ??0reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_ctor, 4)
reader_writer_lock* __thiscall reader_writer_lock_ctor(reader_writer_lock *this)
{
    TRACE("(%p)\n", this);

    memset(this, 0, sizeof(*this));
    return this;
}

/* ??1reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_dtor, 4)
void __thiscall reader_writer_lock_dtor(reader_writer_lock *this)
{
    TRACE("(%p)\n", this);

    if (this->thread_id != 0 || this->count)
        WARN("destroying locked reader_writer_lock\n");
}
static inline void spin_wait_for_next_rwl(rwl_queue *q)
{
    SpinWait sw;

    if(q->next) return;

    SpinWait_ctor(&sw, &spin_wait_yield);
    SpinWait__Reset(&sw);
    while(!q->next)
        SpinWait__SpinOnce(&sw);
    SpinWait_dtor(&sw);
}
/* ?lock@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?lock@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock, 4)
void __thiscall reader_writer_lock_lock(reader_writer_lock *this)
{
    rwl_queue q = { NULL, get_current_context() }, *last;

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId()) {
        improper_lock e;
        improper_lock_ctor_str(&e, "Already locked");
        _CxxThrowException(&e, &improper_lock_exception_type);
    }

    last = InterlockedExchangePointer((void**)&this->writer_tail, &q);
    if (last) {
        last->next = &q;
        call_Context_Block(q.ctx);
    } else {
        this->writer_head = &q;
        if (InterlockedOr(&this->count, WRITER_WAITING))
            call_Context_Block(q.ctx);
    }

    this->thread_id = GetCurrentThreadId();
    this->writer_head = &this->active;
    this->active.next = NULL;
    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
        spin_wait_for_next_rwl(&q);
        this->active.next = q.next;
    }
}
/* ?lock_read@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?lock_read@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock_read, 4)
void __thiscall reader_writer_lock_lock_read(reader_writer_lock *this)
{
    rwl_queue q = { NULL, get_current_context() };

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId()) {
        improper_lock e;
        improper_lock_ctor_str(&e, "Already locked as writer");
        _CxxThrowException(&e, &improper_lock_exception_type);
    }

    do {
        q.next = this->reader_head;
    } while(InterlockedCompareExchangePointer((void**)&this->reader_head, &q, q.next) != q.next);

    if (!q.next) {
        rwl_queue *head;
        LONG count;

        while (!((count = this->count) & WRITER_WAITING))
            if (InterlockedCompareExchange(&this->count, count+1, count) == count) break;

        if (count & WRITER_WAITING)
            call_Context_Block(q.ctx);

        head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
        while(head && head != &q) {
            rwl_queue *next = head->next;
            InterlockedIncrement(&this->count);
            call_Context_Unblock(head->ctx);
            head = next;
        }
    } else {
        call_Context_Block(q.ctx);
    }
}
/* ?try_lock@reader_writer_lock@Concurrency@@QAE_NXZ */
/* ?try_lock@reader_writer_lock@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock, 4)
bool __thiscall reader_writer_lock_try_lock(reader_writer_lock *this)
{
    rwl_queue q = { NULL };

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId())
        return FALSE;

    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &q, NULL))
        return FALSE;
    this->writer_head = &q;
    if (!InterlockedCompareExchange(&this->count, WRITER_WAITING, 0)) {
        this->thread_id = GetCurrentThreadId();
        this->writer_head = &this->active;
        this->active.next = NULL;
        if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
            spin_wait_for_next_rwl(&q);
            this->active.next = q.next;
        }
        return TRUE;
    }

    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, &q) == &q)
        return FALSE;
    spin_wait_for_next_rwl(&q);
    this->writer_head = q.next;
    if (!InterlockedOr(&this->count, WRITER_WAITING)) {
        this->thread_id = GetCurrentThreadId();
        this->writer_head = &this->active;
        this->active.next = q.next;
        return TRUE;
    }
    return FALSE;
}
/* ?try_lock_read@reader_writer_lock@Concurrency@@QAE_NXZ */
/* ?try_lock_read@reader_writer_lock@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock_read, 4)
bool __thiscall reader_writer_lock_try_lock_read(reader_writer_lock *this)
{
    LONG count;

    TRACE("(%p)\n", this);

    while (!((count = this->count) & WRITER_WAITING))
        if (InterlockedCompareExchange(&this->count, count+1, count) == count) return TRUE;
    return FALSE;
}
/* ?unlock@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?unlock@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_unlock, 4)
void __thiscall reader_writer_lock_unlock(reader_writer_lock *this)
{
    LONG count;
    rwl_queue *head, *next;

    TRACE("(%p)\n", this);

    if ((count = this->count) & ~WRITER_WAITING) {
        count = InterlockedDecrement(&this->count);
        if (count != WRITER_WAITING)
            return;
        call_Context_Unblock(this->writer_head->ctx);
        return;
    }

    this->thread_id = 0;
    next = this->writer_head->next;
    if (next) {
        call_Context_Unblock(next->ctx);
        return;
    }

    InterlockedAnd(&this->count, ~WRITER_WAITING);
    head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
    while (head) {
        next = head->next;
        InterlockedIncrement(&this->count);
        call_Context_Unblock(head->ctx);
        head = next;
    }

    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, this->writer_head) == this->writer_head)
        return;
    InterlockedOr(&this->count, WRITER_WAITING);
}
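/* Illustrative sketch (not part of the build): writers above serialize
 * through writer_tail, while readers share the lock by bumping the low bits
 * of "count"; a pending WRITER_WAITING flag makes new readers queue up. */
#if 0
static void rw_usage(reader_writer_lock *rwl)
{
    reader_writer_lock_lock_read(rwl);  /* shared: many readers may be inside */
    reader_writer_lock_unlock(rwl);

    reader_writer_lock_lock(rwl);       /* exclusive: thread_id records owner */
    reader_writer_lock_unlock(rwl);
}
#endif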
/* ??0scoped_lock@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_ctor, 8)
reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_ctor(
        reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    reader_writer_lock_lock(lock);
    return this;
}

/* ??1scoped_lock@reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1scoped_lock@reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_dtor, 4)
void __thiscall reader_writer_lock_scoped_lock_dtor(reader_writer_lock_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    reader_writer_lock_unlock(this->lock);
}

/* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_ctor, 8)
reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_read_ctor(
        reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    reader_writer_lock_lock_read(lock);
    return this;
}

/* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_dtor, 4)
void __thiscall reader_writer_lock_scoped_lock_read_dtor(reader_writer_lock_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    reader_writer_lock_unlock(this->lock);
}
/* ??0_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
/* ??0_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_ctor, 4)
_ReentrantBlockingLock* __thiscall _ReentrantBlockingLock_ctor(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);

    InitializeCriticalSection(&this->cs);
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": _ReentrantBlockingLock");
    return this;
}

/* ??1_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
/* ??1_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_dtor, 4)
void __thiscall _ReentrantBlockingLock_dtor(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);
}

/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Acquire, 4)
void __thiscall _ReentrantBlockingLock__Acquire(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    EnterCriticalSection(&this->cs);
}

/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Release, 4)
void __thiscall _ReentrantBlockingLock__Release(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    LeaveCriticalSection(&this->cs);
}

/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QAE_NXZ */
/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__TryAcquire, 4)
bool __thiscall _ReentrantBlockingLock__TryAcquire(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    return TryEnterCriticalSection(&this->cs);
}
/* ?wait@Concurrency@@YAXI@Z */
void __cdecl Concurrency_wait(unsigned int time)
{
    TRACE("(%d)\n", time);

    block_context_for(get_current_context(), time);
}
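/* Illustrative sketch (not part of the build): Concurrency::wait() is a plain
 * cooperative sleep; the current context blocks until the thread-pool timer
 * armed by block_context_for() expires. */
#if 0
static void cooperative_sleep_example(void)
{
    Concurrency_wait(100);  /* block this context for ~100ms */
}
#endif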
#if _MSVCR_VER >= 110
/* ?_Trace_agents@Concurrency@@YAXW4Agents_EventType@1@_JZZ */
void WINAPIV _Trace_agents(/*enum Concurrency::Agents_EventType*/int type, __int64 id, ...)
{
    FIXME("(%d %#I64x)\n", type, id);
}
#endif

/* ?_Trace_ppl_function@Concurrency@@YAXABU_GUID@@EW4ConcRT_EventType@1@@Z */
/* ?_Trace_ppl_function@Concurrency@@YAXAEBU_GUID@@EW4ConcRT_EventType@1@@Z */
void __cdecl _Trace_ppl_function(const GUID *guid, unsigned char level, enum ConcRT_EventType type)
{
    FIXME("(%s %u %i) stub\n", debugstr_guid(guid), level, type);
}
/* ??0_Timer@details@Concurrency@@IAE@I_N@Z */
/* ??0_Timer@details@Concurrency@@IEAA@I_N@Z */
DEFINE_THISCALL_WRAPPER(_Timer_ctor, 12)
_Timer* __thiscall _Timer_ctor(_Timer *this, unsigned int elapse, bool repeat)
{
    TRACE("(%p %u %x)\n", this, elapse, repeat);

    this->vtable = &_Timer_vtable;
    this->timer = NULL;
    this->elapse = elapse;
    this->repeat = repeat;
    return this;
}

static void WINAPI timer_callback(TP_CALLBACK_INSTANCE *instance, void *ctx, TP_TIMER *timer)
{
    _Timer *this = ctx;

    TRACE("calling _Timer(%p) callback\n", this);
    call__Timer_callback(this);
}
/* ?_Start@_Timer@details@Concurrency@@IAEXXZ */
/* ?_Start@_Timer@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Timer__Start, 4)
void __thiscall _Timer__Start(_Timer *this)
{
    LONGLONG ll;
    FILETIME ft;

    TRACE("(%p)\n", this);

    this->timer = CreateThreadpoolTimer(timer_callback, this, NULL);
    if (!this->timer)
    {
        FIXME("throw exception?\n");
        return;
    }

    ll = -(LONGLONG)this->elapse * TICKSPERMSEC;
    ft.dwLowDateTime = ll & 0xffffffff;
    ft.dwHighDateTime = ll >> 32;
    SetThreadpoolTimer(this->timer, &ft, this->repeat ? this->elapse : 0, 0);
}
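/* Illustrative sketch (not part of the build): thread-pool timers take an
 * absolute FILETIME, but a negative value is interpreted as relative to now,
 * which is why _Timer__Start() above negates the millisecond count. */
#if 0
static void relative_due_time(TP_TIMER *timer, unsigned int ms)
{
    LONGLONG ll = -(LONGLONG)ms * TICKSPERMSEC;  /* ms -> 100ns ticks, negated */
    FILETIME ft;

    ft.dwLowDateTime = ll & 0xffffffff;
    ft.dwHighDateTime = ll >> 32;
    SetThreadpoolTimer(timer, &ft, 0, 0);        /* one-shot, relative due time */
}
#endif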
/* ?_Stop@_Timer@details@Concurrency@@IAEXXZ */
/* ?_Stop@_Timer@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Timer__Stop, 4)
void __thiscall _Timer__Stop(_Timer *this)
{
    TRACE("(%p)\n", this);

    SetThreadpoolTimer(this->timer, NULL, 0, 0);
    WaitForThreadpoolTimerCallbacks(this->timer, TRUE);
    CloseThreadpoolTimer(this->timer);
    this->timer = NULL;
}

/* ??1_Timer@details@Concurrency@@MAE@XZ */
/* ??1_Timer@details@Concurrency@@MEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Timer_dtor, 4)
void __thiscall _Timer_dtor(_Timer *this)
{
    TRACE("(%p)\n", this);

    if (this->timer)
        _Timer__Stop(this);
}
DEFINE_THISCALL_WRAPPER(_Timer_vector_dtor, 8)
_Timer* __thiscall _Timer_vector_dtor(_Timer *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);

    if (flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR*)this-1;

        for (i=*ptr-1; i>=0; i--)
            _Timer_dtor(this+i);
        operator_delete(ptr);
    } else {
        _Timer_dtor(this);
        if (flags & 1)
            operator_delete(this);
    }

    return this;
}
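/* Illustrative sketch (not part of the build): for vector destruction
 * (flag 2) MSVC stores the element count in a machine word directly before
 * the first object, which is what the (INT_PTR*)this-1 arithmetic above walks
 * back to. */
#if 0
static void vector_layout_note(void)
{
    /* memory layout of a "new _Timer[n]" block as handled by the vector dtor:
     *
     *   [INT_PTR n][_Timer 0][_Timer 1]...[_Timer n-1]
     *               ^-- "this" pointer passed to _Timer_vector_dtor
     */
}
#endif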
#ifdef __ASM_USE_THISCALL_WRAPPER

#define DEFINE_VTBL_WRAPPER(off)            \
    __ASM_GLOBAL_FUNC(vtbl_wrapper_ ## off, \
        "popl %eax\n\t"                     \
        "popl %ecx\n\t"                     \
        "pushl %eax\n\t"                    \
        "movl 0(%ecx), %eax\n\t"            \
        "jmp *" #off "(%eax)\n\t")

DEFINE_VTBL_WRAPPER(0);
DEFINE_VTBL_WRAPPER(4);
DEFINE_VTBL_WRAPPER(8);
DEFINE_VTBL_WRAPPER(12);
DEFINE_VTBL_WRAPPER(16);
DEFINE_VTBL_WRAPPER(20);
DEFINE_VTBL_WRAPPER(24);
DEFINE_VTBL_WRAPPER(28);
DEFINE_VTBL_WRAPPER(32);
DEFINE_VTBL_WRAPPER(36);
DEFINE_VTBL_WRAPPER(40);
DEFINE_VTBL_WRAPPER(44);
DEFINE_VTBL_WRAPPER(48);

#endif
DEFINE_RTTI_DATA0(Context, 0, ".?AVContext@Concurrency@@")
DEFINE_RTTI_DATA1(ContextBase, 0, &Context_rtti_base_descriptor, ".?AVContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ExternalContextBase, 0, &ContextBase_rtti_base_descriptor,
        &Context_rtti_base_descriptor, ".?AVExternalContextBase@details@Concurrency@@")
DEFINE_RTTI_DATA0(Scheduler, 0, ".?AVScheduler@Concurrency@@")
DEFINE_RTTI_DATA1(SchedulerBase, 0, &Scheduler_rtti_base_descriptor, ".?AVSchedulerBase@details@Concurrency@@")
DEFINE_RTTI_DATA2(ThreadScheduler, 0, &SchedulerBase_rtti_base_descriptor,
        &Scheduler_rtti_base_descriptor, ".?AVThreadScheduler@details@Concurrency@@")
DEFINE_RTTI_DATA0(_Timer, 0, ".?AV_Timer@details@Concurrency@@");
__ASM_BLOCK_BEGIN(concurrency_vtables)
    __ASM_VTABLE(ExternalContextBase,
            VTABLE_ADD_FUNC(ExternalContextBase_GetId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetVirtualProcessorId)
            VTABLE_ADD_FUNC(ExternalContextBase_GetScheduleGroupId)
            VTABLE_ADD_FUNC(ExternalContextBase_Unblock)
            VTABLE_ADD_FUNC(ExternalContextBase_IsSynchronouslyBlocked)
            VTABLE_ADD_FUNC(ExternalContextBase_vector_dtor)
            VTABLE_ADD_FUNC(ExternalContextBase_Block)
            VTABLE_ADD_FUNC(ExternalContextBase_Yield)
            VTABLE_ADD_FUNC(ExternalContextBase_SpinYield)
            VTABLE_ADD_FUNC(ExternalContextBase_Oversubscribe)
            VTABLE_ADD_FUNC(ExternalContextBase_Alloc)
            VTABLE_ADD_FUNC(ExternalContextBase_Free)
            VTABLE_ADD_FUNC(ExternalContextBase_EnterCriticalRegionHelper)
            VTABLE_ADD_FUNC(ExternalContextBase_EnterHyperCriticalRegionHelper)
            VTABLE_ADD_FUNC(ExternalContextBase_ExitCriticalRegionHelper)
            VTABLE_ADD_FUNC(ExternalContextBase_ExitHyperCriticalRegionHelper)
            VTABLE_ADD_FUNC(ExternalContextBase_GetCriticalRegionType)
            VTABLE_ADD_FUNC(ExternalContextBase_GetContextKind));
    __ASM_VTABLE(ThreadScheduler,
            VTABLE_ADD_FUNC(ThreadScheduler_vector_dtor)
            VTABLE_ADD_FUNC(ThreadScheduler_Id)
            VTABLE_ADD_FUNC(ThreadScheduler_GetNumberOfVirtualProcessors)
            VTABLE_ADD_FUNC(ThreadScheduler_GetPolicy)
            VTABLE_ADD_FUNC(ThreadScheduler_Reference)
            VTABLE_ADD_FUNC(ThreadScheduler_Release)
            VTABLE_ADD_FUNC(ThreadScheduler_RegisterShutdownEvent)
            VTABLE_ADD_FUNC(ThreadScheduler_Attach)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_CreateScheduleGroup)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask_loc)
#endif
            VTABLE_ADD_FUNC(ThreadScheduler_ScheduleTask)
#if _MSVCR_VER > 100
            VTABLE_ADD_FUNC(ThreadScheduler_IsAvailableLocation)
#endif
            );
    __ASM_VTABLE(_Timer,
            VTABLE_ADD_FUNC(_Timer_vector_dtor));
__ASM_BLOCK_END
void msvcrt_init_concurrency(void *base)
{
#ifdef __x86_64__
    init_cexception_rtti(base);
    init_improper_lock_rtti(base);
    init_improper_scheduler_attach_rtti(base);
    init_improper_scheduler_detach_rtti(base);
    init_invalid_multiple_scheduling_rtti(base);
    init_invalid_scheduler_policy_key_rtti(base);
    init_invalid_scheduler_policy_thread_specification_rtti(base);
    init_invalid_scheduler_policy_value_rtti(base);
    init_missing_wait_rtti(base);
    init_scheduler_resource_allocation_error_rtti(base);
    init_Context_rtti(base);
    init_ContextBase_rtti(base);
    init_ExternalContextBase_rtti(base);
    init_Scheduler_rtti(base);
    init_SchedulerBase_rtti(base);
    init_ThreadScheduler_rtti(base);
    init__Timer_rtti(base);

    init_cexception_cxx_type_info(base);
    init_improper_lock_cxx(base);
    init_improper_scheduler_attach_cxx(base);
    init_improper_scheduler_detach_cxx(base);
    init_invalid_multiple_scheduling_cxx(base);
    init_invalid_scheduler_policy_key_cxx(base);
    init_invalid_scheduler_policy_thread_specification_cxx(base);
    init_invalid_scheduler_policy_value_cxx(base);
#if _MSVCR_VER >= 120
    init_missing_wait_cxx(base);
#endif
    init_scheduler_resource_allocation_error_cxx(base);
#endif
}
void msvcrt_free_concurrency(void)
{
    if (context_tls_index != TLS_OUT_OF_INDEXES)
        TlsFree(context_tls_index);
    if(default_scheduler_policy.policy_container)
        SchedulerPolicy_dtor(&default_scheduler_policy);
    if(default_scheduler) {
        ThreadScheduler_dtor(default_scheduler);
        operator_delete(default_scheduler);
    }
}

void msvcrt_free_scheduler_thread(void)
{
    Context *context = try_get_current_context();
    if (!context) return;
    call_Context_dtor(context, 1);
}

#endif /* _MSVCR_VER >= 100 */