dinput: Clear DIA_APPNOMAP BuildActionMap flag with specific device semantic.
[wine.git] / dlls / concrt140 / details.c
blob978a7a8f96eea38c08e4218db621ca12766b46bf
1 /*
2 * Copyright 2010 Piotr Caban for CodeWeavers
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with this library; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
19 /* Keep in sync with msvcp90/detail.c */
21 #include <stdarg.h>
23 #include "wine/debug.h"
24 #include "wine/exception.h"
25 #include "details.h"
WINE_DEFAULT_DEBUG_CHANNEL(msvcp);

/* Singly-linked page of queued items; data[] is the trailing payload area. */
typedef struct _Page
{
    struct _Page *_Next;
    size_t _Mask;   /* bit i set when slot i of this page holds a constructed item */
    char data[1];
} _Page;

/* One striped sub-queue.  lock (0/1) is taken with InterlockedCompareExchange
 * and released with WriteRelease; head_pos/tail_pos count items popped/pushed
 * from this stripe. */
typedef struct
{
    LONG lock;
    _Page *head;
    _Page *tail;
    size_t head_pos;
    size_t tail_pos;
} threadsafe_queue;

#define QUEUES_NO 8
/* Shared queue state: global push/pop tickets plus QUEUES_NO stripes.
 * Ticket id maps to stripe id % QUEUES_NO, slot id / QUEUES_NO. */
typedef struct
{
    size_t tail_pos;
    size_t head_pos;
    threadsafe_queue queues[QUEUES_NO];
} queue_data;

typedef struct
{
    const vtable_ptr *vtable;
    queue_data *data; /* queue_data structure is not binary compatible */
    size_t alloc_count; /* items per page; always a power of 2 (see ctor) */
    size_t item_size;
} _Concurrent_queue_base_v4;
extern const vtable_ptr _Concurrent_queue_base_v4_vtable;
/* Virtual call helpers; the numeric argument is the vtable offset passed to
 * CALL_VTBL_FUNC.  NOTE(review): offset 12 is skipped — presumably an unused
 * slot in the native vtable layout; confirm against msvcp90/detail.c. */
#define call__Concurrent_queue_base_v4__Move_item(this,dst,idx,src) CALL_VTBL_FUNC(this, \
        0, void, (_Concurrent_queue_base_v4*,_Page*,size_t,void*), (this,dst,idx,src))
#define call__Concurrent_queue_base_v4__Copy_item(this,dst,idx,src) CALL_VTBL_FUNC(this, \
        4, void, (_Concurrent_queue_base_v4*,_Page*,size_t,const void*), (this,dst,idx,src))
#define call__Concurrent_queue_base_v4__Assign_and_destroy_item(this,dst,src,idx) CALL_VTBL_FUNC(this, \
        8, void, (_Concurrent_queue_base_v4*,void*,_Page*,size_t), (this,dst,src,idx))
#define call__Concurrent_queue_base_v4__Allocate_page(this) CALL_VTBL_FUNC(this, \
        16, _Page*, (_Concurrent_queue_base_v4*), (this))
#define call__Concurrent_queue_base_v4__Deallocate_page(this, page) CALL_VTBL_FUNC(this, \
        20, void, (_Concurrent_queue_base_v4*,_Page*), (this,page))
/* ?_Internal_throw_exception@_Concurrent_queue_base_v4@details@Concurrency@@IBEXXZ */
/* ?_Internal_throw_exception@_Concurrent_queue_base_v4@details@Concurrency@@IEBAXXZ */
DEFINE_THISCALL_WRAPPER(_Concurrent_queue_base_v4__Internal_throw_exception, 4)
/* Always raises the out-of-memory exception via _Xmem(); never returns. */
void __thiscall _Concurrent_queue_base_v4__Internal_throw_exception(
        const _Concurrent_queue_base_v4 *this)
{
    TRACE("(%p)\n", this);
    _Xmem();
}
/* ??0_Concurrent_queue_base_v4@details@Concurrency@@IAE@I@Z */
/* ??0_Concurrent_queue_base_v4@details@Concurrency@@IEAA@_K@Z */
DEFINE_THISCALL_WRAPPER(_Concurrent_queue_base_v4_ctor, 8)
/* Constructor: size is the element size; the private queue_data is heap
 * allocated because it is not binary compatible with native. */
_Concurrent_queue_base_v4* __thiscall _Concurrent_queue_base_v4_ctor(
        _Concurrent_queue_base_v4 *this, size_t size)
{
    TRACE("(%p %Iu)\n", this, size);

    this->data = operator_new(sizeof(*this->data));
    memset(this->data, 0, sizeof(*this->data));

    this->vtable = &_Concurrent_queue_base_v4_vtable;
    this->item_size = size;

    /* alloc_count needs to be power of 2 */
    this->alloc_count =
        size <= 8 ? 32 :
        size <= 16 ? 16 :
        size <= 32 ? 8 :
        size <= 64 ? 4 :
        size <= 128 ? 2 : 1;
    return this;
}
/* ??1_Concurrent_queue_base_v4@details@Concurrency@@MAE@XZ */
/* ??1_Concurrent_queue_base_v4@details@Concurrency@@MEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Concurrent_queue_base_v4_dtor, 4)
/* Destructor: frees only the private queue_data.  Pages are expected to have
 * been released via _Internal_finish_clear before destruction. */
void __thiscall _Concurrent_queue_base_v4_dtor(_Concurrent_queue_base_v4 *this)
{
    TRACE("(%p)\n", this);
    operator_delete(this->data);
}
DEFINE_THISCALL_WRAPPER(_Concurrent_queue_base_v4_vector_dtor, 8)
/* Vector-deleting destructor.  flags: bit 1 = run destructor(s),
 * bit 2 = array form (element count stored just before the objects). */
_Concurrent_queue_base_v4* __thiscall _Concurrent_queue_base_v4_vector_dtor(
        _Concurrent_queue_base_v4 *this, unsigned int flags)
{
    TRACE("(%p %x)\n", this, flags);
    if(flags & 2) {
        /* we have an array, with the number of elements stored before the first object */
        INT_PTR i, *ptr = (INT_PTR *)this-1;

        for(i=*ptr-1; i>=0; i--)
            _Concurrent_queue_base_v4_dtor(this+i);
        operator_delete(ptr);
    } else {
        if(flags & 1)
            _Concurrent_queue_base_v4_dtor(this);
        operator_delete(this);
    }

    return this;
}
/* ?_Internal_finish_clear@_Concurrent_queue_base_v4@details@Concurrency@@IAEXXZ */
/* ?_Internal_finish_clear@_Concurrent_queue_base_v4@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Concurrent_queue_base_v4__Internal_finish_clear, 4)
/* Releases the last remaining page of each stripe.  Assumes the queue was
 * already emptied, so at most the tail page is still allocated per stripe. */
void __thiscall _Concurrent_queue_base_v4__Internal_finish_clear(
        _Concurrent_queue_base_v4 *this)
{
    int i;

    TRACE("(%p)\n", this);

    for(i=0; i<QUEUES_NO; i++)
    {
        if(this->data->queues[i].tail)
            call__Concurrent_queue_base_v4__Deallocate_page(this, this->data->queues[i].tail);
    }
}
/* ?_Internal_empty@_Concurrent_queue_base_v4@details@Concurrency@@IBE_NXZ */
/* ?_Internal_empty@_Concurrent_queue_base_v4@details@Concurrency@@IEBA_NXZ */
DEFINE_THISCALL_WRAPPER(_Concurrent_queue_base_v4__Internal_empty, 4)
/* Unsynchronized snapshot: empty when the global push and pop tickets match.
 * May be stale under concurrent producers/consumers. */
bool __thiscall _Concurrent_queue_base_v4__Internal_empty(
        const _Concurrent_queue_base_v4 *this)
{
    TRACE("(%p)\n", this);
    return this->data->head_pos == this->data->tail_pos;
}
/* ?_Internal_size@_Concurrent_queue_base_v4@details@Concurrency@@IBEIXZ */
/* ?_Internal_size@_Concurrent_queue_base_v4@details@Concurrency@@IEBA_KXZ */
DEFINE_THISCALL_WRAPPER(_Concurrent_queue_base_v4__Internal_size, 4)
/* Unsynchronized snapshot of the item count (pushed minus popped tickets). */
size_t __thiscall _Concurrent_queue_base_v4__Internal_size(
        const _Concurrent_queue_base_v4 *this)
{
    TRACE("(%p)\n", this);
    return this->data->tail_pos - this->data->head_pos;
}
/* Busy-wait helper: increments *counter until a machine-dependent spin limit,
 * then yields the time slice with Sleep(0) and resets the counter.  On a
 * uniprocessor the limit is 0, so every call yields immediately.  The lazily
 * initialized spin_limit may race on first use, but both writers store the
 * same value. */
static void spin_wait(int *counter)
{
    static int spin_limit = -1;

    if(spin_limit == -1)
    {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        spin_limit = si.dwNumberOfProcessors>1 ? 4000 : 0;
    }

    if(*counter >= spin_limit)
    {
        *counter = 0;
        Sleep(0);
    }
    else
    {
        (*counter)++;
    }
}
/* Termination handler for threadsafe_queue_push: advances the stripe's
 * tail_pos so waiting pushers make progress even if the item's copy/move
 * constructor raised an exception. */
static void CALLBACK queue_push_finally(BOOL normal, void *ctx)
{
    threadsafe_queue *queue = ctx;
    InterlockedIncrementSizeT(&queue->tail_pos);
}
/* Pushes one item with ticket id into a stripe.  Waits (spinning) for its
 * turn, allocates a fresh page when id is the first slot of a page, then
 * copy- or move-constructs the item into the page.  tail_pos is advanced by
 * queue_push_finally even on exception. */
static void threadsafe_queue_push(threadsafe_queue *queue, size_t id,
        void *e, _Concurrent_queue_base_v4 *parent, BOOL copy)
{
    size_t page_id = id & ~(parent->alloc_count-1);
    int spin;
    _Page *p;

    spin = 0;
    while(queue->tail_pos != id)
        spin_wait(&spin);

    if(page_id == id)
    {
        /* first slot of a new page: allocate and link it under the stripe lock */
        /* TODO: Add exception handling */
        p = call__Concurrent_queue_base_v4__Allocate_page(parent);
        p->_Next = NULL;
        p->_Mask = 0;

        spin = 0;
        while(InterlockedCompareExchange(&queue->lock, 1, 0))
            spin_wait(&spin);
        if(queue->tail)
            queue->tail->_Next = p;
        queue->tail = p;
        if(!queue->head)
            queue->head = p;
        WriteRelease(&queue->lock, 0);
    }
    else
    {
        p = queue->tail;
    }

    __TRY
    {
        if(copy)
            call__Concurrent_queue_base_v4__Copy_item(parent, p, id-page_id, e);
        else
            call__Concurrent_queue_base_v4__Move_item(parent, p, id-page_id, e);
        /* mark the slot as holding a constructed item */
        p->_Mask |= 1 << (id - page_id);
    }
    __FINALLY_CTX(queue_push_finally, queue)
}
/* Pops the item with ticket id from a stripe.  Waits until the slot has been
 * pushed and it is this ticket's turn, extracts the item if its mask bit is
 * set (FALSE means the slot's constructor failed), and frees the page once
 * its last slot has been consumed.  Always advances head_pos. */
static BOOL threadsafe_queue_pop(threadsafe_queue *queue, size_t id,
        void *e, _Concurrent_queue_base_v4 *parent)
{
    size_t page_id = id & ~(parent->alloc_count-1);
    int spin;
    _Page *p;
    BOOL ret = FALSE;

    spin = 0;
    while(queue->tail_pos <= id)
        spin_wait(&spin);

    spin = 0;
    while(queue->head_pos != id)
        spin_wait(&spin);

    p = queue->head;
    if(p->_Mask & (1 << (id-page_id)))
    {
        /* TODO: Add exception handling */
        call__Concurrent_queue_base_v4__Assign_and_destroy_item(parent, e, p, id-page_id);
        ret = TRUE;
    }

    if(id == page_id+parent->alloc_count-1)
    {
        /* last slot of the page consumed: unlink it under the stripe lock */
        spin = 0;
        while(InterlockedCompareExchange(&queue->lock, 1, 0))
            spin_wait(&spin);
        queue->head = p->_Next;
        if(!queue->head)
            queue->tail = NULL;
        WriteRelease(&queue->lock, 0);

        /* TODO: Add exception handling */
        call__Concurrent_queue_base_v4__Deallocate_page(parent, p);
    }

    InterlockedIncrementSizeT(&queue->head_pos);
    return ret;
}
/* ?_Internal_push@_Concurrent_queue_base_v4@details@Concurrency@@IAEXPBX@Z */
/* ?_Internal_push@_Concurrent_queue_base_v4@details@Concurrency@@IEAAXPEBX@Z */
DEFINE_THISCALL_WRAPPER(_Concurrent_queue_base_v4__Internal_push, 8)
/* Copy-pushes *e: reserves a global ticket, then routes it to one of the
 * QUEUES_NO striped sub-queues. */
void __thiscall _Concurrent_queue_base_v4__Internal_push(
        _Concurrent_queue_base_v4 *this, void *e)
{
    size_t id;

    TRACE("(%p %p)\n", this, e);

    id = InterlockedIncrementSizeT(&this->data->tail_pos)-1;
    threadsafe_queue_push(this->data->queues + id % QUEUES_NO,
            id / QUEUES_NO, e, this, TRUE);
}
/* ?_Internal_move_push@_Concurrent_queue_base_v4@details@Concurrency@@IAEXPAX@Z */
/* ?_Internal_move_push@_Concurrent_queue_base_v4@details@Concurrency@@IEAAXPEAX@Z */
DEFINE_THISCALL_WRAPPER(_Concurrent_queue_base_v4__Internal_move_push, 8)
/* Same as _Internal_push but move-constructs the item (copy == FALSE). */
void __thiscall _Concurrent_queue_base_v4__Internal_move_push(
        _Concurrent_queue_base_v4 *this, void *e)
{
    size_t id;

    TRACE("(%p %p)\n", this, e);

    id = InterlockedIncrementSizeT(&this->data->tail_pos)-1;
    threadsafe_queue_push(this->data->queues + id % QUEUES_NO,
            id / QUEUES_NO, e, this, FALSE);
}
/* ?_Internal_pop_if_present@_Concurrent_queue_base_v4@details@Concurrency@@IAE_NPAX@Z */
/* ?_Internal_pop_if_present@_Concurrent_queue_base_v4@details@Concurrency@@IEAA_NPEAX@Z */
DEFINE_THISCALL_WRAPPER(_Concurrent_queue_base_v4__Internal_pop_if_present, 8)
/* Non-blocking pop: claims a pop ticket with a CAS on head_pos (FALSE when
 * the queue looks empty), then retries with a new ticket if the claimed slot
 * was never successfully constructed. */
bool __thiscall _Concurrent_queue_base_v4__Internal_pop_if_present(
        _Concurrent_queue_base_v4 *this, void *e)
{
    size_t id;

    TRACE("(%p %p)\n", this, e);

    do
    {
        do
        {
            id = this->data->head_pos;
            if(id == this->data->tail_pos) return FALSE;
        } while(InterlockedCompareExchangePointer((void**)&this->data->head_pos,
                    (void*)(id+1), (void*)id) != (void*)id);
    } while(!threadsafe_queue_pop(this->data->queues + id % QUEUES_NO,
                id / QUEUES_NO, e, this));
    return TRUE;
}
/* ?_Internal_swap@_Concurrent_queue_base_v4@details@Concurrency@@IAEXAAV123@@Z */
/* ?_Internal_swap@_Concurrent_queue_base_v4@details@Concurrency@@IEAAXAEAV123@@Z */
DEFINE_THISCALL_WRAPPER(_Concurrent_queue_base_v4__Internal_swap, 8)
/* Unimplemented: swapping two queues is not yet supported. */
void __thiscall _Concurrent_queue_base_v4__Internal_swap(
        _Concurrent_queue_base_v4 *this, _Concurrent_queue_base_v4 *r)
{
    FIXME("(%p %p) stub\n", this, r);
}
DEFINE_THISCALL_WRAPPER(_Concurrent_queue_base_v4_dummy, 4)
/* Filler for vtable slots that should never be invoked at runtime. */
void __thiscall _Concurrent_queue_base_v4_dummy(_Concurrent_queue_base_v4 *this)
{
    ERR("unexpected call\n");
}

DEFINE_RTTI_DATA0(_Concurrent_queue_base_v4, 0, ".?AV_Concurrent_queue_base_v4@details@Concurrency@@")
/* Counter used to hand out ids to default-constructed _Runtime_objects. */
static LONG _Runtime_object_id;

typedef struct
{
    const vtable_ptr *vtable;
    int id;
} _Runtime_object;

extern const vtable_ptr _Runtime_object_vtable;
/* ??0_Runtime_object@details@Concurrency@@QAE@H@Z */
/* ??0_Runtime_object@details@Concurrency@@QEAA@H@Z */
DEFINE_THISCALL_WRAPPER(_Runtime_object_ctor_id, 8)
/* Constructor taking an explicit object id. */
_Runtime_object* __thiscall _Runtime_object_ctor_id(_Runtime_object *this, int id)
{
    TRACE("(%p %d)\n", this, id);
    this->vtable = &_Runtime_object_vtable;
    this->id = id;
    return this;
}
/* ??0_Runtime_object@details@Concurrency@@QAE@XZ */
/* ??0_Runtime_object@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Runtime_object_ctor, 4)
/* Default constructor: draws an id from the shared counter, which advances
 * by 2 per object (even ids). */
_Runtime_object* __thiscall _Runtime_object_ctor(_Runtime_object *this)
{
    TRACE("(%p)\n", this);
    this->vtable = &_Runtime_object_vtable;
    this->id = InterlockedExchangeAdd(&_Runtime_object_id, 2);
    return this;
}
DEFINE_THISCALL_WRAPPER(_Runtime_object__GetId, 4)
/* Returns the id assigned at construction. */
int __thiscall _Runtime_object__GetId(_Runtime_object *this)
{
    TRACE("(%p)\n", this);
    return this->id;
}

DEFINE_RTTI_DATA0(_Runtime_object, 0, ".?AV_Runtime_object@details@Concurrency@@")
/* Concurrent vector base.  Storage is a table of geometrically growing
 * segments: segment i holds 1 << i elements (segment 0 holds 2).  The table
 * starts in the inline storage[] array and moves to a heap allocation once
 * more than STORAGE_SIZE segments are needed. */
typedef struct __Concurrent_vector_base_v4
{
    void* (__cdecl *allocator)(struct __Concurrent_vector_base_v4 *, size_t);
    void *storage[3];
    size_t first_block;  /* number of segments backed by one combined allocation */
    size_t early_size;   /* current element count */
    void **segment;      /* active segment table (== storage or heap block) */
} _Concurrent_vector_base_v4;

#define STORAGE_SIZE ARRAY_SIZE(this->storage)
#define SEGMENT_SIZE (sizeof(void*) * 8)

/* Snapshot of a vector's segment table handed back by _Internal_compact. */
typedef struct compact_block
{
    size_t first_block;
    void *blocks[SEGMENT_SIZE];
    int size_check;
}compact_block;
/* Return the integer base-2 logarithm of (x|1). Result is 0 for x == 0. */
static inline unsigned int log2i(unsigned int x)
{
    ULONG index;
    /* x|1 guarantees a set bit, so BitScanReverse always succeeds */
    BitScanReverse(&index, x|1);
    return index;
}
/* ?_Segment_index_of@_Concurrent_vector_base_v4@details@Concurrency@@KAII@Z */
/* ?_Segment_index_of@_Concurrent_vector_base_v4@details@Concurrency@@KA_K_K@Z */
/* Maps element index x to its segment number, i.e. floor(log2(x|1)).
 * The sizeof check keeps the 64-bit path from being taken on 32-bit builds. */
size_t __cdecl _vector_base_v4__Segment_index_of(size_t x)
{
    unsigned int half;

    TRACE("(%Iu)\n", x);

    if((sizeof(x) == 8) && (half = x >> 32))
        return log2i(half) + 32;

    return log2i(x);
}
/* ?_Internal_throw_exception@_Concurrent_vector_base_v4@details@Concurrency@@IBEXI@Z */
/* ?_Internal_throw_exception@_Concurrent_vector_base_v4@details@Concurrency@@IEBAX_K@Z */
DEFINE_THISCALL_WRAPPER(_vector_base_v4__Internal_throw_exception, 8)
/* Throws the exception selected by idx; each case raises, so there is no
 * fall-through at runtime. */
void __thiscall _vector_base_v4__Internal_throw_exception(void/*_vector_base_v4*/ *this, size_t idx)
{
    TRACE("(%p %Iu)\n", this, idx);

    switch(idx) {
    case 0: _Xout_of_range("Index out of range");
    case 1: _Xout_of_range("Index out of segments table range");
    case 2: throw_range_error("Index is inside segment which failed to be allocated");
    }
}
/* size_t-wide compare-and-swap, mapped onto the matching interlocked
 * primitive for the build's pointer size. */
#ifdef _WIN64
#define InterlockedCompareExchangeSizeT(dest, exchange, cmp) InterlockedCompareExchangeSize((size_t *)dest, (size_t)exchange, (size_t)cmp)
static size_t InterlockedCompareExchangeSize(size_t volatile *dest, size_t exchange, size_t cmp)
{
    size_t v;

    v = InterlockedCompareExchange64((LONGLONG*)dest, exchange, cmp);

    return v;
}
#else
#define InterlockedCompareExchangeSizeT(dest, exchange, cmp) InterlockedCompareExchange((LONG*)dest, (size_t)exchange, (size_t)cmp)
#endif

/* Sentinel stored in a segment slot while another thread is allocating it. */
#define SEGMENT_ALLOC_MARKER ((void*)1)
/* Ensures segment seg is allocated.  Exactly one thread wins the CAS and
 * allocates; the others spin while the slot holds SEGMENT_ALLOC_MARKER.
 * Segments below first_block share the segment-0 allocation and are just
 * interior pointers into it. */
static void concurrent_vector_alloc_segment(_Concurrent_vector_base_v4 *this,
        size_t seg, size_t element_size)
{
    int spin;

    while(!this->segment[seg] || this->segment[seg] == SEGMENT_ALLOC_MARKER)
    {
        spin = 0;
        while(this->segment[seg] == SEGMENT_ALLOC_MARKER)
            spin_wait(&spin);
        if(!InterlockedCompareExchangeSizeT((this->segment + seg),
                    SEGMENT_ALLOC_MARKER, 0))
        {
            __TRY
            {
                if(seg == 0)
                    this->segment[seg] = this->allocator(this, element_size * (1 << this->first_block));
                else if(seg < this->first_block)
                    this->segment[seg] = (BYTE**)this->segment[0]
                            + element_size * (1 << seg);
                else
                    this->segment[seg] = this->allocator(this, element_size * (1 << seg));
            }
            __EXCEPT_ALL
            {
                /* reset the marker so other threads don't spin forever, then rethrow */
                this->segment[seg] = NULL;
                _CxxThrowException(NULL, NULL);
            }
            __ENDTRY
            if(!this->segment[seg])
                _vector_base_v4__Internal_throw_exception(this, 2);
        }
    }
}
/* ??1_Concurrent_vector_base_v4@details@Concurrency@@IAE@XZ */
/* ??1_Concurrent_vector_base_v4@details@Concurrency@@IEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Concurrent_vector_base_v4_dtor, 4)
/* Destructor: frees the heap segment table if one was allocated.  The
 * segments themselves are released by the derived class via the allocator. */
void __thiscall _Concurrent_vector_base_v4_dtor(
        _Concurrent_vector_base_v4 *this)
{
    TRACE("(%p)\n", this);

    if(this->segment != this->storage)
        free(this->segment);
}
/* ?_Internal_capacity@_Concurrent_vector_base_v4@details@Concurrency@@IBEIXZ */
/* ?_Internal_capacity@_Concurrent_vector_base_v4@details@Concurrency@@IEBA_KXZ */
DEFINE_THISCALL_WRAPPER(_Concurrent_vector_base_v4__Internal_capacity, 4)
/* Capacity is 1 << i where i is the first unallocated segment slot
 * (0 when nothing is allocated; segments 0 and 1 both map to capacity 2). */
size_t __thiscall _Concurrent_vector_base_v4__Internal_capacity(
        const _Concurrent_vector_base_v4 *this)
{
    size_t last_block;
    int i;

    TRACE("(%p)\n", this);

    last_block = (this->segment == this->storage ? STORAGE_SIZE : SEGMENT_SIZE);
    for(i = 0; i < last_block; i++)
    {
        if(!this->segment[i])
            return !i ? 0 : 1 << i;
    }

    return 1 << i;
}
/* ?_Internal_reserve@_Concurrent_vector_base_v4@details@Concurrency@@IAEXIII@Z */
/* ?_Internal_reserve@_Concurrent_vector_base_v4@details@Concurrency@@IEAAX_K00@Z */
DEFINE_THISCALL_WRAPPER(_Concurrent_vector_base_v4__Internal_reserve, 16)
/* Grows capacity to hold at least size elements, allocating segments as
 * needed and migrating the segment table from the inline storage to a heap
 * block when more than STORAGE_SIZE segments are required.  The table swap
 * is done with a CAS; the loser frees its copy. */
void __thiscall _Concurrent_vector_base_v4__Internal_reserve(
        _Concurrent_vector_base_v4 *this, size_t size,
        size_t element_size, size_t max_size)
{
    size_t block_idx, capacity;
    int i;
    void **new_segment;

    TRACE("(%p %Iu %Iu %Iu)\n", this, size, element_size, max_size);

    if(size > max_size) _vector_base_v4__Internal_throw_exception(this, 0);
    capacity = _Concurrent_vector_base_v4__Internal_capacity(this);
    if(size <= capacity) return;
    block_idx = _vector_base_v4__Segment_index_of(size - 1);
    if(!this->first_block)
        InterlockedCompareExchangeSizeT(&this->first_block, block_idx + 1, 0);
    i = _vector_base_v4__Segment_index_of(capacity);
    if(this->storage == this->segment) {
        for(; i <= block_idx && i < STORAGE_SIZE; i++)
            concurrent_vector_alloc_segment(this, i, element_size);
        if(block_idx >= STORAGE_SIZE) {
            new_segment = malloc(SEGMENT_SIZE * sizeof(void*));
            if(new_segment == NULL) _vector_base_v4__Internal_throw_exception(this, 2);
            memset(new_segment, 0, SEGMENT_SIZE * sizeof(*new_segment));
            memcpy(new_segment, this->storage, STORAGE_SIZE * sizeof(*new_segment));
            if(InterlockedCompareExchangePointer((void*)&this->segment, new_segment,
                        this->storage) != this->storage)
                free(new_segment);
        }
    }
    for(; i <= block_idx; i++)
        concurrent_vector_alloc_segment(this, i, element_size);
}
/* ?_Internal_clear@_Concurrent_vector_base_v4@details@Concurrency@@IAEIP6AXPAXI@Z@Z */
/* ?_Internal_clear@_Concurrent_vector_base_v4@details@Concurrency@@IEAA_KP6AXPEAX_K@Z@Z */
DEFINE_THISCALL_WRAPPER(_Concurrent_vector_base_v4__Internal_clear, 8)
/* Destroys all elements (highest segment first) via the clear callback and
 * returns the number of allocated segments so the caller can free them. */
size_t __thiscall _Concurrent_vector_base_v4__Internal_clear(
        _Concurrent_vector_base_v4 *this, void (__cdecl *clear)(void*, size_t))
{
    size_t seg_no, elems;
    int i;

    TRACE("(%p %p)\n", this, clear);

    seg_no = this->early_size ? _vector_base_v4__Segment_index_of(this->early_size) + 1 : 0;
    for(i = seg_no - 1; i >= 0; i--) {
        /* number of live elements in segment i: everything above its base (1<<i, or 0 for i==0) */
        elems = this->early_size - (1 << i & ~1);
        clear(this->segment[i], elems);
        this->early_size -= elems;
    }
    /* count trailing allocated-but-unused segments as well */
    while(seg_no < (this->segment == this->storage ? STORAGE_SIZE : SEGMENT_SIZE)) {
        if(!this->segment[seg_no]) break;
        seg_no++;
    }
    return seg_no;
}
/* ?_Internal_compact@_Concurrent_vector_base_v4@details@Concurrency@@IAEPAXIPAXP6AX0I@ZP6AX0PBXI@Z@Z */
/* ?_Internal_compact@_Concurrent_vector_base_v4@details@Concurrency@@IEAAPEAX_KPEAXP6AX10@ZP6AX1PEBX0@Z@Z */
DEFINE_THISCALL_WRAPPER(_Concurrent_vector_base_v4__Internal_compact, 20)
/* Shrinks capacity to fit early_size.  The old segment table is saved into
 * the caller-provided compact_block *v, elements are re-copied into a freshly
 * reserved minimal layout, and the old copies destroyed.  Returns v when the
 * caller must free the saved segments, NULL when nothing changed. */
void * __thiscall _Concurrent_vector_base_v4__Internal_compact(
        _Concurrent_vector_base_v4 *this, size_t element_size, void *v,
        void (__cdecl *clear)(void*, size_t),
        void (__cdecl *copy)(void*, const void*, size_t))
{
    compact_block *b;
    size_t size, alloc_size, seg_no, alloc_seg, copy_element, clear_element;
    int i;

    TRACE("(%p %Iu %p %p %p)\n", this, element_size, v, clear, copy);

    size = this->early_size;
    alloc_size = _Concurrent_vector_base_v4__Internal_capacity(this);
    if(alloc_size == 0) return NULL;
    alloc_seg = _vector_base_v4__Segment_index_of(alloc_size - 1);
    if(!size) {
        /* empty vector: hand every segment back to the caller */
        this->first_block = 0;
        b = v;
        b->first_block = alloc_seg + 1;
        memset(b->blocks, 0, sizeof(b->blocks));
        memcpy(b->blocks, this->segment,
                (alloc_seg + 1) * sizeof(this->segment[0]));
        memset(this->segment, 0, sizeof(this->segment[0]) * (alloc_seg + 1));
        return v;
    }
    seg_no = _vector_base_v4__Segment_index_of(size - 1);
    if(this->first_block == (seg_no + 1) && seg_no == alloc_seg) return NULL;
    b = v;
    b->first_block = this->first_block;
    memset(b->blocks, 0, sizeof(b->blocks));
    memcpy(b->blocks, this->segment,
            (alloc_seg + 1) * sizeof(this->segment[0]));
    if(this->first_block == (seg_no + 1) && seg_no != alloc_seg) {
        /* layout already compact; only trailing unused segments to release */
        memset(b->blocks, 0, sizeof(b->blocks[0]) * (seg_no + 1));
        memset(&this->segment[seg_no + 1], 0, sizeof(this->segment[0]) * (alloc_seg - seg_no));
        return v;
    }
    /* rebuild from scratch: reserve a minimal layout and move elements over */
    memset(this->segment, 0,
            (alloc_seg + 1) * sizeof(this->segment[0]));
    this->first_block = 0;
    _Concurrent_vector_base_v4__Internal_reserve(this, size, element_size,
            ~(size_t)0 / element_size);
    for(i = 0; i < seg_no; i++)
        copy(this->segment[i], b->blocks[i], i ? 1 << i : 2);
    copy_element = size - ((1 << seg_no) & ~1);
    if(copy_element > 0)
        copy(this->segment[seg_no], b->blocks[seg_no], copy_element);
    for(i = 0; i < seg_no; i++)
        clear(b->blocks[i], i ? 1 << i : 2);
    clear_element = size - ((1 << seg_no) & ~1);
    if(clear_element > 0)
        clear(b->blocks[seg_no], clear_element);
    return v;
}
/* ?_Internal_copy@_Concurrent_vector_base_v4@details@Concurrency@@IAEXABV123@IP6AXPAXPBXI@Z@Z */
/* ?_Internal_copy@_Concurrent_vector_base_v4@details@Concurrency@@IEAAXAEBV123@_KP6AXPEAXPEBX1@Z@Z */
DEFINE_THISCALL_WRAPPER(_Concurrent_vector_base_v4__Internal_copy, 16)
/* Copy-constructs this from v (this is assumed empty): reserves capacity for
 * v's elements, then copies each full segment and the partial last one. */
void __thiscall _Concurrent_vector_base_v4__Internal_copy(
        _Concurrent_vector_base_v4 *this, const _Concurrent_vector_base_v4 *v,
        size_t element_size, void (__cdecl *copy)(void*, const void*, size_t))
{
    size_t seg_no, v_size;
    int i;

    TRACE("(%p %p %Iu %p)\n", this, v, element_size, copy);

    v_size = v->early_size;
    if(!v_size) {
        this->early_size = 0;
        return;
    }
    _Concurrent_vector_base_v4__Internal_reserve(this, v_size,
            element_size, ~(size_t)0 / element_size);
    seg_no = _vector_base_v4__Segment_index_of(v_size - 1);
    for(i = 0; i < seg_no; i++)
        copy(this->segment[i], v->segment[i], i ? 1 << i : 2);
    copy(this->segment[i], v->segment[i], v_size - (1 << i & ~1));
    this->early_size = v_size;
}
/* ?_Internal_assign@_Concurrent_vector_base_v4@details@Concurrency@@IAEXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z */
/* ?_Internal_assign@_Concurrent_vector_base_v4@details@Concurrency@@IEAAXAEBV123@_KP6AXPEAX1@ZP6AX2PEBX1@Z5@Z */
DEFINE_THISCALL_WRAPPER(_Concurrent_vector_base_v4__Internal_assign, 24)
/* Assigns v to this.  Existing elements are assigned in place; if this is
 * longer the surplus is destroyed (clear), if shorter the missing tail is
 * copy-constructed (copy) after reserving capacity. */
void __thiscall _Concurrent_vector_base_v4__Internal_assign(
        _Concurrent_vector_base_v4 *this, const _Concurrent_vector_base_v4 *v,
        size_t element_size, void (__cdecl *clear)(void*, size_t),
        void (__cdecl *assign)(void*, const void*, size_t),
        void (__cdecl *copy)(void*, const void*, size_t))
{
    size_t v_size, seg_no, v_seg_no, remain_element;
    int i;

    TRACE("(%p %p %Iu %p %p %p)\n", this, v, element_size, clear, assign, copy);

    v_size = v->early_size;
    if(!v_size) {
        _Concurrent_vector_base_v4__Internal_clear(this, clear);
        return;
    }
    if(!this->early_size) {
        _Concurrent_vector_base_v4__Internal_copy(this, v, element_size, copy);
        return;
    }
    seg_no = _vector_base_v4__Segment_index_of(this->early_size - 1);
    v_seg_no = _vector_base_v4__Segment_index_of(v_size - 1);

    /* assign over the segments both vectors fully populate */
    for(i = 0; i < min(seg_no, v_seg_no); i++)
        assign(this->segment[i], v->segment[i], i ? 1 << i : 2);
    remain_element = min(this->early_size, v_size) - (1 << i & ~1);
    if(remain_element != 0)
        assign(this->segment[i], v->segment[i], remain_element);

    if(this->early_size > v_size)
    {
        /* shrink: destroy the surplus elements of this */
        if((i ? 1 << i : 2) - remain_element > 0)
            clear((BYTE**)this->segment[i] + element_size * remain_element,
                    (i ? 1 << i : 2) - remain_element);
        if(i < seg_no)
        {
            for(i++; i < seg_no; i++)
                clear(this->segment[i], 1 << i);
            clear(this->segment[i], this->early_size - (1 << i));
        }
    }
    else if(this->early_size < v_size)
    {
        /* grow: copy-construct the extra elements from v */
        if((i ? 1 << i : 2) - remain_element > 0)
            copy((BYTE**)this->segment[i] + element_size * remain_element,
                    (BYTE**)v->segment[i] + element_size * remain_element,
                    (i ? 1 << i : 2) - remain_element);
        if(i < v_seg_no)
        {
            _Concurrent_vector_base_v4__Internal_reserve(this, v_size,
                    element_size, ~(size_t)0 / element_size);
            for(i++; i < v_seg_no; i++)
                copy(this->segment[i], v->segment[i], 1 << i);
            copy(this->segment[i], v->segment[i], v->early_size - (1 << i));
        }
    }
    this->early_size = v_size;
}
/* ?_Internal_grow_by@_Concurrent_vector_base_v4@details@Concurrency@@IAEIIIP6AXPAXPBXI@Z1@Z */
/* ?_Internal_grow_by@_Concurrent_vector_base_v4@details@Concurrency@@IEAA_K_K0P6AXPEAXPEBX0@Z2@Z */
DEFINE_THISCALL_WRAPPER(_Concurrent_vector_base_v4__Internal_grow_by, 20)
/* Appends count elements initialized from template v via the copy callback.
 * The new range is claimed with a CAS on early_size (reserving capacity each
 * retry); returns the old size, i.e. the index of the first new element. */
size_t __thiscall _Concurrent_vector_base_v4__Internal_grow_by(
        _Concurrent_vector_base_v4 *this, size_t count, size_t element_size,
        void (__cdecl *copy)(void*, const void*, size_t), const void *v)
{
    size_t size, seg_no, last_seg_no, remain_size;

    TRACE("(%p %Iu %Iu %p %p)\n", this, count, element_size, copy, v);

    if(count == 0) return this->early_size;
    do {
        size = this->early_size;
        _Concurrent_vector_base_v4__Internal_reserve(this, size + count, element_size,
                ~(size_t)0 / element_size);
    } while(InterlockedCompareExchangeSizeT(&this->early_size, size + count, size) != size);

    seg_no = size ? _vector_base_v4__Segment_index_of(size - 1) : 0;
    last_seg_no = _vector_base_v4__Segment_index_of(size + count - 1);
    /* fill the rest of the partially used segment first */
    remain_size = min(size + count, 1 << (seg_no + 1)) - size;
    if(remain_size > 0)
        copy(((BYTE**)this->segment[seg_no] + element_size * (size - ((1 << seg_no) & ~1))), v,
                remain_size);
    if(seg_no != last_seg_no)
    {
        for(seg_no++; seg_no < last_seg_no; seg_no++)
            copy(this->segment[seg_no], v, 1 << seg_no);
        copy(this->segment[last_seg_no], v, size + count - (1 << last_seg_no));
    }
    return size;
}
/* ?_Internal_grow_to_at_least_with_result@_Concurrent_vector_base_v4@details@Concurrency@@IAEIIIP6AXPAXPBXI@Z1@Z */
/* ?_Internal_grow_to_at_least_with_result@_Concurrent_vector_base_v4@details@Concurrency@@IEAA_K_K0P6AXPEAXPEBX0@Z2@Z */
DEFINE_THISCALL_WRAPPER(_Concurrent_vector_base_v4__Internal_grow_to_at_least_with_result, 20)
/* Grows the vector to at least count elements, initializing the new range
 * from template v.  Returns the size observed before growing (>= count when
 * another thread already grew it, in which case nothing is copied). */
size_t __thiscall _Concurrent_vector_base_v4__Internal_grow_to_at_least_with_result(
        _Concurrent_vector_base_v4 *this, size_t count, size_t element_size,
        void (__cdecl *copy)(void*, const void*, size_t), const void *v)
{
    size_t size, seg_no, last_seg_no, remain_size;

    TRACE("(%p %Iu %Iu %p %p)\n", this, count, element_size, copy, v);

    _Concurrent_vector_base_v4__Internal_reserve(this, count, element_size,
            ~(size_t)0 / element_size);
    do {
        size = this->early_size;
        if(size >= count) return size;
    } while(InterlockedCompareExchangeSizeT(&this->early_size, count, size) != size);

    seg_no = size ? _vector_base_v4__Segment_index_of(size - 1) : 0;
    last_seg_no = _vector_base_v4__Segment_index_of(count - 1);
    remain_size = min(count, 1 << (seg_no + 1)) - size;
    if(remain_size > 0)
        copy(((BYTE**)this->segment[seg_no] + element_size * (size - ((1 << seg_no) & ~1))), v,
                remain_size);
    if(seg_no != last_seg_no)
    {
        for(seg_no++; seg_no < last_seg_no; seg_no++)
            copy(this->segment[seg_no], v, 1 << seg_no);
        copy(this->segment[last_seg_no], v, count - (1 << last_seg_no));
    }
    return size;
}
/* ?_Internal_push_back@_Concurrent_vector_base_v4@details@Concurrency@@IAEPAXIAAI@Z */
/* ?_Internal_push_back@_Concurrent_vector_base_v4@details@Concurrency@@IEAAPEAX_KAEA_K@Z */
DEFINE_THISCALL_WRAPPER(_Concurrent_vector_base_v4__Internal_push_back, 12)
/* Claims one new slot with a CAS on early_size and returns its address;
 * the caller constructs the element there.  *idx receives the slot index. */
void * __thiscall _Concurrent_vector_base_v4__Internal_push_back(
        _Concurrent_vector_base_v4 *this, size_t element_size, size_t *idx)
{
    size_t index, seg, segment_base;
    void *data;

    TRACE("(%p %Iu %p)\n", this, element_size, idx);

    do {
        index = this->early_size;
        _Concurrent_vector_base_v4__Internal_reserve(this, index + 1,
                element_size, ~(size_t)0 / element_size);
    } while(InterlockedCompareExchangeSizeT(&this->early_size, index + 1, index) != index);
    seg = _vector_base_v4__Segment_index_of(index);
    segment_base = (seg == 0) ? 0 : (1 << seg);
    data = (BYTE*)this->segment[seg] + element_size * (index - segment_base);
    *idx = index;

    return data;
}
/* ?_Internal_resize@_Concurrent_vector_base_v4@details@Concurrency@@IAEXIIIP6AXPAXI@ZP6AX0PBXI@Z2@Z */
/* ?_Internal_resize@_Concurrent_vector_base_v4@details@Concurrency@@IEAAX_K00P6AXPEAX0@ZP6AX1PEBX0@Z3@Z */
DEFINE_THISCALL_WRAPPER(_Concurrent_vector_base_v4__Internal_resize, 28)
/* Resizes to exactly resize elements: grows via
 * grow_to_at_least_with_result (new elements from template v), clears
 * everything on resize == 0, or destroys the trailing elements segment by
 * segment when shrinking. */
void __thiscall _Concurrent_vector_base_v4__Internal_resize(
        _Concurrent_vector_base_v4 *this, size_t resize, size_t element_size,
        size_t max_size, void (__cdecl *clear)(void*, size_t),
        void (__cdecl *copy)(void*, const void*, size_t), const void *v)
{
    size_t size, seg_no, end_seg_no, clear_element;

    TRACE("(%p %Iu %Iu %Iu %p %p %p)\n", this, resize, element_size, max_size, clear, copy, v);

    if(resize > max_size) _vector_base_v4__Internal_throw_exception(this, 0);
    size = this->early_size;
    if(resize > size)
        _Concurrent_vector_base_v4__Internal_grow_to_at_least_with_result(this,
                resize, element_size, copy, v);
    else if(resize == 0)
        _Concurrent_vector_base_v4__Internal_clear(this, clear);
    else if(resize < size)
    {
        seg_no = _vector_base_v4__Segment_index_of(size - 1);
        end_seg_no = _vector_base_v4__Segment_index_of(resize - 1);
        /* destroy the partial last segment, the full segments in between,
         * then the tail of the segment resize lands in */
        clear_element = size - (seg_no ? 1 << seg_no : 2);
        if(clear_element > 0)
            clear(this->segment[seg_no], clear_element);
        if(seg_no) seg_no--;
        for(; seg_no > end_seg_no; seg_no--)
            clear(this->segment[seg_no], 1 << seg_no);
        clear_element = (1 << (end_seg_no + 1)) - resize;
        if(clear_element > 0)
            clear((BYTE**)this->segment[end_seg_no] + element_size * (resize - ((1 << end_seg_no) & ~1)),
                    clear_element);
        this->early_size = resize;
    }
}
/* ?_Internal_swap@_Concurrent_vector_base_v4@details@Concurrency@@IAEXAAV123@@Z */
/* ?_Internal_swap@_Concurrent_vector_base_v4@details@Concurrency@@IEAAXAEAV123@@Z */
DEFINE_THISCALL_WRAPPER(_Concurrent_vector_base_v4__Internal_swap, 8)
/* Swaps the two vectors wholesale, then re-points any segment table that
 * referenced the other object's inline storage back at its own storage. */
void __thiscall _Concurrent_vector_base_v4__Internal_swap(
        _Concurrent_vector_base_v4 *this, _Concurrent_vector_base_v4 *v)
{
    _Concurrent_vector_base_v4 temp;

    TRACE("(%p %p)\n", this, v);

    temp = *this;
    *this = *v;
    *v = temp;
    if(v->segment == this->storage)
        v->segment = v->storage;
    if(this->segment == v->storage)
        this->segment = this->storage;
}
/* ?is_current_task_group_canceling@Concurrency@@YA_NXZ */
/* Forwards to the scheduler's per-context cancellation query. */
bool __cdecl is_current_task_group_canceling(void)
{
    return Context_IsCurrentTaskCollectionCanceling();
}
/* ?_GetCombinableSize@details@Concurrency@@YAIXZ */
/* ?_GetCombinableSize@details@Concurrency@@YA_KXZ */
/* Stub: returns a fixed bucket count for Concurrency::combinable. */
size_t __cdecl _GetCombinableSize(void)
{
    FIXME("() stub\n");
    return 11;
}
/* Layout-compatible stand-in for Concurrency::task_continuation_context;
 * field meanings unknown here, only zero-initialized by the ctor. */
typedef struct {
    void *unk0;
    BYTE unk1;
} task_continuation_context;

/* ??0task_continuation_context@Concurrency@@AAE@XZ */
/* ??0task_continuation_context@Concurrency@@AEAA@XZ */
DEFINE_THISCALL_WRAPPER(task_continuation_context_ctor, 4)
task_continuation_context* __thiscall task_continuation_context_ctor(task_continuation_context *this)
{
    TRACE("(%p)\n", this);
    memset(this, 0, sizeof(*this));
    return this;
}
/* Layout-compatible stand-in for std::function<void()> as passed by value;
 * this points at the active callable (== &func for the inline case). */
typedef struct {
    const vtable_ptr *vtable;
    void (__cdecl *func)(void);
    int unk[4];
    void *unk2[3];
    void *this;
} function_void_cdecl_void;
/* ?_Assign@_ContextCallback@details@Concurrency@@AAEXPAX@Z */
/* ?_Assign@_ContextCallback@details@Concurrency@@AEAAXPEAX@Z */
DEFINE_THISCALL_WRAPPER(_ContextCallback__Assign, 8)
/* No-op: context capture is not implemented, nothing to assign. */
void __thiscall _ContextCallback__Assign(void *this, void *v)
{
    TRACE("(%p %p)\n", this, v);
}
/* Virtual calls into the std::function object: slot at offset 8 invokes the
 * callable, slot at offset 16 destroys it (the bool selects whether the
 * callable was heap-allocated). */
#define call_function_do_call(this) CALL_VTBL_FUNC(this, 8, void, (function_void_cdecl_void*), (this))
#define call_function_do_clean(this,b) CALL_VTBL_FUNC(this, 16, void, (function_void_cdecl_void*,bool), (this, b))
/* ?_CallInContext@_ContextCallback@details@Concurrency@@QBEXV?$function@$$A6AXXZ@std@@_N@Z */
/* ?_CallInContext@_ContextCallback@details@Concurrency@@QEBAXV?$function@$$A6AXXZ@std@@_N@Z */
/* Thunk stack size is 48: const void* this + function_void_cdecl_void passed
 * by value (40 bytes on 32-bit) + bool. */
DEFINE_THISCALL_WRAPPER(_ContextCallback__CallInContext, 48)
/* Invokes the wrapped callable.  Native would marshal the call into the
 * captured COM context; no context is captured here, so the callable runs
 * directly.  func.this != &func means the callable lives on the heap rather
 * than inline in the function object, so do_clean must free it. */
void __thiscall _ContextCallback__CallInContext(const void *this, function_void_cdecl_void func, bool b)
{
    TRACE("(%p %p %x)\n", this, func.func, b);
    call_function_do_call(func.this);
    call_function_do_clean(func.this, func.this!=&func);
}
/* ?_Capture@_ContextCallback@details@Concurrency@@AAEXXZ */
/* ?_Capture@_ContextCallback@details@Concurrency@@AEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ContextCallback__Capture, 4)
/* Stub: native captures the current COM apartment context; nothing is stored
 * in this implementation. */
void __thiscall _ContextCallback__Capture(void *this)
{
    TRACE("(%p)\n", this);
}
/* ?_Reset@_ContextCallback@details@Concurrency@@AAEXXZ */
/* ?_Reset@_ContextCallback@details@Concurrency@@AEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ContextCallback__Reset, 4)
/* Stub: releases the captured context on native; no-op here since _Capture
 * stores nothing. */
void __thiscall _ContextCallback__Reset(void *this)
{
    TRACE("(%p)\n", this);
}
/* ?_IsCurrentOriginSTA@_ContextCallback@details@Concurrency@@CA_NXZ */
/* Always reports a non-STA origin, so callbacks are never marshalled to a
 * single-threaded apartment. */
bool __cdecl _ContextCallback__IsCurrentOriginSTA(void *this)
{
    TRACE("(%p)\n", this);
    return FALSE;
}
/* Per-task ETW logging helper.  All _Log* methods below are deliberate
 * no-ops (tracing only); the fields mirror the native layout. */
typedef struct {
    /*_Task_impl_base*/void *task;  /* owning task implementation */
    bool scheduled;                 /* task has been scheduled */
    bool started;                   /* task body has begun executing */
} _TaskEventLogger;
/* ?_LogCancelTask@_TaskEventLogger@details@Concurrency@@QAEXXZ */
/* ?_LogCancelTask@_TaskEventLogger@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_TaskEventLogger__LogCancelTask, 4)
/* No-op ETW event: task canceled. */
void __thiscall _TaskEventLogger__LogCancelTask(_TaskEventLogger *this)
{
    TRACE("(%p)\n", this);
}
/* ?_LogScheduleTask@_TaskEventLogger@details@Concurrency@@QAEX_N@Z */
/* ?_LogScheduleTask@_TaskEventLogger@details@Concurrency@@QEAAX_N@Z */
DEFINE_THISCALL_WRAPPER(_TaskEventLogger__LogScheduleTask, 8)
/* No-op ETW event: task scheduled (continuation distinguishes chained tasks). */
void __thiscall _TaskEventLogger__LogScheduleTask(_TaskEventLogger *this, bool continuation)
{
    TRACE("(%p %x)\n", this, continuation);
}
/* ?_LogTaskCompleted@_TaskEventLogger@details@Concurrency@@QAEXXZ */
/* ?_LogTaskCompleted@_TaskEventLogger@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_TaskEventLogger__LogTaskCompleted, 4)
/* No-op ETW event: task completed. */
void __thiscall _TaskEventLogger__LogTaskCompleted(_TaskEventLogger *this)
{
    TRACE("(%p)\n", this);
}
/* ?_LogTaskExecutionCompleted@_TaskEventLogger@details@Concurrency@@QAEXXZ */
/* ?_LogTaskExecutionCompleted@_TaskEventLogger@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_TaskEventLogger__LogTaskExecutionCompleted, 4)
/* No-op ETW event: task body finished executing. */
void __thiscall _TaskEventLogger__LogTaskExecutionCompleted(_TaskEventLogger *this)
{
    TRACE("(%p)\n", this);
}
/* ?_LogWorkItemCompleted@_TaskEventLogger@details@Concurrency@@QAEXXZ */
/* ?_LogWorkItemCompleted@_TaskEventLogger@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_TaskEventLogger__LogWorkItemCompleted, 4)
/* No-op ETW event: work item completed. */
void __thiscall _TaskEventLogger__LogWorkItemCompleted(_TaskEventLogger *this)
{
    TRACE("(%p)\n", this);
}
/* ?_LogWorkItemStarted@_TaskEventLogger@details@Concurrency@@QAEXXZ */
/* ?_LogWorkItemStarted@_TaskEventLogger@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_TaskEventLogger__LogWorkItemStarted, 4)
/* No-op ETW event: work item started. */
void __thiscall _TaskEventLogger__LogWorkItemStarted(_TaskEventLogger *this)
{
    TRACE("(%p)\n", this);
}
/* A unit of work submitted to the Windows thread pool.  work is the pool
 * handle (NULL until scheduled / after release); callback is invoked with
 * arg from the pool thread. */
typedef struct {
    PTP_WORK work;
    void (__cdecl *callback)(void*);
    void *arg;
} _Threadpool_chore;
/* ?_Reschedule_chore@details@Concurrency@@YAHPBU_Threadpool_chore@12@@Z */
/* ?_Reschedule_chore@details@Concurrency@@YAHPEBU_Threadpool_chore@12@@Z */
/* Resubmits an already-created chore to the thread pool.  Assumes
 * chore->work was set up by _Schedule_chore.  Always returns 0
 * (SubmitThreadpoolWork cannot fail). */
int __cdecl _Reschedule_chore(const _Threadpool_chore *chore)
{
    TRACE("(%p)\n", chore);

    SubmitThreadpoolWork(chore->work);
    return 0;
}
1040 static void WINAPI threadpool_callback(PTP_CALLBACK_INSTANCE instance, void *context, PTP_WORK work)
1042 _Threadpool_chore *chore = context;
1043 TRACE("calling chore callback: %p\n", chore);
1044 if (chore->callback)
1045 chore->callback(chore->arg);
1048 /* ?_Schedule_chore@details@Concurrency@@YAHPAU_Threadpool_chore@12@@Z */
1049 /* ?_Schedule_chore@details@Concurrency@@YAHPEAU_Threadpool_chore@12@@Z */
1050 int __cdecl _Schedule_chore(_Threadpool_chore *chore)
1052 TRACE("(%p)\n", chore);
1054 chore->work = CreateThreadpoolWork(threadpool_callback, chore, NULL);
1055 /* FIXME: what should be returned in case of error */
1056 if(!chore->work)
1057 return -1;
1059 return _Reschedule_chore(chore);
1062 /* ?_Release_chore@details@Concurrency@@YAXPAU_Threadpool_chore@12@@Z */
1063 /* ?_Release_chore@details@Concurrency@@YAXPEAU_Threadpool_chore@12@@Z */
1064 void __cdecl _Release_chore(_Threadpool_chore *chore)
1066 TRACE("(%p)\n", chore);
1068 if(!chore->work) return;
1069 CloseThreadpoolWork(chore->work);
1070 chore->work = NULL;
/* ?_IsNonBlockingThread@_Task_impl_base@details@Concurrency@@SA_NXZ */
/* Stub: always reports the current thread may block (native would return
 * TRUE on threads that must not wait, e.g. WinRT STA threads). */
bool __cdecl _Task_impl_base__IsNonBlockingThread(void)
{
    FIXME("() stub\n");
    return FALSE;
}
/* ?ReportUnhandledError@_ExceptionHolder@details@Concurrency@@AAAXXZ */
/* ?ReportUnhandledError@_ExceptionHolder@details@Concurrency@@AAEXXZ */
/* ?ReportUnhandledError@_ExceptionHolder@details@Concurrency@@AEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ExceptionHolder__ReportUnhandledError, 4)
/* Stub: native reports a task exception that was never observed; nothing is
 * reported here. */
void __thiscall _ExceptionHolder__ReportUnhandledError(void *this)
{
    FIXME("(%p) stub\n", this);
}
/* Assembly-emitted vtables.  Slot order must match native concrt140:
 * slot 3 of _Concurrent_queue_base_v4 is the vector-deleting destructor;
 * the remaining slots (move/copy/assign/destroy item) are dummies that are
 * never reached because the real implementations live in the C++ subclass. */
__ASM_BLOCK_BEGIN(concurrency_details_vtables)
    __ASM_VTABLE(_Concurrent_queue_base_v4,
            VTABLE_ADD_FUNC(_Concurrent_queue_base_v4_dummy)
            VTABLE_ADD_FUNC(_Concurrent_queue_base_v4_dummy)
            VTABLE_ADD_FUNC(_Concurrent_queue_base_v4_dummy)
            VTABLE_ADD_FUNC(_Concurrent_queue_base_v4_vector_dtor)
            VTABLE_ADD_FUNC(_Concurrent_queue_base_v4_dummy)
            VTABLE_ADD_FUNC(_Concurrent_queue_base_v4_dummy));
    __ASM_VTABLE(_Runtime_object,
            VTABLE_ADD_FUNC(_Runtime_object__GetId));
__ASM_BLOCK_END
/* One-time module init: registers RTTI descriptors for the exported classes.
 * Only needed on 64-bit, where RTTI stores image-base-relative offsets and
 * therefore must know the module base at runtime. */
void init_concurrency_details(void *base)
{
#ifdef __x86_64__
    init__Concurrent_queue_base_v4_rtti(base);
    init__Runtime_object_rtti(base);
#endif
}
1109 /* ?_Byte_reverse_table@details@Concurrency@@3QBEB */
1110 const BYTE byte_reverse_table[256] =
1112 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
1113 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
1114 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
1115 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
1116 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
1117 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
1118 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
1119 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
1120 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
1121 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
1122 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
1123 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
1124 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
1125 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
1126 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
1127 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,