// Copyright (C) 2004, 2005, 2006, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.
#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>
namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*     _M_thread_freelist;
    _Thread_record*     _M_thread_freelist_array;
    size_t              _M_max_threads;
    __gthread_key_t     _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
        }
    }
  };

  __freelist&
  get_freelist()
  {
    static __freelist freelist;
    return freelist;
  }

  __gnu_cxx::__mutex&
  get_freelist_mutex()
  {
    static __gnu_cxx::__mutex freelist_mutex;
    return freelist_mutex;
  }
  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __freelist& freelist = get_freelist();
    {
      __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
      size_t _M_id = reinterpret_cast<size_t>(__id);

      typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
      _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
      __tr->_M_next = freelist._M_thread_freelist;
      freelist._M_thread_freelist = __tr;
    }
  }
#endif
} // anonymous namespace
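// Illustration of the machinery above: a thread's first call to
// __pool<true>::_M_get_thread_id() pops a _Thread_record under the
// freelist mutex and caches its id in the _M_key TLS slot; thread exit
// runs _M_destroy_thread_key, which pushes the record back:
//
//   acquire:  id = freelist._M_thread_freelist->_M_id;
//             freelist._M_thread_freelist
//               = freelist._M_thread_freelist->_M_next;
//   release:  __tr->_M_next = freelist._M_thread_freelist;
//             freelist._M_thread_freelist = __tr;
//
// At most _M_max_threads distinct ids therefore circulate, and ids are
// reused in LIFO order.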
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }
  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }
  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                              + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = NULL;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
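  // Worked example (illustrative; the exact numbers depend on the _Tune
  // values in use): with _M_min_bin == 8, _M_align == 8 and a chunk of,
  // say, 4064 bytes, bin 0 has __bin_size == (8 << 0) + 8 == 16, so one
  // chunk is carved into (4064 - sizeof(_Block_address)) / 16 blocks,
  // laid out __bin_size bytes apart and threaded into a singly linked
  // freelist in a single pass.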
  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = NULL;
        __bin._M_address = NULL;
      }
    _M_init = true;
  }
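  // Worked example (illustrative, assuming the default _M_min_bin == 8
  // and _M_max_bytes == 128): five bins of 8, 16, 32, 64 and 128 bytes
  // are created and the binmap ends up as
  //
  //   _M_binmap[0..8]    == 0   // served by the 8-byte bin
  //   _M_binmap[9..16]   == 1   // 16-byte bin
  //   _M_binmap[17..32]  == 2
  //   _M_binmap[33..64]  == 3
  //   _M_binmap[65..128] == 4
  //
  // making the bin lookup in allocate/deallocate a single array read.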
#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }
  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance sake we don't resync every time, in order
        // to spare atomic ops.  Note that if __reclaimed increased by,
        // say, 1024, since the last sync, it means that the other
        // threads executed the atomic in the else below at least the
        // same number of times (at least, because _M_reserve_block may
        // have decreased the counter), therefore one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }
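  // Worked example of the headroom test above (illustrative, assuming
  // the default _M_freelist_headroom == 10): a thread with 200 blocks
  // free and 50 in net use computes __remove == 200 * 10 - 50 == 1950;
  // if that beats both __limit and the raw free count, it donates
  // __remove / 10 == 195 blocks to the global list under a single mutex
  // acquisition, keeping roughly headroom-proportional slack locally.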
  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list. If so, grab up to __block_count blocks in one
    //   lock and change ownership. If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0
    //   no need to lock or change ownership but check for free
    //   blocks on global list (and if not add new ones) and
    //   get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = NULL;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == NULL)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = NULL;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = NULL;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = NULL;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = NULL;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
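  // Design note: _M_free and _M_used are plain per-thread counters,
  // only ever written by their owner; frees arriving from other threads
  // are recorded in the trailing _Atomic_word array instead (see
  // _M_reclaim_block), so this function needs the bin mutex only while
  // it manipulates the shared list _M_first[0].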
  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }
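  // Layout note: each bin's _M_used allocation is really two arrays in
  // one: __max_threads size_t use counters followed by __max_threads
  // _Atomic_word reclaim counters, recovered elsewhere as
  //
  //   reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads)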
  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active we check the thread
    // key value and return its id, or if it's not set we take the
    // first record from _M_thread_freelist, set the key and return
    // its id.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        void* v = __gthread_getspecific(freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }
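  // Note: ids start at 1 because the global pool owns id 0; a thread
  // that cannot obtain an id below _M_options._M_max_threads simply
  // maps to 0 and shares the global pool, trading the per-thread fast
  // path for correctness when the id pool is exhausted.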
  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) { }
  // XXX GLIBCXX_ABI Deprecated
  // Retained for ABI compatibility; the __destroy_handler argument is
  // ignored and the internal ::_M_destroy_thread_key is used instead.
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }
#endif
  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;
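  // Usage sketch (illustrative): __mt_alloc drops in as the allocator
  // argument of any standard container, e.g.
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //   std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
  //
  // Requests above _M_max_bytes, or any request when _M_force_new is
  // set, bypass the pools and go straight to ::operator new.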
_GLIBCXX_END_NAMESPACE