// Allocator details.

// Copyright (C) 2004, 2005, 2006 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <bits/concurrence.h>
#include <ext/mt_allocator.h>
namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record* _M_thread_freelist;
    _Thread_record* _M_thread_freelist_array;
    size_t _M_max_threads;
    __gthread_key_t _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
        }
    }
  };

  // Ensure freelist is constructed first.
  static __freelist freelist;
  static __glibcxx_mutex_define_initialized(freelist_mutex);
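
  // NB: _M_destroy_thread_key below is registered as the key's
  // destructor via __gthread_key_create in __pool<true>::_M_initialize.
  // The gthreads runtime invokes it at thread exit, so an exiting
  // thread automatically returns its id for reuse by later threads.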
  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __gnu_cxx::lock sentry(freelist_mutex);
    size_t _M_id = reinterpret_cast<size_t>(__id);

    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
    __tr->_M_next = freelist._M_thread_freelist;
    freelist._M_thread_freelist = __tr;
  }
#endif
} // anonymous namespace

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }
  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }
  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                              + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;
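
    // A worked example with illustrative (not default) tuning values:
    // with _M_min_bin == 8, _M_align == 8 and __which == 2, each block
    // occupies (8 << 2) + 8 == 40 bytes, so a 4096-byte chunk minus
    // the _Block_address header (two pointers, 16 bytes on an LP64
    // target) yields (4096 - 16) / 40 == 102 usable blocks.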

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = NULL;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }
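
    // For example, with _M_min_bin == 8 the loop above fills the map
    // so that requests of 0-8 bytes map to bin 0, 9-16 bytes to bin 1,
    // 17-32 bytes to bin 2, and so on: a single array lookup replaces
    // a power-of-two round-up at allocation time.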

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = NULL;
        __bin._M_address = NULL;
      }
    _M_init = true;
  }

#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }
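
  // Overview of the reclaim heuristic below: a freed block always goes
  // back to the owning thread's freelist; only when a thread's free
  // count climbs far above what it is actually using (scaled by
  // _M_freelist_headroom) is a batch spliced onto the shared list 0,
  // under a single mutex acquisition.  After such a trim the thread
  // keeps roughly _M_used / _M_freelist_headroom free blocks, e.g.
  // about 10 free blocks for 100 in use when the headroom is 10.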
  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const unsigned long __limit = 100 * (_M_bin_size - __which)
                                      * __options._M_freelist_headroom;

        unsigned long __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;
        if (__remove >= __bin._M_used[__thread_id])
          __remove -= __bin._M_used[__thread_id];
        else
          __remove = 0;
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const unsigned long __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        --__bin._M_used[__block->_M_thread_id];

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }
  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list. If so, grab up to __block_count blocks in one
    //   lock and change ownership. If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0
    //   no need to lock or change ownership but check for free
    //   blocks on global list (and if not add new ones) and
    //   get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = NULL;
    if (__gthread_active_p())
      {
        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == NULL)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = NULL;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = NULL;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = NULL;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = NULL;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        {
          __gnu_cxx::lock sentry(freelist_mutex);

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _Thread_record* _M_thread_freelist
                = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
            for (size_t __threadn = 0; __threadn < __max_threads; ++__threadn)
              {
                __bin._M_first[__threadn] = NULL;
                __bin._M_free[__threadn] = 0;
                __bin._M_used[__threadn] = 0;
              }
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }
  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active, check the thread key
    // value and return its id; if it's not yet set, take the first
    // record from _M_thread_freelist, set the key, and return its id.
    if (__gthread_active_p())
      {
        void* v = __gthread_getspecific(freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::lock sentry(freelist_mutex);
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }
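
  // Id lifecycle sketch: a thread's first allocation pops an id from
  // the shared freelist and caches it in thread-local storage via
  // __gthread_setspecific; at thread exit the key destructor,
  // _M_destroy_thread_key, pushes the id back for reuse.  If the
  // freelist is exhausted, or the id is at or above this pool's
  // _M_max_threads, the thread falls back to the shared pool 0.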

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) { }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        {
          __gnu_cxx::lock sentry(freelist_mutex);

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _Thread_record* _M_thread_freelist
                = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
            for (size_t __threadn = 0; __threadn < __max_threads; ++__threadn)
              {
                __bin._M_first[__threadn] = NULL;
                __bin._M_free[__threadn] = 0;
                __bin._M_used[__threadn] = 0;
              }
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;

_GLIBCXX_END_NAMESPACE
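
// Usage sketch (illustrative only, not part of this file): __mt_alloc
// meets the standard allocator requirements, so it can be plugged into
// any container:
//
//   #include <vector>
//   #include <ext/mt_allocator.h>
//
//   typedef __gnu_cxx::__mt_alloc<int> int_alloc_type;
//   std::vector<int, int_alloc_type> v;
//   v.push_back(42);  // The first allocation calls _M_initialize().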