// Allocator details.

// Copyright (C) 2004, 2005, 2006 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>

namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*   _M_thread_freelist;
    _Thread_record*   _M_thread_freelist_array;
    size_t            _M_max_threads;
    __gthread_key_t   _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
        }
    }
  };

  // Ensure freelist is constructed first.
  static __freelist freelist;
  __gnu_cxx::__mutex freelist_mutex;

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __gnu_cxx::__scoped_lock sentry(freelist_mutex);
    size_t _M_id = reinterpret_cast<size_t>(__id);

    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
    __tr->_M_next = freelist._M_thread_freelist;
    freelist._M_thread_freelist = __tr;
  }
#endif
} // anonymous namespace
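
// Lifecycle of a thread id, as implemented above and in __pool<true>
// below: _M_initialize creates freelist._M_key and the array of
// _Thread_record ids; _M_get_thread_id pops a record off
// freelist._M_thread_freelist and publishes its id through
// __gthread_setspecific; when the thread exits, the key destructor
// _M_destroy_thread_key pushes the record back, so the id can be
// handed to a future thread.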

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }
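
  // A concrete lookup (a sketch, assuming the default _Tune values
  // with _M_min_bin == 8): reclaiming a 100-byte allocation reads
  // _M_binmap[100], which names the 128-byte bin, so the block is
  // pushed onto that bin's slot 0 in O(1), with no locking and no
  // search.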

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                              + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = NULL;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
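
  // The chunk carved out above looks like this (a sketch; the block
  // count depends on the _Tune values in force):
  //
  //   [_Block_address][block 0][block 1]...[block N-1]
  //
  // Each block is __bin_size == (_M_min_bin << __which) + _M_align
  // bytes: the first _M_align bytes hold the _Block_record
  // bookkeeping, and the caller sees the remaining
  // _M_min_bin << __which bytes.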

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }
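
    // Worked example (assuming the defaults _M_min_bin == 8 and
    // _M_max_bytes == 128), the map built above reads:
    //
    //   _M_binmap[0..8]    == 0   (8-byte bin)
    //   _M_binmap[9..16]   == 1   (16-byte bin)
    //   _M_binmap[17..32]  == 2   (32-byte bin)
    //   _M_binmap[33..64]  == 3   (64-byte bin)
    //   _M_binmap[65..128] == 4   (128-byte bin)
    //
    // so both allocate and deallocate find their bin with a single
    // array lookup.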

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = NULL;
        __bin._M_address = NULL;
      }
    _M_init = true;
  }

#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance's sake we don't resync every time, in
        // order to spare atomic ops.  Note that if __reclaimed
        // increased by, say, 1024, since the last sync, it means that
        // the other threads executed the atomic in the else below at
        // least the same number of times (at least, because
        // _M_reserve_block may have decreased the counter), therefore
        // one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
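
        // Worked example of the test below (a sketch, assuming the
        // default _M_freelist_headroom == 10): with 2000 records on
        // this thread's freelist and 100 in net use, __remove is
        // 2000 * 10 - 100 == 19900; if that exceeds __limit, the
        // thread returns 19900 / 10 == 1990 blocks to the global
        // pool, leaving the local freelist at roughly one tenth of
        // the blocks in net use.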
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global list.  If
    //   so, grab up to __block_count blocks in one lock and change
    //   ownership.  If the global list is empty, we allocate a new
    //   chunk and add those blocks directly to our own freelist
    //   (with us as owner).
    // - No, all operations are made directly to global pool 0; no
    //   need to lock or change ownership, but check for free blocks
    //   on the global list (and if there are none, add new ones) and
    //   get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = NULL;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == NULL)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = NULL;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = NULL;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = NULL;
              }
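
            // To make the split above concrete (a sketch): if a chunk
            // yields __block_count == 100 and the global list holds
            // 250 free blocks, this thread takes exactly 100 and
            // leaves 150; if the global list holds 100 or fewer, the
            // thread takes the whole list.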
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = NULL;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p(), create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        {
          __gnu_cxx::__scoped_lock sentry(freelist_mutex);

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }
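
        // Growth example (a sketch): if _M_max_threads grows from 4
        // to 8 while ids 1 and 2 are checked out, the old free list
        // 3 -> 4 is replayed onto the new 8-record array and its tail
        // is spliced into the fresh records, yielding
        // 3 -> 4 -> 5 -> 6 -> 7 -> 8; ids 1 and 2 remain valid
        // because _M_destroy_thread_key indexes the (updated)
        // _M_thread_freelist_array by id.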

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);
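
            // Layout of the buffer just allocated: __max_threads
            // size_t "used" counters followed by __max_threads
            // _Atomic_word "reclaimed" counters.  _M_reclaim_block
            // and _M_reserve_block recover the second half as
            //   reinterpret_cast<_Atomic_word*>(__bin._M_used
            //                                   + __max_threads)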

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active, we check the thread
    // key value and return its id; if it's not set, we take the
    // first record from _M_thread_freelist, set the key, and return
    // the new id.
    if (__gthread_active_p())
      {
        void* v = __gthread_getspecific(freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(freelist_mutex);
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }
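
  // Note the fallbacks above: a thread that finds the id freelist
  // exhausted keeps _M_id == 0 and is served from the global pool,
  // as is any id at or above _M_options._M_max_threads.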

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) { }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p(), create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        {
          __gnu_cxx::__scoped_lock sentry(freelist_mutex);

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;

_GLIBCXX_END_NAMESPACE
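
// Example use (a sketch, not part of this translation unit): any
// standard container can run on top of the pools defined here, e.g.
//
//   #include <ext/mt_allocator.h>
//   #include <vector>
//
//   int main()
//   {
//     std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
//     __v.push_back(42);   // first allocate() triggers _M_initialize
//   }
//
// Setting the GLIBCXX_FORCE_NEW environment variable makes the
// allocator bypass the pools and call operator new directly.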