// libstdc++-v3/src/c++98/mt_allocator.cc

// Allocator details.

// Copyright (C) 2004-2017 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>

// The include file is needed for uintptr_t. If this file does not compile,
// check to make sure the target has <stdint.h> and that it provides
// uintptr_t.
#include <stdint.h>

namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record* _M_thread_freelist;
    _Thread_record* _M_thread_freelist_array;
    size_t _M_max_threads;
    __gthread_key_t _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
          _M_thread_freelist = 0;
        }
    }
  };

  __freelist&
  get_freelist()
  {
    static __freelist freelist;
    return freelist;
  }

  __gnu_cxx::__mutex&
  get_freelist_mutex()
  {
    static __gnu_cxx::__mutex freelist_mutex;
    return freelist_mutex;
  }

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __freelist& freelist = get_freelist();
    {
      __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
      uintptr_t _M_id = reinterpret_cast<uintptr_t>(__id);

      typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
      _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
      __tr->_M_next = freelist._M_thread_freelist;
      freelist._M_thread_freelist = __tr;
    }
  }
#endif
} // anonymous namespace
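
// The definitions below cover both pool policies declared in
// <ext/mt_allocator.h>: __pool<false>, the single-threaded policy, keeps
// one freelist per bin, while __pool<true>, used when __GTHREADS is
// available, adds per-thread freelists, usage counters and a shared list
// of reusable thread ids, and falls back to the single-list behaviour at
// runtime when __gthread_active_p() is false.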

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                              + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;
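
    // Carve the rest of the chunk into __block_count blocks of
    // __bin_size bytes each and chain them onto this bin's freelist.
    // Within each block the first _M_align bytes hold the _Block_record
    // bookkeeping; the caller is handed the address just past them (see
    // the NB below), so the usable payload per block is
    // _M_min_bin << __which bytes.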
    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = 0;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }
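
    // For example, with the usual defaults _M_min_bin == 8 and
    // _M_max_bytes == 128 (assuming they were not changed via
    // _M_set_options), this fills _M_binmap with [0..8] -> 0,
    // [9..16] -> 1, [17..32] -> 2, [33..64] -> 3 and [65..128] -> 4:
    // the bin whose block size is the smallest power of two, at least
    // _M_min_bin, that fits the request.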

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = 0;
        __bin._M_address = 0;
      }
    _M_init = true;
  }

#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance sake we don't resync every time, in order
        // to spare atomic ops.  Note that if __reclaimed increased by,
        // say, 1024, since the last sync, it means that the other
        // threads executed the atomic in the else below at least the
        // same number of times (at least, because _M_reserve_block may
        // have decreased the counter), therefore one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
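
        // Note that __remove is still scaled by _M_freelist_headroom
        // here (it started as the per-thread free count times the
        // headroom factor); the comparison against __limit is made in
        // those scaled units, and the division below converts the value
        // back to an actual block count before anything is moved to the
        // global freelist.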
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
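        // If the block belongs to another thread we must not touch that
        // thread's _M_used slot directly (it is a plain size_t, not an
        // atomic); instead we atomically bump the owner's reclaimed
        // counter and let the owner fold it back in lazily, as done
        // above and in _M_reserve_block.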
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list. If so, grab up to __block_count blocks in one
    //   lock and change ownership. If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0
    //   no need to lock or change ownership but check for free
    //   blocks on global list (and if not add new ones) and
    //   get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = 0;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == 0)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = 0;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = 0;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = 0;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = 0;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);

            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);
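
            // The _Atomic_word array sharing this allocation with
            // _M_used (it starts at __bin._M_used + __max_threads and is
            // also indexed by thread id) is what _M_reclaim_block uses to
            // count blocks that other threads have freed on a given
            // owner's behalf until the owner resyncs its _M_used entry.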

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active we check the thread
    // key value and return its id, or, if it's not yet set, we take
    // the first record from _M_thread_freelist, set the key to that
    // id and return it.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        void* v = __gthread_getspecific(freelist._M_key);
        uintptr_t _M_id = (uintptr_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
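
        // If the shared freelist had no id left, _M_id is still 0 and
        // this thread keeps using slot 0, i.e. the global pool's slot;
        // ids beyond this pool's own _M_max_threads are likewise mapped
        // to 0 below, since the per-bin arrays only have
        // _M_max_threads + 1 entries.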
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) throw () { }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;
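
  // A minimal usage sketch (illustrative only, not part of this
  // translation unit): __mt_alloc is a standard-conforming allocator,
  // so client code simply names it as a container's allocator
  // argument, e.g.
  //
  //   #include <ext/mt_allocator.h>
  //   #include <vector>
  //
  //   std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
  //   __v.push_back(1);  // the first allocate() call lazily runs
  //                      // __pool<>::_M_initialize() for this pool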

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace