// Copyright (C) 2004, 2005, 2006, 2009, 2010 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.
#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>
namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*   _M_thread_freelist;
    _Thread_record*   _M_thread_freelist_array;
    size_t            _M_max_threads;
    __gthread_key_t   _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
          _M_thread_freelist = 0;
        }
    }
  };

  __freelist&
  get_freelist()
  {
    static __freelist freelist;
    return freelist;
  }

  __gnu_cxx::__mutex&
  get_freelist_mutex()
  {
    static __gnu_cxx::__mutex freelist_mutex;
    return freelist_mutex;
  }
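
  // NB: The freelist and its mutex are function-local statics, so they
  // are constructed on first use (sidestepping static initialization
  // order problems), while the __freelist destructor still runs at
  // program exit to release the thread key and the id array.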
  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __freelist& freelist = get_freelist();
    {
      __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
      size_t _M_id = reinterpret_cast<size_t>(__id);

      typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
      _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
      __tr->_M_next = freelist._M_thread_freelist;
      freelist._M_thread_freelist = __tr;
    }
  }
#endif
} // anonymous namespace
namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }
  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }
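
  // NB: _M_reclaim_block recovers the _Block_record that lives in the
  // first _M_align bytes of a block handed out by _M_reserve_block
  // below; the +/- _M_get_align() offsets must stay in sync.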
  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                              + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = 0;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }
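
    // For example, with the default _M_min_bin of 8, requests of 1-8
    // bytes map to bin 0, 9-16 to bin 1, 17-32 to bin 2, and so on:
    // allocate() then finds the right bin with a single table lookup.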
    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = 0;
        __bin._M_address = 0;
      }
    _M_init = true;
  }
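
  // The __pool<true> specialization below layers per-thread freelists
  // over the same chunk and bin machinery: each bin keeps one list
  // head, free count and used count per thread id, plus a mutex that
  // guards the shared entries at index 0.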
#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }
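
  // NB: Only the threaded branch of _M_initialize allocates the
  // per-thread _M_free, _M_used and _M_mutex arrays, so only the
  // threaded branch above frees them.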
  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;
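
        // For example, with the default headroom of 10 and 100 blocks
        // already free, __remove starts at 1000; blocks migrate to the
        // shared list at index 0 only when that figure is well above
        // both __limit and the thread's net usage, so steady-state
        // frees stay lock-free.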
        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance sake we don't resync every time, in order
        // to spare atomic ops.  Note that if __reclaimed increased by,
        // say, 1024, since the last sync, it means that the other
        // threads executed the atomic in the else below at least the
        // same number of times (at least, because _M_reserve_block may
        // have decreased the counter), therefore one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }
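
  // Invariant: for each bin and thread id, _M_free[id] counts blocks
  // sitting on that thread's freelist while _M_used[id] counts blocks
  // handed out to the program; _M_reserve_block and _M_reclaim_block
  // update the two in tandem.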
  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list. If so, grab up to __block_count blocks in one
    //   lock and change ownership. If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0
    //   no need to lock or change ownership but check for free
    //   blocks on global list (and if not add new ones) and
    //   get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = 0;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == 0)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = 0;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = 0;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = 0;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = 0;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
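
  // Chunk layout: a _Block_address header followed by __block_count
  // blocks of __bin_size bytes each.  While a block is free, its first
  // bytes hold the _Block_record link; once handed out, the caller's
  // data starts _M_align bytes into the block.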
  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }
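
        // NB: When the id array grows, the records are re-created at
        // the same indices and every _M_next pointer is rebased into
        // the new array, so ids already handed out to running threads
        // stay valid.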
        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);

            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }
  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active we check the thread
    // key value and return its id or if it's not set we take the
    // first record from _M_thread_freelist and sets the key and
    // returns its id.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        void* v = __gthread_getspecific(freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }
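
  // NB: If more threads than _M_max_threads ever ask for an id, the
  // surplus threads all share id 0 and are served from the global
  // pool, so the allocator degrades gracefully instead of failing.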
  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) throw () { }
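
  // NB: Like _M_destroy_thread_key above, the following overload is
  // kept only for ABI compatibility; it ignores its __destroy_handler
  // argument and repeats the body of _M_initialize().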
  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }
        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads +
                                 sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }
#endif
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;
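
  // Usage sketch (illustrative only, not part of the library source):
  // __mt_alloc is a standard-conforming allocator, so any container
  // can opt into the pooled scheme, e.g.
  //
  //   #include <ext/mt_allocator.h>
  //   #include <vector>
  //
  //   std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
  //   __v.push_back(1);   // first allocation initializes the pool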
_GLIBCXX_END_NAMESPACE_VERSION
} // namespace