// Copyright (C) 2004, 2005, 2006 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>
namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record* _M_thread_freelist;
    _Thread_record* _M_thread_freelist_array;
    size_t _M_max_threads;
    __gthread_key_t _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
        }
    }
  };

  // Ensure freelist is constructed first.
  static __freelist freelist;
  __gnu_cxx::__mutex freelist_mutex;

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __gnu_cxx::__scoped_lock sentry(freelist_mutex);
    size_t _M_id = reinterpret_cast<size_t>(__id);

    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
    __tr->_M_next = freelist._M_thread_freelist;
    freelist._M_thread_freelist = __tr;
  }
#endif
} // anonymous namespace
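// Note on the machinery above: __pool<true>::_M_get_thread_id() below hands
// each allocating thread a numeric id and stores it in the per-thread key
// freelist._M_key.  When that thread exits, the gthreads layer invokes
// _M_destroy_thread_key() with the stored id, and the corresponding
// _Thread_record is pushed back onto freelist._M_thread_freelist so the id
// can be reused by a later thread.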
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }
  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }
  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = NULL;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
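  // Worked example of the sizing arithmetic above, using hypothetical
  // tuning values rather than required defaults: with _M_chunk_size == 4096,
  // _M_min_bin == 8 and _M_align == 8, bin 0 holds blocks of
  // (8 << 0) + 8 == 16 bytes.  On a 64-bit target _Block_address occupies
  // 16 bytes, so one chunk yields (4096 - 16) / 16 == 255 blocks, which the
  // loop above links into a singly-linked freelist before the first block
  // is returned to the caller.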
  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = NULL;
        __bin._M_address = NULL;
      }
    _M_init = true;
  }
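  // Worked example of the bin setup above, assuming a tuning of
  // _M_min_bin == 8 and _M_max_bytes == 128 (the library's usual defaults):
  // the first loop leaves _M_bin_size == 5 (bins of 8, 16, 32, 64 and 128
  // bytes), and the binmap loop fills _M_binmap so that _M_binmap[1..8] == 0,
  // _M_binmap[9..16] == 1, _M_binmap[17..32] == 2, _M_binmap[33..64] == 3 and
  // _M_binmap[65..128] == 4, giving constant-time bin lookup per request size.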
#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }
  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance's sake we don't resync every time, in order
        // to spare atomic ops.  Note that if __reclaimed increased by,
        // say, 1024 since the last sync, it means that the other
        // threads executed the atomic in the else below at least the
        // same number of times (at least, because _M_reserve_block may
        // have decreased the counter), therefore one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }
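  // Numeric illustration of the headroom check above, with hypothetical
  // values: for _M_freelist_headroom == 10, _M_bin_size == 5 and
  // __which == 0 the limit is 100 * 5 * 10 == 5000.  If this thread holds
  // 600 free blocks and has 300 blocks net in use, __remove becomes
  // 600 * 10 - 300 == 5700, which exceeds both the limit and the thread's
  // free count, so 5700 / 10 == 570 blocks are spliced back onto the
  // global freelist under the bin mutex.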
  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list. If so, grab up to __block_count blocks in one
    //   lock and change ownership. If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0;
    //   no need to lock or change ownership, but check for free
    //   blocks on the global list (and if not, add new ones) and
    //   get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = NULL;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == NULL)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = NULL;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = NULL;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = NULL;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = NULL;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p(), create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        {
          __gnu_cxx::__scoped_lock sentry(freelist_mutex);

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _Thread_record* _M_thread_freelist
                = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1, since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }
  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active, we check the thread
    // key value and return its id; if it's not set, we take the
    // first record from _M_thread_freelist, set the key and return
    // the new id.
    if (__gthread_active_p())
      {
        void* v = __gthread_getspecific(freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(freelist_mutex);
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key,
                                  reinterpret_cast<void*>(_M_id));
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }
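  // In practice the first thread to allocate is handed id 1, the next id 2,
  // and so on; id 0 is reserved for the shared global pool.  A thread that
  // finds the freelist exhausted, or whose id reaches _M_max_threads, falls
  // back to id 0 and uses the mutex-protected global freelists instead of
  // a private one.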
  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) { }
  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p(), create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        {
          __gnu_cxx::__scoped_lock sentry(freelist_mutex);

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads
                 < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _Thread_record* _M_thread_freelist
                = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1, since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = NULL;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;

_GLIBCXX_END_NAMESPACE
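// A minimal usage sketch, illustrative only: user code never calls the
// __pool member functions above directly, it just names __mt_alloc as a
// container's allocator, e.g.
//
//   #include <vector>
//   #include <ext/mt_allocator.h>
//
//   std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
//
// allocate() and deallocate() on that allocator then draw from and return
// to the bins managed above; requests larger than _M_max_bytes (or any
// request when _M_force_new is set) go straight to ::operator new/delete.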