// Allocator details.

// Copyright (C) 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <bits/concurrence.h>
#include <ext/mt_allocator.h>
namespace __gnu_internal
{
  __glibcxx_mutex_define_initialized(freelist_mutex);

#ifdef __GTHREADS
  __gthread_key_t freelist_key;
#endif
}
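// freelist_mutex serializes access to __pool<true>::_M_thread_freelist
// below; freelist_key holds, per thread, the _Thread_record handed out
// by _M_get_thread_id and triggers the destroy handler registered in
// _M_initialize when a thread exits.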
namespace __gnu_cxx
{
  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }
  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
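    // Block layout (see _M_reserve_block below): the pointer handed to
    // the user is _M_align bytes past the start of the block, so
    // stepping back by _M_get_align() recovers the _Block_record header
    // used to thread the block onto a freelist.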
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }
  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                              + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;
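    // A chunk is one _Block_address header followed by __block_count
    // blocks of __bin_size bytes each.  For illustration, assuming a
    // 4096-byte chunk and __bin_size == 16, the division above yields
    // (4096 - sizeof(_Block_address)) / 16, i.e. 255 blocks on common
    // 32- and 64-bit targets.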
    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = NULL;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }
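    // Illustration, assuming the common defaults _M_min_bin == 8 and
    // _M_max_bytes == 128: requests of 0-8 bytes map to bin 0, 9-16 to
    // bin 1, 17-32 to bin 2, 33-64 to bin 3 and 65-128 to bin 4, so
    // looking up a bin is a single array index instead of a loop.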
    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = NULL;
        __bin._M_address = NULL;
      }
    _M_init = true;
  }
#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
            ::operator delete(_M_thread_freelist_initial);
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }
  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const unsigned long __limit = 100 * (_M_bin_size - __which)
                                      * __options._M_freelist_headroom;

        unsigned long __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;
        if (__remove >= __bin._M_used[__thread_id])
          __remove -= __bin._M_used[__thread_id];
        else
          __remove = 0;
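        // Worked example, assuming the default headroom of 10: with 5
        // bins, __which == 1, 500 free and 20 used blocks, __limit is
        // 100 * 4 * 10 == 4000 and __remove is 500 * 10 - 20 == 4980.
        // Since 4980 exceeds both 4000 and 500, 4980 / 10 == 498
        // blocks are spliced back onto the global freelist below.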
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const unsigned long __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        --__bin._M_used[__block->_M_thread_id];

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }
  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global list.  If
    //   so, grab up to __block_count blocks in one lock and change
    //   ownership.  If the global list is empty, we allocate a new
    //   chunk and add those blocks directly to our own freelist
    //   (with us as owner).
    // - No, all operations are made directly to global pool 0: no
    //   need to lock or change ownership, but check for free blocks
    //   on the global list (and if not, add new ones) and get the
    //   first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = NULL;
    if (__gthread_active_p())
      {
        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == NULL)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = NULL;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = NULL;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = NULL;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = NULL;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
  void
  __pool<true>::_M_initialize(__destroy_handler __d)
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }
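    // (Same mapping as in __pool<false>::_M_initialize; see the worked
    // example there.)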
    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids.  Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        const size_t __k = sizeof(_Thread_record) * _M_options._M_max_threads;
        __v = ::operator new(__k);
        _M_thread_freelist = static_cast<_Thread_record*>(__v);
        _M_thread_freelist_initial = __v;

        // NOTE! The first assignable thread id is 1 since the
        // global pool uses id 0.
        size_t __i;
        for (__i = 1; __i < _M_options._M_max_threads; ++__i)
          {
            _Thread_record& __tr = _M_thread_freelist[__i - 1];
            __tr._M_next = &_M_thread_freelist[__i];
            __tr._M_id = __i;
          }

        // Set last record.
        _M_thread_freelist[__i - 1]._M_next = NULL;
        _M_thread_freelist[__i - 1]._M_id = __i;

        // Initialize per thread key to hold pointer to
        // _M_thread_freelist.
        __gthread_key_create(&__gnu_internal::freelist_key, __d);

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
            for (size_t __threadn = 0; __threadn < __max_threads; ++__threadn)
              {
                __bin._M_first[__threadn] = NULL;
                __bin._M_free[__threadn] = 0;
                __bin._M_used[__threadn] = 0;
              }
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }
  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active, check the thread key
    // value: if it is set, return its id; otherwise take the first
    // record from _M_thread_freelist, set the key and return that
    // record's id.
    if (__gthread_active_p())
      {
        void* v = __gthread_getspecific(__gnu_internal::freelist_key);
        _Thread_record* __freelist_pos = static_cast<_Thread_record*>(v);
        if (__freelist_pos == NULL)
          {
            // Since _M_options._M_max_threads must be larger than the
            // theoretical maximum number of threads of the OS, the
            // list can never be empty.
            {
              __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);
              __freelist_pos = _M_thread_freelist;
              _M_thread_freelist = _M_thread_freelist->_M_next;
            }

            __gthread_setspecific(__gnu_internal::freelist_key,
                                  static_cast<void*>(__freelist_pos));
          }
        return __freelist_pos->_M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }
  void
  __pool<true>::_M_destroy_thread_key(void* __freelist_pos)
  {
    // Return this thread id record to the front of thread_freelist.
    __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);
    _Thread_record* __tr = static_cast<_Thread_record*>(__freelist_pos);
    __tr->_M_next = _M_thread_freelist;
    _M_thread_freelist = __tr;
  }
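  // _M_destroy_thread_key runs as the __destroy_handler registered
  // with __gthread_key_create in _M_initialize: when a thread exits,
  // its _Thread_record goes back on the freelist so the id can be
  // reused by a later thread.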
#endif
  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;
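  // A minimal usage sketch (illustrative only, not part of this file):
  // __mt_alloc meets the standard allocator requirements, so it can be
  // plugged into any container:
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
  //   __v.push_back(42);  // element storage comes from the pool above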
} // namespace __gnu_cxx