// Allocator details.

// Copyright (C) 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <bits/concurrence.h>
#include <ext/mt_allocator.h>

namespace __gnu_internal
{
  __glibcxx_mutex_define_initialized(freelist_mutex);

#ifdef __GTHREADS
  __gthread_key_t freelist_key;
#endif
} // namespace __gnu_internal
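
// The two globals above back the thread-id bookkeeping implemented
// further down: freelist_mutex serializes access to the shared list of
// reusable thread-id records, and freelist_key is the thread-specific
// key under which each thread caches the _Thread_record handed out by
// __pool<true>::_M_get_thread_id().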

namespace __gnu_cxx
{
  __pool<false>::~__pool()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                delete __bin._M_address;
                __bin._M_address = __tmp;
              }
            delete __bin._M_first;
          }
        delete _M_bin;
        delete _M_binmap;
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    const _Tune& __options = _M_get_options();
    char* __c = __p - __options._M_align;
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size / __bin_size;

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_record* __block = static_cast<_Block_record*>(__v);
    --__block_count;
    _Block_record* __tmp = __block;
    while (__block_count-- > 0)
      {
        char* __c = reinterpret_cast<char*>(__tmp) + __bin_size;
        __tmp->_M_next = reinterpret_cast<_Block_record*>(__c);
        __tmp = __tmp->_M_next;
      }
    __tmp->_M_next = NULL;

    // Update _Bin_record fields.
    _Bin_record& __bin = _M_bin[__which];
    __bin._M_first[__thread_id] = __block->_M_next;
    _Block_address* __address = new _Block_address;
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
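
  // Chunk carving, illustrated with assumed tuning values (the real
  // defaults live in the _Tune defaults in <ext/mt_allocator.h>):
  // with _M_min_bin == 8, _M_align == 8 and _M_chunk_size == 4096,
  // bin 0 serves requests of up to 8 bytes, each block occupies
  // __bin_size == (8 << 0) + 8 == 16 bytes, and one chunk is carved
  // into 4096 / 16 == 256 singly linked blocks.  The first block of
  // the chunk is handed straight back to the caller, offset by
  // _M_align bytes, and the remaining 255 are threaded onto
  // __bin._M_first[__thread_id].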

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = NULL;
        __bin._M_address = NULL;
      }
    _M_init = true;
  }
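
  // The bin map built above is a plain lookup table indexed by the
  // requested byte count.  With _M_min_bin == 8 (an assumed tuning
  // value), requests of 0-8 bytes map to bin 0, 9-16 bytes to bin 1,
  // 17-32 bytes to bin 2, and so on: _M_binmap[__bytes] is the index
  // of the smallest bin whose block size (_M_min_bin << index) is at
  // least __bytes, which is what the "round up to power of 2"
  // comments in the allocation paths refer to.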

#ifdef __GTHREADS
  __pool<true>::~__pool()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    delete __bin._M_address;
                    __bin._M_address = __tmp;
                  }
                delete __bin._M_first;
                delete __bin._M_free;
                delete __bin._M_used;
                delete __bin._M_mutex;
              }
            ::operator delete(_M_thread_freelist_initial);
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    delete __bin._M_address;
                    __bin._M_address = __tmp;
                  }
                delete __bin._M_first;
              }
          }
        delete _M_bin;
        delete _M_binmap;
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    const _Tune& __options = _M_get_options();
    char* __c = __p - __options._M_align;
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();

        long __remove = ((__bin._M_free[__thread_id]
                          * __options._M_freelist_headroom)
                         - __bin._M_used[__thread_id]);
        if (__remove > static_cast<long>(100 * (_M_bin_size - __which)
                                         * __options._M_freelist_headroom)
            && __remove > static_cast<long>(__bin._M_free[__thread_id]))
          {
            _Block_record* __tmp = __bin._M_first[__thread_id];
            _Block_record* __first = __tmp;
            __remove /= __options._M_freelist_headroom;
            const long __removed = __remove;
            --__remove;
            while (__remove-- > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        --__bin._M_used[__block->_M_thread_id];

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }
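
  // The headroom check above keeps a thread's freelist roughly in
  // proportion to what that thread actually has in use.  Blocks are
  // only donated back to the shared list 0 once __remove (the free
  // count scaled by the _M_freelist_headroom tuning parameter, minus
  // the used count) exceeds both a bin-dependent threshold and the
  // thread's current free count; then __remove / _M_freelist_headroom
  // blocks are spliced over in a single operation under
  // __bin._M_mutex, so the common deallocation path takes no lock.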

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size / __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list. If so, grab up to __block_count blocks in one
    //   lock and change ownership. If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0
    //   no need to lock or change ownership but check for free
    //   blocks on global list (and if not add new ones) and
    //   get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = NULL;
    if (__gthread_active_p())
      {
        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == NULL)
          {
            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            __gthread_mutex_unlock(__bin._M_mutex);

            void* __v = ::operator new(__options._M_chunk_size);
            __bin._M_first[__thread_id] = static_cast<_Block_record*>(__v);
            __bin._M_free[__thread_id] = __block_count;
            --__block_count;
            __block = __bin._M_first[__thread_id];
            while (__block_count-- > 0)
              {
                char* __c = reinterpret_cast<char*>(__block) + __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = NULL;

            __gthread_mutex_lock(__bin._M_mutex);
            _Block_address* __address = new _Block_address;
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = NULL;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                --__block_count;
                __block = __bin._M_first[0];
                while (__block_count-- > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = NULL;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        __block = static_cast<_Block_record*>(__v);
        __bin._M_first[0] = __block;
        --__block_count;
        while (__block_count-- > 0)
          {
            char* __c = reinterpret_cast<char*>(__block) + __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = NULL;

        _Block_address* __address = new _Block_address;
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __bin._M_first[__thread_id]->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
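
  // When threads are active, _M_reserve_block prefers recycling over
  // growth: it first tries to splice up to __block_count blocks off
  // the shared list 0 under a single lock acquisition, and only asks
  // ::operator new for a fresh chunk when that list is empty.  Either
  // way the calling thread becomes the owner of the blocks it took,
  // which is what the _M_thread_id stamp and the per-thread
  // _M_free/_M_used counters record.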

  void
  __pool<true>::_M_initialize(__destroy_handler __d)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        const size_t __k = sizeof(_Thread_record) * _M_options._M_max_threads;
        __v = ::operator new(__k);
        _M_thread_freelist = static_cast<_Thread_record*>(__v);
        _M_thread_freelist_initial = __v;

        // NOTE! The first assignable thread id is 1 since the
        // global pool uses id 0
        size_t __i;
        for (__i = 1; __i < _M_options._M_max_threads; ++__i)
          {
            _Thread_record& __tr = _M_thread_freelist[__i - 1];
            __tr._M_next = &_M_thread_freelist[__i];
            __tr._M_id = __i;
          }

        // Set last record.
        _M_thread_freelist[__i - 1]._M_next = NULL;
        _M_thread_freelist[__i - 1]._M_id = __i;

        // Initialize per thread key to hold pointer to
        // _M_thread_freelist.
        __gthread_key_create(&__gnu_internal::freelist_key, __d);

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = NULL;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads);
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
            for (size_t __threadn = 0; __threadn < __max_threads; ++__threadn)
              {
                __bin._M_first[__threadn] = NULL;
                __bin._M_free[__threadn] = 0;
                __bin._M_used[__threadn] = 0;
              }
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
            __bin._M_address = NULL;
          }
      }
    _M_init = true;
  }
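
  // Every per-bin array built above (_M_first, _M_free, _M_used) has
  // _M_max_threads + 1 slots: slot 0 belongs to the shared global
  // pool, and slots 1.._M_max_threads belong to individual threads,
  // matching the thread ids handed out by _M_get_thread_id() below.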

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active we check the thread
    // key value and return its id, or, if the key is not yet set, we
    // take the first record from _M_thread_freelist, set the key and
    // return that record's id.
    if (__gthread_active_p())
      {
        void* v = __gthread_getspecific(__gnu_internal::freelist_key);
        _Thread_record* __freelist_pos = static_cast<_Thread_record*>(v);
        if (__freelist_pos == NULL)
          {
            // Since _M_options._M_max_threads must be larger than
            // the theoretical max number of threads of the OS the
            // list can never be empty.
            {
              __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);
              __freelist_pos = _M_thread_freelist;
              _M_thread_freelist = _M_thread_freelist->_M_next;
            }

            __gthread_setspecific(__gnu_internal::freelist_key,
                                  static_cast<void*>(__freelist_pos));
          }
        return __freelist_pos->_M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }

  void
  __pool<true>::_M_destroy_thread_key(void* __freelist_pos)
  {
    // Return this thread id record to front of thread_freelist.
    __gnu_cxx::lock sentry(__gnu_internal::freelist_mutex);
    _Thread_record* __tr = static_cast<_Thread_record*>(__freelist_pos);
    __tr->_M_next = _M_thread_freelist;
    _M_thread_freelist = __tr;
  }
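
  // Taken together, _M_get_thread_id() and _M_destroy_thread_key()
  // recycle thread ids: the first allocation a thread makes pops a
  // record off _M_thread_freelist under freelist_mutex and caches it
  // in freelist_key, so every later lookup is a plain
  // __gthread_getspecific() with no locking.  The __d handler passed
  // to __gthread_key_create() in _M_initialize() is expected to route
  // back through _M_destroy_thread_key() on thread exit, returning
  // the record to the freelist for reuse; that wiring lives in the
  // allocator header, not in this file.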

#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;
} // namespace __gnu_cxx
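
// Example usage (an illustrative sketch only, not part of this
// translation unit): __mt_alloc can be plugged into any standard
// container as its allocator, and the pool machinery above is set up
// lazily by the first allocation.
//
//   #include <vector>
//   #include <ext/mt_allocator.h>
//
//   typedef __gnu_cxx::__mt_alloc<int> int_alloc;
//   std::vector<int, int_alloc> v;
//   v.push_back(42);   // first allocate() triggers _M_initialize(),
//                      // then _M_reserve_block() carves a chunk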