// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 *  You should only include this header if you are using GCC 3 or later.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/atomicity.h>
namespace __gnu_cxx
{
  /**
   *  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  "global").
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
   */
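  // Usage sketch (illustrative only): __mt_alloc meets the standard
  // allocator requirements, so it can be plugged into any container:
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
  //   __v.push_back(42);  // small blocks are served from the freelists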
  template<typename _Tp>
    class __mt_alloc
    {
    public:
      typedef size_t     size_type;
      typedef ptrdiff_t  difference_type;
      typedef _Tp*       pointer;
      typedef const _Tp* const_pointer;
      typedef _Tp&       reference;
      typedef const _Tp& const_reference;
      typedef _Tp        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __mt_alloc<_Tp1> other; };

      __mt_alloc() throw()
      {
        // XXX
      }

      __mt_alloc(const __mt_alloc&) throw()
      {
        // XXX
      }

      template<typename _Tp1>
        __mt_alloc(const __mt_alloc<_Tp1>& obj) throw()
        {
          // XXX
        }

      ~__mt_alloc() throw() { }

      pointer
      address(reference __x) const { return &__x; }

      const_pointer
      address(const_reference __x) const { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new(__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }

      pointer
      allocate(size_t __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      // Variables used to configure the behavior of the allocator,
      // assigned and explained in detail below.
      struct tune
      {
        // Allocation requests (after round-up to power of 2) below
        // this value will be handled by the allocator. A raw call to
        // new will be used for requests larger than this value.
        size_t _M_max_bytes;

        // In order to avoid fragmenting and to minimize the number of
        // new() calls we always request new memory using this
        // value. Based on previous discussions on the libstdc++
        // mailing list we have chosen the value below.
        // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
        size_t _M_chunk_size;

        // The maximum number of supported threads. Our Linux 2.4.18
        // reports 4070 in /proc/sys/kernel/threads-max.
        size_t _M_max_threads;

        // Each time a deallocation occurs in a threaded application
        // we make sure that there are no more than
        // _M_freelist_headroom % of used memory on the freelist. If
        // the number of additional records is more than
        // _M_freelist_headroom % of the freelist, we move these
        // records back to the global pool.
        size_t _M_freelist_headroom;

        // Setting this to true forces all allocations to use new().
        bool _M_force_new;

        explicit tune()
        : _M_max_bytes(128), _M_chunk_size(4096 - 4 * sizeof(void*)),
#ifdef __GTHREADS
          _M_max_threads(4096),
#else
          _M_max_threads(0),
#endif
          _M_freelist_headroom(10),
          _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false)
        { }

        explicit tune(size_t __maxb, size_t __chunk, size_t __maxthreads,
                      size_t __headroom, bool __force)
        : _M_max_bytes(__maxb), _M_chunk_size(__chunk),
          _M_max_threads(__maxthreads), _M_freelist_headroom(__headroom),
          _M_force_new(__force)
        { }
      };
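      // Example (illustrative only): a default-constructed tune caches
      // requests up to 128 bytes, fetches memory in chunks of
      // 4096 - 4 * sizeof(void*) bytes, supports up to 4096 threads
      // when compiled with __GTHREADS, and keeps at most 10% headroom
      // on each per-thread freelist. Setting GLIBCXX_FORCE_NEW in the
      // environment before startup makes every request fall through
      // to new/delete:
      //
      //   tune __defaults;                          // values above
      //   tune __custom(32, 1024, 4096, 10, false); // hypothetical values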
    private:
      // We need to create the initial lists and set up some variables
      // before we can answer the first request for memory.
#ifdef __GTHREADS
      static __gthread_once_t _S_once;
#endif
      static bool _S_init;

      static void
      _S_initialize();

      // Configuration options.
      static tune _S_options;

      static const tune
      _S_get_options() { return _S_options; }

      static void
      _S_set_options(tune __t)
      {
        if (!_S_init)
          _S_options = __t;
      }

      // Using short int as type for the binmap implies we are never
      // caching blocks larger than 65535 with this allocator.
      typedef unsigned short int binmap_type;
      static binmap_type* _S_binmap;

      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // store its address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // is called (i.e. when the thread dies), we return the thread id
      // to the front of this list.
#ifdef __GTHREADS
      struct thread_record
      {
        // Points to next free thread id record. NULL if last record in list.
        thread_record* volatile next;

        // Thread id ranging from 1 to _S_max_threads.
        size_t id;
      };

      static thread_record* volatile _S_thread_freelist_first;
      static __gthread_mutex_t _S_thread_freelist_mutex;
      static __gthread_key_t _S_thread_key;

      static void
      _S_destroy_thread_key(void* freelist_pos);
#endif

      static size_t
      _S_get_thread_id();

      struct block_record
      {
        // Points to the next block_record for its thread_id.
        block_record* volatile next;

        // The thread id of the thread which has requested this block.
#ifdef __GTHREADS
        size_t thread_id;
#endif
      };
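      // Layout note (derived from allocate() below): each cached block
      // is a block_record header immediately followed by the user data,
      // so a chunk is carved up as
      //
      //   [block_record|data][block_record|data]...
      //
      // where each piece occupies (1 << __which) + sizeof(block_record)
      // bytes and the pointer handed to the user skips the header.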
      struct bin_record
      {
        // An "array" of pointers to the first free block for each
        // thread id. Memory to this "array" is allocated in _S_initialize()
        // for _S_max_threads + global pool 0.
        block_record** volatile first;

        // An "array" of counters used to keep track of the number of
        // blocks that are on the freelist/used for each thread id.
        // Memory to these "arrays" is allocated in _S_initialize() for
        // _S_max_threads + global pool 0.
        size_t* volatile free;
        size_t* volatile used;

        // Each bin has its own mutex which is used to ensure data
        // integrity while changing "ownership" on a block. The mutex
        // is initialized in _S_initialize().
#ifdef __GTHREADS
        __gthread_mutex_t* mutex;
#endif
      };

      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _S_initialize().
      static bin_record* volatile _S_bin;

      // Actual value calculated in _S_initialize().
      static size_t _S_bin_size;
    };
  template<typename _Tp>
    typename __mt_alloc<_Tp>::pointer
    __mt_alloc<_Tp>::
    allocate(size_t __n, const void*)
    {
      // Although the test in __gthread_once() would suffice, we wrap
      // the test of the once condition in our own unlocked check. This
      // saves one function call to pthread_once() (which itself only
      // tests for the once value unlocked anyway and immediately
      // returns if set).
      if (!_S_init)
        {
#ifdef __GTHREADS
          if (__gthread_active_p())
            __gthread_once(&_S_once, _S_initialize);
#endif
          if (!_S_init)
            _S_initialize();
        }

      // Requests larger than _M_max_bytes are handled by new/delete
      // directly.
      const size_t __bytes = __n * sizeof(_Tp);
      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
        {
          void* __ret = ::operator new(__bytes);
          return static_cast<_Tp*>(__ret);
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = _S_binmap[__bytes];
      const size_t __thread_id = _S_get_thread_id();

      // Find out if we have blocks on our freelist. If so, go ahead
      // and use them directly without having to lock anything.
      const bin_record& __bin = _S_bin[__which];
      block_record* block = NULL;
      if (__bin.first[__thread_id] == NULL)
        {
          // Are we using threads?
          // - Yes, check if there are free blocks on the global
          //   list. If so, grab up to block_count blocks in one
          //   lock and change ownership. If the global list is
          //   empty, we allocate a new chunk and add those blocks
          //   directly to our own freelist (with us as owner).
          // - No, all operations are made directly to global pool 0;
          //   no need to lock or change ownership, but check for free
          //   blocks on the global list (and if there are none, add
          //   new ones) and get the first one.
#ifdef __GTHREADS
          if (__gthread_active_p())
            {
              const size_t bin_size = (1 << __which) + sizeof(block_record);
              size_t block_count = _S_options._M_chunk_size / bin_size;

              __gthread_mutex_lock(__bin.mutex);
              if (__bin.first[0] == NULL)
                {
                  // No need to hold the lock when we are adding a
                  // whole chunk to our own list.
                  __gthread_mutex_unlock(__bin.mutex);

                  void* v = ::operator new(_S_options._M_chunk_size);
                  __bin.first[__thread_id] = static_cast<block_record*>(v);

                  __bin.free[__thread_id] = block_count;
                  block_count--;
                  block = __bin.first[__thread_id];

                  while (block_count > 0)
                    {
                      char* c = reinterpret_cast<char*>(block) + bin_size;
                      block->next = reinterpret_cast<block_record*>(c);
                      block->thread_id = __thread_id;
                      block = block->next;
                      block_count--;
                    }

                  block->next = NULL;
                  block->thread_id = __thread_id;
                }
              else
                {
                  size_t global_count = 0;
                  block_record* tmp;
                  while (__bin.first[0] != NULL && global_count < block_count)
                    {
                      tmp = __bin.first[0]->next;
                      block = __bin.first[0];

                      if (__bin.first[__thread_id] == NULL)
                        {
                          __bin.first[__thread_id] = block;
                          block->next = NULL;
                        }
                      else
                        {
                          block->next = __bin.first[__thread_id];
                          __bin.first[__thread_id] = block;
                        }

                      block->thread_id = __thread_id;
                      __bin.free[__thread_id]++;
                      __bin.first[0] = tmp;
                      global_count++;
                    }
                  __gthread_mutex_unlock(__bin.mutex);
                }

              // Return the first newly added block in our list and
              // update the counters.
              block = __bin.first[__thread_id];
              __bin.first[__thread_id] = __bin.first[__thread_id]->next;
              __bin.free[__thread_id]--;
              __bin.used[__thread_id]++;
            }
          else
#endif
            {
              void* __v = ::operator new(_S_options._M_chunk_size);
              __bin.first[0] = static_cast<block_record*>(__v);

              const size_t bin_size = (1 << __which) + sizeof(block_record);
              size_t block_count = _S_options._M_chunk_size / bin_size;

              block_count--;
              block = __bin.first[0];
              while (block_count > 0)
                {
                  char* __c = reinterpret_cast<char*>(block) + bin_size;
                  block->next = reinterpret_cast<block_record*>(__c);
                  block = block->next;
                  block_count--;
                }
              block->next = NULL;

              // Remove from list.
              block = __bin.first[0];
              __bin.first[0] = __bin.first[0]->next;
            }
        }
      else
        {
          // "Default" operation - we have blocks on our own freelist;
          // grab the first record and update the counters.
          block = __bin.first[__thread_id];
          __bin.first[__thread_id] = __bin.first[__thread_id]->next;

#ifdef __GTHREADS
          if (__gthread_active_p())
            {
              __bin.free[__thread_id]--;
              __bin.used[__thread_id]++;
            }
#endif
        }
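
      // The returned pointer skips the block_record bookkeeping header,
      // so the caller sees the (1 << __which) usable bytes that follow it.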
      char* __c = reinterpret_cast<char*>(block) + sizeof(block_record);
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    deallocate(pointer __p, size_type __n)
    {
      // Requests larger than _M_max_bytes are handled by operators
      // new/delete directly.
      const size_t __bytes = __n * sizeof(_Tp);
      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
        {
          ::operator delete(__p);
          return;
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = _S_binmap[__bytes];
      const size_t thread_id = _S_get_thread_id();
      const bin_record& __bin = _S_bin[__which];

      char* __c = reinterpret_cast<char*>(__p) - sizeof(block_record);
      block_record* block = reinterpret_cast<block_record*>(__c);
#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          // Calculate the number of records to remove from our freelist.
          int remove = __bin.free[thread_id] -
            (__bin.used[thread_id] / _S_options._M_freelist_headroom);

          // The calculation above will almost always tell us to
          // remove one or two records at a time, but this creates too
          // much contention when locking; therefore we wait until
          // the number of records is "high enough".
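          // Worked example (illustrative): with the default
          // _M_freelist_headroom of 10, a thread with used == 500 and
          // free == 100 gets remove == 100 - 500 / 10 == 50, i.e. the
          // freelist is trimmed back toward 10% of the blocks in use.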
          int __cond1 = static_cast<int>(100 * (_S_bin_size - __which));
          int __cond2 = static_cast<int>(__bin.free[thread_id] / _S_options._M_freelist_headroom);
          if (remove > __cond1 && remove > __cond2)
            {
              __gthread_mutex_lock(__bin.mutex);
              block_record* tmp;
              while (remove > 0)
                {
                  tmp = __bin.first[thread_id]->next;
                  if (__bin.first[0] == NULL)
                    {
                      __bin.first[0] = __bin.first[thread_id];
                      __bin.first[0]->next = NULL;
                    }
                  else
                    {
                      __bin.first[thread_id]->next = __bin.first[0];
                      __bin.first[0] = __bin.first[thread_id];
                    }

                  __bin.first[thread_id] = tmp;
                  __bin.free[thread_id]--;
                  remove--;
                }
              __gthread_mutex_unlock(__bin.mutex);
            }

          // Return this block to our list and update counters and
          // owner id as needed.
          if (__bin.first[thread_id] == NULL)
            {
              __bin.first[thread_id] = block;
              block->next = NULL;
            }
          else
            {
              block->next = __bin.first[thread_id];
              __bin.first[thread_id] = block;
            }

          __bin.free[thread_id]++;

          if (thread_id == block->thread_id)
            __bin.used[thread_id]--;
          else
            {
              __bin.used[block->thread_id]--;
              block->thread_id = thread_id;
            }
        }
      else
#endif
        {
          // Single threaded application - return to global pool.
          if (__bin.first[0] == NULL)
            {
              __bin.first[0] = block;
              block->next = NULL;
            }
          else
            {
              block->next = __bin.first[0];
              __bin.first[0] = block;
            }
        }
    }
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_initialize()
    {
      if (_S_options._M_force_new)
        return;

      // Calculate the number of bins required based on _M_max_bytes.
      // _S_bin_size is statically-initialized to one.
      size_t __bin_size = 1;
      while (_S_options._M_max_bytes > __bin_size)
        {
          __bin_size = __bin_size << 1;
          _S_bin_size++;
        }

      // Setup the bin map for quick lookup of the relevant bin.
      const size_t __j = (_S_options._M_max_bytes + 1) * sizeof(binmap_type);
      _S_binmap = static_cast<binmap_type*>(::operator new(__j));

      binmap_type* __bp = _S_binmap;
      binmap_type __bin_max = 1;
      binmap_type __bint = 0;
      for (binmap_type __ct = 0; __ct <= _S_options._M_max_bytes; __ct++)
        {
          if (__ct > __bin_max)
            {
              __bin_max <<= 1;
              __bint++;
            }
          *__bp++ = __bint;
        }
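
      // Worked example (illustrative): with the default _M_max_bytes
      // of 128 the loops above produce 8 bins for block sizes
      // 1, 2, 4, ..., 128, and _S_binmap[__bytes] yields the index of
      // the smallest power-of-2 bin that fits __bytes, e.g.
      // _S_binmap[100] == 7, the 128-byte bin.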
      // If __gthread_active_p(), create and initialize the list of
      // free thread ids. Single threaded applications use thread id 0
      // directly and have no need for this.
      void* __v;
#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          const size_t __k = sizeof(thread_record) * _S_options._M_max_threads;
          __v = ::operator new(__k);
          _S_thread_freelist_first = static_cast<thread_record*>(__v);

          // NOTE! The first assignable thread id is 1 since the
          // global pool uses id 0.
          size_t __i;
          for (__i = 1; __i < _S_options._M_max_threads; __i++)
            {
              thread_record& __tr = _S_thread_freelist_first[__i - 1];
              __tr.next = &_S_thread_freelist_first[__i];
              __tr.id = __i;
            }

          // Set last record.
          _S_thread_freelist_first[__i - 1].next = NULL;
          _S_thread_freelist_first[__i - 1].id = __i;

          // Make sure this is initialized.
#ifndef __GTHREAD_MUTEX_INIT
          __GTHREAD_MUTEX_INIT_FUNCTION(&_S_thread_freelist_mutex);
#endif
          // Initialize per thread key to hold pointer to
          // _S_thread_freelist.
          __gthread_key_create(&_S_thread_key, _S_destroy_thread_key);
        }
#endif

      // Initialize _S_bin and its members.
      __v = ::operator new(sizeof(bin_record) * _S_bin_size);
      _S_bin = static_cast<bin_record*>(__v);

      // Maximum number of threads.
      size_t __max_threads = 1;
#ifdef __GTHREADS
      if (__gthread_active_p())
        __max_threads = _S_options._M_max_threads + 1;
#endif

      for (size_t __n = 0; __n < _S_bin_size; __n++)
        {
          bin_record& __bin = _S_bin[__n];
          __v = ::operator new(sizeof(block_record*) * __max_threads);
          __bin.first = static_cast<block_record**>(__v);

#ifdef __GTHREADS
          if (__gthread_active_p())
            {
              __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin.free = static_cast<size_t*>(__v);

              __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin.used = static_cast<size_t*>(__v);

              __v = ::operator new(sizeof(__gthread_mutex_t));
              __bin.mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
              {
                // Do not copy a POSIX/gthr mutex once in use.
                __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
                *__bin.mutex = __tmp;
              }
#else
              { __GTHREAD_MUTEX_INIT_FUNCTION(__bin.mutex); }
#endif
            }
#endif

          for (size_t __threadn = 0; __threadn < __max_threads; __threadn++)
            {
              __bin.first[__threadn] = NULL;
#ifdef __GTHREADS
              if (__gthread_active_p())
                {
                  __bin.free[__threadn] = 0;
                  __bin.used[__threadn] = 0;
                }
#endif
            }
        }
      _S_init = true;
    }
  template<typename _Tp>
    size_t
    __mt_alloc<_Tp>::
    _S_get_thread_id()
    {
#ifdef __GTHREADS
      // If we have thread support and it's active, we check the thread
      // key value and return its id; if it's not set, we take the
      // first record from _S_thread_freelist, set the key, and
      // return its id.
      if (__gthread_active_p())
        {
          thread_record* __freelist_pos = static_cast<thread_record*>(__gthread_getspecific(_S_thread_key));
          if (__freelist_pos == NULL)
            {
              // Since _S_options._M_max_threads must be larger than
              // the theoretical max number of threads of the OS, the
              // list can never be empty.
              __gthread_mutex_lock(&_S_thread_freelist_mutex);
              __freelist_pos = _S_thread_freelist_first;
              _S_thread_freelist_first = _S_thread_freelist_first->next;
              __gthread_mutex_unlock(&_S_thread_freelist_mutex);

              __gthread_setspecific(_S_thread_key,
                                    static_cast<void*>(__freelist_pos));
            }
          return __freelist_pos->id;
        }
#endif
      // Otherwise (no thread support or inactive) all requests are
      // served from the global pool 0.
      return 0;
    }

#ifdef __GTHREADS
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_destroy_thread_key(void* __freelist_pos)
    {
      // Return this thread id record to the front of thread_freelist.
      __gthread_mutex_lock(&_S_thread_freelist_mutex);
      thread_record* __tr = static_cast<thread_record*>(__freelist_pos);
      __tr->next = _S_thread_freelist_first;
      _S_thread_freelist_first = __tr;
      __gthread_mutex_unlock(&_S_thread_freelist_mutex);
    }
#endif
  template<typename _Tp>
    inline bool
    operator==(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return true; }

  template<typename _Tp>
    inline bool
    operator!=(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return false; }

  template<typename _Tp>
    bool __mt_alloc<_Tp>::_S_init = false;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::tune __mt_alloc<_Tp>::_S_options;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::binmap_type* __mt_alloc<_Tp>::_S_binmap;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::bin_record* volatile __mt_alloc<_Tp>::_S_bin;

  template<typename _Tp>
    size_t __mt_alloc<_Tp>::_S_bin_size = 1;

  // Actual initialization in _S_initialize().
#ifdef __GTHREADS
  template<typename _Tp>
    __gthread_once_t __mt_alloc<_Tp>::_S_once = __GTHREAD_ONCE_INIT;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::thread_record*
    volatile __mt_alloc<_Tp>::_S_thread_freelist_first = NULL;

  template<typename _Tp>
    __gthread_key_t __mt_alloc<_Tp>::_S_thread_key;

  template<typename _Tp>
    __gthread_mutex_t
#ifdef __GTHREAD_MUTEX_INIT
    __mt_alloc<_Tp>::_S_thread_freelist_mutex = __GTHREAD_MUTEX_INIT;
#else
    __mt_alloc<_Tp>::_S_thread_freelist_mutex;
#endif
#endif
} // namespace __gnu_cxx

#endif