// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 *  You should only include this header if you are using GCC 3 or later.
 */
#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/atomicity.h>
namespace __gnu_cxx
{
  /**
   *  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  "global").
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
   */
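  // A minimal usage sketch (an illustration added here, not part of the
  // original header): __mt_alloc, defined below, is a standard-conforming
  // allocator, so the usual way to use it is simply to name it as a
  // container's allocator type.
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   int main()
  //   {
  //     std::vector<int, __gnu_cxx::__mt_alloc<int> > v;
  //     v.push_back(42);   // small request, served from a pooled bin
  //   }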
  typedef void (*__destroy_handler)(void*);
  typedef void (*__create_handler)(void);

  struct __pool_base
  {
    // Using short int as type for the binmap implies we are never
    // caching blocks larger than 65535 with this allocator.
    typedef unsigned short int _Binmap_type;

    // Variables used to configure the behavior of the allocator,
    // assigned and explained in detail below.
    struct _Tune
    {
      // Compile time constants for the default _Tune values.
      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_min_bin = 8 };
      enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
      enum { _S_max_threads = 4096 };
      enum { _S_freelist_headroom = 10 };

      // Alignment needed.
      // NB: In any case must be >= sizeof(_Block_record), that
      // is 4 on 32 bit machines and 8 on 64 bit machines.
      size_t _M_align;

      // Allocation requests (after round-up to power of 2) below
      // this value will be handled by the allocator. A raw new()
      // call will be used for requests larger than this value.
      size_t _M_max_bytes;

      // Size in bytes of the smallest bin.
      // NB: Must be a power of 2 and >= _M_align.
      size_t _M_min_bin;

      // In order to avoid fragmenting and minimize the number of
      // new() calls we always request new memory using this
      // value. Based on previous discussions on the libstdc++
      // mailing list we have chosen the value below.
      // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
      size_t _M_chunk_size;

      // The maximum number of supported threads. For
      // single-threaded operation, use one. Maximum values will
      // vary depending on details of the underlying system. (For
      // instance, Linux 2.4.18 reports 4070 in
      // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
      // 65534.)
      size_t _M_max_threads;

      // Each time a deallocation occurs in a threaded application
      // we make sure that there are no more than
      // _M_freelist_headroom % of used memory on the freelist. If
      // the number of additional records is more than
      // _M_freelist_headroom % of the freelist, we move these
      // records back to the global pool.
      size_t _M_freelist_headroom;

      // When set to true, forces all allocations to use new().
      bool _M_force_new;

      explicit
      _Tune()
      : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
        _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
        _M_freelist_headroom(_S_freelist_headroom),
        _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false)
      { }

      explicit
      _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
            size_t __maxthreads, size_t __headroom, bool __force)
      : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
        _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
        _M_freelist_headroom(__headroom), _M_force_new(__force)
      { }

      bool
      is_default() const
      {
        bool __ret = true;
        __ret &= _M_align == _S_align;
        __ret &= _M_max_bytes == _S_max_bytes;
        __ret &= _M_min_bin == _S_min_bin;
        __ret &= _M_chunk_size == _S_chunk_size;
        __ret &= _M_max_threads == _S_max_threads;
        __ret &= _M_freelist_headroom == _S_freelist_headroom;
        return __ret;
      }
    };

    struct _Block_address
    {
      void*            _M_initial;
      _Block_address*  _M_next;
    };

    const _Tune&
    _M_get_options() const
    { return _M_options; }

    void
    _M_set_options(_Tune __t)
    {
      if (!_M_init)
        _M_options = __t;
    }

    bool
    _M_check_threshold(size_t __bytes)
    { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }

    size_t
    _M_get_binmap(size_t __bytes)
    { return _M_binmap[__bytes]; }

    explicit __pool_base()
    : _M_options(_Tune()), _M_binmap(NULL), _M_init(false) { }

    explicit __pool_base(const _Tune& __tune)
    : _M_options(__tune), _M_binmap(NULL), _M_init(false) { }

  protected:
    // Configuration options.
    _Tune           _M_options;

    _Binmap_type*   _M_binmap;

    // We need to create the initial lists and set up some variables
    // before we can answer the first request for memory.
    bool            _M_init;
  };
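  // A hedged illustration (added here, not part of the original header) of
  // building a non-default _Tune. The seven constructor arguments are, in
  // order: alignment, maximum bytes handled by the pool, smallest bin size,
  // chunk size, maximum threads, freelist headroom percentage, and the
  // force-new flag. The values below are arbitrary examples, not
  // recommendations.
  //
  //   __gnu_cxx::__pool_base::_Tune t(16, 256, 16,
  //                                   4096 - 4 * sizeof(void*),
  //                                   4096, 10, false);
  //   // t.is_default() returns false here, since several fields differ
  //   // from the _S_* defaults.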
  // Data describing the underlying memory pool, parameterized on
  // threading support.
  template<bool _Thread>
    class __pool;

  template<>
    class __pool<true>;

  template<>
    class __pool<false>;


#ifdef __GTHREADS
  // Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base
    {
    public:
      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // store the address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // is called (i.e. when the thread dies), we return the thread id
      // to the front of this list.
      struct _Thread_record
      {
        // Points to next free thread id record. NULL if last record in list.
        _Thread_record* volatile  _M_next;

        // Thread id ranging from 1 to _S_max_threads.
        size_t                    _M_id;
      };

      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record* volatile   _M_next;

        // The thread id of the thread which has requested this block.
        size_t                    _M_thread_id;
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block for each
        // thread id. Memory to this "array" is allocated in
        // _S_initialize() for _S_max_threads + global pool 0.
        _Block_record** volatile  _M_first;

        // A list of the initial addresses of all allocated blocks.
        _Block_address*           _M_address;

        // An "array" of counters used to keep track of the number of
        // blocks that are on the freelist/used for each thread id.
        // Memory to these "arrays" is allocated in _S_initialize() for
        // _S_max_threads + global pool 0.
        size_t* volatile          _M_free;
        size_t* volatile          _M_used;

        // Each bin has its own mutex which is used to ensure data
        // integrity while changing "ownership" on a block. The mutex
        // is initialized in _S_initialize().
        __gthread_mutex_t*        _M_mutex;
      };

      void
      _M_initialize(__destroy_handler __d);

      void
      _M_initialize_once(__create_handler __c)
      {
        // Although the test in __gthread_once() would suffice, we
        // wrap the test of the once condition in our own unlocked
        // check. This saves one function call to pthread_once()
        // (which itself only tests the once value unlocked anyway
        // and immediately returns if set).
        if (__builtin_expect(_M_init == false, false))
          {
            if (__gthread_active_p())
              __gthread_once(&_M_once, __c);
            if (!_M_init)
              __c();
          }
      }

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes);

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
                         size_t __thread_id)
      {
        if (__gthread_active_p())
          {
            __block->_M_thread_id = __thread_id;
            --__bin._M_free[__thread_id];
            ++__bin._M_used[__thread_id];
          }
      }

      void
      _M_destroy_thread_key(void* __freelist_pos);

      size_t
      _M_get_thread_id();

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1), _M_thread_freelist(NULL)
      {
        // On some platforms, __gthread_once_t is an aggregate.
        __gthread_once_t __tmp = __GTHREAD_ONCE_INIT;
        _M_once = __tmp;
      }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1),
        _M_thread_freelist(NULL)
      {
        // On some platforms, __gthread_once_t is an aggregate.
        __gthread_once_t __tmp = __GTHREAD_ONCE_INIT;
        _M_once = __tmp;
      }

      ~__pool();

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record* volatile  _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                 _M_bin_size;

      __gthread_once_t       _M_once;

      _Thread_record*        _M_thread_freelist;
      void*                  _M_thread_freelist_initial;
    };
#endif
  // Specialization for single thread.
  template<>
    class __pool<false> : public __pool_base
    {
    public:
      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record* volatile   _M_next;
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block.
        _Block_record** volatile  _M_first;

        // A list of the initial addresses of all allocated blocks.
        _Block_address*           _M_address;
      };

      void
      _M_initialize_once()
      {
        if (__builtin_expect(_M_init == false, false))
          _M_initialize();
      }

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes);

      size_t
      _M_get_thread_id() { return 0; }

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      { }

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1) { }

      ~__pool();

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record* volatile  _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                 _M_bin_size;

      void
      _M_initialize();
    };
  template<bool _Thread>
    struct __common_pool_policy
    {
      typedef __pool<_Thread> __pool_type;

      template<typename _Tp1, bool _Thread1 = _Thread>
        struct _M_rebind;

      template<typename _Tp1>
        struct _M_rebind<_Tp1, true>
        { typedef __common_pool_policy<true> other; };

      template<typename _Tp1>
        struct _M_rebind<_Tp1, false>
        { typedef __common_pool_policy<false> other; };

      static __pool_type&
      _S_get_pool()
      {
        static __pool_type _S_pool;
        return _S_pool;
      }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };

  template<>
    struct __common_pool_policy<true>;

#ifdef __GTHREADS
  template<>
    struct __common_pool_policy<true>
    {
      typedef __pool<true> __pool_type;

      template<typename _Tp1, bool _Thread1 = true>
        struct _M_rebind;

      template<typename _Tp1>
        struct _M_rebind<_Tp1, true>
        { typedef __common_pool_policy<true> other; };

      template<typename _Tp1>
        struct _M_rebind<_Tp1, false>
        { typedef __common_pool_policy<false> other; };

      static __pool_type&
      _S_get_pool()
      {
        static __pool_type _S_pool;
        return _S_pool;
      }

      static void
      _S_destroy_thread_key(void* __freelist_pos)
      { _S_get_pool()._M_destroy_thread_key(__freelist_pos); }

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize(_S_destroy_thread_key); }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once(_S_initialize);
            __init = true;
          }
      }
    };
#endif
  template<typename _Tp, bool _Thread>
    struct __per_type_pool_policy
    {
      typedef __pool<_Thread> __pool_type;

      template<typename _Tp1, bool _Thread1 = _Thread>
        struct _M_rebind;

      template<typename _Tp1>
        struct _M_rebind<_Tp1, false>
        { typedef __per_type_pool_policy<_Tp1, false> other; };

      template<typename _Tp1>
        struct _M_rebind<_Tp1, true>
        { typedef __per_type_pool_policy<_Tp1, true> other; };

      // Avoid static initialization ordering issues.
      static __pool_type&
      _S_get_pool()
      {
        // Sane defaults for the __pool_type.
        const static size_t __align =
          (__alignof__(_Tp) >= sizeof(typename __pool_type::_Block_record)
           ? __alignof__(_Tp)
           : sizeof(typename __pool_type::_Block_record));
        static __pool_base::_Tune _S_tune(__align, sizeof(_Tp) * 128,
                                          ((sizeof(_Tp) * 2) >= __align
                                           ? sizeof(_Tp) * 2 : __align),
                                          __pool_type::_Tune::_S_chunk_size,
                                          __pool_type::_Tune::_S_max_threads,
                                          __pool_type::_Tune::_S_freelist_headroom,
                                          getenv("GLIBCXX_FORCE_NEW") ? true : false);
        static __pool_type _S_pool(_S_tune);
        return _S_pool;
      }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };

  template<typename _Tp>
    struct __per_type_pool_policy<_Tp, true>;

#ifdef __GTHREADS
  template<typename _Tp>
    struct __per_type_pool_policy<_Tp, true>
    {
      typedef __pool<true> __pool_type;

      template<typename _Tp1, bool _Thread1 = true>
        struct _M_rebind;

      template<typename _Tp1>
        struct _M_rebind<_Tp1, false>
        { typedef __per_type_pool_policy<_Tp1, false> other; };

      template<typename _Tp1>
        struct _M_rebind<_Tp1, true>
        { typedef __per_type_pool_policy<_Tp1, true> other; };

      // Avoid static initialization ordering issues.
      static __pool_type&
      _S_get_pool()
      {
        // Sane defaults for the __pool_type.
        const static size_t __align =
          (__alignof__(_Tp) >= sizeof(typename __pool_type::_Block_record)
           ? __alignof__(_Tp)
           : sizeof(typename __pool_type::_Block_record));
        static __pool_base::_Tune _S_tune(__align, sizeof(_Tp) * 128,
                                          ((sizeof(_Tp) * 2) >= __align
                                           ? sizeof(_Tp) * 2 : __align),
                                          __pool_type::_Tune::_S_chunk_size,
                                          __pool_type::_Tune::_S_max_threads,
                                          __pool_type::_Tune::_S_freelist_headroom,
                                          getenv("GLIBCXX_FORCE_NEW") ? true : false);
        static __pool_type _S_pool(_S_tune);
        return _S_pool;
      }

      static void
      _S_destroy_thread_key(void* __freelist_pos)
      { _S_get_pool()._M_destroy_thread_key(__freelist_pos); }

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize(_S_destroy_thread_key); }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once(_S_initialize);
            __init = true;
          }
      }
    };
#endif
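  // A hedged illustration (added here, not part of the original header) of
  // selecting a non-default pool policy. __mt_alloc, defined below, defaults
  // to the common pool policy; with __GTHREADS enabled, a per-type policy
  // gives each value_type its own pool instance. Types and names here are
  // examples only.
  //
  //   typedef __gnu_cxx::__per_type_pool_policy<int, true> policy_type;
  //   typedef __gnu_cxx::__mt_alloc<int, policy_type>      alloc_type;
  //   std::vector<int, alloc_type> v;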
  template<typename _Tp>
    class __mt_alloc_base
    {
    public:
      typedef size_t             size_type;
      typedef ptrdiff_t          difference_type;
      typedef _Tp*               pointer;
      typedef const _Tp*         const_pointer;
      typedef _Tp&               reference;
      typedef const _Tp&         const_reference;
      typedef _Tp                value_type;

      pointer
      address(reference __x) const
      { return &__x; }

      const_pointer
      address(const_reference __x) const
      { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new(__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
    };

#ifdef __GTHREADS
#define __default_policy __common_pool_policy<true>
#else
#define __default_policy __common_pool_policy<false>
#endif
  template<typename _Tp, typename _Poolp = __default_policy>
    class __mt_alloc : public __mt_alloc_base<_Tp>, _Poolp
    {
    public:
      typedef size_t             size_type;
      typedef ptrdiff_t          difference_type;
      typedef _Tp*               pointer;
      typedef const _Tp*         const_pointer;
      typedef _Tp&               reference;
      typedef const _Tp&         const_reference;
      typedef _Tp                value_type;
      typedef _Poolp             __policy_type;
      typedef typename _Poolp::__pool_type __pool_type;

      template<typename _Tp1, typename _Poolp1 = _Poolp>
        struct rebind
        {
          typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
          typedef __mt_alloc<_Tp1, pol_type> other;
        };

      // Create pool instance so that order of construction will be
      // pool_type first, then allocator. This is necessary for
      // correct global and static object construction/destruction.
      __mt_alloc() throw()
      { __policy_type::_S_get_pool(); }

      __mt_alloc(const __mt_alloc&) throw()
      { __policy_type::_S_get_pool(); }

      template<typename _Tp1, typename _Poolp1>
        __mt_alloc(const __mt_alloc<_Tp1, _Poolp1>& obj) throw()
        { __policy_type::_S_get_pool(); }

      ~__mt_alloc() throw() { }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      const __pool_base::_Tune
      _M_get_options()
      {
        // Return a copy, not a reference, for external consumption.
        return __pool_base::_Tune(this->_S_get_pool()._M_get_options());
      }

      void
      _M_set_options(__pool_base::_Tune __t)
      { this->_S_get_pool()._M_set_options(__t); }
    };
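  // A hedged illustration (added here, not part of the original header) of
  // reading and adjusting the pool options through the allocator. Note that
  // the pool only accepts new options while it is still uninitialized, i.e.
  // before the first allocation through that pool. Values below are examples
  // only.
  //
  //   __gnu_cxx::__mt_alloc<int> a;
  //   __gnu_cxx::__pool_base::_Tune opts = a._M_get_options();
  //   opts._M_max_bytes = 64;   // hand only smaller requests to the pool
  //   a._M_set_options(opts);   // has no effect once the pool is initialized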
  template<typename _Tp, typename _Poolp>
    typename __mt_alloc<_Tp, _Poolp>::pointer
    __mt_alloc<_Tp, _Poolp>::
    allocate(size_type __n, const void*)
    {
      this->_S_initialize_once();

      // Requests larger than _M_max_bytes are handled by operator
      // new/delete directly.
      __pool_type& __pool = this->_S_get_pool();
      const size_t __bytes = __n * sizeof(_Tp);
      if (__pool._M_check_threshold(__bytes))
        {
          void* __ret = ::operator new(__bytes);
          return static_cast<_Tp*>(__ret);
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = __pool._M_get_binmap(__bytes);
      const size_t __thread_id = __pool._M_get_thread_id();

      // Find out if we have blocks on our freelist. If so, go ahead
      // and use them directly without having to lock anything.
      char* __c;
      typedef typename __pool_type::_Bin_record _Bin_record;
      const _Bin_record& __bin = __pool._M_get_bin(__which);
      if (__bin._M_first[__thread_id])
        {
          // Already reserved.
          typedef typename __pool_type::_Block_record _Block_record;
          _Block_record* __block = __bin._M_first[__thread_id];
          __bin._M_first[__thread_id] = __bin._M_first[__thread_id]->_M_next;

          __pool._M_adjust_freelist(__bin, __block, __thread_id);
          const __pool_base::_Tune& __options = __pool._M_get_options();
          __c = reinterpret_cast<char*>(__block) + __options._M_align;
        }
      else
        {
          // Null, reserve.
          __c = __pool._M_reserve_block(__bytes, __thread_id);
        }
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }
  template<typename _Tp, typename _Poolp>
    void
    __mt_alloc<_Tp, _Poolp>::
    deallocate(pointer __p, size_type __n)
    {
      // Requests larger than _M_max_bytes are handled by operators
      // new/delete directly.
      __pool_type& __pool = this->_S_get_pool();
      const size_t __bytes = __n * sizeof(_Tp);
      if (__pool._M_check_threshold(__bytes))
        ::operator delete(__p);
      else
        __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
    }
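  // A hedged illustration (added here, not part of the original header) of
  // calling the allocator members directly. Requests above _M_max_bytes (and
  // every request when the GLIBCXX_FORCE_NEW environment variable is set)
  // bypass the pool and go straight to operator new/delete. Sizes are
  // examples only.
  //
  //   __gnu_cxx::__mt_alloc<char> a;
  //   char* small_p = a.allocate(32);    // pooled: 32 <= default _M_max_bytes
  //   char* large_p = a.allocate(1024);  // above threshold: ::operator new
  //   a.deallocate(small_p, 32);
  //   a.deallocate(large_p, 1024);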
  template<typename _Tp, typename _Poolp>
    inline bool
    operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return true; }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return false; }

#undef __default_policy
} // namespace __gnu_cxx

#endif