// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */
#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/atomicity.h>
namespace __gnu_cxx
{
  typedef void (*__destroy_handler)(void*);

  /// @brief  Base class for pool object.
  struct __pool_base
  {
    // Using short int as type for the binmap implies we are never
    // caching blocks larger than 65535 with this allocator.
    typedef unsigned short int _Binmap_type;

    // Variables used to configure the behavior of the allocator,
    // assigned and explained in detail below.
    struct _Tune
    {
      // Compile time constants for the default _Tune values.
      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_min_bin = 8 };
      enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
      enum { _S_max_threads = 4096 };
      enum { _S_freelist_headroom = 10 };

      // Alignment needed.
      // NB: In any case must be >= sizeof(_Block_record), that
      // is 4 on 32 bit machines and 8 on 64 bit machines.
      size_t _M_align;

      // Allocation requests (after round-up to power of 2) below
      // this value will be handled by the allocator. A raw call to
      // new/delete will be used for requests larger than this value.
      size_t _M_max_bytes;

      // Size in bytes of the smallest bin.
      // NB: Must be a power of 2 and >= _M_align.
      size_t _M_min_bin;

      // In order to avoid fragmenting and to minimize the number of
      // new() calls we always request new memory using this
      // value. Based on previous discussions on the libstdc++
      // mailing list we have chosen the value below.
      // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
      size_t _M_chunk_size;

      // The maximum number of supported threads. For
      // single-threaded operation, use one. Maximum values will
      // vary depending on details of the underlying system. (For
      // instance, Linux 2.4.18 reports 4070 in
      // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
      // 65534.)
      size_t _M_max_threads;

      // Each time a deallocation occurs in a threaded application,
      // we make sure that there are no more than
      // _M_freelist_headroom % of used memory on the freelist. If
      // the number of additional records is more than
      // _M_freelist_headroom % of the freelist, we move these
      // records back to the global pool.
      size_t _M_freelist_headroom;

      // When set to true, forces all allocations to use new().
      bool _M_force_new;

      explicit
      _Tune()
      : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
      _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
      _M_freelist_headroom(_S_freelist_headroom),
      _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false)
      { }

      explicit
      _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
            size_t __maxthreads, size_t __headroom, bool __force)
      : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
      _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
      _M_freelist_headroom(__headroom), _M_force_new(__force)
      { }
    };
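
    // A minimal illustrative sketch (editorial addition, not part of
    // the original interface docs): constructing a _Tune with custom
    // values via the second constructor above.  The specific numbers
    // are arbitrary example choices, not recommended defaults.
    //
    //   __gnu_cxx::__pool_base::_Tune __t(16,     // _M_align
    //                                     5120,   // _M_max_bytes
    //                                     32,     // _M_min_bin
    //                                     5120,   // _M_chunk_size
    //                                     4096,   // _M_max_threads
    //                                     10,     // _M_freelist_headroom (%)
    //                                     false); // _M_force_new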

    struct _Block_address
    {
      void*            _M_initial;
      _Block_address*  _M_next;
    };

    const _Tune&
    _M_get_options() const
    { return _M_options; }

    void
    _M_set_options(_Tune __t)
    {
      if (!_M_init)
        _M_options = __t;
    }

    bool
    _M_check_threshold(size_t __bytes)
    { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }

    size_t
    _M_get_binmap(size_t __bytes)
    { return _M_binmap[__bytes]; }

    const size_t
    _M_get_align()
    { return _M_options._M_align; }

    explicit
    __pool_base()
    : _M_options(_Tune()), _M_binmap(NULL), _M_init(false) { }

    explicit
    __pool_base(const _Tune& __options)
    : _M_options(__options), _M_binmap(NULL), _M_init(false) { }

  private:
    explicit
    __pool_base(const __pool_base&);

    __pool_base&
    operator=(const __pool_base&);

  protected:
    // Configuration options.
    _Tune           _M_options;

    _Binmap_type*   _M_binmap;

    // Configuration of the pool object via _M_options can happen
    // after construction but before initialization. After
    // initialization is complete, this variable is set to true.
    bool            _M_init;
  };

  /**
   *  @brief  Data describing the underlying memory pool, parameterized on
   *  threading support.
   */
  template<bool _Thread>
    class __pool;
  /// Specialization for single thread.
  template<>
    class __pool<false> : public __pool_base
    {
    public:
      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record* volatile    _M_next;
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block.
        _Block_record** volatile   _M_first;

        // A list of the initial addresses of all allocated blocks.
        _Block_address*            _M_address;
      };

      void
      _M_initialize_once()
      {
        if (__builtin_expect(_M_init == false, false))
          _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes);

      size_t
      _M_get_thread_id() { return 0; }

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      { }

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1) { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record* volatile  _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                 _M_bin_size;

      void
      _M_initialize();
  };

#ifdef __GTHREADS
  /// Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base
    {
    public:
      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // store the address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // is called (i.e. when the thread dies), we return the thread
      // id to the front of this list.
      struct _Thread_record
      {
        // Points to next free thread id record. NULL if last record in list.
        _Thread_record* volatile   _M_next;

        // Thread id ranging from 1 to _S_max_threads.
        size_t                     _M_id;
      };

      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record* volatile    _M_next;

        // The thread id of the thread which has requested this block.
        size_t                     _M_thread_id;
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block for each
        // thread id. Memory to this "array" is allocated in
        // _S_initialize() for _S_max_threads + global pool 0.
        _Block_record** volatile   _M_first;

        // A list of the initial addresses of all allocated blocks.
        _Block_address*            _M_address;

        // An "array" of counters used to keep track of the number of
        // blocks that are on the freelist/used for each thread id.
        // Memory to these "arrays" is allocated in _S_initialize() for
        // _S_max_threads + global pool 0.
        size_t* volatile           _M_free;
        size_t* volatile           _M_used;

        // Each bin has its own mutex which is used to ensure data
        // integrity while changing "ownership" on a block. The mutex
        // is initialized in _S_initialize().
        __gthread_mutex_t*         _M_mutex;
      };

      // XXX GLIBCXX_ABI Deprecated
      void
      _M_initialize(__destroy_handler);

      void
      _M_initialize_once()
      {
        if (__builtin_expect(_M_init == false, false))
          _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes);

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
                         size_t __thread_id)
      {
        if (__gthread_active_p())
          {
            __block->_M_thread_id = __thread_id;
            --__bin._M_free[__thread_id];
            ++__bin._M_used[__thread_id];
          }
      }

      // XXX GLIBCXX_ABI Deprecated
      void
      _M_destroy_thread_key(void*);

      size_t
      _M_get_thread_id();

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1), _M_thread_freelist(NULL)
      { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1),
      _M_thread_freelist(NULL)
      { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record* volatile  _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                 _M_bin_size;

      _Thread_record*        _M_thread_freelist;
      void*                  _M_thread_freelist_initial;

      void
      _M_initialize();
  };
#endif

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool
    {
      typedef _PoolTp<_Thread>  pool_type;

      static pool_type&
      _S_get_pool()
      {
        static pool_type _S_pool;
        return _S_pool;
      }
    };

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_base;

  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, false>
    : public __common_pool<_PoolTp, false>
    {
      using __common_pool<_PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };

#ifdef __GTHREADS
  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, true>
    : public __common_pool<_PoolTp, true>
    {
      using __common_pool<_PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            if (__gthread_active_p())
              {
                // On some platforms, __gthread_once_t is an aggregate.
                static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
                __gthread_once(&__once, _S_initialize);
              }

            // Double check initialization. May be necessary on some
            // systems for proper construction when not compiling with
            // thread flags.
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };
#endif

  /// @brief  Policy for shared __pool objects.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_policy : public __common_pool_base<_PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
               bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      using __common_pool_base<_PoolTp, _Thread>::_S_get_pool;
      using __common_pool_base<_PoolTp, _Thread>::_S_initialize_once;
  };
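
  // A minimal illustrative sketch (editorial addition, not part of the
  // original file): rebinding a policy to a different value type, as
  // __mt_alloc::rebind does below.  The concrete types are arbitrary
  // example choices.  Note that _M_rebind of the shared policy ignores
  // the element type, so the rebound policy is the same type: the pool
  // is shared across all element types.
  //
  //   typedef __gnu_cxx::__common_pool_policy<__gnu_cxx::__pool, true>
  //     policy_type;
  //   typedef policy_type::_M_rebind<long>::other long_policy_type;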

  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool
    {
      typedef _Tp               value_type;
      typedef _PoolTp<_Thread>  pool_type;

      static pool_type&
      _S_get_pool()
      {
        // Sane defaults for the _PoolTp.
        typedef typename pool_type::_Block_record _Block_record;
        const static size_t __a = (__alignof__(_Tp) >= sizeof(_Block_record)
                                   ? __alignof__(_Tp) : sizeof(_Block_record));

        typedef typename __pool_base::_Tune _Tune;
        static _Tune _S_tune(__a, sizeof(_Tp) * 64,
                             sizeof(_Tp) * 2 >= __a ? sizeof(_Tp) * 2 : __a,
                             sizeof(_Tp) * size_t(_Tune::_S_chunk_size),
                             _Tune::_S_max_threads,
                             _Tune::_S_freelist_headroom,
                             getenv("GLIBCXX_FORCE_NEW") ? true : false);
        static pool_type _S_pool(_S_tune);
        return _S_pool;
      }
    };

  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_base;

  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, false>
    : public __per_type_pool<_Tp, _PoolTp, false>
    {
      using __per_type_pool<_Tp, _PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };

#ifdef __GTHREADS
  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, true>
    : public __per_type_pool<_Tp, _PoolTp, true>
    {
      using __per_type_pool<_Tp, _PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            if (__gthread_active_p())
              {
                // On some platforms, __gthread_once_t is an aggregate.
                static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
                __gthread_once(&__once, _S_initialize);
              }

            // Double check initialization. May be necessary on some
            // systems for proper construction when not compiling with
            // thread flags.
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };
#endif

  /// @brief  Policy for individual __pool objects.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_policy
    : public __per_type_pool_base<_Tp, _PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
               bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      using __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_get_pool;
      using __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_initialize_once;
  };
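
  // A minimal illustrative sketch (editorial addition, not part of the
  // original file): selecting the per-type policy instead of the
  // default shared policy when instantiating __mt_alloc (defined
  // below).  The element and container types are arbitrary example
  // choices; <vector> is assumed to be included.
  //
  //   typedef __gnu_cxx::__per_type_pool_policy<int, __gnu_cxx::__pool, true>
  //     policy_type;
  //   typedef __gnu_cxx::__mt_alloc<int, policy_type> allocator_type;
  //   std::vector<int, allocator_type> __v(16);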

  /// @brief  Base class for _Tp dependent member functions.
  template<typename _Tp>
    class __mt_alloc_base
    {
    public:
      typedef size_t          size_type;
      typedef ptrdiff_t       difference_type;
      typedef _Tp*            pointer;
      typedef const _Tp*      const_pointer;
      typedef _Tp&            reference;
      typedef const _Tp&      const_reference;
      typedef _Tp             value_type;

      pointer
      address(reference __x) const
      { return &__x; }

      const_pointer
      address(const_reference __x) const
      { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new(__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
    };

#ifdef __GTHREADS
#define __thread_default true
#else
#define __thread_default false
#endif

  /**
   *  @brief  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  the "global" list).
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
   */
  template<typename _Tp,
           typename _Poolp = __common_pool_policy<__pool, __thread_default> >
    class __mt_alloc : public __mt_alloc_base<_Tp>
    {
    public:
      typedef size_t                      size_type;
      typedef ptrdiff_t                   difference_type;
      typedef _Tp*                        pointer;
      typedef const _Tp*                  const_pointer;
      typedef _Tp&                        reference;
      typedef const _Tp&                  const_reference;
      typedef _Tp                         value_type;
      typedef _Poolp                      __policy_type;
      typedef typename _Poolp::pool_type  __pool_type;

      template<typename _Tp1, typename _Poolp1 = _Poolp>
        struct rebind
        {
          typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
          typedef __mt_alloc<_Tp1, pol_type> other;
        };

      __mt_alloc() throw() { }

      __mt_alloc(const __mt_alloc&) throw() { }

      template<typename _Tp1, typename _Poolp1>
        __mt_alloc(const __mt_alloc<_Tp1, _Poolp1>& obj) throw() { }

      ~__mt_alloc() throw() { }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      const __pool_base::_Tune
      _M_get_options()
      {
        // Return a copy, not a reference, for external consumption.
        return __policy_type::_S_get_pool()._M_get_options();
      }

      void
      _M_set_options(__pool_base::_Tune __t)
      { __policy_type::_S_get_pool()._M_set_options(__t); }
    };
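
  // A minimal illustrative sketch (editorial addition, not part of the
  // original file): tuning the pool through the allocator.  Note that
  // _M_set_options only takes effect before the underlying pool is
  // initialized, i.e. before the first allocation (see _M_set_options
  // in __pool_base above).  The tuning values are arbitrary examples.
  //
  //   typedef __gnu_cxx::__mt_alloc<int> allocator_type;
  //   allocator_type __a;
  //   __gnu_cxx::__pool_base::_Tune __t(8, 5120, 32, 5120, 4096, 10, false);
  //   __a._M_set_options(__t);
  //   int* __p = __a.allocate(128);
  //   __a.deallocate(__p, 128);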

  template<typename _Tp, typename _Poolp>
    typename __mt_alloc<_Tp, _Poolp>::pointer
    __mt_alloc<_Tp, _Poolp>::
    allocate(size_type __n, const void*)
    {
      if (__builtin_expect(__n > this->max_size(), false))
        std::__throw_bad_alloc();

      __policy_type::_S_initialize_once();

      // Requests larger than _M_max_bytes are handled by operator
      // new/delete directly.
      __pool_type& __pool = __policy_type::_S_get_pool();
      const size_t __bytes = __n * sizeof(_Tp);
      if (__pool._M_check_threshold(__bytes))
        {
          void* __ret = ::operator new(__bytes);
          return static_cast<_Tp*>(__ret);
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = __pool._M_get_binmap(__bytes);
      const size_t __thread_id = __pool._M_get_thread_id();

      // Find out if we have blocks on our freelist.  If so, go ahead
      // and use them directly without having to lock anything.
      char* __c;
      typedef typename __pool_type::_Bin_record _Bin_record;
      const _Bin_record& __bin = __pool._M_get_bin(__which);
      if (__bin._M_first[__thread_id])
        {
          // Already reserved.
          typedef typename __pool_type::_Block_record _Block_record;
          _Block_record* __block = __bin._M_first[__thread_id];
          __bin._M_first[__thread_id] = __block->_M_next;

          __pool._M_adjust_freelist(__bin, __block, __thread_id);
          __c = reinterpret_cast<char*>(__block) + __pool._M_get_align();
        }
      else
        {
          // Null, reserve.
          __c = __pool._M_reserve_block(__bytes, __thread_id);
        }
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }

  template<typename _Tp, typename _Poolp>
    void
    __mt_alloc<_Tp, _Poolp>::
    deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__p != 0, true))
        {
          // Requests larger than _M_max_bytes are handled by
          // operators new/delete directly.
          __pool_type& __pool = __policy_type::_S_get_pool();
          const size_t __bytes = __n * sizeof(_Tp);
          if (__pool._M_check_threshold(__bytes))
            ::operator delete(__p);
          else
            __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
        }
    }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return true; }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return false; }
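
  // A minimal illustrative sketch (editorial addition, not part of the
  // original file): using __mt_alloc as a drop-in allocator for a
  // standard container.  The container and element type are arbitrary
  // example choices.
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
  //   __v.push_back(42);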

#undef __thread_default
} // namespace __gnu_cxx

#endif