// Allocators -*- C++ -*-

// Copyright (C) 2004, 2005, 2006, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
#include <bits/c++config.h>
#include <cstdlib>
#include <ext/pool_allocator.h>

namespace
{
  __gnu_cxx::__mutex&
  get_palloc_mutex()
  {
    // Construct on first use to avoid static initialization order problems.
    static __gnu_cxx::__mutex palloc_mutex;
    return palloc_mutex;
  }
} // anonymous namespace
_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
  // Definitions for __pool_alloc_base.
  __pool_alloc_base::_Obj* volatile*
  __pool_alloc_base::_M_get_free_list(size_t __bytes)
  {
    size_t __i = ((__bytes + (size_t)_S_align - 1) / (size_t)_S_align - 1);
    return _S_free_list + __i;
  }
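  // Example: assuming the default parameters in <ext/pool_allocator.h>
  // (_S_align == 8 and _S_max_bytes == 128, i.e. 16 free lists),
  // requests of 1..8 bytes map to _S_free_list[0], 9..16 bytes to
  // _S_free_list[1], and so on up to 121..128 bytes -> _S_free_list[15].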
  __mutex&
  __pool_alloc_base::_M_get_mutex()
  { return get_palloc_mutex(); }
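  // A single mutex serializes every __pool_alloc instantiation, since the
  // free lists and pool bookkeeping below are static and therefore shared
  // by all of them; _M_allocate_chunk and _M_refill expect the lock to be
  // held already (see their comments).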
  // Allocate memory in large chunks in order to avoid fragmenting the
  // heap too much.  Assume that __n is properly aligned.  We hold the
  // allocation lock.
  char*
  __pool_alloc_base::_M_allocate_chunk(size_t __n, int& __nobjs)
  {
    char* __result;
    size_t __total_bytes = __n * __nobjs;
    size_t __bytes_left = _S_end_free - _S_start_free;

    if (__bytes_left >= __total_bytes)
      {
        __result = _S_start_free;
        _S_start_free += __total_bytes;
        return __result;
      }
    else if (__bytes_left >= __n)
      {
        // Not enough for the full request, but enough for at least one
        // object: hand out as many as will fit.
        __nobjs = (int)(__bytes_left / __n);
        __total_bytes = __n * __nobjs;
        __result = _S_start_free;
        _S_start_free += __total_bytes;
        return __result;
      }
    else
      {
        // Try to make use of the left-over piece.
        if (__bytes_left > 0)
          {
            _Obj* volatile* __free_list = _M_get_free_list(__bytes_left);
            ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
            *__free_list = (_Obj*)(void*)_S_start_free;
          }

        size_t __bytes_to_get = (2 * __total_bytes
                                 + _M_round_up(_S_heap_size >> 4));
        __try
          {
            _S_start_free = static_cast<char*>(::operator new(__bytes_to_get));
          }
        __catch(const std::bad_alloc&)
          {
            // Try to make do with what we have.  That can't hurt.  We
            // do not try smaller requests, since that tends to result
            // in disaster on multi-process machines.
            size_t __i = __n;
            for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
              {
                _Obj* volatile* __free_list = _M_get_free_list(__i);
                _Obj* __p = *__free_list;
                if (__p != 0)
                  {
                    *__free_list = __p->_M_free_list_link;
                    _S_start_free = (char*)__p;
                    _S_end_free = _S_start_free + __i;
                    return _M_allocate_chunk(__n, __nobjs);
                    // Any leftover piece will eventually make it to the
                    // right free list.
                  }
              }
            // What we have wasn't enough.  Rethrow.
            _S_start_free = _S_end_free = 0;   // We have no chunk.
            __throw_exception_again;
          }
        _S_heap_size += __bytes_to_get;
        _S_end_free = _S_start_free + __bytes_to_get;
        return _M_allocate_chunk(__n, __nobjs);
      }
  }
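  // Growth policy example: assuming _M_refill's default of 20 objects and
  // an initially empty pool (_S_heap_size == 0), the first 64-byte refill
  // computes __total_bytes == 1280 and asks ::operator new for
  // 2 * 1280 + _M_round_up(0) == 2560 bytes; later refills additionally
  // request a slice proportional to everything allocated so far.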
  // Returns an object of size __n, and optionally adds to "size
  // __n"'s free list.  We assume that __n is properly aligned.  We
  // hold the allocation lock.
  void*
  __pool_alloc_base::_M_refill(size_t __n)
  {
    int __nobjs = 20;
    char* __chunk = _M_allocate_chunk(__n, __nobjs);
    _Obj* volatile* __free_list;
    _Obj* __result;
    _Obj* __current_obj;
    _Obj* __next_obj;

    if (__nobjs == 1)
      return __chunk;
    __free_list = _M_get_free_list(__n);

    // Build free list in chunk.
    __result = (_Obj*)(void*)__chunk;
    *__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
    for (int __i = 1; ; __i++)
      {
        __current_obj = __next_obj;
        __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
        if (__nobjs - 1 == __i)
          {
            __current_obj->_M_free_list_link = 0;
            break;
          }
        else
          __current_obj->_M_free_list_link = __next_obj;
      }
    return __result;
  }
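  // For example, if _M_allocate_chunk delivers the full 20 objects for
  // __n == 32, the 640-byte chunk yields one object returned to the caller
  // and the remaining 19 are threaded onto the size-32 free list.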
  __pool_alloc_base::_Obj* volatile __pool_alloc_base::_S_free_list[_S_free_list_size];

  char* __pool_alloc_base::_S_start_free = 0;

  char* __pool_alloc_base::_S_end_free = 0;

  size_t __pool_alloc_base::_S_heap_size = 0;
  template class __pool_alloc<char>;
  template class __pool_alloc<wchar_t>;
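  // Usage sketch (illustrative only, not part of this translation unit):
  // because the pool state above is static, every instantiation shares the
  // same free lists, e.g.
  //
  //   #include <vector>
  //   #include <ext/pool_allocator.h>
  //   std::vector<char, __gnu_cxx::__pool_alloc<char> > __buf;
  //
  // Small allocations are served from the free lists; requests larger than
  // _S_max_bytes fall back to ::operator new.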
179 _GLIBCXX_END_NAMESPACE