// Allocators -*- C++ -*-

// Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.
/*
 * Copyright (c) 1996-1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */
/** @file ext/pool_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 *  You should only include this header if you are using GCC 3 or later.
 */
#ifndef _POOL_ALLOCATOR_H
#define _POOL_ALLOCATOR_H 1

#include <bits/functexcept.h>
#include <bits/stl_threads.h>
#include <bits/atomicity.h>
#include <bits/allocator_traits.h>
#include <ext/new_allocator.h>

namespace __gnu_cxx
{
  using std::_STL_mutex_lock;
  using std::__throw_bad_alloc;

  /**
   *  Default node allocator, "SGI" style.  Uses various allocators to
   *  fulfill underlying requests (and makes as few requests as possible
   *  when in default high-speed pool mode).
   *
   *  Important implementation properties:
   *  0. If globally mandated, then allocate objects from __new_alloc.
   *  1. If a client requests an object of size > _S_max_bytes, the
   *     resulting object will be obtained directly from __new_alloc.
   *  2. In all other cases, we allocate an object of size exactly
   *     _S_round_up(requested_size).  Thus the client has enough size
   *     information that we can return the object to the proper free list
   *     without permanently losing part of the object.
   *
   *  The first template parameter specifies whether more than one thread
   *  may use this allocator.  It is safe to allocate an object from one
   *  instance of a default_alloc and deallocate it with another one.  This
   *  effectively transfers its ownership to the second one.  This may have
   *  undesirable effects on reference locality.
   *
   *  The second parameter is unused and serves only to allow the creation
   *  of multiple default_alloc instances.  Note that containers built on
   *  different allocator instances have different types, limiting the
   *  utility of this approach.  If you do not wish to share the free lists
   *  with the main default_alloc instance, instantiate this with a
   *  non-zero __inst.  (A usage sketch follows the class definition
   *  below.)
   *
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<bool __threads, int __inst>
    class __pool_alloc
    {
    private:
      enum {_S_align = 8};
      enum {_S_max_bytes = 128};
      enum {_S_freelists = _S_max_bytes / _S_align};

      union _Obj
      {
        union _Obj* _M_free_list_link;
        char        _M_client_data[1];    // The client sees this.
      };

      static _Obj* volatile         _S_free_list[_S_freelists];

      // Chunk allocation state.
      static char*                  _S_start_free;
      static char*                  _S_end_free;
      static size_t                 _S_heap_size;

      static _STL_mutex_lock        _S_lock;
      static _Atomic_word           _S_force_new;

      // Round __bytes up to a multiple of _S_align.
      static size_t
      _S_round_up(size_t __bytes)
      { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }

      // Map a request size onto the index of the free list serving it.
      static size_t
      _S_freelist_index(size_t __bytes)
      { return ((__bytes + (size_t)_S_align - 1) / (size_t)_S_align - 1); }
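
      // Worked example (an illustration, not from the original sources):
      // with _S_align == 8, _S_round_up(13) == 16 and
      // _S_freelist_index(13) == 1, so a 13-byte request is served from
      // free list 1, which holds 16-byte objects.  The largest list,
      // index _S_freelists - 1 == 15, holds 128-byte objects.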

      // Returns an object of size __n, and optionally adds to size __n's
      // free list.
      static void*
      _S_refill(size_t __n);

      // Allocates a chunk for __nobjs objects of size __n.  __nobjs may
      // be reduced if it is inconvenient to allocate the requested number.
      static char*
      _S_chunk_alloc(size_t __n, int& __nobjs);

      // It would be nice to use _STL_auto_lock here.  But we need a
      // test whether threads are in use.  This acquires the lock only
      // when __threads is true and releases it on scope exit, RAII-style.
      struct _Lock
      {
        _Lock() { if (__threads) _S_lock._M_acquire_lock(); }
        ~_Lock() { if (__threads) _S_lock._M_release_lock(); }
      } __attribute__ ((__unused__));
      friend struct _Lock;

    public:
      // __n must be > 0.
      static void*
      allocate(size_t __n);

      // __p may not be 0.
      static void
      deallocate(void* __p, size_t __n);
    };
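
  // A minimal usage sketch (an illustration, not part of the original
  // header).  Small requests are carved from pooled chunks; requests
  // larger than _S_max_bytes (128) go straight to operator new via
  // __new_alloc.  The typedef name _Pool is hypothetical:
  //
  //   typedef __gnu_cxx::__pool_alloc<true, 0> _Pool;
  //   void* __small = _Pool::allocate(64);    // from the 64-byte free list
  //   _Pool::deallocate(__small, 64);         // back onto the same list
  //   void* __large = _Pool::allocate(256);   // > 128 bytes: operator new
  //   _Pool::deallocate(__large, 256);        // operator delete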

  template<bool __threads, int __inst>
    inline bool
    operator==(const __pool_alloc<__threads, __inst>&,
               const __pool_alloc<__threads, __inst>&)
    { return true; }

  template<bool __threads, int __inst>
    inline bool
    operator!=(const __pool_alloc<__threads, __inst>&,
               const __pool_alloc<__threads, __inst>&)
    { return false; }

  // Allocate memory in large chunks in order to avoid fragmenting the
  // heap too much.  Assume that __n is properly aligned.  We hold
  // the allocation lock.
  template<bool __threads, int __inst>
    char*
    __pool_alloc<__threads, __inst>::_S_chunk_alloc(size_t __n, int& __nobjs)
    {
      char* __result;
      size_t __total_bytes = __n * __nobjs;
      size_t __bytes_left = _S_end_free - _S_start_free;

      if (__bytes_left >= __total_bytes)
        {
          // The current chunk can satisfy the whole request.
          __result = _S_start_free;
          _S_start_free += __total_bytes;
          return __result;
        }
      else if (__bytes_left >= __n)
        {
          // Room for at least one object: hand out as many as fit.
          __nobjs = (int)(__bytes_left / __n);
          __total_bytes = __n * __nobjs;
          __result = _S_start_free;
          _S_start_free += __total_bytes;
          return __result;
        }
      else
        {
          size_t __bytes_to_get =
            2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
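
          // Worked example (an illustration, not from the original
          // sources): with __n == 64 and __nobjs == 20, __total_bytes is
          // 1280; if 4096 bytes have been taken from the heap so far,
          // __bytes_to_get is 2 * 1280 + _S_round_up(4096 >> 4)
          // = 2560 + 256 = 2816 bytes.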

          // Try to make use of the left-over piece.
          if (__bytes_left > 0)
            {
              _Obj* volatile* __free_list =
                _S_free_list + _S_freelist_index(__bytes_left);

              ((_Obj*)(void*)_S_start_free)->_M_free_list_link = *__free_list;
              *__free_list = (_Obj*)(void*)_S_start_free;
            }
          _S_start_free = (char*) __new_alloc::allocate(__bytes_to_get);
          if (_S_start_free == 0)
            {
              size_t __i;
              _Obj* volatile* __free_list;
              _Obj* __p;

              // Try to make do with what we have.  That can't hurt.  We
              // do not try smaller requests, since that tends to result
              // in disaster on multi-process machines.
              __i = __n;
              for (; __i <= (size_t) _S_max_bytes; __i += (size_t) _S_align)
                {
                  __free_list = _S_free_list + _S_freelist_index(__i);
                  __p = *__free_list;
                  if (__p != 0)
                    {
                      *__free_list = __p->_M_free_list_link;
                      _S_start_free = (char*)__p;
                      _S_end_free = _S_start_free + __i;
                      return _S_chunk_alloc(__n, __nobjs);
                      // Any leftover piece will eventually make it to the
                      // right free list.
                    }
                }
              _S_end_free = 0;        // In case of exception.
              _S_start_free = (char*)__new_alloc::allocate(__bytes_to_get);
              // This should either throw an exception or remedy the
              // situation.  Thus we assume it succeeded.
            }
          _S_heap_size += __bytes_to_get;
          _S_end_free = _S_start_free + __bytes_to_get;
          return _S_chunk_alloc(__n, __nobjs);
        }
    }

  // Returns an object of size __n, and optionally adds to size __n's
  // free list.  We assume that __n is properly aligned.  We hold the
  // allocation lock.
  template<bool __threads, int __inst>
    void*
    __pool_alloc<__threads, __inst>::_S_refill(size_t __n)
    {
      int __nobjs = 20;
      char* __chunk = _S_chunk_alloc(__n, __nobjs);
      _Obj* volatile* __free_list;
      _Obj* __result;
      _Obj* __current_obj;
      _Obj* __next_obj;
      int __i;

      if (1 == __nobjs)
        return __chunk;
      __free_list = _S_free_list + _S_freelist_index(__n);

      // Build free list in chunk.
      __result = (_Obj*)(void*)__chunk;
      *__free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
      for (__i = 1; ; __i++)
        {
          __current_obj = __next_obj;
          __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
          if (__nobjs - 1 == __i)
            {
              __current_obj->_M_free_list_link = 0;
              break;
            }
          else
            __current_obj->_M_free_list_link = __next_obj;
        }
      return __result;
    }
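
  // Illustration (not from the original sources): a call to
  // _S_refill(64) typically obtains a 20-object chunk.  The first
  // 64-byte object is returned to the caller; the other 19 are threaded
  // onto _S_free_list[_S_freelist_index(64)], each _M_free_list_link
  // pointing 64 bytes further into the chunk, the last one set to 0.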

  template<bool __threads, int __inst>
    void*
    __pool_alloc<__threads, __inst>::allocate(size_t __n)
    {
      void* __ret = 0;

      // If there is a race through here, assume answer from getenv
      // will resolve in same direction.  Inspired by techniques
      // to efficiently support threading found in basic_string.h.
      if (_S_force_new == 0)
        {
          if (getenv("GLIBCXX_FORCE_NEW"))
            __atomic_add(&_S_force_new, 1);
          else
            __atomic_add(&_S_force_new, -1);
        }

      if ((__n > (size_t) _S_max_bytes) || (_S_force_new > 0))
        __ret = __new_alloc::allocate(__n);
      else
        {
          _Obj* volatile* __free_list = _S_free_list
            + _S_freelist_index(__n);
          // Acquire the lock here with a constructor call.  This
          // ensures that it is released in exit or during stack
          // unwinding.
          _Lock __lock_instance;
          _Obj* __restrict__ __result = *__free_list;
          if (__builtin_expect(__result == 0, 0))
            __ret = _S_refill(_S_round_up(__n));
          else
            {
              *__free_list = __result->_M_free_list_link;
              __ret = __result;
            }
          if (__builtin_expect(__ret == 0, 0))
            __throw_bad_alloc();
        }
      return __ret;
    }
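
  // Note (an illustration, not from the original sources): setting the
  // environment variable before the first allocation, e.g.
  //   GLIBCXX_FORCE_NEW=1 ./a.out
  // latches _S_force_new to +1 on the first allocate() call, so every
  // subsequent request bypasses the pool and goes through __new_alloc;
  // otherwise it latches to -1 and pooling stays enabled.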

  template<bool __threads, int __inst>
    void
    __pool_alloc<__threads, __inst>::deallocate(void* __p, size_t __n)
    {
      if ((__n > (size_t) _S_max_bytes) || (_S_force_new > 0))
        __new_alloc::deallocate(__p, __n);
      else
        {
          _Obj* volatile* __free_list = _S_free_list
            + _S_freelist_index(__n);
          _Obj* __q = (_Obj*)__p;

          // Acquire the lock here with a constructor call.  This
          // ensures that it is released in exit or during stack
          // unwinding.
          _Lock __lock_instance;
          __q->_M_free_list_link = *__free_list;
          *__free_list = __q;
        }
    }

  template<bool __threads, int __inst>
    typename __pool_alloc<__threads, __inst>::_Obj* volatile
    __pool_alloc<__threads, __inst>::_S_free_list[_S_freelists];

  template<bool __threads, int __inst>
    char* __pool_alloc<__threads, __inst>::_S_start_free = 0;

  template<bool __threads, int __inst>
    char* __pool_alloc<__threads, __inst>::_S_end_free = 0;

  template<bool __threads, int __inst>
    size_t __pool_alloc<__threads, __inst>::_S_heap_size = 0;

  template<bool __threads, int __inst>
    _STL_mutex_lock
    __pool_alloc<__threads, __inst>::_S_lock __STL_MUTEX_INITIALIZER;

  template<bool __threads, int __inst> _Atomic_word
  __pool_alloc<__threads, __inst>::_S_force_new = 0;
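
  // Note (an illustration, not from the original sources): each distinct
  // <__threads, __inst> instantiation gets its own copy of these statics,
  // i.e. its own free lists and heap bookkeeping.  For example,
  // __pool_alloc<true, 1> shares nothing with the default
  // __pool_alloc<true, 0>, which is what the __inst parameter is for.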

  // Inhibit implicit instantiations for required instantiations,
  // which are defined via explicit instantiations elsewhere.
  // NB: This syntax is a GNU extension.
#if _GLIBCXX_EXTERN_TEMPLATE
  extern template class __pool_alloc<true, 0>;
#endif
} // namespace __gnu_cxx

namespace std
{
  /// Versions for the predefined "SGI" style allocators.
  template<typename _Tp, bool __thr, int __inst>
    struct _Alloc_traits<_Tp, __gnu_cxx::__pool_alloc<__thr, __inst> >
    {
      static const bool _S_instanceless = true;
      typedef __gnu_cxx::__pool_alloc<__thr, __inst>  base_alloc_type;
      typedef __simple_alloc<_Tp, base_alloc_type>    _Alloc_type;
      typedef __allocator<_Tp, base_alloc_type>       allocator_type;
    };
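
  // Usage sketch (an illustration only): with this specialization in
  // place, a container can be built directly on the pool, e.g.
  //   std::vector<int, __gnu_cxx::__pool_alloc<true, 0> > __v;
  // _Alloc_traits then supplies the __simple_alloc and __allocator
  // adaptors that give the instanceless pool a standard allocator
  // interface.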

  /// Versions for the __allocator adaptor used with the predefined
  /// "SGI" style allocators.
  template<typename _Tp, typename _Tp1, bool __thr, int __inst>
    struct _Alloc_traits<_Tp, __allocator<_Tp1,
                         __gnu_cxx::__pool_alloc<__thr, __inst> > >
    {
      static const bool _S_instanceless = true;
      typedef __gnu_cxx::__pool_alloc<__thr, __inst>  base_alloc_type;
      typedef __simple_alloc<_Tp, base_alloc_type>    _Alloc_type;
      typedef __allocator<_Tp, base_alloc_type>       allocator_type;
    };
} // namespace std

#endif