1 /* Functions to support a pool of allocatable objects
2 Copyright (C) 1997-2015 Free Software Foundation, Inc.
3 Contributed by Daniel Berlin <dan@cgsoftware.com>
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
25 extern void dump_alloc_pool_statistics (void);
27 typedef unsigned long ALLOC_POOL_ID_TYPE
;
29 /* Type based memory pool allocator. */
34 /* Default constructor for pool allocator called NAME. Each block
35 has NUM elements. The allocator support EXTRA_SIZE and can
36 potentially IGNORE_TYPE_SIZE. */
37 pool_allocator (const char *name
, size_t num
, size_t extra_size
= 0,
38 bool ignore_type_size
= false);
41 void release_if_empty ();
42 T
*allocate () ATTRIBUTE_MALLOC
;
43 void remove (T
*object
);
/* Linked-list node threaded both through the chain of allocated blocks
   and through the chain of returned (free) elements of a pool.  */
struct allocation_pool_list
{
  allocation_pool_list *next;
};
51 /* Initialize a pool allocator. */
55 struct allocation_object
57 /* The ID of alloc pool which the object was allocated from. */
58 ALLOC_POOL_ID_TYPE id
;
62 /* The data of the object. */
65 /* Because we want any type of data to be well aligned after the ID,
66 the following elements are here. They are never accessed so
67 the allocated object may be even smaller than this structure.
68 We do not care about alignment for floating-point types. */
73 static inline allocation_object
<U
> *get_instance (void *data_ptr
)
75 return (allocation_object
<U
> *)(((char *)(data_ptr
))
76 - offsetof (allocation_object
<U
>,
80 static inline U
*get_data (void *instance_ptr
)
82 return (U
*)(((allocation_object
<U
> *) instance_ptr
)->u
.data
);
/* Round X up to the next multiple of eight bytes.  */
size_t align_eight (size_t x)
{
  /* Adding 7 and clearing the low three bits rounds up to a
     multiple of 8.  */
  return (x + 7) & ~(size_t) 7;
}
93 ALLOC_POOL_ID_TYPE m_id
;
94 size_t m_elts_per_block
;
96 /* These are the elements that have been allocated at least once
98 allocation_pool_list
*m_returned_free_list
;
100 /* These are the elements that have not yet been allocated out of
101 the last block obtained from XNEWVEC. */
102 char* m_virgin_free_list
;
104 /* The number of elements in the virgin_free_list that can be
105 allocated before needing another block. */
106 size_t m_virgin_elts_remaining
;
107 /* The number of elements that are allocated. */
108 size_t m_elts_allocated
;
109 /* The number of elements that are released. */
111 /* The number of allocated blocks. */
112 size_t m_blocks_allocated
;
113 /* List of blocks that are used to allocate new objects. */
114 allocation_pool_list
*m_block_list
;
115 /* The number of elements in a block. */
117 /* Size of a pool element in bytes. */
119 /* Flag whether we should ignore the size of a type. */
120 bool m_ignore_type_size
;
121 /* Extra size in bytes that should be allocated for each element. */
123 /* Flag if a pool allocator is initialized. */
128 extern ALLOC_POOL_ID_TYPE last_id
;
130 /* Store information about each particular alloc_pool. Note that this
131 will underestimate the amount of storage used by a small amount:
132 1) The overhead in a pool is not accounted for.
133 2) The unallocated elements in a block are not accounted for. Note
134 that this can at worst case be one element smaller than the block
135 size for that pool. */
/* Per-pool statistics record, keyed by pool name.  */
struct alloc_pool_descriptor
{
  /* Number of pools allocated.  */
  unsigned long created;
  /* Gross allocated storage.  */
  unsigned long allocated;
  /* Amount of currently active storage.  */
  unsigned long current;
  /* Peak amount of storage used.  */
  unsigned long peak;
  /* Size of element in the pool.  */
  int elt_size;
};
151 /* Hashtable mapping alloc_pool names to descriptors. */
152 extern hash_map
<const char *, alloc_pool_descriptor
> *alloc_pool_hash
;
154 /* For given name, return descriptor, create new if needed. */
155 alloc_pool_descriptor
*
156 allocate_pool_descriptor (const char *name
);
/* Construct a pool allocator named NAME that will hand out objects of
   type T in blocks of NUM elements.  EXTRA_SIZE bytes are reserved on
   top of each element; when IGNORE_TYPE_SIZE is true, sizeof (T) is not
   counted into the element size (the caller sizes elements through
   EXTRA_SIZE alone).  All real work — block sizing and first block
   allocation — is deferred to initialize (), so constructing a pool
   is cheap.  */
template <typename T>
pool_allocator<T>::pool_allocator (const char *name, size_t num,
				   size_t extra_size, bool ignore_type_size):
  m_name (name), m_elts_per_block (num), m_returned_free_list (NULL),
  m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
  m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL),
  m_ignore_type_size (ignore_type_size), m_extra_size (extra_size),
  m_initialized (false) {}
168 /* Initialize a pool allocator. */
170 template <typename T
>
172 pool_allocator
<T
>::initialize ()
174 gcc_checking_assert (!m_initialized
);
175 m_initialized
= true;
178 size_t size
= (m_ignore_type_size
? 0 : sizeof (T
)) + m_extra_size
;
180 gcc_checking_assert (m_name
);
182 /* Make size large enough to store the list header. */
183 if (size
< sizeof (allocation_pool_list
*))
184 size
= sizeof (allocation_pool_list
*);
186 /* Now align the size to a multiple of 4. */
187 size
= align_eight (size
);
189 /* Add the aligned size of ID. */
190 size
+= offsetof (allocation_object
<T
>, u
.data
);
192 /* Um, we can't really allocate 0 elements per block. */
193 gcc_checking_assert (m_elts_per_block
);
197 if (GATHER_STATISTICS
)
199 alloc_pool_descriptor
*desc
= allocate_pool_descriptor (m_name
);
200 desc
->elt_size
= size
;
204 /* List header size should be a multiple of 8. */
205 header_size
= align_eight (sizeof (allocation_pool_list
));
207 m_block_size
= (size
* m_elts_per_block
) + header_size
;
209 #ifdef ENABLE_CHECKING
210 /* Increase the last used ID and use it for this pool.
211 ID == 0 is used for free elements of pool so skip it. */
221 /* Free all memory allocated for the given memory pool. */
222 template <typename T
>
224 pool_allocator
<T
>::release ()
229 allocation_pool_list
*block
, *next_block
;
231 /* Free each block allocated to the pool. */
232 for (block
= m_block_list
; block
!= NULL
; block
= next_block
)
234 next_block
= block
->next
;
238 if (GATHER_STATISTICS
&& false)
240 alloc_pool_descriptor
*desc
= allocate_pool_descriptor (m_name
);
241 desc
->current
-= (m_elts_allocated
- m_elts_free
) * m_elt_size
;
244 m_returned_free_list
= NULL
;
245 m_virgin_free_list
= NULL
;
246 m_virgin_elts_remaining
= 0;
247 m_elts_allocated
= 0;
249 m_blocks_allocated
= 0;
253 template <typename T
>
255 inline pool_allocator
<T
>::release_if_empty ()
257 if (m_elts_free
== m_elts_allocated
)
261 template <typename T
>
262 inline pool_allocator
<T
>::~pool_allocator ()
267 /* Allocates one element from the pool specified. */
268 template <typename T
>
270 pool_allocator
<T
>::allocate ()
275 allocation_pool_list
*header
;
276 #ifdef ENABLE_VALGRIND_ANNOTATIONS
280 if (GATHER_STATISTICS
)
282 alloc_pool_descriptor
*desc
= allocate_pool_descriptor (m_name
);
284 desc
->allocated
+= m_elt_size
;
285 desc
->current
+= m_elt_size
;
286 if (desc
->peak
< desc
->current
)
287 desc
->peak
= desc
->current
;
290 #ifdef ENABLE_VALGRIND_ANNOTATIONS
291 size
= m_elt_size
- offsetof (allocation_object
<T
>, u
.data
);
294 /* If there are no more free elements, make some more!. */
295 if (!m_returned_free_list
)
298 if (!m_virgin_elts_remaining
)
300 allocation_pool_list
*block_header
;
302 /* Make the block. */
303 block
= XNEWVEC (char, m_block_size
);
304 block_header
= (allocation_pool_list
*) block
;
305 block
+= align_eight (sizeof (allocation_pool_list
));
307 /* Throw it on the block list. */
308 block_header
->next
= m_block_list
;
309 m_block_list
= block_header
;
311 /* Make the block available for allocation. */
312 m_virgin_free_list
= block
;
313 m_virgin_elts_remaining
= m_elts_per_block
;
315 /* Also update the number of elements we have free/allocated, and
316 increment the allocated block count. */
317 m_elts_allocated
+= m_elts_per_block
;
318 m_elts_free
+= m_elts_per_block
;
319 m_blocks_allocated
+= 1;
322 /* We now know that we can take the first elt off the virgin list and
323 put it on the returned list. */
324 block
= m_virgin_free_list
;
325 header
= (allocation_pool_list
*) allocation_object
<T
>::get_data (block
);
327 #ifdef ENABLE_CHECKING
328 /* Mark the element to be free. */
329 ((allocation_object
<T
> *) block
)->id
= 0;
331 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header
,size
));
332 m_returned_free_list
= header
;
333 m_virgin_free_list
+= m_elt_size
;
334 m_virgin_elts_remaining
--;
338 /* Pull the first free element from the free list, and return it. */
339 header
= m_returned_free_list
;
340 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (header
, sizeof (*header
)));
341 m_returned_free_list
= header
->next
;
344 #ifdef ENABLE_CHECKING
345 /* Set the ID for element. */
346 allocation_object
<T
>::get_instance (header
)->id
= m_id
;
348 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header
, size
));
350 /* Call default constructor. */
351 return (T
*)(header
);
354 /* Puts PTR back on POOL's free list. */
355 template <typename T
>
357 pool_allocator
<T
>::remove (T
*object
)
359 gcc_checking_assert (m_initialized
);
361 allocation_pool_list
*header
;
362 int size ATTRIBUTE_UNUSED
;
363 size
= m_elt_size
- offsetof (allocation_object
<T
>, u
.data
);
365 #ifdef ENABLE_CHECKING
367 /* Check if we free more than we allocated, which is Bad (TM). */
368 && m_elts_free
< m_elts_allocated
369 /* Check whether the PTR was allocated from POOL. */
370 && m_id
== allocation_object
<T
>::get_instance (object
)->id
);
372 memset (object
, 0xaf, size
);
374 /* Mark the element to be free. */
375 allocation_object
<T
>::get_instance (object
)->id
= 0;
378 header
= (allocation_pool_list
*) object
;
379 header
->next
= m_returned_free_list
;
380 m_returned_free_list
= header
;
381 VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object
, size
));
384 if (GATHER_STATISTICS
)
386 alloc_pool_descriptor
*desc
= allocate_pool_descriptor (m_name
);
387 desc
->current
-= m_elt_size
;