/* Functions to support a pool of allocatable objects.
   Copyright (C) 1997-2015 Free Software Foundation, Inc.
   Contributed by Daniel Berlin <dan@cgsoftware.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef ALLOC_POOL_H
#define ALLOC_POOL_H

extern void dump_alloc_pool_statistics (void);

typedef unsigned long ALLOC_POOL_ID_TYPE;

/* Pool allocator memory usage.  */
struct pool_usage: public mem_usage
{
  /* Default constructor.  */
  pool_usage (): m_element_size (0), m_pool_name ("") {}

  /* Constructor.  */
  pool_usage (size_t allocated, size_t times, size_t peak,
              size_t instances, size_t element_size,
              const char *pool_name)
    : mem_usage (allocated, times, peak, instances),
      m_element_size (element_size),
      m_pool_name (pool_name) {}

  /* Sum the usage with SECOND usage.  */
  pool_usage
  operator+ (const pool_usage &second)
  {
    return pool_usage (m_allocated + second.m_allocated,
                       m_times + second.m_times,
                       m_peak + second.m_peak,
                       m_instances + second.m_instances,
                       m_element_size, m_pool_name);
  }

  /* Dump usage coupled to LOC location, where TOTAL is sum of all rows.  */
  inline void
  dump (mem_location *loc, mem_usage &total) const
  {
    char *location_string = loc->to_string ();

    fprintf (stderr, "%-32s%-48s %6li%10li:%5.1f%%%10li%10li:%5.1f%%%12li\n",
             m_pool_name, location_string, (long)m_instances,
             (long)m_allocated, get_percent (m_allocated, total.m_allocated),
             (long)m_peak, (long)m_times,
             get_percent (m_times, total.m_times),
             (long)m_element_size);

    free (location_string);
  }

  /* Dump header with NAME.  */
  static inline void
  dump_header (const char *name)
  {
    fprintf (stderr, "%-32s%-48s %6s%11s%16s%17s%12s\n", "Pool name", name,
             "Pools", "Leak", "Peak", "Times", "Elt size");
    print_dash_line ();
  }

  /* Dump footer.  */
  inline void
  dump_footer ()
  {
    print_dash_line ();
    fprintf (stderr, "%s%82li%10li\n", "Total", (long)m_instances,
             (long)m_allocated);
    print_dash_line ();
  }

  /* Element size.  */
  size_t m_element_size;
  /* Pool name.  */
  const char *m_pool_name;
};

extern mem_alloc_description<pool_usage> pool_allocator_usage;

/* Type based memory pool allocator.  */
template <typename T>
class pool_allocator
{
public:
  /* Default constructor for a pool allocator called NAME.  Each block
     has NUM elements.  The allocator supports EXTRA_SIZE and can
     potentially IGNORE_TYPE_SIZE.  */
  pool_allocator (const char *name, size_t num, size_t extra_size = 0,
                  bool ignore_type_size = false CXX_MEM_STAT_INFO);
  ~pool_allocator ();
  void release ();
  void release_if_empty ();
  T *allocate () ATTRIBUTE_MALLOC;
  void remove (T *object);

private:
  struct allocation_pool_list
  {
    allocation_pool_list *next;
  };

  /* Initialize a pool allocator.  */
  void initialize ();

  template <typename U>
  struct allocation_object
  {
    /* The ID of alloc pool which the object was allocated from.  */
    ALLOC_POOL_ID_TYPE id;

    union
      {
        /* The data of the object.  */
        char data[1];

        /* Because we want any type of data to be well aligned after the ID,
           the following elements are here.  They are never accessed so
           the allocated object may be even smaller than this structure.
           We do not care about alignment for floating-point types.  */
        char *align_p;
        int64_t align_i;
      } u;

    static inline allocation_object<U> *
    get_instance (void *data_ptr)
    {
      return (allocation_object<U> *)(((char *)(data_ptr))
                                      - offsetof (allocation_object<U>,
                                                  u.data));
    }

    static inline U *
    get_data (void *instance_ptr)
    {
      return (U *)(((allocation_object<U> *) instance_ptr)->u.data);
    }
  };
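
  /* Layout sketch (illustrative): the pointer handed to the caller by
     allocate () is the address of u.data, so the pool ID stored in front of
     it sits immediately before the returned pointer:

       [ id | u.data ... ]
              ^-- pointer returned by allocate ()

     get_instance () undoes this mapping with offsetof arithmetic and
     get_data () applies it.  */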

  /* Align X to 8.  */
  size_t
  align_eight (size_t x)
  {
    return (((x + 7) >> 3) << 3);
  }

  const char *m_name;
  ALLOC_POOL_ID_TYPE m_id;
  size_t m_elts_per_block;

  /* These are the elements that have been allocated at least once
     and freed.  */
  allocation_pool_list *m_returned_free_list;

  /* These are the elements that have not yet been allocated out of
     the last block obtained from XNEWVEC.  */
  char* m_virgin_free_list;

  /* The number of elements in the virgin_free_list that can be
     allocated before needing another block.  */
  size_t m_virgin_elts_remaining;
  /* The number of elements that are allocated.  */
  size_t m_elts_allocated;
  /* The number of elements that are released.  */
  size_t m_elts_free;
  /* The number of allocated blocks.  */
  size_t m_blocks_allocated;
  /* List of blocks that are used to allocate new objects.  */
  allocation_pool_list *m_block_list;
  /* Size of a block in bytes.  */
  size_t m_block_size;
  /* Size of a pool element in bytes.  */
  size_t m_elt_size;
  /* Flag if we should ignore the size of the type.  */
  bool m_ignore_type_size;
  /* Extra size in bytes that should be allocated for each element.  */
  size_t m_extra_size;
  /* Flag if a pool allocator is initialized.  */
  bool m_initialized;
  /* Memory allocation location.  */
  mem_location m_location;
};
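
/* Usage sketch (illustrative only; the identifiers below are placeholders,
   not part of this interface):

     static pool_allocator<some_type> some_pool ("some_type pool", 64);

     some_type *p = some_pool.allocate ();   // raw storage, not constructed
     new (p) some_type ();                   // construct in place if needed
     ...
     p->~some_type ();                       // destroy before returning it
     some_pool.remove (p);                   // put it back on the free list

   release () or the pool's destructor frees every block at once.  */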

/* Last used ID.  */
extern ALLOC_POOL_ID_TYPE last_id;

/* Store information about each particular alloc_pool.  Note that this
   will underestimate the amount of storage used by a small amount:
   1) The overhead in a pool is not accounted for.
   2) The unallocated elements in a block are not accounted for.  Note
      that this can at worst case be one element smaller than the block
      size for that pool.  */
struct alloc_pool_descriptor
{
  /* Number of pools allocated.  */
  unsigned long created;
  /* Gross allocated storage.  */
  unsigned long allocated;
  /* Amount of currently active storage.  */
  unsigned long current;
  /* Peak amount of storage used.  */
  unsigned long peak;
  /* Size of element in the pool.  */
  int elt_size;
};

/* Hashtable mapping alloc_pool names to descriptors.  */
extern hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;

template <typename T>
inline
pool_allocator<T>::pool_allocator (const char *name, size_t num,
                                   size_t extra_size, bool ignore_type_size
                                   MEM_STAT_DECL):
  m_name (name), m_id (0), m_elts_per_block (num), m_returned_free_list (NULL),
  m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
  m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL),
  m_block_size (0), m_ignore_type_size (ignore_type_size),
  m_extra_size (extra_size), m_initialized (false),
  m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {}
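
/* Note: the constructor above only records its arguments; element size,
   block size, and the pool ID are computed lazily by initialize (), which
   runs on the first call to allocate ().  */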

/* Initialize a pool allocator.  */

template <typename T>
void
pool_allocator<T>::initialize ()
{
  gcc_checking_assert (!m_initialized);

  m_initialized = true;

  size_t header_size;
  size_t size = (m_ignore_type_size ? 0 : sizeof (T)) + m_extra_size;

  gcc_checking_assert (m_name);

  /* Make size large enough to store the list header.  */
  if (size < sizeof (allocation_pool_list*))
    size = sizeof (allocation_pool_list*);

  /* Now align the size to a multiple of 8.  */
  size = align_eight (size);

  /* Add the aligned size of ID.  */
  size += offsetof (allocation_object<T>, u.data);

  /* Um, we can't really allocate 0 elements per block.  */
  gcc_checking_assert (m_elts_per_block);

  m_elt_size = size;

  if (GATHER_STATISTICS)
    {
      pool_usage *u = pool_allocator_usage.register_descriptor
        (this, new mem_location (m_location));

      u->m_element_size = m_elt_size;
      u->m_pool_name = m_name;
    }

  /* List header size should be a multiple of 8.  */
  header_size = align_eight (sizeof (allocation_pool_list));

  m_block_size = (size * m_elts_per_block) + header_size;

#ifdef ENABLE_CHECKING
  /* Increase the last used ID and use it for this pool.
     ID == 0 is used for free elements of pool so skip it.  */
  last_id++;
  if (last_id == 0)
    last_id++;

  m_id = last_id;
#endif
}
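
/* Worked example (illustrative, assuming an LP64 host where pointers and
   ALLOC_POOL_ID_TYPE are 8 bytes): for a 20-byte T with no EXTRA_SIZE,
   size starts at 20, align_eight rounds it to 24, and adding
   offsetof (allocation_object<T>, u.data) == 8 for the ID slot gives
   m_elt_size == 32.  With NUM == 100 elements per block,
   m_block_size == 32 * 100 + 8 == 3208 bytes.  */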

/* Free all memory allocated for the given memory pool.  */
template <typename T>
inline void
pool_allocator<T>::release ()
{
  if (!m_initialized)
    return;

  allocation_pool_list *block, *next_block;

  /* Free each block allocated to the pool.  */
  for (block = m_block_list; block != NULL; block = next_block)
    {
      next_block = block->next;
      free (block);
    }

  if (GATHER_STATISTICS)
    {
      pool_allocator_usage.release_instance_overhead
        (this, (m_elts_allocated - m_elts_free) * m_elt_size);
    }

  m_returned_free_list = NULL;
  m_virgin_free_list = NULL;
  m_virgin_elts_remaining = 0;
  m_elts_allocated = 0;
  m_elts_free = 0;
  m_blocks_allocated = 0;
  m_block_list = NULL;
}

template <typename T>
inline void
pool_allocator<T>::release_if_empty ()
{
  if (m_elts_free == m_elts_allocated)
    release ();
}

template <typename T>
inline pool_allocator<T>::~pool_allocator ()
{
  release ();
}

/* Allocates one element from the pool specified.  */
template <typename T>
inline T *
pool_allocator<T>::allocate ()
{
  if (!m_initialized)
    initialize ();

  allocation_pool_list *header;
#ifdef ENABLE_VALGRIND_ANNOTATIONS
  int size;
#endif

  if (GATHER_STATISTICS)
    {
      pool_allocator_usage.register_instance_overhead (m_elt_size, this);
    }

#ifdef ENABLE_VALGRIND_ANNOTATIONS
  size = m_elt_size - offsetof (allocation_object<T>, u.data);
#endif

  /* If there are no more free elements, make some more!  */
  if (!m_returned_free_list)
    {
      char *block;
      if (!m_virgin_elts_remaining)
        {
          allocation_pool_list *block_header;

          /* Make the block.  */
          block = XNEWVEC (char, m_block_size);
          block_header = (allocation_pool_list*) block;
          block += align_eight (sizeof (allocation_pool_list));

          /* Throw it on the block list.  */
          block_header->next = m_block_list;
          m_block_list = block_header;

          /* Make the block available for allocation.  */
          m_virgin_free_list = block;
          m_virgin_elts_remaining = m_elts_per_block;

          /* Also update the number of elements we have free/allocated, and
             increment the allocated block count.  */
          m_elts_allocated += m_elts_per_block;
          m_elts_free += m_elts_per_block;
          m_blocks_allocated += 1;
        }

      /* We now know that we can take the first elt off the virgin list and
         put it on the returned list.  */
      block = m_virgin_free_list;
      header = (allocation_pool_list*) allocation_object<T>::get_data (block);
      header->next = NULL;
#ifdef ENABLE_CHECKING
      /* Mark the element to be free.  */
      ((allocation_object<T> *) block)->id = 0;
#endif
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header, size));
      m_returned_free_list = header;
      m_virgin_free_list += m_elt_size;
      m_virgin_elts_remaining--;
    }

  /* Pull the first free element from the free list, and return it.  */
  header = m_returned_free_list;
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (header, sizeof (*header)));
  m_returned_free_list = header->next;
  m_elts_free--;

#ifdef ENABLE_CHECKING
  /* Set the ID for element.  */
  allocation_object<T>::get_instance (header)->id = m_id;
#endif
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header, size));

  /* Return the raw element; no constructor is run, so the caller is
     responsible for initializing the storage.  */
  return (T *)(header);
}

/* Puts OBJECT back on the pool's free list.  */
template <typename T>
void
pool_allocator<T>::remove (T *object)
{
  gcc_checking_assert (m_initialized);

  allocation_pool_list *header;
  int size ATTRIBUTE_UNUSED;
  size = m_elt_size - offsetof (allocation_object<T>, u.data);

#ifdef ENABLE_CHECKING
  gcc_assert (object
              /* Check if we free more than we allocated, which is Bad (TM).  */
              && m_elts_free < m_elts_allocated
              /* Check whether OBJECT was allocated from this pool.  */
              && m_id == allocation_object<T>::get_instance (object)->id);

  memset (object, 0xaf, size);

  /* Mark the element to be free.  */
  allocation_object<T>::get_instance (object)->id = 0;
#endif

  header = (allocation_pool_list*) object;
  header->next = m_returned_free_list;
  m_returned_free_list = header;
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
  m_elts_free++;

  if (GATHER_STATISTICS)
    {
      pool_allocator_usage.release_instance_overhead (this, m_elt_size);
    }
}

#endif