Port pool-allocator memory stats to a new infrastructure.

gcc/alloc-pool.h

/* Functions to support a pool of allocatable objects
   Copyright (C) 1997-2015 Free Software Foundation, Inc.
   Contributed by Daniel Berlin <dan@cgsoftware.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#ifndef ALLOC_POOL_H
#define ALLOC_POOL_H

#include "hash-map.h"

extern void dump_alloc_pool_statistics (void);

typedef unsigned long ALLOC_POOL_ID_TYPE;

/* Pool allocator memory usage.  */
struct pool_usage: public mem_usage
{
  /* Default constructor.  */
  pool_usage (): m_element_size (0), m_pool_name ("") {}
  /* Constructor.  */
  pool_usage (size_t allocated, size_t times, size_t peak,
	      size_t instances, size_t element_size,
	      const char *pool_name)
    : mem_usage (allocated, times, peak, instances),
      m_element_size (element_size),
      m_pool_name (pool_name) {}

  /* Sum the usage with SECOND usage.  */
  pool_usage operator+ (const pool_usage &second)
  {
    return pool_usage (m_allocated + second.m_allocated,
		       m_times + second.m_times,
		       m_peak + second.m_peak,
		       m_instances + second.m_instances,
		       m_element_size, m_pool_name);
  }

  /* Dump usage coupled to LOC location, where TOTAL is sum of all rows.  */
  inline void dump (mem_location *loc, mem_usage &total) const
  {
    char *location_string = loc->to_string ();

    fprintf (stderr, "%-32s%-48s %6li%10li:%5.1f%%%10li%10li:%5.1f%%%12li\n",
	     m_pool_name, location_string, (long)m_instances,
	     (long)m_allocated, get_percent (m_allocated, total.m_allocated),
	     (long)m_peak, (long)m_times,
	     get_percent (m_times, total.m_times),
	     (long)m_element_size);

    free (location_string);
  }

  /* Dump header with NAME.  */
  static inline void dump_header (const char *name)
  {
    fprintf (stderr, "%-32s%-48s %6s%11s%16s%17s%12s\n", "Pool name", name,
	     "Pools", "Leak", "Peak", "Times", "Elt size");
    print_dash_line ();
  }

  /* Dump footer.  */
  inline void dump_footer ()
  {
    print_dash_line ();
    fprintf (stderr, "%s%75li%10li\n", "Total", (long)m_instances,
	     (long)m_allocated);
    print_dash_line ();
  }

  /* Element size.  */
  size_t m_element_size;
  /* Pool name.  */
  const char *m_pool_name;
};

extern mem_alloc_description<pool_usage> pool_allocator_usage;

/* Type based memory pool allocator.  */
template <typename T>
class pool_allocator
{
public:
  /* Default constructor for pool allocator called NAME.  Each block
     has NUM elements.  The allocator supports EXTRA_SIZE and can
     potentially IGNORE_TYPE_SIZE.  */
  pool_allocator (const char *name, size_t num, size_t extra_size = 0,
		  bool ignore_type_size = false CXX_MEM_STAT_INFO);
  ~pool_allocator ();
  void release ();
  void release_if_empty ();
  T *allocate () ATTRIBUTE_MALLOC;
  void remove (T *object);

private:
  struct allocation_pool_list
  {
    allocation_pool_list *next;
  };

  /* Initialize a pool allocator.  */
  void initialize ();

  template <typename U>
  struct allocation_object
  {
    /* The ID of alloc pool which the object was allocated from.  */
    ALLOC_POOL_ID_TYPE id;

    union
    {
      /* The data of the object.  */
      char data[1];

      /* Because we want any type of data to be well aligned after the ID,
	 the following elements are here.  They are never accessed so
	 the allocated object may be even smaller than this structure.
	 We do not care about alignment for floating-point types.  */
      char *align_p;
      int64_t align_i;
    } u;

    static inline allocation_object<U> *get_instance (void *data_ptr)
    {
      return (allocation_object<U> *)(((char *)(data_ptr))
				      - offsetof (allocation_object<U>,
						  u.data));
    }

    static inline U *get_data (void *instance_ptr)
    {
      return (U*)(((allocation_object<U> *) instance_ptr)->u.data);
    }
  };
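
  /* A sketch of the resulting element layout (assuming a typical 64-bit
     host where ALLOC_POOL_ID_TYPE and the alignment union are both
     8 bytes wide):

	offset 0 : id       -- owning pool ID, or 0 while the element
			       is on a free list (with ENABLE_CHECKING)
	offset 8 : u.data   -- the object payload, 8-byte aligned

     get_instance and get_data merely convert between a pointer to the
     payload and a pointer to its enclosing allocation_object.  */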

  /* Align X to 8.  */
  size_t align_eight (size_t x)
  {
    return (((x+7) >> 3) << 3);
  }
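
  /* For example, align_eight (1) == 8, align_eight (13) == 16 and
     align_eight (16) == 16.  */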

  const char *m_name;
  ALLOC_POOL_ID_TYPE m_id;
  size_t m_elts_per_block;

  /* These are the elements that have been allocated at least once
     and freed.  */
  allocation_pool_list *m_returned_free_list;

  /* These are the elements that have not yet been allocated out of
     the last block obtained from XNEWVEC.  */
  char* m_virgin_free_list;

  /* The number of elements in the virgin_free_list that can be
     allocated before needing another block.  */
  size_t m_virgin_elts_remaining;
  /* The number of elements that are allocated.  */
  size_t m_elts_allocated;
  /* The number of elements that are released.  */
  size_t m_elts_free;
  /* The number of allocated blocks.  */
  size_t m_blocks_allocated;
  /* List of blocks that are used to allocate new objects.  */
  allocation_pool_list *m_block_list;
  /* The number of elements in a block.  */
  size_t m_block_size;
  /* Size of a pool element in bytes.  */
  size_t m_elt_size;
  /* Flag if we should ignore the size of the type.  */
  bool m_ignore_type_size;
  /* Extra size in bytes that should be allocated for each element.  */
  size_t m_extra_size;
  /* Flag if a pool allocator is initialized.  */
  bool m_initialized;
  /* Memory allocation location.  */
  mem_location m_location;
};
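
/* A minimal usage sketch; the element type and pool names below are
   hypothetical and not part of this interface:

     struct my_node { int depth; my_node *parent; };
     static pool_allocator<my_node> node_pool ("my_node pool", 64);

     my_node *n = node_pool.allocate ();   // raw, aligned storage
     ...
     node_pool.remove (n);                 // back onto the free list
     node_pool.release ();                 // free every block at once

   Note that allocate () hands back raw storage and remove () simply
   recycles it; neither runs T's constructor or destructor, so callers
   construct and destroy objects in the returned storage themselves.  */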

/* Last used ID.  */
extern ALLOC_POOL_ID_TYPE last_id;

/* Store information about each particular alloc_pool.  Note that this
   will underestimate the amount of storage used by a small amount:
   1) The overhead in a pool is not accounted for.
   2) The unallocated elements in a block are not accounted for.  Note
      that this can at worst case be one element smaller than the block
      size for that pool.  */
struct alloc_pool_descriptor
{
  /* Number of pools allocated.  */
  unsigned long created;
  /* Gross allocated storage.  */
  unsigned long allocated;
  /* Amount of currently active storage.  */
  unsigned long current;
  /* Peak amount of storage used.  */
  unsigned long peak;
  /* Size of element in the pool.  */
  int elt_size;
};

/* Hashtable mapping alloc_pool names to descriptors.  */
extern hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;

template <typename T>
inline
pool_allocator<T>::pool_allocator (const char *name, size_t num,
				   size_t extra_size, bool ignore_type_size
				   MEM_STAT_DECL):
  m_name (name), m_id (0), m_elts_per_block (num), m_returned_free_list (NULL),
  m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
  m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL),
  m_block_size (0), m_ignore_type_size (ignore_type_size),
  m_extra_size (extra_size), m_initialized (false),
  m_location (ALLOC_POOL, false PASS_MEM_STAT) {}

/* Initialize a pool allocator.  */

template <typename T>
void
pool_allocator<T>::initialize ()
{
  gcc_checking_assert (!m_initialized);
  m_initialized = true;

  size_t header_size;
  size_t size = (m_ignore_type_size ? 0 : sizeof (T)) + m_extra_size;

  gcc_checking_assert (m_name);

  /* Make size large enough to store the list header.  */
  if (size < sizeof (allocation_pool_list*))
    size = sizeof (allocation_pool_list*);

  /* Now align the size to a multiple of 8.  */
  size = align_eight (size);

  /* Add the aligned size of ID.  */
  size += offsetof (allocation_object<T>, u.data);

  /* Um, we can't really allocate 0 elements per block.  */
  gcc_checking_assert (m_elts_per_block);

  m_elt_size = size;

  if (GATHER_STATISTICS)
    {
      pool_usage *u = pool_allocator_usage.register_descriptor
	(this, new mem_location (m_location));

      u->m_element_size = m_elt_size;
      u->m_pool_name = m_name;
    }

  /* List header size should be a multiple of 8.  */
  header_size = align_eight (sizeof (allocation_pool_list));

  m_block_size = (size * m_elts_per_block) + header_size;

#ifdef ENABLE_CHECKING
  /* Increase the last used ID and use it for this pool.
     ID == 0 is used for free elements of pool so skip it.  */
  last_id++;
  if (last_id == 0)
    last_id++;

  m_id = last_id;
#endif
}
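
/* As a worked example (assuming a 64-bit host with 8-byte pointers and an
   8-byte ALLOC_POOL_ID_TYPE): for a 20-byte type T with no extra size,
   size starts at 20, is rounded up to 24 by align_eight, and gains 8
   bytes for the ID header, so m_elt_size is 32.  With 16 elements per
   block, m_block_size is 32 * 16 + 8 = 520 bytes.  */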

/* Free all memory allocated for the given memory pool.  */
template <typename T>
inline void
pool_allocator<T>::release ()
{
  if (!m_initialized)
    return;

  allocation_pool_list *block, *next_block;

  /* Free each block allocated to the pool.  */
  for (block = m_block_list; block != NULL; block = next_block)
    {
      next_block = block->next;
      free (block);
    }

  if (GATHER_STATISTICS)
    {
      pool_allocator_usage.release_instance_overhead
	(this, (m_elts_allocated - m_elts_free) * m_elt_size);
    }

  m_returned_free_list = NULL;
  m_virgin_free_list = NULL;
  m_virgin_elts_remaining = 0;
  m_elts_allocated = 0;
  m_elts_free = 0;
  m_blocks_allocated = 0;
  m_block_list = NULL;
}

template <typename T>
void
inline pool_allocator<T>::release_if_empty ()
{
  if (m_elts_free == m_elts_allocated)
    release ();
}

template <typename T>
inline pool_allocator<T>::~pool_allocator ()
{
  release ();
}

/* Allocates one element from the pool specified.  */
template <typename T>
inline T *
pool_allocator<T>::allocate ()
{
  if (!m_initialized)
    initialize ();

  allocation_pool_list *header;
#ifdef ENABLE_VALGRIND_ANNOTATIONS
  int size;
#endif

  if (GATHER_STATISTICS)
    {
      pool_allocator_usage.register_instance_overhead (m_elt_size, this);
    }

#ifdef ENABLE_VALGRIND_ANNOTATIONS
  size = m_elt_size - offsetof (allocation_object<T>, u.data);
#endif

  /* If there are no more free elements, make some more!  */
  if (!m_returned_free_list)
    {
      char *block;
      if (!m_virgin_elts_remaining)
	{
	  allocation_pool_list *block_header;

	  /* Make the block.  */
	  block = XNEWVEC (char, m_block_size);
	  block_header = (allocation_pool_list*) block;
	  block += align_eight (sizeof (allocation_pool_list));

	  /* Throw it on the block list.  */
	  block_header->next = m_block_list;
	  m_block_list = block_header;

	  /* Make the block available for allocation.  */
	  m_virgin_free_list = block;
	  m_virgin_elts_remaining = m_elts_per_block;

	  /* Also update the number of elements we have free/allocated, and
	     increment the allocated block count.  */
	  m_elts_allocated += m_elts_per_block;
	  m_elts_free += m_elts_per_block;
	  m_blocks_allocated += 1;
	}

      /* We now know that we can take the first elt off the virgin list and
	 put it on the returned list.  */
      block = m_virgin_free_list;
      header = (allocation_pool_list*) allocation_object<T>::get_data (block);
      header->next = NULL;
#ifdef ENABLE_CHECKING
      /* Mark the element to be free.  */
      ((allocation_object<T> *) block)->id = 0;
#endif
      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header,size));
      m_returned_free_list = header;
      m_virgin_free_list += m_elt_size;
      m_virgin_elts_remaining--;
    }

  /* Pull the first free element from the free list, and return it.  */
  header = m_returned_free_list;
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (header, sizeof (*header)));
  m_returned_free_list = header->next;
  m_elts_free--;

#ifdef ENABLE_CHECKING
  /* Set the ID for element.  */
  allocation_object<T>::get_instance (header)->id = m_id;
#endif
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header, size));

  /* Return the element; its constructor is not run here, so the caller
     constructs the object in the returned storage.  */
  return (T *)(header);
}
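
/* In short, allocate () first recycles an element from
   m_returned_free_list; if that list is empty it carves the next element
   off the current virgin block, and only when the virgin block is
   exhausted does it XNEWVEC a fresh block of m_block_size bytes.  */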

/* Puts OBJECT back on the pool's free list.  */
template <typename T>
void
pool_allocator<T>::remove (T *object)
{
  gcc_checking_assert (m_initialized);

  allocation_pool_list *header;
  int size ATTRIBUTE_UNUSED;
  size = m_elt_size - offsetof (allocation_object<T>, u.data);

#ifdef ENABLE_CHECKING
  gcc_assert (object
	      /* Check if we free more than we allocated, which is Bad (TM).  */
	      && m_elts_free < m_elts_allocated
	      /* Check whether OBJECT was allocated from this pool.  */
	      && m_id == allocation_object<T>::get_instance (object)->id);

  memset (object, 0xaf, size);

  /* Mark the element to be free.  */
  allocation_object<T>::get_instance (object)->id = 0;
#endif

  header = (allocation_pool_list*) object;
  header->next = m_returned_free_list;
  m_returned_free_list = header;
  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
  m_elts_free++;

  if (GATHER_STATISTICS)
    {
      pool_allocator_usage.release_instance_overhead (this, m_elt_size);
    }
}

#endif