Remove redundant dumps and make tp_first_run dump more compact.
[official-gcc.git] / libgcc / libgcov-profiler.c
blob 47c00ee4b4a105b6e8597960512cce5a3ad3dfae
/* Routines required for instrumenting a program.  */
/* Compile this one with gcc.  */
/* Copyright (C) 1989-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
#include "libgcov.h"

#if !defined(inhibit_libc)

/* Detect whether target can support atomic update of profilers.  */
#if __SIZEOF_LONG_LONG__ == 4 && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
#define GCOV_SUPPORTS_ATOMIC 1
#else
#if __SIZEOF_LONG_LONG__ == 8 && __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
#define GCOV_SUPPORTS_ATOMIC 1
#else
#define GCOV_SUPPORTS_ATOMIC 0
#endif
#endif
#ifdef L_gcov_interval_profiler
/* If VALUE is in interval <START, START + STEPS - 1>, then increases the
   corresponding counter in COUNTERS.  If the VALUE is above or below
   the interval, COUNTERS[STEPS] or COUNTERS[STEPS + 1] is increased
   instead.  */

void
__gcov_interval_profiler (gcov_type *counters, gcov_type value,
                          int start, unsigned steps)
{
  gcov_type delta = value - start;
  if (delta < 0)
    counters[steps + 1]++;
  else if (delta >= steps)
    counters[steps]++;
  else
    counters[delta]++;
}
#endif
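
/* A minimal usage sketch, not part of libgcov: the guard macro below is
   hypothetical and never defined, so the example is not compiled.  With
   start == 10 and steps == 4, COUNTERS needs steps + 2 slots; in-range
   values land in slot VALUE - START, out-of-range values in the two
   overflow slots.  */
#ifdef GCOV_PROFILER_USAGE_SKETCH
static void
example_interval_usage (void)
{
  gcov_type c[6] = { 0 };                    /* steps + 2 == 6 slots.  */
  __gcov_interval_profiler (c, 11, 10, 4);   /* in range: bumps c[1].  */
  __gcov_interval_profiler (c, 99, 10, 4);   /* above range: bumps c[4].  */
  __gcov_interval_profiler (c, 3, 10, 4);    /* below range: bumps c[5].  */
}
#endif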
#if defined(L_gcov_interval_profiler_atomic) && GCOV_SUPPORTS_ATOMIC
/* If VALUE is in interval <START, START + STEPS - 1>, then increases the
   corresponding counter in COUNTERS.  If the VALUE is above or below
   the interval, COUNTERS[STEPS] or COUNTERS[STEPS + 1] is increased
   instead.  Function is thread-safe.  */

void
__gcov_interval_profiler_atomic (gcov_type *counters, gcov_type value,
                                 int start, unsigned steps)
{
  gcov_type delta = value - start;
  if (delta < 0)
    __atomic_fetch_add (&counters[steps + 1], 1, __ATOMIC_RELAXED);
  else if (delta >= steps)
    __atomic_fetch_add (&counters[steps], 1, __ATOMIC_RELAXED);
  else
    __atomic_fetch_add (&counters[delta], 1, __ATOMIC_RELAXED);
}
#endif
#ifdef L_gcov_pow2_profiler
/* If VALUE is a power of two, COUNTERS[1] is incremented.  Otherwise
   COUNTERS[0] is incremented.  */

void
__gcov_pow2_profiler (gcov_type *counters, gcov_type value)
{
  if (value == 0 || (value & (value - 1)))
    counters[0]++;
  else
    counters[1]++;
}
#endif
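
/* Illustration only, not part of libgcov (the guard macro is hypothetical
   and never defined): VALUE & (VALUE - 1) clears the lowest set bit, so it
   is zero exactly for powers of two; zero itself is rejected explicitly.  */
#ifdef GCOV_PROFILER_USAGE_SKETCH
static void
example_pow2_usage (void)
{
  gcov_type c[2] = { 0 };
  __gcov_pow2_profiler (c, 8);   /* 8 & 7 == 0: bumps c[1].  */
  __gcov_pow2_profiler (c, 12);  /* 12 & 11 != 0: bumps c[0].  */
  __gcov_pow2_profiler (c, 0);   /* zero is not a power of two: bumps c[0].  */
}
#endif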
#if defined(L_gcov_pow2_profiler_atomic) && GCOV_SUPPORTS_ATOMIC
/* If VALUE is a power of two, COUNTERS[1] is incremented.  Otherwise
   COUNTERS[0] is incremented.  Function is thread-safe.  */

void
__gcov_pow2_profiler_atomic (gcov_type *counters, gcov_type value)
{
  if (value == 0 || (value & (value - 1)))
    __atomic_fetch_add (&counters[0], 1, __ATOMIC_RELAXED);
  else
    __atomic_fetch_add (&counters[1], 1, __ATOMIC_RELAXED);
}
#endif
/* Tries to determine the most common value among its inputs.  Checks if the
   value stored in COUNTERS[0] matches VALUE.  If this is the case, COUNTERS[1]
   is incremented.  If this is not the case and COUNTERS[1] is not zero,
   COUNTERS[1] is decremented.  Otherwise COUNTERS[1] is set to one and
   VALUE is stored to COUNTERS[0].  This algorithm guarantees that if this
   function is called more than 50% of the time with one value, this value
   will be in COUNTERS[0] in the end.

   In any case, COUNTERS[2] is incremented.  If USE_ATOMIC is set to 1,
   COUNTERS[2] is updated with an atomic instruction.  */

static inline void
__gcov_one_value_profiler_body (gcov_type *counters, gcov_type value,
                                int use_atomic)
{
  if (value == counters[0])
    counters[1]++;
  else if (counters[1] == 0)
    {
      counters[1] = 1;
      counters[0] = value;
    }
  else
    counters[1]--;

  if (use_atomic)
    __atomic_fetch_add (&counters[2], 1, __ATOMIC_RELAXED);
  else
    counters[2]++;
}
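
/* Worked trace, not part of libgcov (the guard macro is hypothetical and
   never defined).  COUNTERS[0]/COUNTERS[1] behave like a majority vote:
   a matching value strengthens the current candidate, a mismatch weakens
   it, and a candidate with zero strength is replaced.  */
#ifdef GCOV_PROFILER_USAGE_SKETCH
static void
example_one_value_trace (void)
{
  gcov_type c[3] = { 0, 0, 0 };
  __gcov_one_value_profiler_body (c, 7, 0);  /* c == { 7, 1, 1 }  */
  __gcov_one_value_profiler_body (c, 9, 0);  /* c == { 7, 0, 2 }  */
  __gcov_one_value_profiler_body (c, 9, 0);  /* c == { 9, 1, 3 }  */
  __gcov_one_value_profiler_body (c, 9, 0);  /* c == { 9, 2, 4 }  */
}
#endif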
#ifdef L_gcov_one_value_profiler
void
__gcov_one_value_profiler (gcov_type *counters, gcov_type value)
{
  __gcov_one_value_profiler_body (counters, value, 0);
}
#endif
#if defined(L_gcov_one_value_profiler_atomic) && GCOV_SUPPORTS_ATOMIC

/* Update one value profilers (COUNTERS) for a given VALUE.

   CAVEAT: The following function is not thread-safe; only the total number
   of executions (COUNTERS[2]) is updated with an atomic instruction.
   The problem is that one cannot atomically update two counters
   (COUNTERS[0] and COUNTERS[1]); for more information please read the
   following email thread:
   https://gcc.gnu.org/ml/gcc-patches/2016-08/msg00024.html.  */

void
__gcov_one_value_profiler_atomic (gcov_type *counters, gcov_type value)
{
  __gcov_one_value_profiler_body (counters, value, 1);
}
#endif
#ifdef L_gcov_indirect_call_topn_profiler
/* Tries to keep track of the most frequent N values in the counters where
   N is specified by parameter TOPN_VAL.  To track the top N values,
   2*N candidate entries are kept, each taking two counter slots.
   counter[0] --- the accumulated count of the number of times one entry in
   the counters gets evicted/replaced due to limited capacity.
   When this value reaches a threshold, the bottom N values are
   cleared.
   counter[1] through counter[4*N] record the 2*N candidate values collected
   so far.  Each value is represented by two entries: count[2*i+1] is the ith
   value, and count[2*i+2] is the number of times the value is seen.  */

static void
__gcov_topn_value_profiler_body (gcov_type *counters, gcov_type value)
{
  unsigned i, found = 0, have_zero_count = 0;
  gcov_type *entry;
  gcov_type *lfu_entry = &counters[1];
  gcov_type *value_array = &counters[1];
  gcov_type *num_eviction = &counters[0];
  gcov_unsigned_t topn_val = GCOV_ICALL_TOPN_VAL;

  /* There are 2*topn_val values tracked, each value takes two slots in the
     counter array.  */
  for (i = 0; i < (topn_val << 2); i += 2)
    {
      entry = &value_array[i];
      if (entry[0] == value)
        {
          entry[1]++;
          found = 1;
          break;
        }
      else if (entry[1] == 0)
        {
          lfu_entry = entry;
          have_zero_count = 1;
        }
      else if (entry[1] < lfu_entry[1])
        lfu_entry = entry;
    }

  if (found)
    return;

  /* lfu_entry is either an empty entry or an entry
     with lowest count, which will be evicted.  */
  lfu_entry[0] = value;
  lfu_entry[1] = 1;

#define GCOV_ICALL_COUNTER_CLEAR_THRESHOLD 3000

  /* Too many evictions -- time to clear bottom entries to
     avoid hot values bumping each other out.  */
  if (!have_zero_count
      && ++*num_eviction >= GCOV_ICALL_COUNTER_CLEAR_THRESHOLD)
    {
      unsigned i, j;
      gcov_type *p, minv;
      gcov_type *tmp_cnts
        = (gcov_type *) alloca (topn_val * sizeof (gcov_type));

      *num_eviction = 0;

      for (i = 0; i < topn_val; i++)
        tmp_cnts[i] = 0;

      /* Find the largest topn_val values from the group of
         2*topn_val values and put them into tmp_cnts.  */

      for (i = 0; i < 2 * topn_val; i += 2)
        {
          p = 0;
          for (j = 0; j < topn_val; j++)
            {
              if (!p || tmp_cnts[j] < *p)
                p = &tmp_cnts[j];
            }
          if (value_array[i + 1] > *p)
            *p = value_array[i + 1];
        }

      minv = tmp_cnts[0];
      for (j = 1; j < topn_val; j++)
        {
          if (tmp_cnts[j] < minv)
            minv = tmp_cnts[j];
        }
      /* Zero out low value entries.  */
      for (i = 0; i < 2 * topn_val; i += 2)
        {
          if (value_array[i + 1] < minv)
            {
              value_array[i] = 0;
              value_array[i + 1] = 0;
            }
        }
    }
}
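
/* Layout sketch, not part of libgcov (the guard macro is hypothetical and
   never defined): the counter block holds 1 + 4*GCOV_ICALL_TOPN_VAL slots,
   one eviction counter followed by 2*GCOV_ICALL_TOPN_VAL (value, count)
   pairs.  */
#ifdef GCOV_PROFILER_USAGE_SKETCH
static void
example_topn_usage (void)
{
  gcov_type c[1 + 4 * GCOV_ICALL_TOPN_VAL] = { 0 };
  __gcov_topn_value_profiler_body (c, 0x1234);  /* records 0x1234 with count 1.  */
  __gcov_topn_value_profiler_body (c, 0x1234);  /* bumps that count to 2.  */
}
#endif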
/* These two variables are used to actually track caller and callee.  Keep
   them in TLS memory so races are not common (they are written to often).
   The variables are set directly by GCC instrumented code, so declaration
   here must match one in tree-profile.c.  */

#if defined(HAVE_CC_TLS) && !defined (USE_EMUTLS)
__thread
#endif
struct indirect_call_tuple __gcov_indirect_call_topn;

#ifdef TARGET_VTABLE_USES_DESCRIPTORS
#define VTABLE_USES_DESCRIPTORS 1
#else
#define VTABLE_USES_DESCRIPTORS 0
#endif

/* This function is instrumented at function entry to track topn indirect
   calls to CUR_FUNC.  */
void
__gcov_indirect_call_topn_profiler (gcov_type value, void *cur_func)
{
  void *callee_func = __gcov_indirect_call_topn.callee;
  /* If the C++ virtual tables contain function descriptors then one
     function may have multiple descriptors and we need to dereference
     the descriptors to see if they point to the same function.  */
  if (cur_func == callee_func
      || (VTABLE_USES_DESCRIPTORS && callee_func
          && *(void **) cur_func == *(void **) callee_func))
    __gcov_topn_value_profiler_body (__gcov_indirect_call_topn.counters, value);
}
#endif
#ifdef L_gcov_indirect_call_profiler_v2

/* These two variables are used to actually track caller and callee.  Keep
   them in TLS memory so races are not common (they are written to often).
   The variables are set directly by GCC instrumented code, so declaration
   here must match one in tree-profile.c.  */

#if defined(HAVE_CC_TLS) && !defined (USE_EMUTLS)
__thread
#endif
struct indirect_call_tuple __gcov_indirect_call;

/* By default, the C++ compiler will use function addresses in the
   vtable entries.  Setting TARGET_VTABLE_USES_DESCRIPTORS to nonzero
   tells the compiler to use function descriptors instead.  The value
   of this macro says how many words wide the descriptor is (normally 2).

   It is assumed that the address of a function descriptor may be treated
   as a pointer to a function.  */

/* Tries to determine the most common value among its inputs.  */
void
__gcov_indirect_call_profiler_v2 (gcov_type value, void *cur_func)
{
  /* If the C++ virtual tables contain function descriptors then one
     function may have multiple descriptors and we need to dereference
     the descriptors to see if they point to the same function.  */
  if (cur_func == __gcov_indirect_call.callee
      || (__LIBGCC_VTABLE_USES_DESCRIPTORS__
          && *(void **) cur_func == *(void **) __gcov_indirect_call.callee))
    __gcov_one_value_profiler_body (__gcov_indirect_call.counters, value, 0);

  __gcov_indirect_call.callee = NULL;
}
#endif
#ifdef L_gcov_time_profiler

/* Counter for first visit of each function.  */
gcov_type __gcov_time_profiler_counter ATTRIBUTE_HIDDEN = 1;

#endif
#ifdef L_gcov_average_profiler
/* Increase corresponding COUNTER by VALUE.  FIXME: Perhaps we want
   to saturate up.  */

void
__gcov_average_profiler (gcov_type *counters, gcov_type value)
{
  counters[0] += value;
  counters[1]++;
}
#endif
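
/* Illustration only, not part of libgcov (the guard macro is hypothetical
   and never defined).  COUNTERS[0] accumulates the sum of the observed
   values and COUNTERS[1] the number of observations, so a consumer of the
   profile data can recover the mean as COUNTERS[0] / COUNTERS[1].  */
#ifdef GCOV_PROFILER_USAGE_SKETCH
static void
example_average_usage (void)
{
  gcov_type c[2] = { 0, 0 };
  __gcov_average_profiler (c, 4);  /* c == { 4, 1 }  */
  __gcov_average_profiler (c, 8);  /* c == { 12, 2 }, mean 6.  */
}
#endif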
#if defined(L_gcov_average_profiler_atomic) && GCOV_SUPPORTS_ATOMIC
/* Increase corresponding COUNTER by VALUE.  FIXME: Perhaps we want
   to saturate up.  Function is thread-safe.  */

void
__gcov_average_profiler_atomic (gcov_type *counters, gcov_type value)
{
  __atomic_fetch_add (&counters[0], value, __ATOMIC_RELAXED);
  __atomic_fetch_add (&counters[1], 1, __ATOMIC_RELAXED);
}
#endif
#ifdef L_gcov_ior_profiler
/* Bitwise-OR VALUE into COUNTER.  */

void
__gcov_ior_profiler (gcov_type *counters, gcov_type value)
{
  *counters |= value;
}
#endif
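
/* Illustration only, not part of libgcov (the guard macro is hypothetical
   and never defined): the counter accumulates the union of every observed
   bit pattern.  */
#ifdef GCOV_PROFILER_USAGE_SKETCH
static void
example_ior_usage (void)
{
  gcov_type c = 0;
  __gcov_ior_profiler (&c, 0x1);  /* c == 0x1  */
  __gcov_ior_profiler (&c, 0x4);  /* c == 0x5  */
}
#endif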
#if defined(L_gcov_ior_profiler_atomic) && GCOV_SUPPORTS_ATOMIC
/* Bitwise-OR VALUE into COUNTER.  Function is thread-safe.  */

void
__gcov_ior_profiler_atomic (gcov_type *counters, gcov_type value)
{
  __atomic_fetch_or (&counters[0], value, __ATOMIC_RELAXED);
}
#endif

#endif /* inhibit_libc */