/* Profile counter container type.
   Copyright (C) 2017-2022 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "profile-count.h"
#include "options.h"
#include "tree.h"
#include "basic-block.h"
#include "function.h"
#include "cfg.h"
#include "gimple.h"
#include "data-streamer.h"
#include "cgraph.h"
#include "wide-int.h"
#include "sreal.h"

/* Names from profile_quality enum values.  */

const char *profile_quality_names[] =
{
  "uninitialized",
  "guessed_local",
  "guessed_global0",
  "guessed_global0adjusted",
  "guessed",
  "afdo",
  "adjusted",
  "precise"
};

/* Get a string describing QUALITY.  */

const char *
profile_quality_as_string (enum profile_quality quality)
{
  return profile_quality_names[quality];
}

/* Parse VALUE as profile quality and return true when a valid QUALITY.  */

bool
parse_profile_quality (const char *value, profile_quality *quality)
{
  for (unsigned i = 0; i < ARRAY_SIZE (profile_quality_names); i++)
    if (strcmp (profile_quality_names[i], value) == 0)
      {
        *quality = (profile_quality)i;
        return true;
      }

  return false;
}
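
/* Illustrative usage (not part of the original source): parsing one of the
   strings from profile_quality_names yields the matching enum value, e.g.

     profile_quality q;
     if (parse_profile_quality ("adjusted", &q))
       gcc_assert (q == ADJUSTED);
*/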

/* Display names from profile_quality enum values.  */

const char *profile_quality_display_names[] =
{
  NULL,
  "estimated locally",
  "estimated locally, globally 0",
  "estimated locally, globally 0 adjusted",
  "guessed",
  "auto FDO",
  "adjusted",
  "precise"
};

/* Dump THIS to BUFFER.  */

void
profile_count::dump (char *buffer) const
{
  if (!initialized_p ())
    sprintf (buffer, "uninitialized");
  else
    sprintf (buffer, "%" PRId64 " (%s)", m_val,
             profile_quality_display_names[m_quality]);
}

/* Dump THIS to F.  */

void
profile_count::dump (FILE *f) const
{
  char buffer[64];
  dump (buffer);
  fputs (buffer, f);
}

/* Dump THIS to stderr.  */

void
profile_count::debug () const
{
  dump (stderr);
  fprintf (stderr, "\n");
}

/* Return true if THIS differs from OTHER; tolerate small differences.  */

bool
profile_count::differs_from_p (profile_count other) const
{
  gcc_checking_assert (compatible_p (other));
  if (!initialized_p () || !other.initialized_p ())
    return initialized_p () != other.initialized_p ();
  if ((uint64_t)m_val - (uint64_t)other.m_val < 100
      || (uint64_t)other.m_val - (uint64_t)m_val < 100)
    return false;
  if (!other.m_val)
    return true;
  int64_t ratio = (int64_t)m_val * 100 / other.m_val;
  return ratio < 99 || ratio > 101;
}
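
/* Worked example (illustrative, not in the original source): counts of 1000
   and 1005 do not differ (absolute difference under 100), while 1000 and 1200
   do, since the ratio 1000 * 100 / 1200 == 83 falls outside the 99..101
   window.  */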

/* Stream THIS from IB.  */

profile_count
profile_count::stream_in (class lto_input_block *ib)
{
  profile_count ret;
  ret.m_val = streamer_read_gcov_count (ib);
  ret.m_quality = (profile_quality) streamer_read_uhwi (ib);
  return ret;
}

/* Stream THIS to OB.  */

void
profile_count::stream_out (struct output_block *ob)
{
  streamer_write_gcov_count (ob, m_val);
  streamer_write_uhwi (ob, m_quality);
}

/* Stream THIS to OB.  */

void
profile_count::stream_out (struct lto_output_stream *ob)
{
  streamer_write_gcov_count_stream (ob, m_val);
  streamer_write_uhwi_stream (ob, m_quality);
}

/* Output THIS to BUFFER.  */

void
profile_probability::dump (char *buffer) const
{
  if (!initialized_p ())
    sprintf (buffer, "uninitialized");
  else
    {
      /* Make difference between 0.00 as a roundoff error and actual 0.
         Similarly for 1.  */
      if (m_val == 0)
        buffer += sprintf (buffer, "never");
      else if (m_val == max_probability)
        buffer += sprintf (buffer, "always");
      else
        buffer += sprintf (buffer, "%3.1f%%",
                           (double)m_val * 100 / max_probability);

      if (m_quality == ADJUSTED)
        sprintf (buffer, " (adjusted)");
      else if (m_quality == AFDO)
        sprintf (buffer, " (auto FDO)");
      else if (m_quality == GUESSED)
        sprintf (buffer, " (guessed)");
    }
}

/* Dump THIS to F.  */

void
profile_probability::dump (FILE *f) const
{
  char buffer[64];
  dump (buffer);
  fputs (buffer, f);
}

/* Dump THIS to stderr.  */

void
profile_probability::debug () const
{
  dump (stderr);
  fprintf (stderr, "\n");
}

/* Return true if THIS differs from OTHER; tolerate small differences.  */

bool
profile_probability::differs_from_p (profile_probability other) const
{
  if (!initialized_p () || !other.initialized_p ())
    return false;
  if ((uint64_t)m_val - (uint64_t)other.m_val < max_probability / 1000
      || (uint64_t)other.m_val - (uint64_t)m_val < max_probability / 1000)
    return false;
  if (!other.m_val)
    return true;
  int64_t ratio = (int64_t)m_val * 100 / other.m_val;
  return ratio < 99 || ratio > 101;
}

/* Return true if THIS differs significantly from OTHER.  */

bool
profile_probability::differs_lot_from_p (profile_probability other) const
{
  if (!initialized_p () || !other.initialized_p ())
    return false;
  uint32_t d = m_val > other.m_val ? m_val - other.m_val : other.m_val - m_val;
  return d > max_probability / 2;
}

/* Stream THIS from IB.  */

profile_probability
profile_probability::stream_in (class lto_input_block *ib)
{
  profile_probability ret;
  ret.m_val = streamer_read_uhwi (ib);
  ret.m_quality = (profile_quality) streamer_read_uhwi (ib);
  return ret;
}

/* Stream THIS to OB.  */

void
profile_probability::stream_out (struct output_block *ob)
{
  streamer_write_uhwi (ob, m_val);
  streamer_write_uhwi (ob, m_quality);
}

/* Stream THIS to OB.  */

void
profile_probability::stream_out (struct lto_output_stream *ob)
{
  streamer_write_uhwi_stream (ob, m_val);
  streamer_write_uhwi_stream (ob, m_quality);
}

/* Compute RES=(a*b + c/2)/c capping and return false if overflow happened.  */

bool
slow_safe_scale_64bit (uint64_t a, uint64_t b, uint64_t c, uint64_t *res)
{
  FIXED_WIDE_INT (128) tmp = a;
  wi::overflow_type overflow;
  tmp = wi::udiv_floor (wi::umul (tmp, b, &overflow) + (c / 2), c);
  gcc_checking_assert (!overflow);
  if (wi::fits_uhwi_p (tmp))
    {
      *res = tmp.to_uhwi ();
      return true;
    }
  *res = (uint64_t) -1;
  return false;
}
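
/* Worked example (illustrative, not in the original source): scaling a = 7 by
   b/c = 2/4 computes (7*2 + 4/2)/4 == 4, i.e. multiply first and then divide
   rounding to nearest; the 128-bit intermediate keeps a*b from overflowing
   before the division.  */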

/* Return count as frequency within FUN scaled in range 0 to REG_FREQ_MAX.
   Used for legacy code and should not be used anymore.  */

int
profile_count::to_frequency (struct function *fun) const
{
  if (!initialized_p ())
    return BB_FREQ_MAX;
  if (*this == zero ())
    return 0;
  STATIC_ASSERT (REG_BR_PROB_BASE == BB_FREQ_MAX);
  gcc_assert (fun->cfg->count_max.initialized_p ());
  profile_probability prob = probability_in (fun->cfg->count_max);
  if (!prob.initialized_p ())
    return REG_BR_PROB_BASE;
  return prob.to_reg_br_prob_base ();
}

/* Return count as frequency scaled in range 0 to CGRAPH_FREQ_MAX
   where CGRAPH_FREQ_BASE means that count equals the entry block count.
   Used for legacy code and should not be used anymore.  */

int
profile_count::to_cgraph_frequency (profile_count entry_bb_count) const
{
  if (!initialized_p () || !entry_bb_count.initialized_p ())
    return CGRAPH_FREQ_BASE;
  if (*this == zero ())
    return 0;
  gcc_checking_assert (entry_bb_count.initialized_p ());
  uint64_t scale;
  gcc_checking_assert (compatible_p (entry_bb_count));
  if (!safe_scale_64bit (!entry_bb_count.m_val ? m_val + 1 : m_val,
                         CGRAPH_FREQ_BASE, MAX (1, entry_bb_count.m_val),
                         &scale))
    return CGRAPH_FREQ_MAX;
  return MIN (scale, CGRAPH_FREQ_MAX);
}

/* Return THIS/IN as sreal value.  */

sreal
profile_count::to_sreal_scale (profile_count in, bool *known) const
{
  if (!initialized_p () || !in.initialized_p ())
    {
      if (known)
        *known = false;
      return 1;
    }
  if (known)
    *known = true;
  /* Watch for cases where one count is IPA and other is not.  */
  if (in.ipa ().initialized_p ())
    {
      gcc_checking_assert (ipa ().initialized_p ());
      /* If current count is inter-procedurally 0 and IN is inter-procedurally
         non-zero, return 0.  */
      if (in.ipa ().nonzero_p ()
          && !ipa ().nonzero_p ())
        return 0;
    }
  else
    /* We can handle correctly 0 IPA count within locally estimated
       profile, but otherwise we are lost and this should not happen.  */
    gcc_checking_assert (!ipa ().initialized_p () || !ipa ().nonzero_p ());
  if (*this == zero ())
    return 0;
  if (m_val == in.m_val)
    return 1;
  gcc_checking_assert (compatible_p (in));

  if (!in.m_val)
    {
      if (!m_val)
        return 1;
      return m_val * 4;
    }
  return (sreal)m_val / (sreal)in.m_val;
}
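
/* Worked example (illustrative, not in the original source): a count of 50
   scaled against IN == 200 yields the sreal 0.25, while equal counts take the
   early exit and return 1 without dividing.  */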

/* We want to scale profile across function boundary from NUM to DEN.
   Take care of the side case when DEN is zero.  We still want to behave
   sanely here which means
     - scale to profile_count::zero () if NUM is profile_count::zero
     - do not affect anything if NUM == DEN
     - preserve counter value but adjust quality in other cases.  */

void
profile_count::adjust_for_ipa_scaling (profile_count *num,
                                       profile_count *den)
{
  /* Scaling is no-op if NUM and DEN are the same.  */
  if (*num == *den)
    return;
  /* Scaling to zero is always zero.  */
  if (*num == zero ())
    return;
  /* If den is non-zero we are safe.  */
  if (den->force_nonzero () == *den)
    return;
  /* Force both to non-zero so we do not push profiles to 0 when
     both num == 0 and den == 0.  */
  *den = den->force_nonzero ();
  *num = num->force_nonzero ();
}
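
/* Illustrative behaviour (not in the original source): when DEN records no
   executions but NUM does, both are bumped to the smallest nonzero count, so
   a later NUM/DEN scaling cannot collapse an otherwise live profile to zero;
   when NUM is profile_count::zero (), nothing is touched and the scaled
   profile correctly becomes zero.  */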

/* THIS is a count of bb which is known to be executed IPA times.
   Combine this information into bb counter.  This means returning IPA
   if it is nonzero, not changing anything if IPA is uninitialized
   and if IPA is zero, turning THIS into corresponding local profile with
   global0.  */

profile_count
profile_count::combine_with_ipa_count (profile_count ipa)
{
  if (!initialized_p ())
    return *this;
  ipa = ipa.ipa ();
  if (ipa.nonzero_p ())
    return ipa;
  if (!ipa.initialized_p () || *this == zero ())
    return *this;
  if (ipa == zero ())
    return this->global0 ();
  return this->global0adjusted ();
}

/* Same as profile_count::combine_with_ipa_count but within function with
   count IPA2.  */

profile_count
profile_count::combine_with_ipa_count_within (profile_count ipa,
                                              profile_count ipa2)
{
  profile_count ret;
  if (!initialized_p ())
    return *this;
  if (ipa2.ipa () == ipa2 && ipa.initialized_p ())
    ret = ipa;
  else
    ret = combine_with_ipa_count (ipa);
  gcc_checking_assert (ret.compatible_p (ipa2));
  return ret;
}

/* The profiling runtime uses gcov_type, which is usually 64bit integer.
   Conversions back and forth are used to read the coverage and get it
   into internal representation.  */

profile_count
profile_count::from_gcov_type (gcov_type v, profile_quality quality)
{
  profile_count ret;
  gcc_checking_assert (v >= 0);
  if (dump_file && v >= (gcov_type)max_count)
    fprintf (dump_file,
             "Capping gcov count %" PRId64 " to max_count %" PRId64 "\n",
             (int64_t) v, (int64_t) max_count);
  ret.m_val = MIN (v, (gcov_type)max_count);
  ret.m_quality = quality;
  return ret;
}
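
/* Illustrative usage (not in the original source): a raw gcov counter of 1000
   read from coverage data becomes a profile_count with value 1000 and the
   quality passed by the caller; values at or above max_count are capped and
   the capping is reported to the dump file.  */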

/* COUNT1 times event happens with *THIS probability, COUNT2 times it happens
   with OTHER probability.  Return probability that either *THIS or
   OTHER happens.  */

profile_probability
profile_probability::combine_with_count (profile_count count1,
                                         profile_probability other,
                                         profile_count count2) const
{
  /* If probabilities are same, we are done.
     If counts are nonzero we can distribute accordingly.  In remaining
     cases just average the values and hope for the best.  */
  if (*this == other || count1 == count2
      || (count2 == profile_count::zero ()
          && !(count1 == profile_count::zero ())))
    return *this;
  if (count1 == profile_count::zero () && !(count2 == profile_count::zero ()))
    return other;
  else if (count1.nonzero_p () || count2.nonzero_p ())
    return *this * count1.probability_in (count1 + count2)
           + other * count2.probability_in (count1 + count2);
  else
    return *this * even () + other * even ();
}
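
/* Worked example (illustrative, not in the original source): if *THIS is 80%
   taken on a path executed COUNT1 == 300 times and OTHER is 20% on a path
   executed COUNT2 == 100 times, the combined probability is
   80% * 300/400 + 20% * 100/400 == 65%.  */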

/* Return probability as sreal in range [0, 1].  */

sreal
profile_probability::to_sreal () const
{
  gcc_checking_assert (initialized_p ());
  return ((sreal)m_val) >> (n_bits - 2);
}
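
/* Illustrative (not in the original source): assuming max_probability is
   defined as 1 << (n_bits - 2) in profile-count.h, the shift above divides
   m_val by max_probability, so m_val == max_probability / 2 converts to the
   sreal 0.5.  */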