/* File format for coverage information
   Copyright (C) 1996-2014 Free Software Foundation, Inc.
   Contributed by Bob Manson <manson@cygnus.com>.
   Completely remangled by Nathan Sidwell <nathan@codesourcery.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
/* Routines declared in gcov-io.h.  This file should be #included by
   another source file, after having #included gcov-io.h.  */

static void gcov_write_block (unsigned);
static gcov_unsigned_t *gcov_write_words (unsigned);
static const gcov_unsigned_t *gcov_read_words (unsigned);
static void gcov_allocate (unsigned);
/* Optimum number of gcov_unsigned_t's read from or written to disk.  */
#define GCOV_BLOCK_SIZE (1 << 10)
GCOV_LINKAGE struct gcov_var
{
  FILE *file;
  gcov_position_t start;	/* Position of first byte of block.  */
  unsigned offset;		/* Read/write position within the block.  */
  unsigned length;		/* Read limit in the block.  */
  unsigned overread;		/* Number of words overread.  */
  int error;			/* < 0 overflow, > 0 disk error.  */
  int mode;			/* < 0 writing, > 0 reading.  */
#if IN_LIBGCOV
  /* Holds one block plus 4 bytes, thus all coverage reads & writes
     fit within this buffer and we always can transfer GCOV_BLOCK_SIZE
     to and from the disk.  libgcov never backtracks and only writes 4
     or 8 byte objects.  */
  gcov_unsigned_t buffer[GCOV_BLOCK_SIZE + 1];
#else
  int endian;			/* Swap endianness.  */
  /* Holds a variable length block, as the compiler can write
     strings and needs to backtrack.  */
  size_t alloc;
  gcov_unsigned_t *buffer;
#endif
} gcov_var;
/* Save the current position in the gcov file.  */

static inline gcov_position_t
gcov_position (void)
{
  gcc_assert (gcov_var.mode > 0);
  return gcov_var.start + gcov_var.offset;
}
/* Return nonzero if the error flag is set.  */

static inline int
gcov_is_error (void)
{
  return gcov_var.file ? gcov_var.error : 1;
}
/* Move to beginning of file and initialize for writing.  */

GCOV_LINKAGE inline void
gcov_rewrite (void)
{
  gcc_assert (gcov_var.mode > 0);
  gcov_var.mode = -1;
  gcov_var.start = 0;
  gcov_var.offset = 0;
  fseek (gcov_var.file, 0L, SEEK_SET);
}
static inline gcov_unsigned_t
from_file (gcov_unsigned_t value)
{
  if (gcov_var.endian)
    {
      value = (value >> 16) | (value << 16);
      value = ((value & 0xff00ff) << 8) | ((value >> 8) & 0xff00ff);
    }
  return value;
}
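/* Illustrative sketch, not part of the original file: the two shift
   expressions above perform a full 32-bit byte swap.  The constant
   below is a made-up example value.  */
#if 0
static void
example_byte_swap (void)
{
  gcov_unsigned_t value = 0x01020304;
  value = (value >> 16) | (value << 16);			 /* 0x03040102 */
  value = ((value & 0xff00ff) << 8) | ((value >> 8) & 0xff00ff); /* 0x04030201 */
  /* VALUE is now 0x04030201, i.e. the bytes in reverse order, which is
     what from_file applies when gcov_var.endian is set.  */
}
#endif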
/* Open a gcov file.  NAME is the name of the file to open and MODE
   indicates whether a new file should be created, or an existing file
   opened.  If MODE is >= 0 an existing file will be opened, if
   possible, and if MODE is <= 0, a new file will be created.  Use
   MODE=0 to attempt to reopen an existing file and then fall back on
   creating a new one.  If MODE > 0, the file will be opened in
   read-only mode.  Otherwise it will be opened for modification.
   Return zero on failure, >0 on opening an existing file and <0 on
   creating a new one.  */
GCOV_LINKAGE int
#if IN_LIBGCOV
gcov_open (const char *name)
#else
gcov_open (const char *name, int mode)
#endif
{
  struct flock s_flock;
  int fd;

  s_flock.l_whence = SEEK_SET;
  s_flock.l_len = 0;	/* Until EOF.  */
  s_flock.l_pid = getpid ();
  gcc_assert (!gcov_var.file);
  gcov_var.offset = gcov_var.length = 0;
  gcov_var.overread = -1u;
      /* Read-only mode - acquire a read-lock.  */
      s_flock.l_type = F_RDLCK;
      /* pass mode (ignored) for compatibility */
      fd = open (name, O_RDONLY, S_IRUSR | S_IWUSR);
      /* Write mode - acquire a write-lock.  */
      s_flock.l_type = F_WRLCK;
      fd = open (name, O_RDWR | O_CREAT | O_TRUNC, 0666);
      /* Read-Write mode - acquire a write-lock.  */
      s_flock.l_type = F_WRLCK;
      fd = open (name, O_RDWR | O_CREAT, 0666);
  while (fcntl (fd, F_SETLKW, &s_flock) && errno == EINTR)
    continue;
  gcov_var.file = fdopen (fd, (mode > 0) ? "rb" : "r+b");
      if (fstat (fd, &st) < 0)
	{
	  fclose (gcov_var.file);
	  gcov_var.file = 0;
	  return 0;
	}
      if (st.st_size != 0)
	gcov_var.mode = 1;
      else
	gcov_var.mode = mode * 2 + 1;
    }
  else
    gcov_var.mode = mode * 2 + 1;
  gcov_var.file = fopen (name, (mode > 0) ? "rb" : "r+b");
      gcov_var.file = fopen (name, "w+b");
      gcov_var.mode = mode * 2 + 1;
  setbuf (gcov_var.file, (char *)0);

  return 1;
}
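/* Illustrative sketch, not part of the original file: the three MODE
   cases documented above, using a made-up file name.  Assumes the
   two-argument (compiler-side) form of gcov_open.  */
#if 0
static void
example_open_modes (void)
{
  if (gcov_open ("example.gcda", 1))	/* Read an existing file only.  */
    gcov_close ();
  if (gcov_open ("example.gcda", 0))	/* Reopen, or create if missing.  */
    gcov_close ();
  if (gcov_open ("example.gcda", -1))	/* Create/truncate for writing.  */
    gcov_close ();
}
#endif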
/* Close the current gcov file.  Flushes data to disk.  Returns nonzero
   on failure or error flag set.  */
GCOV_LINKAGE int
gcov_close (void)
{
  if (gcov_var.file)
    {
      if (gcov_var.offset && gcov_var.mode < 0)
	gcov_write_block (gcov_var.offset);
      fclose (gcov_var.file);
      gcov_var.file = 0;
    }
  free (gcov_var.buffer);
  gcov_var.buffer = 0;
  gcov_var.mode = 0;
  return gcov_var.error;
}
/* Check if MAGIC is EXPECTED.  Use it to determine endianness of the
   file.  Returns +1 for same endian, -1 for other endian and zero for
   not EXPECTED.  */
GCOV_LINKAGE int
gcov_magic (gcov_unsigned_t magic, gcov_unsigned_t expected)
{
  if (magic == expected)
    return 1;
  magic = (magic >> 16) | (magic << 16);
  magic = ((magic & 0xff00ff) << 8) | ((magic >> 8) & 0xff00ff);
  if (magic == expected)
    {
      gcov_var.endian = 1;
      return -1;
    }
  return 0;
}
static void
gcov_allocate (unsigned length)
{
  size_t new_size = gcov_var.alloc;

  if (!new_size)
    new_size = GCOV_BLOCK_SIZE;
  new_size += length;
  new_size *= 2;

  gcov_var.alloc = new_size;
  gcov_var.buffer = XRESIZEVAR (gcov_unsigned_t, gcov_var.buffer, new_size << 2);
}
/* Write out the current block, if needs be.  */

static void
gcov_write_block (unsigned size)
{
  if (fwrite (gcov_var.buffer, size << 2, 1, gcov_var.file) != 1)
    gcov_var.error = -1;
  gcov_var.start += size;
  gcov_var.offset -= size;
}
/* Allocate space to write WORDS words to the gcov file.  Return a
   pointer to those words, or NULL on failure.  */
static gcov_unsigned_t *
gcov_write_words (unsigned words)
{
  gcov_unsigned_t *result;

  gcc_assert (gcov_var.mode < 0);
#if IN_LIBGCOV
  if (gcov_var.offset >= GCOV_BLOCK_SIZE)
    {
      gcov_write_block (GCOV_BLOCK_SIZE);
      if (gcov_var.offset)
	{
	  gcc_assert (gcov_var.offset == 1);
	  memcpy (gcov_var.buffer, gcov_var.buffer + GCOV_BLOCK_SIZE, 4);
	}
    }
#else
  if (gcov_var.offset + words > gcov_var.alloc)
    gcov_allocate (gcov_var.offset + words);
#endif
  result = &gcov_var.buffer[gcov_var.offset];
  gcov_var.offset += words;

  return result;
}
/* Write unsigned VALUE to coverage file.  Sets error flag
   appropriately.  */

GCOV_LINKAGE void
gcov_write_unsigned (gcov_unsigned_t value)
{
  gcov_unsigned_t *buffer = gcov_write_words (1);

  buffer[0] = value;
}
/* Write counter VALUE to coverage file.  Sets error flag
   appropriately.  */

GCOV_LINKAGE void
gcov_write_counter (gcov_type value)
{
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = (gcov_unsigned_t) value;
  if (sizeof (value) > sizeof (gcov_unsigned_t))
    buffer[1] = (gcov_unsigned_t) (value >> 32);
  else
    buffer[1] = 0;
}
#endif /* IN_LIBGCOV */
/* Write STRING to coverage file.  Sets error flag on file
   error, overflow flag on overflow.  */

GCOV_LINKAGE void
gcov_write_string (const char *string)
{
  unsigned length = 0;
  unsigned alloc = 0;
  gcov_unsigned_t *buffer;

  if (string)
    {
      length = strlen (string);
      alloc = (length + 4) >> 2;
    }

  buffer = gcov_write_words (1 + alloc);

  buffer[0] = alloc;
  buffer[alloc] = 0;
  memcpy (&buffer[1], string, length);
}
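/* Illustrative sketch, not part of the original file: the record layout
   produced above for the string "hello".  LENGTH is 5, so ALLOC is
   (5 + 4) >> 2 == 2 payload words and three words are reserved:
     word 0: 2			-- payload size in words
     word 1: 'h' 'e' 'l' 'l'
     word 2: 'o' \0  \0  \0	-- NUL padding to a word boundary
   The buffer[alloc] = 0 store above is what zeroes the padding word
   before the memcpy fills in the string bytes.  */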
/* Write a tag TAG and reserve space for the record length.  Return a
   value to be used for gcov_write_length.  */
GCOV_LINKAGE gcov_position_t
gcov_write_tag (gcov_unsigned_t tag)
{
  gcov_position_t result = gcov_var.start + gcov_var.offset;
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = tag;
  buffer[1] = 0;

  return result;
}
/* Write a record length using POSITION, which was returned by
   gcov_write_tag.  The current file position is the end of the
   record, and is restored before returning.  Returns nonzero on
   overflow.  */
GCOV_LINKAGE void
gcov_write_length (gcov_position_t position)
{
  unsigned offset;
  gcov_unsigned_t length;
  gcov_unsigned_t *buffer;

  gcc_assert (gcov_var.mode < 0);
  gcc_assert (position + 2 <= gcov_var.start + gcov_var.offset);
  gcc_assert (position >= gcov_var.start);
  offset = position - gcov_var.start;
  length = gcov_var.offset - offset - 2;
  buffer = (gcov_unsigned_t *) &gcov_var.buffer[offset];
  buffer[1] = length;
  if (gcov_var.offset >= GCOV_BLOCK_SIZE)
    gcov_write_block (gcov_var.offset);
}
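/* Illustrative sketch, not part of the original file: the intended
   pairing of gcov_write_tag and gcov_write_length when the record
   length is not known up front.  The tag value is a made-up
   placeholder, not a real GCOV_TAG_* constant.  */
#if 0
static void
example_tagged_record (void)
{
  gcov_position_t pos = gcov_write_tag (0x0badf00d);
  gcov_write_unsigned (42);	/* First payload word.  */
  gcov_write_unsigned (7);	/* Second payload word.  */
  gcov_write_length (pos);	/* Back-patches the length slot with 2.  */
}
#endif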
#else /* IN_LIBGCOV */
/* Write a tag TAG and length LENGTH.  */
GCOV_LINKAGE void
gcov_write_tag_length (gcov_unsigned_t tag, gcov_unsigned_t length)
{
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = tag;
  buffer[1] = length;
}
/* Write a summary structure to the gcov file.  Return nonzero on
   overflow.  */
GCOV_LINKAGE void
gcov_write_summary (gcov_unsigned_t tag, const struct gcov_summary *summary)
{
  unsigned ix, h_ix, bv_ix, h_cnt = 0;
  const struct gcov_ctr_summary *csum;
  unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];
  /* Count number of non-zero histogram entries, and fill in a bit vector
     of non-zero indices.  The histogram is only currently computed for arc
     counters.  */
  for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
    histo_bitvector[bv_ix] = 0;
  csum = &summary->ctrs[GCOV_COUNTER_ARCS];
  for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
    {
      if (csum->histogram[h_ix].num_counters > 0)
	{
	  histo_bitvector[h_ix / 32] |= 1 << (h_ix % 32);
	  h_cnt++;
	}
    }
  gcov_write_tag_length (tag, GCOV_TAG_SUMMARY_LENGTH (h_cnt));
  gcov_write_unsigned (summary->checksum);
  for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
    {
      gcov_write_unsigned (csum->num);
      gcov_write_unsigned (csum->runs);
      gcov_write_counter (csum->sum_all);
      gcov_write_counter (csum->run_max);
      gcov_write_counter (csum->sum_max);
      if (ix != GCOV_COUNTER_ARCS)
	{
	  for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
	    gcov_write_unsigned (0);
	  continue;
	}
      for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
	gcov_write_unsigned (histo_bitvector[bv_ix]);
      for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
	{
	  if (!csum->histogram[h_ix].num_counters)
	    continue;
	  gcov_write_unsigned (csum->histogram[h_ix].num_counters);
	  gcov_write_counter (csum->histogram[h_ix].min_value);
	  gcov_write_counter (csum->histogram[h_ix].cum_value);
	}
    }
}
#endif /* IN_LIBGCOV */
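/* Illustrative sketch, not part of the original file: how the bit
   vector written above marks which histogram buckets are present.
   A non-empty bucket h_ix sets bit (h_ix % 32) of word (h_ix / 32).
   For example, if only buckets 3 and 40 are non-empty:
     histo_bitvector[0] == 1u << 3
     histo_bitvector[1] == 1u << 8	(40 / 32 == 1, 40 % 32 == 8)
   and h_cnt == 2, so exactly two (num_counters, min_value, cum_value)
   triples follow the bit vector on disk.  */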
/* Return a pointer to read WORDS words from the gcov file.  Returns
   NULL on failure (read past EOF).  */
static const gcov_unsigned_t *
gcov_read_words (unsigned words)
{
  const gcov_unsigned_t *result;
  unsigned excess = gcov_var.length - gcov_var.offset;

  gcc_assert (gcov_var.mode > 0);
  if (excess < words)
    {
      gcov_var.start += gcov_var.offset;
#if IN_LIBGCOV
      if (excess)
	{
	  gcc_assert (excess == 1);
	  memcpy (gcov_var.buffer, gcov_var.buffer + gcov_var.offset, 4);
	}
#else
      memmove (gcov_var.buffer, gcov_var.buffer + gcov_var.offset, excess * 4);
#endif
      gcov_var.offset = 0;
      gcov_var.length = excess;
#if IN_LIBGCOV
      gcc_assert (!gcov_var.length || gcov_var.length == 1);
      excess = GCOV_BLOCK_SIZE;
#else
      if (gcov_var.length + words > gcov_var.alloc)
	gcov_allocate (gcov_var.length + words);
      excess = gcov_var.alloc - gcov_var.length;
#endif
      excess = fread (gcov_var.buffer + gcov_var.length,
		      1, excess << 2, gcov_var.file) >> 2;
      gcov_var.length += excess;
      if (gcov_var.length < words)
	{
	  gcov_var.overread += words - gcov_var.length;
	  gcov_var.length = 0;
	  return 0;
	}
    }
  result = &gcov_var.buffer[gcov_var.offset];
  gcov_var.offset += words;

  return result;
}
/* Read unsigned value from a coverage file.  Sets error flag on file
   error, overflow flag on overflow.  */
GCOV_LINKAGE gcov_unsigned_t
gcov_read_unsigned (void)
{
  gcov_unsigned_t value;
  const gcov_unsigned_t *buffer = gcov_read_words (1);

  if (!buffer)
    return 0;
  value = from_file (buffer[0]);
  return value;
}
/* Read counter value from a coverage file.  Sets error flag on file
   error, overflow flag on overflow.  */
GCOV_LINKAGE gcov_type
gcov_read_counter (void)
{
  gcov_type value;
  const gcov_unsigned_t *buffer = gcov_read_words (2);

  if (!buffer)
    return 0;
  value = from_file (buffer[0]);
  if (sizeof (value) > sizeof (gcov_unsigned_t))
    value |= ((gcov_type) from_file (buffer[1])) << 32;
  else if (buffer[1])
    gcov_var.error = -1;

  return value;
}
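/* Illustrative sketch, not part of the original file: a 64-bit counter
   is stored as two 32-bit words, low word first.  For the made-up
   value 0x0000000500000002 (ignoring any endian swap):
     buffer[0] == 0x00000002	-- low 32 bits
     buffer[1] == 0x00000005	-- high 32 bits
   and the reassembly above computes
     value = buffer[0] | ((gcov_type) buffer[1] << 32).  */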
/* Read string from coverage file.  Returns a pointer to a static
   buffer, or NULL on empty string.  You must copy the string before
   calling another gcov function.  */

GCOV_LINKAGE const char *
gcov_read_string (void)
{
  unsigned length = gcov_read_unsigned ();

  if (!length)
    return 0;

  return (const char *) gcov_read_words (length);
}
GCOV_LINKAGE void
gcov_read_summary (struct gcov_summary *summary)
{
  unsigned ix, h_ix, bv_ix, h_cnt = 0;
  struct gcov_ctr_summary *csum;
  unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];
  unsigned cur_bitvector;
  summary->checksum = gcov_read_unsigned ();
  for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
    {
      csum->num = gcov_read_unsigned ();
      csum->runs = gcov_read_unsigned ();
      csum->sum_all = gcov_read_counter ();
      csum->run_max = gcov_read_counter ();
      csum->sum_max = gcov_read_counter ();
      memset (csum->histogram, 0,
	      sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
      for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
	{
	  histo_bitvector[bv_ix] = gcov_read_unsigned ();
#if IN_LIBGCOV
	  /* When building libgcov we don't include system.h, which includes
	     hwint.h (where popcount_hwi is declared).  However, libgcov.a
	     is built by the bootstrapped compiler and therefore the builtins
	     are always available.  */
	  h_cnt += __builtin_popcount (histo_bitvector[bv_ix]);
#else
	  h_cnt += popcount_hwi (histo_bitvector[bv_ix]);
#endif
	}
	  /* Find the index corresponding to the next entry we will read in.
	     First find the next non-zero bitvector and re-initialize
	     the histogram index accordingly, then right shift and increment
	     the index until we find a set bit.  */
	  while (!cur_bitvector)
	    {
	      h_ix = bv_ix * 32;
	      gcc_assert (bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE);
	      cur_bitvector = histo_bitvector[bv_ix++];
	    }
	  while (!(cur_bitvector & 0x1))
	    {
	      h_ix++;
	      cur_bitvector >>= 1;
	    }
	  gcc_assert (h_ix < GCOV_HISTOGRAM_SIZE);
	  csum->histogram[h_ix].num_counters = gcov_read_unsigned ();
	  csum->histogram[h_ix].min_value = gcov_read_counter ();
	  csum->histogram[h_ix].cum_value = gcov_read_counter ();
	  /* Shift off the index we are done with and increment to the
	     corresponding next histogram entry.  */
	  cur_bitvector >>= 1;
	  h_ix++;
/* Reset to a known position.  BASE should have been obtained from
   gcov_position, LENGTH should be a record length.  */
GCOV_LINKAGE void
gcov_sync (gcov_position_t base, gcov_unsigned_t length)
{
  gcc_assert (gcov_var.mode > 0);
  base += length;
  if (base - gcov_var.start <= gcov_var.length)
    gcov_var.offset = base - gcov_var.start;
  else
    {
      gcov_var.offset = gcov_var.length = 0;
      fseek (gcov_var.file, base << 2, SEEK_SET);
      gcov_var.start = ftell (gcov_var.file) >> 2;
    }
}
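/* Illustrative sketch, not part of the original file: gcov positions
   are word offsets, which is why the fseek above shifts BASE left by 2
   to get a byte offset, and ftell's byte offset is shifted right by 2
   on the way back.  A typical caller reads a record's tag and length
   words, notes the payload start with gcov_position (), and later
   calls gcov_sync (start, length) to skip to the end of that record.  */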
/* Move to a given position in a gcov file.  */
GCOV_LINKAGE void
gcov_seek (gcov_position_t base)
{
  gcc_assert (gcov_var.mode < 0);
  if (gcov_var.offset)
    gcov_write_block (gcov_var.offset);
  fseek (gcov_var.file, base << 2, SEEK_SET);
  gcov_var.start = ftell (gcov_var.file) >> 2;
}
/* Return the modification time of the current gcov file.  */
GCOV_LINKAGE time_t
gcov_time (void)
{
  struct stat status;

  if (fstat (fileno (gcov_var.file), &status))
    return 0;
  else
    return status.st_mtime;
}
/* Determine the index into histogram for VALUE.  */
GCOV_LINKAGE unsigned
gcov_histo_index (gcov_type value)
{
  gcov_type_unsigned v = (gcov_type_unsigned) value;
  unsigned r = 0;
  unsigned prev2bits = 0;

  /* Find index into log2 scale histogram, where each of the log2
     sized buckets is divided into 4 linear sub-buckets for better
     focus in the higher buckets.  */
  /* Find the place of the most-significant bit set.  */
#if IN_LIBGCOV
  /* When building libgcov we don't include system.h, which includes
     hwint.h (where floor_log2 is declared).  However, libgcov.a
     is built by the bootstrapped compiler and therefore the builtins
     are always available.  */
  r = sizeof (long long) * __CHAR_BIT__ - 1 - __builtin_clzll (v);
#else
  /* We use floor_log2 from hwint.c, which takes a HOST_WIDE_INT
     that is either 32 or 64 bits, and gcov_type_unsigned may be 64 bits.
     Need to check for the case where gcov_type_unsigned is 64 bits
     and HOST_WIDE_INT is 32 bits and handle it specially.  */
#if HOST_BITS_PER_WIDEST_INT == HOST_BITS_PER_WIDE_INT
  r = floor_log2 (v);
#elif HOST_BITS_PER_WIDEST_INT == 2 * HOST_BITS_PER_WIDE_INT
  HOST_WIDE_INT hwi_v = v >> HOST_BITS_PER_WIDE_INT;
  if (hwi_v)
    r = floor_log2 (hwi_v) + HOST_BITS_PER_WIDE_INT;
  else
    r = floor_log2 ((HOST_WIDE_INT) v);
#endif
#endif
  /* If at most the 2 least significant bits are set (value is
     0 - 3) then that value is our index into the lowest set of
     four buckets.  */
  if (r < 2)
    return (unsigned) value;
  /* Find the two next most significant bits to determine which
     of the four linear sub-buckets to select.  */
  prev2bits = (v >> (r - 2)) & 0x3;
  /* Finally, compose the final bucket index from the log2 index and
     the next 2 bits.  The minimum r value at this point is 2 since we
     returned above if r was less than 2, so the minimum bucket at this
     point is 4.  */
  return (r - 1) * 4 + prev2bits;
}
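/* Illustrative sketch, not part of the original file: two worked
   examples of the bucket computation above.
     value = 3   -> r < 2, so the index is simply 3.
     value = 100 -> binary 1100100, so r = 6 (most-significant set bit),
		    prev2bits = (100 >> 4) & 3 = 2,
		    index = (6 - 1) * 4 + 2 = 22.
   Each power-of-two range of values thus maps onto four consecutive
   linear sub-buckets.  */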
/* Merge SRC_HISTO into TGT_HISTO.  The counters are assumed to be in
   the same relative order in both histograms, and are matched up
   and merged in reverse order.  Each counter is assigned an equal portion of
   its entry's original cumulative counter value when computing the
   new merged cum_value.  */
static void gcov_histogram_merge (gcov_bucket_type *tgt_histo,
				  gcov_bucket_type *src_histo)
{
  int src_i, tgt_i, tmp_i = 0;
  unsigned src_num, tgt_num, merge_num;
  gcov_type src_cum, tgt_cum, merge_src_cum, merge_tgt_cum, merge_cum;
  gcov_type merge_min;
  gcov_bucket_type tmp_histo[GCOV_HISTOGRAM_SIZE];
  int src_done = 0;

  memset (tmp_histo, 0, sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
  /* Assume that the counters are in the same relative order in both
     histograms.  Walk the histograms from largest to smallest entry,
     matching up and combining counters in order.  */
  src_i = GCOV_HISTOGRAM_SIZE - 1;
  for (tgt_i = GCOV_HISTOGRAM_SIZE - 1; tgt_i >= 0 && !src_done; tgt_i--)
    {
      tgt_num = tgt_histo[tgt_i].num_counters;
      tgt_cum = tgt_histo[tgt_i].cum_value;
      /* Keep going until all of the target histogram's counters at this
	 position have been matched and merged with counters from the
	 source histogram.  */
      while (tgt_num > 0 && !src_done)
	{
	  /* If this is either the first time through this loop or we just
	     exhausted the previous non-zero source histogram entry, look
	     for the next non-zero source histogram entry.  */
	  if (!src_num)
	    {
	      /* Locate the next non-zero entry.  */
	      while (src_i >= 0 && !src_histo[src_i].num_counters)
		src_i--;
	      /* If source histogram has fewer counters, then just copy over the
		 remaining target counters and quit.  */
	      if (src_i < 0)
		{
		  tmp_histo[tgt_i].num_counters += tgt_num;
		  tmp_histo[tgt_i].cum_value += tgt_cum;
		  if (!tmp_histo[tgt_i].min_value ||
		      tgt_histo[tgt_i].min_value < tmp_histo[tgt_i].min_value)
		    tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
		  while (--tgt_i >= 0)
		    {
		      tmp_histo[tgt_i].num_counters
			  += tgt_histo[tgt_i].num_counters;
		      tmp_histo[tgt_i].cum_value += tgt_histo[tgt_i].cum_value;
		      if (!tmp_histo[tgt_i].min_value ||
			  tgt_histo[tgt_i].min_value
			  < tmp_histo[tgt_i].min_value)
			tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
		    }

		  src_done = 1;
		  break;
		}
	      src_num = src_histo[src_i].num_counters;
	      src_cum = src_histo[src_i].cum_value;
	    }
	  /* The number of counters to merge on this pass is the minimum
	     of the remaining counters from the current target and source
	     histogram entries.  */
	  merge_num = tgt_num;
	  if (src_num < merge_num)
	    merge_num = src_num;
	  /* The merged min_value is the sum of the min_values from target
	     and source.  */
	  merge_min = tgt_histo[tgt_i].min_value + src_histo[src_i].min_value;
	  /* Compute the portion of source and target entries' cum_value
	     that will be apportioned to the counters being merged.
	     The total remaining cum_value from each entry is divided
	     equally among the counters from that histogram entry if we
	     are not merging all of them.  */
	  merge_src_cum = src_cum;
	  if (merge_num < src_num)
	    merge_src_cum = merge_num * src_cum / src_num;
	  merge_tgt_cum = tgt_cum;
	  if (merge_num < tgt_num)
	    merge_tgt_cum = merge_num * tgt_cum / tgt_num;
	  /* The merged cum_value is the sum of the source and target
	     components.  */
	  merge_cum = merge_src_cum + merge_tgt_cum;
	  /* Update the remaining number of counters and cum_value left
	     to be merged from this source and target entry.  */
	  src_cum -= merge_src_cum;
	  tgt_cum -= merge_tgt_cum;
	  src_num -= merge_num;
	  tgt_num -= merge_num;
	  /* The merged counters get placed in the new merged histogram
	     at the entry for the merged min_value.  */
	  tmp_i = gcov_histo_index (merge_min);
	  gcc_assert (tmp_i < GCOV_HISTOGRAM_SIZE);
	  tmp_histo[tmp_i].num_counters += merge_num;
	  tmp_histo[tmp_i].cum_value += merge_cum;
	  if (!tmp_histo[tmp_i].min_value ||
	      merge_min < tmp_histo[tmp_i].min_value)
	    tmp_histo[tmp_i].min_value = merge_min;
	  /* Ensure the search for the next non-zero src_histo entry starts
	     at the next smallest histogram bucket.  */
	  if (!src_num)
	    src_i--;
	}
    }

  gcc_assert (tgt_i < 0);
  /* In the case where there were more counters in the source histogram,
     accumulate the remaining unmerged cumulative counter values.  Add
     those to the smallest non-zero target histogram entry.  Otherwise,
     the total cumulative counter values in the histogram will be smaller
     than the sum_all stored in the summary, which will complicate
     computing the working set information from the histogram later on.  */
      src_cum += src_histo[src_i].cum_value;
  /* At this point, tmp_i should be the smallest non-zero entry in the
     tmp_histo.  */
  gcc_assert (tmp_i >= 0 && tmp_i < GCOV_HISTOGRAM_SIZE
	      && tmp_histo[tmp_i].num_counters > 0);
  tmp_histo[tmp_i].cum_value += src_cum;
  /* Finally, copy the merged histogram into tgt_histo.  */
  memcpy (tgt_histo, tmp_histo,
	  sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
}
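/* Illustrative sketch, not part of the original file: the apportioning
   used above with made-up numbers.  Suppose a source entry holds
   src_num = 4 counters with src_cum = 100, and only merge_num = 1 of
   them is merged on this pass.  Then
     merge_src_cum = merge_num * src_cum / src_num = 1 * 100 / 4 = 25,
   i.e. each counter is credited with an equal 25 share of the entry's
   cumulative value, and the remaining 75 stays behind for later
   passes.  */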
#endif /* !IN_GCOV */

/* This is used by gcov-dump (IN_GCOV == -1) and in the compiler
   (!IN_GCOV && !IN_LIBGCOV).  */
#if IN_GCOV <= 0 && !IN_LIBGCOV
/* Compute the working set information from the counter histogram in
   the profile summary.  This is an array of information corresponding to a
   range of percentages of the total execution count (sum_all), and includes
   the number of counters required to cover that working set percentage and
   the minimum counter value in that working set.  */
GCOV_LINKAGE void
compute_working_sets (const struct gcov_ctr_summary *summary,
		      gcov_working_set_t *gcov_working_sets)
{
  gcov_type working_set_cum_values[NUM_GCOV_WORKING_SETS];
  gcov_type ws_cum_hotness_incr;
  gcov_type cum, tmp_cum;
  const gcov_bucket_type *histo_bucket;
  unsigned ws_ix, c_num, count;
  int h_ix;
  /* Compute the amount of sum_all that the cumulative hotness grows
     by in each successive working set entry, which depends on the
     number of working set entries.  */
  ws_cum_hotness_incr = summary->sum_all / NUM_GCOV_WORKING_SETS;
  /* Next fill in an array of the cumulative hotness values corresponding
     to each working set summary entry we are going to compute below.
     Skip 0% statistics, which can be extrapolated from the
     rest of the summary data.  */
  cum = ws_cum_hotness_incr;
  for (ws_ix = 0; ws_ix < NUM_GCOV_WORKING_SETS;
       ws_ix++, cum += ws_cum_hotness_incr)
    working_set_cum_values[ws_ix] = cum;
  /* The last summary entry is reserved for (roughly) 99.9% of the
     working set.  Divide by 1024 so it becomes a shift, which gives
     almost exactly 99.9%.  */
  working_set_cum_values[NUM_GCOV_WORKING_SETS-1]
      = summary->sum_all - summary->sum_all/1024;
  /* Next, walk through the histogram in descending order of hotness
     and compute the statistics for the working set summary array.
     As histogram entries are accumulated, we check to see which
     working set entries have had their expected cum_value reached
     and fill them in, walking the working set entries in increasing
     size of cum_value.  */
  ws_ix = 0; /* The current entry into the working set array.  */
  cum = 0; /* The current accumulated counter sum.  */
  count = 0; /* The current accumulated count of block counters.  */
= GCOV_HISTOGRAM_SIZE
- 1;
952 h_ix
>= 0 && ws_ix
< NUM_GCOV_WORKING_SETS
; h_ix
--)
954 histo_bucket
= &summary
->histogram
[h_ix
];
      /* If we haven't reached the required cumulative counter value for
	 the current working set percentage, simply accumulate this histogram
	 entry into the running sums and continue to the next histogram
	 entry.  */
      if (cum + histo_bucket->cum_value < working_set_cum_values[ws_ix])
	{
	  cum += histo_bucket->cum_value;
	  count += histo_bucket->num_counters;
	  continue;
	}
      /* If adding the current histogram entry's cumulative counter value
	 causes us to exceed the current working set size, then estimate
	 how many of this histogram entry's counter values are required to
	 reach the working set size, and fill in working set entries
	 as we reach their expected cumulative value.  */
      for (c_num = 0, tmp_cum = cum;
	   c_num < histo_bucket->num_counters && ws_ix < NUM_GCOV_WORKING_SETS;
	   c_num++)
	{
	  count++;
	  /* If we haven't reached the last histogram entry counter, add
	     in the minimum value again.  This will underestimate the
	     cumulative sum so far, because many of the counter values in this
	     entry may have been larger than the minimum.  We could add in the
	     average value every time, but that would require an expensive
	     divide operation.  */
	  if (c_num + 1 < histo_bucket->num_counters)
	    tmp_cum += histo_bucket->min_value;
	  /* If we have reached the last histogram entry counter, then add
	     in the entire cumulative value.  */
	  else
	    tmp_cum = cum + histo_bucket->cum_value;
	  /* Next walk through successive working set entries and fill in
	     the statistics for any whose size we have reached by accumulating
	     this histogram counter.  */
	  while (ws_ix < NUM_GCOV_WORKING_SETS
		 && tmp_cum >= working_set_cum_values[ws_ix])
	    {
	      gcov_working_sets[ws_ix].num_counters = count;
	      gcov_working_sets[ws_ix].min_counter
		  = histo_bucket->min_value;
	      ws_ix++;
	    }
	}
      /* Finally, update the running cumulative value since we were
	 using a temporary above.  */
      cum += histo_bucket->cum_value;
    }
  gcc_assert (ws_ix == NUM_GCOV_WORKING_SETS);
}
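/* Illustrative sketch, not part of the original file: the target
   cumulative values with a hypothetical sum_all of 1024000 and 128
   working-set entries.  ws_cum_hotness_incr = 1024000 / 128 = 8000,
   giving targets 8000, 16000, 24000, ..., except that the final entry
   is replaced by 1024000 - 1024000 / 1024 = 1023000, i.e. roughly
   99.9% of sum_all.  Each gcov_working_sets[ws_ix] then records how
   many of the hottest counters, and how small a minimum counter, are
   needed to cover that share of the total execution count.  */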
#endif /* IN_GCOV <= 0 && !IN_LIBGCOV */