/* File format for coverage information
   Copyright (C) 1996-2016 Free Software Foundation, Inc.
   Contributed by Bob Manson <manson@cygnus.com>.
   Completely remangled by Nathan Sidwell <nathan@codesourcery.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
/* Routines declared in gcov-io.h.  This file should be #included by
   another source file, after having #included gcov-io.h.  */
static void gcov_write_block (unsigned);
static gcov_unsigned_t *gcov_write_words (unsigned);
static const gcov_unsigned_t *gcov_read_words (unsigned);
static void gcov_allocate (unsigned);

/* Optimum number of gcov_unsigned_t's read from or written to disk.  */
#define GCOV_BLOCK_SIZE (1 << 10)
struct gcov_var
{
  FILE *file;
  gcov_position_t start;	/* Position of first byte of block */
  unsigned offset;		/* Read/write position within the block.  */
  unsigned length;		/* Read limit in the block.  */
  unsigned overread;		/* Number of words overread.  */
  int error;			/* < 0 overflow, > 0 disk error.  */
  int mode;			/* < 0 writing, > 0 reading */
#if IN_LIBGCOV
  /* Holds one block plus 4 bytes, thus all coverage reads & writes
     fit within this buffer and we always can transfer GCOV_BLOCK_SIZE
     to and from the disk. libgcov never backtracks and only writes 4
     or 8 byte objects.  */
  gcov_unsigned_t buffer[GCOV_BLOCK_SIZE + 1];
#else
  int endian;			/* Swap endianness.  */
  /* Holds a variable length block, as the compiler can write
     strings and needs to backtrack.  */
  size_t alloc;
  gcov_unsigned_t *buffer;
#endif
} gcov_var;
/* Save the current position in the gcov file.  */
/* We need to expose this function when compiling for gcov-tool.  */
#if !IN_LIBGCOV || defined (IN_GCOV_TOOL)
static inline
#endif
gcov_position_t
gcov_position (void)
{
  gcov_nonruntime_assert (gcov_var.mode > 0);
  return gcov_var.start + gcov_var.offset;
}
/* Return nonzero if the error flag is set.  */
/* We need to expose this function when compiling for gcov-tool.  */
#if !IN_LIBGCOV || defined (IN_GCOV_TOOL)
static inline
#endif
int
gcov_is_error (void)
{
  return gcov_var.file ? gcov_var.error : 1;
}
#if IN_LIBGCOV
/* Move to beginning of file and initialize for writing.  */
GCOV_LINKAGE inline void
gcov_rewrite (void)
{
  gcov_var.mode = -1;
  gcov_var.start = 0;
  gcov_var.offset = 0;
  fseek (gcov_var.file, 0L, SEEK_SET);
}
#endif
static inline gcov_unsigned_t
from_file (gcov_unsigned_t value)
{
#if !IN_LIBGCOV
  if (gcov_var.endian)
    {
      value = (value >> 16) | (value << 16);
      value = ((value & 0xff00ff) << 8) | ((value >> 8) & 0xff00ff);
    }
#endif
  return value;
}
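/* Illustrative sketch, not part of the original sources: the two shift/mask
   steps in from_file together reverse all four bytes of a word, so a value
   written on a host of the opposite endianness reads back correctly.  The
   fragment below is kept under #if 0 so it is never compiled.  */
#if 0
static void
from_file_swap_example (void)
{
  gcov_unsigned_t value = 0x11223344;

  value = (value >> 16) | (value << 16);                         /* 0x33441122 */
  value = ((value & 0xff00ff) << 8) | ((value >> 8) & 0xff00ff); /* 0x44332211 */
  /* VALUE now holds the byte-reversed form of the original word.  */
}
#endif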
/* Open a gcov file. NAME is the name of the file to open and MODE
   indicates whether a new file should be created, or an existing file
   opened. If MODE is >= 0 an existing file will be opened, if
   possible, and if MODE is <= 0, a new file will be created. Use
   MODE=0 to attempt to reopen an existing file and then fall back on
   creating a new one.  If MODE > 0, the file will be opened in
   read-only mode.  Otherwise it will be opened for modification.
   Return zero on failure, >0 on opening an existing file and <0 on
   creating a new one.  */
GCOV_LINKAGE int
#if IN_LIBGCOV
gcov_open (const char *name)
#else
gcov_open (const char *name, int mode)
#endif
{
#if IN_LIBGCOV
  int mode = 0;
#endif
#if GCOV_LOCKED
  struct flock s_flock;
  int fd;

  s_flock.l_whence = SEEK_SET;
  s_flock.l_start = 0;
  s_flock.l_len = 0; /* Until EOF.  */
  s_flock.l_pid = getpid ();
#endif

  gcov_nonruntime_assert (!gcov_var.file);
  gcov_var.start = 0;
  gcov_var.offset = gcov_var.length = 0;
  gcov_var.overread = -1u;
  gcov_var.error = 0;
#if !IN_LIBGCOV
  gcov_var.endian = 0;
#endif

#if GCOV_LOCKED
  if (mode > 0)
    {
      /* Read-only mode - acquire a read-lock.  */
      s_flock.l_type = F_RDLCK;
      /* pass mode (ignored) for compatibility */
      fd = open (name, O_RDONLY, S_IRUSR | S_IWUSR);
    }
  else if (mode < 0)
    {
      /* Write mode - acquire a write-lock.  */
      s_flock.l_type = F_WRLCK;
      fd = open (name, O_RDWR | O_CREAT | O_TRUNC, 0666);
    }
  else /* mode == 0 */
    {
      /* Read-Write mode - acquire a write-lock.  */
      s_flock.l_type = F_WRLCK;
      fd = open (name, O_RDWR | O_CREAT, 0666);
    }
  if (fd < 0)
    return 0;

  while (fcntl (fd, F_SETLKW, &s_flock) && errno == EINTR)
    continue;

  gcov_var.file = fdopen (fd, (mode > 0) ? "rb" : "r+b");

  if (!gcov_var.file)
    {
      close (fd);
      return 0;
    }
  if (mode > 0)
    gcov_var.mode = 1;
  else if (mode == 0)
    {
      struct stat st;

      if (fstat (fd, &st) < 0)
        {
          fclose (gcov_var.file);
          gcov_var.file = 0;
          return 0;
        }
      if (st.st_size != 0)
        gcov_var.mode = 1;
      else
        gcov_var.mode = mode * 2 + 1;
    }
  else
    gcov_var.mode = mode * 2 + 1;
#else
  if (mode >= 0)
    /* Open an existing file.  */
    gcov_var.file = fopen (name, (mode > 0) ? "rb" : "r+b");

  if (gcov_var.file)
    gcov_var.mode = 1;
  else if (mode <= 0)
    {
      /* Create a new file.  */
      gcov_var.file = fopen (name, "w+b");

      if (gcov_var.file)
        gcov_var.mode = mode * 2 + 1;
    }
  if (!gcov_var.file)
    return 0;
#endif

  setbuf (gcov_var.file, (char *)0);

  return 1;
}
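/* Illustrative sketch, not part of the original sources: a typical read-side
   caller (such as gcov or gcov-dump) opens an existing file read-only,
   checks the result, and closes the file when done.  FILENAME is a
   hypothetical placeholder; the example is kept under #if 0.  */
#if 0
static int
gcov_open_example (const char *filename)
{
  /* MODE == 1: open an existing file read-only.  */
  if (!gcov_open (filename, 1))
    return 0;

  /* ... read records with gcov_read_unsigned () and friends ...  */

  /* gcov_close returns nonzero on failure or a set error flag.  */
  return !gcov_close ();
}
#endif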
/* Close the current gcov file. Flushes data to disk. Returns nonzero
   on failure or error flag set.  */

GCOV_LINKAGE int
gcov_close (void)
{
  if (gcov_var.file)
    {
      if (gcov_var.offset && gcov_var.mode < 0)
        gcov_write_block (gcov_var.offset);
      fclose (gcov_var.file);
      gcov_var.file = 0;
      gcov_var.length = 0;
    }
#if !IN_LIBGCOV
  free (gcov_var.buffer);
  gcov_var.alloc = 0;
  gcov_var.buffer = 0;
#endif
  gcov_var.mode = 0;
  return gcov_var.error;
}
#if !IN_LIBGCOV
/* Check if MAGIC is EXPECTED. Use it to determine endianness of the
   file. Returns +1 for same endian, -1 for other endian and zero for
   not EXPECTED.  */

GCOV_LINKAGE int
gcov_magic (gcov_unsigned_t magic, gcov_unsigned_t expected)
{
  if (magic == expected)
    return 1;
  magic = (magic >> 16) | (magic << 16);
  magic = ((magic & 0xff00ff) << 8) | ((magic >> 8) & 0xff00ff);
  if (magic == expected)
    {
      gcov_var.endian = 1;
      return -1;
    }
  return 0;
}
#endif
static void
gcov_allocate (unsigned length)
{
  size_t new_size = gcov_var.alloc;

  if (!new_size)
    new_size = GCOV_BLOCK_SIZE;
  new_size += length;
  new_size *= 2;

  gcov_var.alloc = new_size;
  gcov_var.buffer = XRESIZEVAR (gcov_unsigned_t, gcov_var.buffer, new_size << 2);
}
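/* Illustrative worked example, not part of the original sources: on the
   first call with LENGTH == 4 and an empty buffer, new_size starts at
   GCOV_BLOCK_SIZE (1024 words), grows to 1028, then doubles to 2056 words,
   i.e. 2056 << 2 == 8224 bytes are allocated.  Later calls keep doubling,
   so reallocation stays infrequent.  */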
/* Write out the current block, if needs be.  */

static void
gcov_write_block (unsigned size)
{
  if (fwrite (gcov_var.buffer, size << 2, 1, gcov_var.file) != 1)
    gcov_var.error = 1;
  gcov_var.start += size;
  gcov_var.offset -= size;
}
/* Allocate space to write BYTES bytes to the gcov file. Return a
   pointer to those bytes, or NULL on failure.  */

static gcov_unsigned_t *
gcov_write_words (unsigned words)
{
  gcov_unsigned_t *result;

  gcov_nonruntime_assert (gcov_var.mode < 0);
#if IN_LIBGCOV
  if (gcov_var.offset >= GCOV_BLOCK_SIZE)
    {
      gcov_write_block (GCOV_BLOCK_SIZE);
      if (gcov_var.offset)
        {
          memcpy (gcov_var.buffer, gcov_var.buffer + GCOV_BLOCK_SIZE, 4);
        }
    }
#else
  if (gcov_var.offset + words > gcov_var.alloc)
    gcov_allocate (gcov_var.offset + words);
#endif
  result = &gcov_var.buffer[gcov_var.offset];
  gcov_var.offset += words;

  return result;
}
/* Write unsigned VALUE to coverage file.  Sets error flag
   appropriately.  */

GCOV_LINKAGE void
gcov_write_unsigned (gcov_unsigned_t value)
{
  gcov_unsigned_t *buffer = gcov_write_words (1);

  buffer[0] = value;
}
#if IN_LIBGCOV
/* Write counter VALUE to coverage file.  Sets error flag
   appropriately.  */

GCOV_LINKAGE void
gcov_write_counter (gcov_type value)
{
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = (gcov_unsigned_t) value;
  if (sizeof (value) > sizeof (gcov_unsigned_t))
    buffer[1] = (gcov_unsigned_t) (value >> 32);
  else
    buffer[1] = 0;
}
#endif /* IN_LIBGCOV */
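/* Illustrative sketch, not part of the original sources: a 64-bit gcov_type
   counter is stored as two 32-bit words, low word first.  The example below
   (under #if 0, never compiled) shows the pair that gcov_write_counter would
   emit and that gcov_read_counter reassembles in the same order.  */
#if 0
static void
counter_layout_example (void)
{
  gcov_type value = 0x0000000500000001LL;
  gcov_unsigned_t low = (gcov_unsigned_t) value;	   /* 0x00000001 */
  gcov_unsigned_t high = (gcov_unsigned_t) (value >> 32); /* 0x00000005 */

  /* gcov_write_counter emits LOW then HIGH; gcov_read_counter ORs
     ((gcov_type) HIGH << 32) back onto LOW.  */
}
#endif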
#if !IN_LIBGCOV
/* Write STRING to coverage file.  Sets error flag on file
   error, overflow flag on overflow */

GCOV_LINKAGE void
gcov_write_string (const char *string)
{
  unsigned length = 0;
  unsigned alloc = 0;
  gcov_unsigned_t *buffer;

  if (string)
    {
      length = strlen (string);
      alloc = (length + 4) >> 2;
    }

  buffer = gcov_write_words (1 + alloc);

  buffer[0] = alloc;
  buffer[alloc] = 0;
  memcpy (&buffer[1], string, length);
}
#endif
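/* Illustrative worked example, not part of the original sources: a string is
   stored as a length word counting the payload words, followed by the
   NUL-padded characters.  For the 5-byte string "hello", alloc is
   (5 + 4) >> 2 == 2, so three words are written: { 2, "hell", "o\0\0\0" }
   (byte order within each word depends on the host).  */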
#if !IN_LIBGCOV
/* Write a tag TAG and reserve space for the record length. Return a
   value to be used for gcov_write_length.  */

GCOV_LINKAGE gcov_position_t
gcov_write_tag (gcov_unsigned_t tag)
{
  gcov_position_t result = gcov_var.start + gcov_var.offset;
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = tag;
  buffer[1] = 0;

  return result;
}
/* Write a record length using POSITION, which was returned by
   gcov_write_tag.  The current file position is the end of the
   record, and is restored before returning.  Returns nonzero on
   overflow.  */

GCOV_LINKAGE void
gcov_write_length (gcov_position_t position)
{
  unsigned offset;
  gcov_unsigned_t length;
  gcov_unsigned_t *buffer;

  gcov_nonruntime_assert (gcov_var.mode < 0);
  gcov_nonruntime_assert (position + 2 <= gcov_var.start + gcov_var.offset);
  gcov_nonruntime_assert (position >= gcov_var.start);
  offset = position - gcov_var.start;
  length = gcov_var.offset - offset - 2;
  buffer = (gcov_unsigned_t *) &gcov_var.buffer[offset];
  buffer[1] = length;
  if (gcov_var.offset >= GCOV_BLOCK_SIZE)
    gcov_write_block (gcov_var.offset);
}
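/* Illustrative sketch, not part of the original sources: a compiler-side
   caller typically brackets a record between gcov_write_tag and
   gcov_write_length, letting the length be back-patched once the record body
   is known.  GCOV_TAG_FUNCTION is used purely as an example tag and the zero
   words are hypothetical placeholders; kept under #if 0.  */
#if 0
static void
write_record_example (void)
{
  gcov_position_t position = gcov_write_tag (GCOV_TAG_FUNCTION);

  gcov_write_unsigned (0);	/* hypothetical function ident */
  gcov_write_unsigned (0);	/* hypothetical lineno checksum */
  gcov_write_unsigned (0);	/* hypothetical cfg checksum */

  /* Back-patch the record length now that the body has been emitted.  */
  gcov_write_length (position);
}
#endif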
#else /* IN_LIBGCOV */

/* Write a tag TAG and length LENGTH.  */

GCOV_LINKAGE void
gcov_write_tag_length (gcov_unsigned_t tag, gcov_unsigned_t length)
{
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = tag;
  buffer[1] = length;
}
/* Write a summary structure to the gcov file.  Return nonzero on
   overflow.  */

GCOV_LINKAGE void
gcov_write_summary (gcov_unsigned_t tag, const struct gcov_summary *summary)
{
  unsigned ix, h_ix, bv_ix, h_cnt = 0;
  const struct gcov_ctr_summary *csum;
  unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];

  /* Count number of non-zero histogram entries, and fill in a bit vector
     of non-zero indices. The histogram is only currently computed for arc
     counters.  */
  for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
    histo_bitvector[bv_ix] = 0;
  csum = &summary->ctrs[GCOV_COUNTER_ARCS];
  for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
    {
      if (csum->histogram[h_ix].num_counters > 0)
        {
          histo_bitvector[h_ix / 32] |= 1 << (h_ix % 32);
          h_cnt++;
        }
    }
  gcov_write_tag_length (tag, GCOV_TAG_SUMMARY_LENGTH (h_cnt));
  gcov_write_unsigned (summary->checksum);
  for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
    {
      gcov_write_unsigned (csum->num);
      gcov_write_unsigned (csum->runs);
      gcov_write_counter (csum->sum_all);
      gcov_write_counter (csum->run_max);
      gcov_write_counter (csum->sum_max);
      if (ix != GCOV_COUNTER_ARCS)
        {
          for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
            gcov_write_unsigned (0);
          continue;
        }
      for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
        gcov_write_unsigned (histo_bitvector[bv_ix]);
      for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
        {
          if (!csum->histogram[h_ix].num_counters)
            continue;
          gcov_write_unsigned (csum->histogram[h_ix].num_counters);
          gcov_write_counter (csum->histogram[h_ix].min_value);
          gcov_write_counter (csum->histogram[h_ix].cum_value);
        }
    }
}
#endif /* IN_LIBGCOV */
/* Return a pointer to read BYTES bytes from the gcov file. Returns
   NULL on failure (read past EOF).  */

static const gcov_unsigned_t *
gcov_read_words (unsigned words)
{
  const gcov_unsigned_t *result;
  unsigned excess = gcov_var.length - gcov_var.offset;

  if (gcov_var.mode <= 0)
    return NULL;

  if (excess < words)
    {
      gcov_var.start += gcov_var.offset;
#if IN_LIBGCOV
      if (excess)
        memcpy (gcov_var.buffer, gcov_var.buffer + gcov_var.offset, 4);
#else
      memmove (gcov_var.buffer, gcov_var.buffer + gcov_var.offset,
               excess * 4);
#endif
      gcov_var.offset = 0;
      gcov_var.length = excess;
#if IN_LIBGCOV
      excess = GCOV_BLOCK_SIZE;
#else
      if (gcov_var.length + words > gcov_var.alloc)
        gcov_allocate (gcov_var.length + words);
      excess = gcov_var.alloc - gcov_var.length;
#endif
      excess = fread (gcov_var.buffer + gcov_var.length,
                      1, excess << 2, gcov_var.file) >> 2;
      gcov_var.length += excess;
      if (gcov_var.length < words)
        {
          gcov_var.overread += words - gcov_var.length;
          gcov_var.length = 0;
          return NULL;
        }
    }
  result = &gcov_var.buffer[gcov_var.offset];
  gcov_var.offset += words;
  return result;
}
/* Read unsigned value from a coverage file. Sets error flag on file
   error, overflow flag on overflow */

GCOV_LINKAGE gcov_unsigned_t
gcov_read_unsigned (void)
{
  gcov_unsigned_t value;
  const gcov_unsigned_t *buffer = gcov_read_words (1);

  if (!buffer)
    return 0;
  value = from_file (buffer[0]);
  return value;
}
/* Read counter value from a coverage file. Sets error flag on file
   error, overflow flag on overflow */

GCOV_LINKAGE gcov_type
gcov_read_counter (void)
{
  gcov_type value;
  const gcov_unsigned_t *buffer = gcov_read_words (2);

  if (!buffer)
    return 0;
  value = from_file (buffer[0]);
  if (sizeof (value) > sizeof (gcov_unsigned_t))
    value |= ((gcov_type) from_file (buffer[1])) << 32;
  else if (buffer[1])
    gcov_var.error = -1;

  return value;
}
/* We need to expose the below function when compiling for gcov-tool.  */

#if !IN_LIBGCOV || defined (IN_GCOV_TOOL)
/* Read string from coverage file. Returns a pointer to a static
   buffer, or NULL on empty string. You must copy the string before
   calling another gcov function.  */

GCOV_LINKAGE const char *
gcov_read_string (void)
{
  unsigned length = gcov_read_unsigned ();

  if (!length)
    return 0;

  return (const char *) gcov_read_words (length);
}
#endif
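/* Illustrative sketch, not part of the original sources: because
   gcov_read_string returns a pointer into the shared read buffer, callers
   that need the value past the next gcov read must copy it first, e.g. with
   xstrdup in the compiler or strdup elsewhere.  Kept under #if 0.  */
#if 0
static char *
read_string_copy_example (void)
{
  const char *name = gcov_read_string ();

  /* NULL means the empty string; copy before any further gcov call.  */
  return name ? strdup (name) : NULL;
}
#endif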
GCOV_LINKAGE void
gcov_read_summary (struct gcov_summary *summary)
{
  unsigned ix, h_ix, bv_ix, h_cnt = 0;
  struct gcov_ctr_summary *csum;
  unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];
  unsigned cur_bitvector;

  summary->checksum = gcov_read_unsigned ();
  for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
    {
      csum->num = gcov_read_unsigned ();
      csum->runs = gcov_read_unsigned ();
      csum->sum_all = gcov_read_counter ();
      csum->run_max = gcov_read_counter ();
      csum->sum_max = gcov_read_counter ();
      memset (csum->histogram, 0,
              sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
      for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
        {
          histo_bitvector[bv_ix] = gcov_read_unsigned ();
#if IN_LIBGCOV
          /* When building libgcov we don't include system.h, which includes
             hwint.h (where popcount_hwi is declared). However, libgcov.a
             is built by the bootstrapped compiler and therefore the builtins
             are always available.  */
          h_cnt += __builtin_popcount (histo_bitvector[bv_ix]);
#else
          h_cnt += popcount_hwi (histo_bitvector[bv_ix]);
#endif
        }
      bv_ix = 0;
      h_ix = 0;
      cur_bitvector = 0;
      while (h_cnt--)
        {
          /* Find the index corresponding to the next entry we will read in.
             First find the next non-zero bitvector and re-initialize
             the histogram index accordingly, then right shift and increment
             the index until we find a set bit.  */
          while (!cur_bitvector)
            {
              h_ix = bv_ix * 32;
              if (bv_ix >= GCOV_HISTOGRAM_BITVECTOR_SIZE)
                gcov_error ("corrupted profile info: summary histogram "
                            "bitvector is corrupt");
              cur_bitvector = histo_bitvector[bv_ix++];
            }
          while (!(cur_bitvector & 0x1))
            {
              h_ix++;
              cur_bitvector >>= 1;
            }
          if (h_ix >= GCOV_HISTOGRAM_SIZE)
            gcov_error ("corrupted profile info: summary histogram "
                        "index is corrupt");

          csum->histogram[h_ix].num_counters = gcov_read_unsigned ();
          csum->histogram[h_ix].min_value = gcov_read_counter ();
          csum->histogram[h_ix].cum_value = gcov_read_counter ();
          /* Shift off the index we are done with and increment to the
             corresponding next histogram entry.  */
          cur_bitvector >>= 1;
          h_ix++;
        }
    }
}
/* We need to expose the below function when compiling for gcov-tool.  */

#if !IN_LIBGCOV || defined (IN_GCOV_TOOL)
/* Reset to a known position.  BASE should have been obtained from
   gcov_position, LENGTH should be a record length.  */

GCOV_LINKAGE void
gcov_sync (gcov_position_t base, gcov_unsigned_t length)
{
  gcov_nonruntime_assert (gcov_var.mode > 0);
  base += length;
  if (base - gcov_var.start <= gcov_var.length)
    gcov_var.offset = base - gcov_var.start;
  else
    {
      gcov_var.offset = gcov_var.length = 0;
      fseek (gcov_var.file, base << 2, SEEK_SET);
      gcov_var.start = ftell (gcov_var.file) >> 2;
    }
}
#endif
#if IN_LIBGCOV
/* Move to a given position in a gcov file.  */

GCOV_LINKAGE void
gcov_seek (gcov_position_t base)
{
  if (gcov_var.offset)
    gcov_write_block (gcov_var.offset);
  fseek (gcov_var.file, base << 2, SEEK_SET);
  gcov_var.start = ftell (gcov_var.file) >> 2;
}
#endif
#if !IN_LIBGCOV
/* Return the modification time of the current gcov file.  */

GCOV_LINKAGE time_t
gcov_time (void)
{
  struct stat status;

  if (fstat (fileno (gcov_var.file), &status))
    return 0;
  else
    return status.st_mtime;
}
#endif /* !IN_LIBGCOV */
#if !IN_GCOV
/* Determine the index into histogram for VALUE.  */

#if IN_LIBGCOV
static unsigned
#else
GCOV_LINKAGE unsigned
#endif
gcov_histo_index (gcov_type value)
{
  gcov_type_unsigned v = (gcov_type_unsigned) value;
  unsigned r = 0;
  unsigned prev2bits = 0;

  /* Find index into log2 scale histogram, where each of the log2
     sized buckets is divided into 4 linear sub-buckets for better
     focus in the higher buckets.  */

  /* Find the place of the most-significant bit set.  */
  if (v > 0)
    {
#if IN_LIBGCOV
      /* When building libgcov we don't include system.h, which includes
         hwint.h (where floor_log2 is declared). However, libgcov.a
         is built by the bootstrapped compiler and therefore the builtins
         are always available.  */
      r = sizeof (long long) * __CHAR_BIT__ - 1 - __builtin_clzll (v);
#else
      /* We use floor_log2 from hwint.c, which takes a HOST_WIDE_INT
         that is 64 bits and gcov_type_unsigned is 64 bits.  */
      r = floor_log2 (v);
#endif
    }

  /* If at most the 2 least significant bits are set (value is
     0 - 3) then that value is our index into the lowest set of
     four buckets.  */
  if (r < 2)
    return (unsigned) value;

  gcov_nonruntime_assert (r < 64);

  /* Find the two next most significant bits to determine which
     of the four linear sub-buckets to select.  */
  prev2bits = (v >> (r - 2)) & 0x3;
  /* Finally, compose the final bucket index from the log2 index and
     the next 2 bits. The minimum r value at this point is 2 since we
     returned above if r was less than 2, so the minimum bucket at this
     point is 4.  */
  return (r - 1) * 4 + prev2bits;
}
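/* Illustrative worked example, not part of the original sources: for
   VALUE == 100 (0x64) the most significant set bit is bit 6, so r == 6;
   the next two bits are (100 >> 4) & 0x3 == 2, giving bucket
   (6 - 1) * 4 + 2 == 22.  Values 0 - 3 map directly to buckets 0 - 3.  */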
/* Merge SRC_HISTO into TGT_HISTO. The counters are assumed to be in
   the same relative order in both histograms, and are matched up
   and merged in reverse order. Each counter is assigned an equal portion of
   its entry's original cumulative counter value when computing the
   new merged cum_value.  */

static void gcov_histogram_merge (gcov_bucket_type *tgt_histo,
                                  gcov_bucket_type *src_histo)
{
  int src_i, tgt_i, tmp_i = 0;
  unsigned src_num, tgt_num, merge_num;
  gcov_type src_cum, tgt_cum, merge_src_cum, merge_tgt_cum, merge_cum;
  gcov_type merge_min;
  gcov_bucket_type tmp_histo[GCOV_HISTOGRAM_SIZE];
  int src_done = 0;

  memset (tmp_histo, 0, sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);

  /* Assume that the counters are in the same relative order in both
     histograms. Walk the histograms from largest to smallest entry,
     matching up and combining counters in order.  */
  src_num = 0;
  src_cum = 0;
  src_i = GCOV_HISTOGRAM_SIZE - 1;
  for (tgt_i = GCOV_HISTOGRAM_SIZE - 1; tgt_i >= 0 && !src_done; tgt_i--)
    {
      tgt_num = tgt_histo[tgt_i].num_counters;
      tgt_cum = tgt_histo[tgt_i].cum_value;
      /* Keep going until all of the target histogram's counters at this
         position have been matched and merged with counters from the
         source histogram.  */
      while (tgt_num > 0 && !src_done)
        {
          /* If this is either the first time through this loop or we just
             exhausted the previous non-zero source histogram entry, look
             for the next non-zero source histogram entry.  */
          if (!src_num)
            {
              /* Locate the next non-zero entry.  */
              while (src_i >= 0 && !src_histo[src_i].num_counters)
                src_i--;
              /* If source histogram has fewer counters, then just copy over the
                 remaining target counters and quit.  */
              if (src_i < 0)
                {
                  tmp_histo[tgt_i].num_counters += tgt_num;
                  tmp_histo[tgt_i].cum_value += tgt_cum;
                  if (!tmp_histo[tgt_i].min_value ||
                      tgt_histo[tgt_i].min_value < tmp_histo[tgt_i].min_value)
                    tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
                  while (--tgt_i >= 0)
                    {
                      tmp_histo[tgt_i].num_counters
                          += tgt_histo[tgt_i].num_counters;
                      tmp_histo[tgt_i].cum_value += tgt_histo[tgt_i].cum_value;
                      if (!tmp_histo[tgt_i].min_value ||
                          tgt_histo[tgt_i].min_value
                          < tmp_histo[tgt_i].min_value)
                        tmp_histo[tgt_i].min_value
                            = tgt_histo[tgt_i].min_value;
                    }

                  src_done = 1;
                  break;
                }

              src_num = src_histo[src_i].num_counters;
              src_cum = src_histo[src_i].cum_value;
            }

          /* The number of counters to merge on this pass is the minimum
             of the remaining counters from the current target and source
             histogram entries.  */
          merge_num = tgt_num;
          if (src_num < merge_num)
            merge_num = src_num;

          /* The merged min_value is the sum of the min_values from target
             and source.  */
          merge_min = tgt_histo[tgt_i].min_value + src_histo[src_i].min_value;

          /* Compute the portion of source and target entries' cum_value
             that will be apportioned to the counters being merged.
             The total remaining cum_value from each entry is divided
             equally among the counters from that histogram entry if we
             are not merging all of them.  */
          merge_src_cum = src_cum;
          if (merge_num < src_num)
            merge_src_cum = merge_num * src_cum / src_num;
          merge_tgt_cum = tgt_cum;
          if (merge_num < tgt_num)
            merge_tgt_cum = merge_num * tgt_cum / tgt_num;
          /* The merged cum_value is the sum of the source and target
             components.  */
          merge_cum = merge_src_cum + merge_tgt_cum;

          /* Update the remaining number of counters and cum_value left
             to be merged from this source and target entry.  */
          src_cum -= merge_src_cum;
          tgt_cum -= merge_tgt_cum;
          src_num -= merge_num;
          tgt_num -= merge_num;

          /* The merged counters get placed in the new merged histogram
             at the entry for the merged min_value.  */
          tmp_i = gcov_histo_index (merge_min);
          gcov_nonruntime_assert (tmp_i < GCOV_HISTOGRAM_SIZE);
          tmp_histo[tmp_i].num_counters += merge_num;
          tmp_histo[tmp_i].cum_value += merge_cum;
          if (!tmp_histo[tmp_i].min_value ||
              merge_min < tmp_histo[tmp_i].min_value)
            tmp_histo[tmp_i].min_value = merge_min;

          /* Ensure the search for the next non-zero src_histo entry starts
             at the next smallest histogram bucket.  */
          if (!src_num)
            src_i--;
        }
    }

  gcov_nonruntime_assert (tgt_i < 0);

  /* In the case where there were more counters in the source histogram,
     accumulate the remaining unmerged cumulative counter values. Add
     those to the smallest non-zero target histogram entry. Otherwise,
     the total cumulative counter values in the histogram will be smaller
     than the sum_all stored in the summary, which will complicate
     computing the working set information from the histogram later on.  */
  if (src_num || src_i >= 0)
    {
      while (src_i >= 0)
        {
          src_cum += src_histo[src_i].cum_value;
          src_i--;
        }
      /* At this point, tmp_i should be the smallest non-zero entry in the
         tmp_histo.  */
      gcov_nonruntime_assert (tmp_i >= 0 && tmp_i < GCOV_HISTOGRAM_SIZE
                              && tmp_histo[tmp_i].num_counters > 0);
      tmp_histo[tmp_i].cum_value += src_cum;
    }

  /* Finally, copy the merged histogram into tgt_histo.  */
  memcpy (tgt_histo, tmp_histo,
          sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
}
#endif /* !IN_GCOV */
/* This is used by gcov-dump (IN_GCOV == -1) and in the compiler
   (!IN_GCOV && !IN_LIBGCOV).  */
#if IN_GCOV <= 0 && !IN_LIBGCOV
/* Compute the working set information from the counter histogram in
   the profile summary.  This is an array of information corresponding to a
   range of percentages of the total execution count (sum_all), and includes
   the number of counters required to cover that working set percentage and
   the minimum counter value in that working set.  */

GCOV_LINKAGE void
compute_working_sets (const struct gcov_ctr_summary *summary,
                      gcov_working_set_t *gcov_working_sets)
{
  gcov_type working_set_cum_values[NUM_GCOV_WORKING_SETS];
  gcov_type ws_cum_hotness_incr;
  gcov_type cum, tmp_cum;
  const gcov_bucket_type *histo_bucket;
  unsigned ws_ix, c_num, count;
  int h_ix;

  /* Compute the amount of sum_all that the cumulative hotness grows
     by in each successive working set entry, which depends on the
     number of working set entries.  */
  ws_cum_hotness_incr = summary->sum_all / NUM_GCOV_WORKING_SETS;

  /* Next fill in an array of the cumulative hotness values corresponding
     to each working set summary entry we are going to compute below.
     Skip 0% statistics, which can be extrapolated from the
     rest of the summary data.  */
  cum = ws_cum_hotness_incr;
  for (ws_ix = 0; ws_ix < NUM_GCOV_WORKING_SETS;
       ws_ix++, cum += ws_cum_hotness_incr)
    working_set_cum_values[ws_ix] = cum;
  /* The last summary entry is reserved for (roughly) 99.9% of the
     working set. Divide by 1024 so it becomes a shift, which gives
     almost exactly 99.9%.  */
  working_set_cum_values[NUM_GCOV_WORKING_SETS - 1]
      = summary->sum_all - summary->sum_all / 1024;

  /* Next, walk through the histogram in descending order of hotness
     and compute the statistics for the working set summary array.
     As histogram entries are accumulated, we check to see which
     working set entries have had their expected cum_value reached
     and fill them in, walking the working set entries in increasing
     size of cum_value.  */
  ws_ix = 0; /* The current entry into the working set array.  */
  cum = 0; /* The current accumulated counter sum.  */
  count = 0; /* The current accumulated count of block counters.  */
  for (h_ix = GCOV_HISTOGRAM_SIZE - 1;
       h_ix >= 0 && ws_ix < NUM_GCOV_WORKING_SETS; h_ix--)
    {
      histo_bucket = &summary->histogram[h_ix];

      /* If we haven't reached the required cumulative counter value for
         the current working set percentage, simply accumulate this histogram
         entry into the running sums and continue to the next histogram
         entry.  */
      if (cum + histo_bucket->cum_value < working_set_cum_values[ws_ix])
        {
          cum += histo_bucket->cum_value;
          count += histo_bucket->num_counters;
          continue;
        }

      /* If adding the current histogram entry's cumulative counter value
         causes us to exceed the current working set size, then estimate
         how many of this histogram entry's counter values are required to
         reach the working set size, and fill in working set entries
         as we reach their expected cumulative value.  */
      for (c_num = 0, tmp_cum = cum;
           c_num < histo_bucket->num_counters && ws_ix < NUM_GCOV_WORKING_SETS;
           c_num++)
        {
          count++;
          /* If we haven't reached the last histogram entry counter, add
             in the minimum value again. This will underestimate the
             cumulative sum so far, because many of the counter values in this
             entry may have been larger than the minimum. We could add in the
             average value every time, but that would require an expensive
             divide operation.  */
          if (c_num + 1 < histo_bucket->num_counters)
            tmp_cum += histo_bucket->min_value;
          /* If we have reached the last histogram entry counter, then add
             in the entire cumulative value.  */
          else
            tmp_cum = cum + histo_bucket->cum_value;

          /* Next walk through successive working set entries and fill in
             the statistics for any whose size we have reached by accumulating
             this histogram counter.  */
          while (ws_ix < NUM_GCOV_WORKING_SETS
                 && tmp_cum >= working_set_cum_values[ws_ix])
            {
              gcov_working_sets[ws_ix].num_counters = count;
              gcov_working_sets[ws_ix].min_counter
                  = histo_bucket->min_value;
              ws_ix++;
            }
        }

      /* Finally, update the running cumulative value since we were
         using a temporary above.  */
      cum += histo_bucket->cum_value;
    }
  gcov_nonruntime_assert (ws_ix == NUM_GCOV_WORKING_SETS);
}
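/* Illustrative worked example, not part of the original sources: assuming
   NUM_GCOV_WORKING_SETS == 128 and sum_all == 1280, ws_cum_hotness_incr is
   10, so the working set targets are 10, 20, ..., with the final entry
   overridden to 1280 - 1280/1024 == 1279, i.e. roughly the 99.9% point.  */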
#endif /* IN_GCOV <= 0 && !IN_LIBGCOV */