/* File format for coverage information
   Copyright (C) 1996-2016 Free Software Foundation, Inc.
   Contributed by Bob Manson <manson@cygnus.com>.
   Completely remangled by Nathan Sidwell <nathan@codesourcery.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
/* Routines declared in gcov-io.h.  This file should be #included by
   another source file, after having #included gcov-io.h.  */
static void gcov_write_block (unsigned);
static gcov_unsigned_t *gcov_write_words (unsigned);
static const gcov_unsigned_t *gcov_read_words (unsigned);
static void gcov_allocate (unsigned);
/* Optimum number of gcov_unsigned_t's read from or written to disk.  */
#define GCOV_BLOCK_SIZE (1 << 10)
struct gcov_var
{
  FILE *file;
  gcov_position_t start;	/* Position of first byte of block */
  unsigned offset;		/* Read/write position within the block.  */
  unsigned length;		/* Read limit in the block.  */
  unsigned overread;		/* Number of words overread.  */
  int error;			/* < 0 overflow, > 0 disk error.  */
  int mode;			/* < 0 writing, > 0 reading */
#if IN_LIBGCOV
  /* Holds one block plus 4 bytes, thus all coverage reads & writes
     fit within this buffer and we always can transfer GCOV_BLOCK_SIZE
     to and from the disk.  libgcov never backtracks and only writes 4
     or 8 byte objects.  */
  gcov_unsigned_t buffer[GCOV_BLOCK_SIZE + 1];
#else
  int endian;			/* Swap endianness.  */
  /* Holds a variable length block, as the compiler can write
     strings and needs to backtrack.  */
  size_t alloc;
  gcov_unsigned_t *buffer;
#endif
} gcov_var;
/* Save the current position in the gcov file.  */
/* We need to expose this function when compiling for gcov-tool.  */

GCOV_LINKAGE gcov_position_t
gcov_position (void)
{
  gcov_nonruntime_assert (gcov_var.mode > 0);
  return gcov_var.start + gcov_var.offset;
}
/* Return nonzero if the error flag is set.  */
/* We need to expose this function when compiling for gcov-tool.  */

GCOV_LINKAGE int
gcov_is_error (void)
{
  return gcov_var.file ? gcov_var.error : 1;
}
/* Move to beginning of file and initialize for writing.  */

GCOV_LINKAGE inline void
gcov_rewrite (void)
{
  gcov_var.mode = -1;
  gcov_var.start = 0;
  gcov_var.offset = 0;
  fseek (gcov_var.file, 0L, SEEK_SET);
}
static inline gcov_unsigned_t
from_file (gcov_unsigned_t value)
{
#if !IN_LIBGCOV
  if (gcov_var.endian)
    {
      value = (value >> 16) | (value << 16);
      value = ((value & 0xff00ff) << 8) | ((value >> 8) & 0xff00ff);
    }
#endif
  return value;
}
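
/* Illustrative sketch (not part of the original source): the two
   shift/mask steps used above amount to reversing all four bytes of a
   32-bit word.  The helper name and the example value are made up for
   illustration; the block is guarded out so it is never compiled.  */
#if 0
#include <stdint.h>

static uint32_t
swap32_example (uint32_t value)
{
  value = (value >> 16) | (value << 16);			 /* swap 16-bit halves */
  value = ((value & 0xff00ff) << 8) | ((value >> 8) & 0xff00ff); /* swap bytes within each half */
  return value;
}

/* e.g. swap32_example (0x11223344) == 0x44332211.  */
#endif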
/* Open a gcov file. NAME is the name of the file to open and MODE
   indicates whether a new file should be created, or an existing file
   opened. If MODE is >= 0 an existing file will be opened, if
   possible, and if MODE is <= 0, a new file will be created. Use
   MODE=0 to attempt to reopen an existing file and then fall back on
   creating a new one.  If MODE < 0, the file will be opened in
   read-only mode.  Otherwise it will be opened for modification.
   Return zero on failure, >0 on opening an existing file and <0 on
   creating a new one.  */
GCOV_LINKAGE int
#if IN_LIBGCOV
gcov_open (const char *name)
#else
gcov_open (const char *name, int mode)
#endif
{
#if GCOV_LOCKED
  struct flock s_flock;
  int fd;

  s_flock.l_whence = SEEK_SET;
  s_flock.l_start = 0;
  s_flock.l_len = 0;		/* Until EOF.  */
  s_flock.l_pid = getpid ();
#endif

  gcov_nonruntime_assert (!gcov_var.file);
  gcov_var.start = 0;
  gcov_var.offset = gcov_var.length = 0;
  gcov_var.overread = -1u;
  gcov_var.error = 0;
#if GCOV_LOCKED
  if (mode > 0)
    {
      /* Read-only mode - acquire a read-lock.  */
      s_flock.l_type = F_RDLCK;
      /* pass mode (ignored) for compatibility */
      fd = open (name, O_RDONLY, S_IRUSR | S_IWUSR);
    }
  else if (mode < 0)
    {
      /* Write mode - acquire a write-lock.  */
      s_flock.l_type = F_WRLCK;
      fd = open (name, O_RDWR | O_CREAT | O_TRUNC, 0666);
    }
  else /* mode == 0 */
    {
      /* Read-Write mode - acquire a write-lock.  */
      s_flock.l_type = F_WRLCK;
      fd = open (name, O_RDWR | O_CREAT, 0666);
    }
  if (fd < 0)
    return 0;

  while (fcntl (fd, F_SETLKW, &s_flock) && errno == EINTR)
    continue;

  gcov_var.file = fdopen (fd, (mode > 0) ? "rb" : "r+b");
  if (!gcov_var.file)
    {
      close (fd);
      return 0;
    }

  if (mode > 0)
    gcov_var.mode = 1;
  else if (mode == 0)
    {
      struct stat st;

      if (fstat (fd, &st) < 0)
        {
          fclose (gcov_var.file);
          gcov_var.file = 0;
          return 0;
        }
      if (st.st_size != 0)
        gcov_var.mode = 1;
      else
        gcov_var.mode = mode * 2 + 1;
    }
  else
    gcov_var.mode = mode * 2 + 1;
#else
  if (mode >= 0)
    /* Open an existing file.  */
    gcov_var.file = fopen (name, (mode > 0) ? "rb" : "r+b");

  if (gcov_var.file)
    mode = 1;
  else if (mode <= 0)
    /* Create a new file.  */
    gcov_var.file = fopen (name, "w+b");

  if (!gcov_var.file)
    return 0;

  gcov_var.mode = mode * 2 + 1;
#endif

  setbuf (gcov_var.file, (char *)0);

  return 1;
}
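
/* Illustrative sketch (not part of the original source): how a reader
   such as gcov or gcov-dump typically drives this interface.  Mode 1
   requests read-only access per the comment above gcov_open; the
   GCOV_DATA_MAGIC tag comes from gcov-io.h.  The helper name is made up;
   the block is guarded out so it is never compiled.  */
#if 0
static int
read_header_example (const char *filename)
{
  gcov_unsigned_t version, stamp;

  if (!gcov_open (filename, 1))		/* open an existing file read-only */
    return 0;
  /* The first word is the magic; gcov_magic also records endianness.  */
  if (!gcov_magic (gcov_read_unsigned (), GCOV_DATA_MAGIC))
    {
      gcov_close ();
      return 0;
    }
  version = gcov_read_unsigned ();	/* file format version */
  stamp = gcov_read_unsigned ();	/* time stamp */
  (void) version; (void) stamp;
  return !gcov_close ();		/* nonzero iff no error was recorded */
}
#endif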
/* Close the current gcov file. Flushes data to disk. Returns nonzero
   on failure or error flag set.  */

GCOV_LINKAGE int
gcov_close (void)
{
  if (gcov_var.file)
    {
      if (gcov_var.offset && gcov_var.mode < 0)
        gcov_write_block (gcov_var.offset);
      fclose (gcov_var.file);
      gcov_var.file = 0;
    }
  free (gcov_var.buffer);
  gcov_var.buffer = 0;
  gcov_var.mode = 0;
  return gcov_var.error;
}
/* Check if MAGIC is EXPECTED. Use it to determine endianness of the
   file. Returns +1 for same endian, -1 for other endian and zero for
   not EXPECTED.  */

GCOV_LINKAGE int
gcov_magic (gcov_unsigned_t magic, gcov_unsigned_t expected)
{
  if (magic == expected)
    return 1;
  magic = (magic >> 16) | (magic << 16);
  magic = ((magic & 0xff00ff) << 8) | ((magic >> 8) & 0xff00ff);
  if (magic == expected)
    {
      gcov_var.endian = 1;
      return -1;
    }
  return 0;
}
static void
gcov_allocate (unsigned length)
{
  size_t new_size = gcov_var.alloc;

  if (!new_size)
    new_size = GCOV_BLOCK_SIZE;
  new_size += length;
  new_size *= 2;

  gcov_var.alloc = new_size;
  gcov_var.buffer = XRESIZEVAR (gcov_unsigned_t, gcov_var.buffer, new_size << 2);
}
/* Write out the current block, if needs be.  */

static void
gcov_write_block (unsigned size)
{
  if (fwrite (gcov_var.buffer, size << 2, 1, gcov_var.file) != 1)
    gcov_var.error = 1;
  gcov_var.start += size;
  gcov_var.offset -= size;
}
/* Allocate space to write BYTES bytes to the gcov file. Return a
   pointer to those bytes, or NULL on failure.  */

static gcov_unsigned_t *
gcov_write_words (unsigned words)
{
  gcov_unsigned_t *result;

  gcov_nonruntime_assert (gcov_var.mode < 0);
#if IN_LIBGCOV
  if (gcov_var.offset >= GCOV_BLOCK_SIZE)
    {
      gcov_write_block (GCOV_BLOCK_SIZE);
      if (gcov_var.offset)
        memcpy (gcov_var.buffer, gcov_var.buffer + GCOV_BLOCK_SIZE, 4);
    }
#else
  if (gcov_var.offset + words > gcov_var.alloc)
    gcov_allocate (gcov_var.offset + words);
#endif
  result = &gcov_var.buffer[gcov_var.offset];
  gcov_var.offset += words;

  return result;
}
/* Write unsigned VALUE to coverage file.  Sets error flag
   appropriately.  */

GCOV_LINKAGE void
gcov_write_unsigned (gcov_unsigned_t value)
{
  gcov_unsigned_t *buffer = gcov_write_words (1);

  buffer[0] = value;
}
/* Write counter VALUE to coverage file.  Sets error flag
   appropriately.  */

GCOV_LINKAGE void
gcov_write_counter (gcov_type value)
{
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = (gcov_unsigned_t) value;
  if (sizeof (value) > sizeof (gcov_unsigned_t))
    buffer[1] = (gcov_unsigned_t) (value >> 32);
  else
    buffer[1] = 0;
}
#endif /* IN_LIBGCOV */
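
/* Illustrative sketch (not part of the original source): a gcov_type
   counter is stored as two 32-bit words, low half first, as written by
   gcov_write_counter above and reassembled by gcov_read_counter below.
   The helper name is made up; guarded out, never compiled.  */
#if 0
#include <stdint.h>

static void
split_counter_example (int64_t value, uint32_t words[2])
{
  words[0] = (uint32_t) value;		/* low 32 bits, written first */
  words[1] = (uint32_t) (value >> 32);	/* high 32 bits */
}

/* e.g. value 0x100000002 is stored as words[0] == 2, words[1] == 1.  */
#endif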
/* Write STRING to coverage file.  Sets error flag on file
   error, overflow flag on overflow */

GCOV_LINKAGE void
gcov_write_string (const char *string)
{
  unsigned length = 0;
  unsigned alloc = 0;
  gcov_unsigned_t *buffer;

  if (string)
    {
      length = strlen (string);
      alloc = (length + 4) >> 2;
    }

  buffer = gcov_write_words (1 + alloc);

  buffer[0] = alloc;
  buffer[alloc] = 0;	/* place NUL terminators */
  memcpy (&buffer[1], string, length);
}
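
/* Illustrative sketch (not part of the original source): on disk a
   string record is one word holding the padded payload length in words,
   followed by that many words of characters, NUL padded, as written by
   gcov_write_string above.  The helper name is made up; guarded out,
   never compiled.  */
#if 0
#include <string.h>

static unsigned
string_record_words_example (const char *s)
{
  unsigned length = s ? strlen (s) : 0;
  unsigned alloc = s ? (length + 4) >> 2 : 0;	/* payload words, with at least one NUL byte */
  return 1 + alloc;				/* plus the leading length word */
}

/* e.g. "foo" needs 1 + 1 words ("foo\0"); "main" needs 1 + 2 words
   ("main" plus four NUL bytes of padding).  */
#endif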
/* Write a tag TAG and reserve space for the record length. Return a
   value to be used for gcov_write_length.  */

GCOV_LINKAGE gcov_position_t
gcov_write_tag (gcov_unsigned_t tag)
{
  gcov_position_t result = gcov_var.start + gcov_var.offset;
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = tag;
  buffer[1] = 0;

  return result;
}
/* Write a record length using POSITION, which was returned by
   gcov_write_tag.  The current file position is the end of the
   record, and is restored before returning.  Returns nonzero on
   overflow.  */

GCOV_LINKAGE void
gcov_write_length (gcov_position_t position)
{
  unsigned offset;
  gcov_unsigned_t length;
  gcov_unsigned_t *buffer;

  gcov_nonruntime_assert (gcov_var.mode < 0);
  gcov_nonruntime_assert (position + 2 <= gcov_var.start + gcov_var.offset);
  gcov_nonruntime_assert (position >= gcov_var.start);
  offset = position - gcov_var.start;
  length = gcov_var.offset - offset - 2;
  buffer = (gcov_unsigned_t *) &gcov_var.buffer[offset];
  buffer[1] = length;
  if (gcov_var.offset >= GCOV_BLOCK_SIZE)
    gcov_write_block (gcov_var.offset);
}
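
/* Illustrative sketch (not part of the original source): the usual
   pattern in the compiler is to emit a tag, write the record payload,
   and then backpatch the length word once the payload size is known.
   The record shown (GCOV_TAG_FUNCTION with two payload words) is only
   an example; guarded out, never compiled.  */
#if 0
static void
write_record_example (gcov_unsigned_t ident, gcov_unsigned_t checksum)
{
  gcov_position_t position = gcov_write_tag (GCOV_TAG_FUNCTION);
  gcov_write_unsigned (ident);		/* record payload */
  gcov_write_unsigned (checksum);
  gcov_write_length (position);		/* backpatch length = words written since the tag */
}
#endif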
#else /* IN_LIBGCOV */

/* Write a tag TAG and length LENGTH.  */

GCOV_LINKAGE void
gcov_write_tag_length (gcov_unsigned_t tag, gcov_unsigned_t length)
{
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = tag;
  buffer[1] = length;
}
/* Write a summary structure to the gcov file.  Return nonzero on
   overflow.  */

GCOV_LINKAGE void
gcov_write_summary (gcov_unsigned_t tag, const struct gcov_summary *summary)
{
  unsigned ix, h_ix, bv_ix, h_cnt = 0;
  const struct gcov_ctr_summary *csum;
  unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];

  /* Count number of non-zero histogram entries, and fill in a bit vector
     of non-zero indices. The histogram is only currently computed for arc
     counters.  */
  for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
    histo_bitvector[bv_ix] = 0;
  csum = &summary->ctrs[GCOV_COUNTER_ARCS];
  for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
    {
      if (csum->histogram[h_ix].num_counters > 0)
        {
          histo_bitvector[h_ix / 32] |= 1 << (h_ix % 32);
          h_cnt++;
        }
    }
  gcov_write_tag_length (tag, GCOV_TAG_SUMMARY_LENGTH (h_cnt));
  gcov_write_unsigned (summary->checksum);
  for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
    {
      gcov_write_unsigned (csum->num);
      gcov_write_unsigned (csum->runs);
      gcov_write_counter (csum->sum_all);
      gcov_write_counter (csum->run_max);
      gcov_write_counter (csum->sum_max);
      if (ix != GCOV_COUNTER_ARCS)
        {
          for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
            gcov_write_unsigned (0);
          continue;
        }
      for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
        gcov_write_unsigned (histo_bitvector[bv_ix]);
      for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
        {
          if (!csum->histogram[h_ix].num_counters)
            continue;
          gcov_write_unsigned (csum->histogram[h_ix].num_counters);
          gcov_write_counter (csum->histogram[h_ix].min_value);
          gcov_write_counter (csum->histogram[h_ix].cum_value);
        }
    }
}
#endif /* IN_LIBGCOV */
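
/* Illustrative sketch (not part of the original source): each non-empty
   histogram index is recorded as one bit in the bitvector, word
   h_ix / 32 and bit h_ix % 32, exactly as in the loop above.  The helper
   name is made up; guarded out, never compiled.  */
#if 0
static void
bitvector_set_example (unsigned *bitvector, unsigned h_ix)
{
  bitvector[h_ix / 32] |= 1u << (h_ix % 32);
}

/* e.g. histogram index 40 sets bit 8 of word 1; a reader recovers the
   index by scanning the bitvector for set bits (see gcov_read_summary
   below).  */
#endif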
/* Return a pointer to read BYTES bytes from the gcov file. Returns
   NULL on failure (read past EOF).  */

static const gcov_unsigned_t *
gcov_read_words (unsigned words)
{
  const gcov_unsigned_t *result;
  unsigned excess = gcov_var.length - gcov_var.offset;

  gcov_nonruntime_assert (gcov_var.mode > 0);
  if (excess < words)
    {
      gcov_var.start += gcov_var.offset;
      if (excess)
        {
#if IN_LIBGCOV
          memcpy (gcov_var.buffer, gcov_var.buffer + gcov_var.offset, 4);
#else
          memmove (gcov_var.buffer, gcov_var.buffer + gcov_var.offset,
                   excess * 4);
#endif
        }
      gcov_var.offset = 0;
      gcov_var.length = excess;
#if IN_LIBGCOV
      excess = GCOV_BLOCK_SIZE;
#else
      if (gcov_var.length + words > gcov_var.alloc)
        gcov_allocate (gcov_var.length + words);
      excess = gcov_var.alloc - gcov_var.length;
#endif
      excess = fread (gcov_var.buffer + gcov_var.length,
                      1, excess << 2, gcov_var.file) >> 2;
      gcov_var.length += excess;
      if (gcov_var.length < words)
        {
          gcov_var.overread += words - gcov_var.length;
          gcov_var.length = 0;
          return 0;
        }
    }
  result = &gcov_var.buffer[gcov_var.offset];
  gcov_var.offset += words;

  return result;
}
/* Read unsigned value from a coverage file. Sets error flag on file
   error, overflow flag on overflow */

GCOV_LINKAGE gcov_unsigned_t
gcov_read_unsigned (void)
{
  gcov_unsigned_t value;
  const gcov_unsigned_t *buffer = gcov_read_words (1);

  if (!buffer)
    return 0;
  value = from_file (buffer[0]);
  return value;
}
/* Read counter value from a coverage file. Sets error flag on file
   error, overflow flag on overflow */

GCOV_LINKAGE gcov_type
gcov_read_counter (void)
{
  gcov_type value;
  const gcov_unsigned_t *buffer = gcov_read_words (2);

  if (!buffer)
    return 0;
  value = from_file (buffer[0]);
  if (sizeof (value) > sizeof (gcov_unsigned_t))
    value |= ((gcov_type) from_file (buffer[1])) << 32;
  else if (buffer[1])
    gcov_var.error = -1;

  return value;
}
/* We need to expose the below function when compiling for gcov-tool.  */

#if !IN_LIBGCOV || defined (IN_GCOV_TOOL)
/* Read string from coverage file. Returns a pointer to a static
   buffer, or NULL on empty string. You must copy the string before
   calling another gcov function.  */

GCOV_LINKAGE const char *
gcov_read_string (void)
{
  unsigned length = gcov_read_unsigned ();

  if (!length)
    return 0;

  return (const char *) gcov_read_words (length);
}
#endif
GCOV_LINKAGE void
gcov_read_summary (struct gcov_summary *summary)
{
  unsigned ix, h_ix, bv_ix, h_cnt = 0;
  struct gcov_ctr_summary *csum;
  unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];
  unsigned cur_bitvector;

  summary->checksum = gcov_read_unsigned ();
  for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
    {
      csum->num = gcov_read_unsigned ();
      csum->runs = gcov_read_unsigned ();
      csum->sum_all = gcov_read_counter ();
      csum->run_max = gcov_read_counter ();
      csum->sum_max = gcov_read_counter ();
      memset (csum->histogram, 0,
              sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
      for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
        {
          histo_bitvector[bv_ix] = gcov_read_unsigned ();
#if IN_LIBGCOV
          /* When building libgcov we don't include system.h, which includes
             hwint.h (where popcount_hwi is declared). However, libgcov.a
             is built by the bootstrapped compiler and therefore the builtins
             are always available.  */
          h_cnt += __builtin_popcount (histo_bitvector[bv_ix]);
#else
          h_cnt += popcount_hwi (histo_bitvector[bv_ix]);
#endif
        }

      bv_ix = 0;
      h_ix = 0;
      cur_bitvector = 0;
      while (h_cnt--)
        {
          /* Find the index corresponding to the next entry we will read in.
             First find the next non-zero bitvector and re-initialize
             the histogram index accordingly, then right shift and increment
             the index until we find a set bit.  */
          while (!cur_bitvector)
            {
              h_ix = bv_ix * 32;
              if (bv_ix >= GCOV_HISTOGRAM_BITVECTOR_SIZE)
                gcov_error ("corrupted profile info: summary histogram "
                            "bitvector is corrupt");
              cur_bitvector = histo_bitvector[bv_ix++];
            }
          while (!(cur_bitvector & 0x1))
            {
              h_ix++;
              cur_bitvector >>= 1;
            }
          if (h_ix >= GCOV_HISTOGRAM_SIZE)
            gcov_error ("corrupted profile info: summary histogram "
                        "index is corrupt");

          csum->histogram[h_ix].num_counters = gcov_read_unsigned ();
          csum->histogram[h_ix].min_value = gcov_read_counter ();
          csum->histogram[h_ix].cum_value = gcov_read_counter ();
          /* Shift off the index we are done with and increment to the
             corresponding next histogram entry.  */
          cur_bitvector >>= 1;
          h_ix++;
        }
    }
}
/* We need to expose the below function when compiling for gcov-tool.  */

#if !IN_LIBGCOV || defined (IN_GCOV_TOOL)
/* Reset to a known position.  BASE should have been obtained from
   gcov_position, LENGTH should be a record length.  */

GCOV_LINKAGE void
gcov_sync (gcov_position_t base, gcov_unsigned_t length)
{
  gcov_nonruntime_assert (gcov_var.mode > 0);
  base += length;
  if (base - gcov_var.start <= gcov_var.length)
    gcov_var.offset = base - gcov_var.start;
  else
    {
      gcov_var.offset = gcov_var.length = 0;
      fseek (gcov_var.file, base << 2, SEEK_SET);
      gcov_var.start = ftell (gcov_var.file) >> 2;
    }
}
#endif
/* Move to a given position in a gcov file.  */

GCOV_LINKAGE void
gcov_seek (gcov_position_t base)
{
  if (gcov_var.offset)
    gcov_write_block (gcov_var.offset);
  fseek (gcov_var.file, base << 2, SEEK_SET);
  gcov_var.start = ftell (gcov_var.file) >> 2;
}
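
/* Illustrative sketch (not part of the original source): gcov positions
   count 32-bit words, so converting to and from byte offsets is a shift
   by two, as in the fseek/ftell calls above.  The helper names are made
   up; guarded out, never compiled.  */
#if 0
static long
position_to_byte_offset_example (gcov_position_t base)
{
  return (long) (base << 2);			/* words -> bytes, as passed to fseek */
}

static gcov_position_t
byte_offset_to_position_example (long byte_offset)
{
  return (gcov_position_t) (byte_offset >> 2);	/* bytes -> words, as taken from ftell */
}
#endif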
/* Return the modification time of the current gcov file.  */

GCOV_LINKAGE time_t
gcov_time (void)
{
  struct stat status;

  if (fstat (fileno (gcov_var.file), &status))
    return 0;
  else
    return status.st_mtime;
}
/* Determine the index into histogram for VALUE.  */

GCOV_LINKAGE unsigned
gcov_histo_index (gcov_type value)
{
  gcov_type_unsigned v = (gcov_type_unsigned)value;
  unsigned r = 0;
  unsigned prev2bits = 0;

  /* Find index into log2 scale histogram, where each of the log2
     sized buckets is divided into 4 linear sub-buckets for better
     focus in the higher buckets.  */

  /* Find the place of the most-significant bit set.  */
  if (v > 0)
    {
#if IN_LIBGCOV
      /* When building libgcov we don't include system.h, which includes
         hwint.h (where floor_log2 is declared). However, libgcov.a
         is built by the bootstrapped compiler and therefore the builtins
         are always available.  */
      r = sizeof (long long) * __CHAR_BIT__ - 1 - __builtin_clzll (v);
#else
      /* We use floor_log2 from hwint.c, which takes a HOST_WIDE_INT
         that is 64 bits and gcov_type_unsigned is 64 bits.  */
      r = floor_log2 (v);
#endif
    }

  /* If at most the 2 least significant bits are set (value is
     0 - 3) then that value is our index into the lowest set of
     four buckets.  */
  if (r < 2)
    return (unsigned)value;

  gcov_nonruntime_assert (r < 64);

  /* Find the two next most significant bits to determine which
     of the four linear sub-buckets to select.  */
  prev2bits = (v >> (r - 2)) & 0x3;
  /* Finally, compose the final bucket index from the log2 index and
     the next 2 bits.  The minimum r value at this point is 2 since we
     returned above if r was less than 2, so the minimum bucket at this
     point is 4.  */
  return (r - 1) * 4 + prev2bits;
}
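
/* Illustrative sketch (not part of the original source): a standalone
   re-statement of the same bucket math on a plain uint64_t, handy for
   sanity-checking the index formula.  The helper name is made up;
   guarded out, never compiled.  */
#if 0
#include <stdint.h>

static unsigned
histo_index_example (uint64_t v)
{
  unsigned r = 0, prev2bits;

  if (v > 0)
    r = 63 - __builtin_clzll (v);	/* floor(log2(v)) */
  if (r < 2)
    return (unsigned) v;		/* values 0-3 map to buckets 0-3 */
  prev2bits = (v >> (r - 2)) & 0x3;	/* next two bits pick the linear sub-bucket */
  return (r - 1) * 4 + prev2bits;
}

/* e.g. histo_index_example (4) == 4, histo_index_example (100) == 22,
   and the largest 64-bit value lands in bucket 251
   (= GCOV_HISTOGRAM_SIZE - 1).  */
#endif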
/* Merge SRC_HISTO into TGT_HISTO. The counters are assumed to be in
   the same relative order in both histograms, and are matched up
   and merged in reverse order. Each counter is assigned an equal portion of
   its entry's original cumulative counter value when computing the
   new merged cum_value.  */

static void gcov_histogram_merge (gcov_bucket_type *tgt_histo,
                                  gcov_bucket_type *src_histo)
{
  int src_i, tgt_i, tmp_i = 0;
  unsigned src_num, tgt_num, merge_num;
  gcov_type src_cum, tgt_cum, merge_src_cum, merge_tgt_cum, merge_cum;
  gcov_type merge_min;
  gcov_bucket_type tmp_histo[GCOV_HISTOGRAM_SIZE];
  int src_done = 0;

  memset (tmp_histo, 0, sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
  /* Assume that the counters are in the same relative order in both
     histograms. Walk the histograms from largest to smallest entry,
     matching up and combining counters in order.  */
  src_num = 0;
  src_cum = 0;
  src_i = GCOV_HISTOGRAM_SIZE - 1;
  for (tgt_i = GCOV_HISTOGRAM_SIZE - 1; tgt_i >= 0 && !src_done; tgt_i--)
    {
      tgt_num = tgt_histo[tgt_i].num_counters;
      tgt_cum = tgt_histo[tgt_i].cum_value;

      /* Keep going until all of the target histogram's counters at this
         position have been matched and merged with counters from the
         source histogram.  */
      while (tgt_num > 0 && !src_done)
        {
          /* If this is either the first time through this loop or we just
             exhausted the previous non-zero source histogram entry, look
             for the next non-zero source histogram entry.  */
          if (!src_num)
            {
              /* Locate the next non-zero entry.  */
              while (src_i >= 0 && !src_histo[src_i].num_counters)
                src_i--;

              /* If source histogram has fewer counters, then just copy over the
                 remaining target counters and quit.  */
              if (src_i < 0)
                {
                  tmp_histo[tgt_i].num_counters += tgt_num;
                  tmp_histo[tgt_i].cum_value += tgt_cum;
                  if (!tmp_histo[tgt_i].min_value ||
                      tgt_histo[tgt_i].min_value < tmp_histo[tgt_i].min_value)
                    tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
                  while (--tgt_i >= 0)
                    {
                      tmp_histo[tgt_i].num_counters
                          += tgt_histo[tgt_i].num_counters;
                      tmp_histo[tgt_i].cum_value += tgt_histo[tgt_i].cum_value;
                      if (!tmp_histo[tgt_i].min_value ||
                          tgt_histo[tgt_i].min_value
                          < tmp_histo[tgt_i].min_value)
                        tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
                    }

                  src_done = 1;
                  break;
                }

              src_num = src_histo[src_i].num_counters;
              src_cum = src_histo[src_i].cum_value;
            }
          /* The number of counters to merge on this pass is the minimum
             of the remaining counters from the current target and source
             histogram entries.  */
          merge_num = tgt_num;
          if (src_num < merge_num)
            merge_num = src_num;

          /* The merged min_value is the sum of the min_values from target
             and source.  */
          merge_min = tgt_histo[tgt_i].min_value + src_histo[src_i].min_value;

          /* Compute the portion of source and target entries' cum_value
             that will be apportioned to the counters being merged.
             The total remaining cum_value from each entry is divided
             equally among the counters from that histogram entry if we
             are not merging all of them.  */
          merge_src_cum = src_cum;
          if (merge_num < src_num)
            merge_src_cum = merge_num * src_cum / src_num;
          merge_tgt_cum = tgt_cum;
          if (merge_num < tgt_num)
            merge_tgt_cum = merge_num * tgt_cum / tgt_num;
          /* The merged cum_value is the sum of the source and target
             components.  */
          merge_cum = merge_src_cum + merge_tgt_cum;

          /* Update the remaining number of counters and cum_value left
             to be merged from this source and target entry.  */
          src_cum -= merge_src_cum;
          tgt_cum -= merge_tgt_cum;
          src_num -= merge_num;
          tgt_num -= merge_num;
          /* The merged counters get placed in the new merged histogram
             at the entry for the merged min_value.  */
          tmp_i = gcov_histo_index (merge_min);
          gcov_nonruntime_assert (tmp_i < GCOV_HISTOGRAM_SIZE);
          tmp_histo[tmp_i].num_counters += merge_num;
          tmp_histo[tmp_i].cum_value += merge_cum;
          if (!tmp_histo[tmp_i].min_value ||
              merge_min < tmp_histo[tmp_i].min_value)
            tmp_histo[tmp_i].min_value = merge_min;

          /* Ensure the search for the next non-zero src_histo entry starts
             at the next smallest histogram bucket.  */
          if (!src_num)
            src_i--;
        }
    }

  gcov_nonruntime_assert (tgt_i < 0);

  /* In the case where there were more counters in the source histogram,
     accumulate the remaining unmerged cumulative counter values. Add
     those to the smallest non-zero target histogram entry. Otherwise,
     the total cumulative counter values in the histogram will be smaller
     than the sum_all stored in the summary, which will complicate
     computing the working set information from the histogram later on.  */
  while (src_i >= 0)
    {
      src_cum += src_histo[src_i].cum_value;
      src_i--;
    }

  /* At this point, tmp_i should be the smallest non-zero entry in the
     tmp_histo.  */
  gcov_nonruntime_assert (tmp_i >= 0 && tmp_i < GCOV_HISTOGRAM_SIZE
                          && tmp_histo[tmp_i].num_counters > 0);
  tmp_histo[tmp_i].cum_value += src_cum;

  /* Finally, copy the merged histogram into tgt_histo.  */
  memcpy (tgt_histo, tmp_histo,
          sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
}
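
/* Illustrative sketch (not part of the original source): when only part
   of a histogram entry is merged, its cumulative value is apportioned
   pro rata, as in the merge_src_cum/merge_tgt_cum computation above.
   The helper name is made up; guarded out, never compiled.  */
#if 0
static long long
apportion_cum_example (unsigned merge_num, unsigned total_num, long long cum)
{
  /* Each of total_num counters gets an equal share of cum; merge_num of
     them are merged on this pass.  */
  return (long long) merge_num * cum / total_num;
}

/* e.g. merging 1 of 4 counters whose entry has cum_value 100 moves 25
   into the merged bucket: apportion_cum_example (1, 4, 100) == 25.  */
#endif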
#endif /* !IN_GCOV */

/* This is used by gcov-dump (IN_GCOV == -1) and in the compiler
   (!IN_GCOV && !IN_LIBGCOV).  */
#if IN_GCOV <= 0 && !IN_LIBGCOV
/* Compute the working set information from the counter histogram in
   the profile summary.  This is an array of information corresponding to a
   range of percentages of the total execution count (sum_all), and includes
   the number of counters required to cover that working set percentage and
   the minimum counter value in that working set.  */

GCOV_LINKAGE void
compute_working_sets (const struct gcov_ctr_summary *summary,
                      gcov_working_set_t *gcov_working_sets)
{
  gcov_type working_set_cum_values[NUM_GCOV_WORKING_SETS];
  gcov_type ws_cum_hotness_incr;
  gcov_type cum, tmp_cum;
  const gcov_bucket_type *histo_bucket;
  unsigned ws_ix, c_num, count;
  int h_ix;

  /* Compute the amount of sum_all that the cumulative hotness grows
     by in each successive working set entry, which depends on the
     number of working set entries.  */
  ws_cum_hotness_incr = summary->sum_all / NUM_GCOV_WORKING_SETS;
  /* Next fill in an array of the cumulative hotness values corresponding
     to each working set summary entry we are going to compute below.
     Skip 0% statistics, which can be extrapolated from the
     rest of the summary data.  */
  cum = ws_cum_hotness_incr;
  for (ws_ix = 0; ws_ix < NUM_GCOV_WORKING_SETS;
       ws_ix++, cum += ws_cum_hotness_incr)
    working_set_cum_values[ws_ix] = cum;
  /* The last summary entry is reserved for (roughly) 99.9% of the
     working set. Divide by 1024 so it becomes a shift, which gives
     almost exactly 99.9%.  */
  working_set_cum_values[NUM_GCOV_WORKING_SETS-1]
      = summary->sum_all - summary->sum_all/1024;
  /* Next, walk through the histogram in descending order of hotness
     and compute the statistics for the working set summary array.
     As histogram entries are accumulated, we check to see which
     working set entries have had their expected cum_value reached
     and fill them in, walking the working set entries in increasing
     size of cum_value.  */
  ws_ix = 0; /* The current entry into the working set array.  */
  cum = 0; /* The current accumulated counter sum.  */
  count = 0; /* The current accumulated count of block counters.  */
  for (h_ix = GCOV_HISTOGRAM_SIZE - 1;
       h_ix >= 0 && ws_ix < NUM_GCOV_WORKING_SETS; h_ix--)
    {
      histo_bucket = &summary->histogram[h_ix];

      /* If we haven't reached the required cumulative counter value for
         the current working set percentage, simply accumulate this histogram
         entry into the running sums and continue to the next histogram
         entry.  */
      if (cum + histo_bucket->cum_value < working_set_cum_values[ws_ix])
        {
          cum += histo_bucket->cum_value;
          count += histo_bucket->num_counters;
          continue;
        }
      /* If adding the current histogram entry's cumulative counter value
         causes us to exceed the current working set size, then estimate
         how many of this histogram entry's counter values are required to
         reach the working set size, and fill in working set entries
         as we reach their expected cumulative value.  */
      for (c_num = 0, tmp_cum = cum;
           c_num < histo_bucket->num_counters && ws_ix < NUM_GCOV_WORKING_SETS;
           c_num++)
        {
          count++;

          /* If we haven't reached the last histogram entry counter, add
             in the minimum value again. This will underestimate the
             cumulative sum so far, because many of the counter values in this
             entry may have been larger than the minimum. We could add in the
             average value every time, but that would require an expensive
             divide operation.  */
          if (c_num + 1 < histo_bucket->num_counters)
            tmp_cum += histo_bucket->min_value;
          /* If we have reached the last histogram entry counter, then add
             in the entire cumulative value.  */
          else
            tmp_cum = cum + histo_bucket->cum_value;
          /* Next walk through successive working set entries and fill in
             the statistics for any whose size we have reached by accumulating
             this histogram counter.  */
          while (ws_ix < NUM_GCOV_WORKING_SETS
                 && tmp_cum >= working_set_cum_values[ws_ix])
            {
              gcov_working_sets[ws_ix].num_counters = count;
              gcov_working_sets[ws_ix].min_counter
                  = histo_bucket->min_value;
              ws_ix++;
            }
        }
      /* Finally, update the running cumulative value since we were
         using a temporary above.  */
      cum += histo_bucket->cum_value;
    }

  gcov_nonruntime_assert (ws_ix == NUM_GCOV_WORKING_SETS);
}
#endif /* IN_GCOV <= 0 && !IN_LIBGCOV */
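
/* Illustrative sketch (not part of the original source): entry ws_ix of
   the array filled in above covers roughly (ws_ix + 1) / NUM_GCOV_WORKING_SETS
   of sum_all, except that the final entry is pinned near 99.9%
   (sum_all - sum_all/1024), matching the working_set_cum_values setup.
   The helper name is made up; guarded out, never compiled.  */
#if 0
static gcov_type
working_set_target_example (const struct gcov_ctr_summary *summary,
                            unsigned ws_ix)
{
  gcov_type incr = summary->sum_all / NUM_GCOV_WORKING_SETS;

  if (ws_ix == NUM_GCOV_WORKING_SETS - 1)
    return summary->sum_all - summary->sum_all / 1024;	/* ~99.9% */
  return (gcov_type) (ws_ix + 1) * incr;
}
#endif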