/* File format for coverage information
   Copyright (C) 1996-2017 Free Software Foundation, Inc.
   Contributed by Bob Manson <manson@cygnus.com>.
   Completely remangled by Nathan Sidwell <nathan@codesourcery.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */
/* Routines declared in gcov-io.h.  This file should be #included by
   another source file, after having #included gcov-io.h.  */
static void gcov_write_block (unsigned);
static gcov_unsigned_t *gcov_write_words (unsigned);
static const gcov_unsigned_t *gcov_read_words (unsigned);
static void gcov_allocate (unsigned);
/* Optimum number of gcov_unsigned_t's read from or written to disk.  */
#define GCOV_BLOCK_SIZE (1 << 10)
struct gcov_var
{
  FILE *file;
  gcov_position_t start;	/* Position of first byte of block */
  unsigned offset;		/* Read/write position within the block.  */
  unsigned length;		/* Read limit in the block.  */
  unsigned overread;		/* Number of words overread.  */
  int error;			/* < 0 overflow, > 0 disk error.  */
  int mode;			/* < 0 writing, > 0 reading */
#if IN_LIBGCOV
  /* Holds one block plus 4 bytes, thus all coverage reads & writes
     fit within this buffer and we always can transfer GCOV_BLOCK_SIZE
     to and from the disk. libgcov never backtracks and only writes 4
     or 8 byte objects.  */
  gcov_unsigned_t buffer[GCOV_BLOCK_SIZE + 1];
#else
  int endian;			/* Swap endianness.  */
  /* Holds a variable length block, as the compiler can write
     strings and needs to backtrack.  */
  size_t alloc;
  gcov_unsigned_t *buffer;
#endif
} gcov_var;
/* Save the current position in the gcov file.  */
/* We need to expose this function when compiling for gcov-tool.  */
#if !IN_GCOV_TOOL
static inline
#endif
gcov_position_t
gcov_position (void)
{
  gcov_nonruntime_assert (gcov_var.mode > 0);
  return gcov_var.start + gcov_var.offset;
}
/* Return nonzero if the error flag is set.  */
/* We need to expose this function when compiling for gcov-tool.  */
#if !IN_GCOV_TOOL
static inline
#endif
int
gcov_is_error (void)
{
  return gcov_var.file ? gcov_var.error : 1;
}
/* Move to beginning of file and initialize for writing.  */
GCOV_LINKAGE inline void
gcov_rewrite (void)
{
  gcov_var.mode = -1;
  gcov_var.start = 0;
  gcov_var.offset = 0;
  fseek (gcov_var.file, 0L, SEEK_SET);
}
static inline gcov_unsigned_t from_file (gcov_unsigned_t value)
{
#if !IN_LIBGCOV
  if (gcov_var.endian)
    {
      value = (value >> 16) | (value << 16);
      value = ((value & 0xff00ff) << 8) | ((value >> 8) & 0xff00ff);
    }
#endif
  return value;
}
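/* For illustration: the two shift/mask steps above together reverse the
   byte order of a 32-bit word.  E.g. 0x11223344 first becomes 0x33441122
   (the 16-bit halves swapped), then 0x44332211 (the bytes within each
   half swapped), which is the same word as seen from the other
   endianness.  */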
/* Open a gcov file. NAME is the name of the file to open and MODE
   indicates whether a new file should be created, or an existing file
   opened. If MODE is >= 0 an existing file will be opened, if
   possible, and if MODE is <= 0, a new file will be created. Use
   MODE=0 to attempt to reopen an existing file and then fall back on
   creating a new one.  If MODE > 0, the file will be opened in
   read-only mode.  Otherwise it will be opened for modification.
   Return zero on failure, non-zero on success.  */
GCOV_LINKAGE int
#if IN_LIBGCOV
gcov_open (const char *name)
#else
gcov_open (const char *name, int mode)
#endif
{
#if IN_LIBGCOV
  int mode = 0;
#endif
#if GCOV_LOCKED
  struct flock s_flock;
  int fd;

  s_flock.l_whence = SEEK_SET;
  s_flock.l_start = 0;
  s_flock.l_len = 0; /* Until EOF.  */
  s_flock.l_pid = getpid ();
#endif

  gcov_nonruntime_assert (!gcov_var.file);
  gcov_var.start = 0;
  gcov_var.offset = gcov_var.length = 0;
  gcov_var.overread = -1u;
  gcov_var.error = 0;
#if !IN_LIBGCOV
  gcov_var.endian = 0;
#endif

#if GCOV_LOCKED
  if (mode > 0)
    {
      /* Read-only mode - acquire a read-lock.  */
      s_flock.l_type = F_RDLCK;
      /* pass mode (ignored) for compatibility */
      fd = open (name, O_RDONLY, S_IRUSR | S_IWUSR);
    }
  else
    {
      /* Write mode - acquire a write-lock.  */
      s_flock.l_type = F_WRLCK;
      /* Truncate if force new mode.  */
      fd = open (name, O_RDWR | O_CREAT | (mode < 0 ? O_TRUNC : 0), 0666);
    }
  if (fd < 0)
    return 0;

  while (fcntl (fd, F_SETLKW, &s_flock) && errno == EINTR)
    continue;

  gcov_var.file = fdopen (fd, (mode > 0) ? "rb" : "r+b");

  if (!gcov_var.file)
    {
      close (fd);
      return 0;
    }
#else
  if (mode >= 0)
    /* Open an existing file.  */
    gcov_var.file = fopen (name, (mode > 0) ? "rb" : "r+b");

  if (gcov_var.file)
    mode = 1;
  else if (mode <= 0)
    /* Create a new file.  */
    gcov_var.file = fopen (name, "w+b");

  if (!gcov_var.file)
    return 0;
#endif

  gcov_var.mode = mode ? mode : 1;

  setbuf (gcov_var.file, (char *)0);

  return 1;
}
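/* Typical use from a reader (an illustrative sketch only; "foo.gcda" and
   the magic/version handling are placeholders for whatever the caller
   actually does):

     if (gcov_open ("foo.gcda", 1))       // 1 => existing file, read-only
       {
         gcov_unsigned_t magic = gcov_read_unsigned ();
         ...                              // check magic/version, read records
         gcov_close ();
       }

   A writer in libgcov instead opens with gcov_open (name) and emits
   records with the gcov_write_* routines before calling gcov_close.  */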
/* Close the current gcov file. Flushes data to disk. Returns nonzero
   on failure or error flag set.  */

GCOV_LINKAGE int
gcov_close (void)
{
  if (gcov_var.file)
    {
#if !IN_GCOV
      if (gcov_var.offset && gcov_var.mode < 0)
	gcov_write_block (gcov_var.offset);
#endif
      fclose (gcov_var.file);
      gcov_var.file = 0;
      gcov_var.length = 0;
    }
#if !IN_LIBGCOV
  free (gcov_var.buffer);
  gcov_var.alloc = 0;
  gcov_var.buffer = 0;
#endif
  gcov_var.mode = 0;
  return gcov_var.error;
}
/* Check if MAGIC is EXPECTED. Use it to determine endianness of the
   file. Returns +1 for same endian, -1 for other endian and zero for
   something else.  */

GCOV_LINKAGE int
gcov_magic (gcov_unsigned_t magic, gcov_unsigned_t expected)
{
  if (magic == expected)
    return 1;
  magic = (magic >> 16) | (magic << 16);
  magic = ((magic & 0xff00ff) << 8) | ((magic >> 8) & 0xff00ff);
  if (magic == expected)
    {
      gcov_var.endian = 1;
      return -1;
    }
  return 0;
}
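/* Example: gcov files start with a four-character magic word such as
   "gcno" or "gcda" (see gcov-io.h for the actual constants).  If a
   little-endian reader opens a big-endian file, the word it reads is the
   expected magic with its bytes reversed; the two swaps above reverse
   them back, the comparison succeeds, gcov_var.endian is set, and the -1
   return tells the caller that every subsequent word must be byte-swapped
   by from_file.  */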
static void
gcov_allocate (unsigned length)
{
  size_t new_size = gcov_var.alloc;

  if (!new_size)
    new_size = GCOV_BLOCK_SIZE;
  new_size += length;
  new_size *= 2;

  gcov_var.alloc = new_size;
  gcov_var.buffer = XRESIZEVAR (gcov_unsigned_t, gcov_var.buffer, new_size << 2);
}
/* Write out the current block, if needs be.  */

static void
gcov_write_block (unsigned size)
{
  if (fwrite (gcov_var.buffer, size << 2, 1, gcov_var.file) != 1)
    gcov_var.error = 1;
  gcov_var.start += size;
  gcov_var.offset -= size;
}
/* Allocate space to write BYTES bytes to the gcov file. Return a
   pointer to those bytes, or NULL on failure.  */

static gcov_unsigned_t *
gcov_write_words (unsigned words)
{
  gcov_unsigned_t *result;

  gcov_nonruntime_assert (gcov_var.mode < 0);
#if IN_LIBGCOV
  if (gcov_var.offset >= GCOV_BLOCK_SIZE)
    {
      gcov_write_block (GCOV_BLOCK_SIZE);
      if (gcov_var.offset)
	memcpy (gcov_var.buffer, gcov_var.buffer + GCOV_BLOCK_SIZE, 4);
    }
#else
  if (gcov_var.offset + words > gcov_var.alloc)
    gcov_allocate (gcov_var.offset + words);
#endif
  result = &gcov_var.buffer[gcov_var.offset];
  gcov_var.offset += words;

  return result;
}
/* Write unsigned VALUE to coverage file.  Sets error flag
   appropriately.  */

GCOV_LINKAGE void
gcov_write_unsigned (gcov_unsigned_t value)
{
  gcov_unsigned_t *buffer = gcov_write_words (1);

  buffer[0] = value;
}
#if IN_LIBGCOV
/* Write counter VALUE to coverage file.  Sets error flag
   appropriately.  */

GCOV_LINKAGE void
gcov_write_counter (gcov_type value)
{
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = (gcov_unsigned_t) value;
  if (sizeof (value) > sizeof (gcov_unsigned_t))
    buffer[1] = (gcov_unsigned_t) (value >> 32);
  else
    buffer[1] = 0;
}
#endif /* IN_LIBGCOV */
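/* On disk a gcov_type counter therefore occupies two 4-byte words, low
   word first.  E.g. a 64-bit count of 0x100000002 is stored as the word
   0x00000002 followed by the word 0x00000001; gcov_read_counter below
   reassembles it in the same order.  */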
#if !IN_LIBGCOV
/* Write STRING to coverage file.  Sets error flag on file
   error, overflow flag on overflow */

GCOV_LINKAGE void
gcov_write_string (const char *string)
{
  unsigned length = 0;
  unsigned alloc = 0;
  gcov_unsigned_t *buffer;

  if (string)
    {
      length = strlen (string);
      alloc = (length + 4) >> 2;
    }

  buffer = gcov_write_words (1 + alloc);

  buffer[0] = alloc;
  buffer[alloc] = 0; /* place nul terminators.  */
  memcpy (&buffer[1], string, length);
}
#endif
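/* Layout example: gcov_write_string ("abc") emits two words, a length
   word of 1 (the number of payload words) followed by one word holding
   the bytes 'a' 'b' 'c' '\0'.  A four-character string needs two payload
   words, the second of which is all zero padding, so the stored string
   is always NUL terminated.  */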
#if !IN_LIBGCOV
/* Write a tag TAG and reserve space for the record length. Return a
   value to be used for gcov_write_length.  */

GCOV_LINKAGE gcov_position_t
gcov_write_tag (gcov_unsigned_t tag)
{
  gcov_position_t result = gcov_var.start + gcov_var.offset;
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = tag;
  buffer[1] = 0;

  return result;
}
/* Write a record length using POSITION, which was returned by
   gcov_write_tag.  The current file position is the end of the
   record, and is restored before returning.  Returns nonzero on
   overflow.  */

GCOV_LINKAGE void
gcov_write_length (gcov_position_t position)
{
  unsigned offset;
  gcov_unsigned_t length;
  gcov_unsigned_t *buffer;

  gcov_nonruntime_assert (gcov_var.mode < 0);
  gcov_nonruntime_assert (position + 2 <= gcov_var.start + gcov_var.offset);
  gcov_nonruntime_assert (position >= gcov_var.start);
  offset = position - gcov_var.start;
  length = gcov_var.offset - offset - 2;
  buffer = (gcov_unsigned_t *) &gcov_var.buffer[offset];
  buffer[1] = length;
  if (gcov_var.offset >= GCOV_BLOCK_SIZE)
    gcov_write_block (gcov_var.offset);
}
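/* Sketch of how these two routines are used together (the tag name is an
   example from gcov-io.h, not something required here):

     gcov_position_t pos = gcov_write_tag (GCOV_TAG_FUNCTION);
     ... write the record's words ...
     gcov_write_length (pos);   // patch the length slot reserved above

   The length stored is the number of words between the two-word
   tag/length header and the current write position.  */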
#else /* IN_LIBGCOV */

/* Write a tag TAG and length LENGTH.  */

GCOV_LINKAGE void
gcov_write_tag_length (gcov_unsigned_t tag, gcov_unsigned_t length)
{
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = tag;
  buffer[1] = length;
}
/* Write a summary structure to the gcov file.  Return nonzero on
   overflow.  */

GCOV_LINKAGE void
gcov_write_summary (gcov_unsigned_t tag, const struct gcov_summary *summary)
{
  unsigned ix, h_ix, bv_ix, h_cnt = 0;
  const struct gcov_ctr_summary *csum;
  unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];

  /* Count number of non-zero histogram entries, and fill in a bit vector
     of non-zero indices. The histogram is only currently computed for arc
     counters.  */
  for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
    histo_bitvector[bv_ix] = 0;
  csum = &summary->ctrs[GCOV_COUNTER_ARCS];
  for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
    if (csum->histogram[h_ix].num_counters)
      {
        histo_bitvector[h_ix / 32] |= 1 << (h_ix % 32);
        h_cnt++;
      }
  gcov_write_tag_length (tag, GCOV_TAG_SUMMARY_LENGTH (h_cnt));
  gcov_write_unsigned (summary->checksum);
  for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
    {
      gcov_write_unsigned (csum->num);
      gcov_write_unsigned (csum->runs);
      gcov_write_counter (csum->sum_all);
      gcov_write_counter (csum->run_max);
      gcov_write_counter (csum->sum_max);
      if (ix != GCOV_COUNTER_ARCS)
        {
          for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
            gcov_write_unsigned (0);
          continue;
        }
      for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
        gcov_write_unsigned (histo_bitvector[bv_ix]);
      for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
        {
          if (!csum->histogram[h_ix].num_counters)
            continue;
          gcov_write_unsigned (csum->histogram[h_ix].num_counters);
          gcov_write_counter (csum->histogram[h_ix].min_value);
          gcov_write_counter (csum->histogram[h_ix].cum_value);
        }
    }
}
#endif /* IN_LIBGCOV */
/* Return a pointer to read BYTES bytes from the gcov file. Returns
   NULL on failure (read past EOF).  */

static const gcov_unsigned_t *
gcov_read_words (unsigned words)
{
  const gcov_unsigned_t *result;
  unsigned excess = gcov_var.length - gcov_var.offset;

  if (gcov_var.mode <= 0)
    return NULL;

  if (excess < words)
    {
      gcov_var.start += gcov_var.offset;
#if IN_LIBGCOV
      if (excess)
	memcpy (gcov_var.buffer, gcov_var.buffer + gcov_var.offset, 4);
#else
      memmove (gcov_var.buffer, gcov_var.buffer + gcov_var.offset,
	       excess * 4);
#endif
      gcov_var.offset = 0;
      gcov_var.length = excess;
#if IN_LIBGCOV
      excess = GCOV_BLOCK_SIZE;
#else
      if (gcov_var.length + words > gcov_var.alloc)
	gcov_allocate (gcov_var.length + words);
      excess = gcov_var.alloc - gcov_var.length;
#endif
      excess = fread (gcov_var.buffer + gcov_var.length,
		      1, excess << 2, gcov_var.file) >> 2;
      gcov_var.length += excess;
      if (gcov_var.length < words)
	{
	  gcov_var.overread += words - gcov_var.length;
	  gcov_var.length = 0;
	  return 0;
	}
    }
  result = &gcov_var.buffer[gcov_var.offset];
  gcov_var.offset += words;

  return result;
}
/* Read unsigned value from a coverage file. Sets error flag on file
   error, overflow flag on overflow */

GCOV_LINKAGE gcov_unsigned_t
gcov_read_unsigned (void)
{
  gcov_unsigned_t value;
  const gcov_unsigned_t *buffer = gcov_read_words (1);

  if (!buffer)
    return 0;
  value = from_file (buffer[0]);
  return value;
}
/* Read counter value from a coverage file. Sets error flag on file
   error, overflow flag on overflow */

GCOV_LINKAGE gcov_type
gcov_read_counter (void)
{
  gcov_type value;
  const gcov_unsigned_t *buffer = gcov_read_words (2);

  if (!buffer)
    return 0;
  value = from_file (buffer[0]);
  if (sizeof (value) > sizeof (gcov_unsigned_t))
    value |= ((gcov_type) from_file (buffer[1])) << 32;
  else if (buffer[1])
    gcov_var.error = -1;

  return value;
}
/* We need to expose the below function when compiling for gcov-tool.  */

#if !IN_LIBGCOV || defined (IN_GCOV_TOOL)
/* Read string from coverage file. Returns a pointer to a static
   buffer, or NULL on empty string. You must copy the string before
   calling another gcov function.  */

GCOV_LINKAGE const char *
gcov_read_string (void)
{
  unsigned length = gcov_read_unsigned ();

  if (!length)
    return 0;

  return (const char *) gcov_read_words (length);
}
#endif
GCOV_LINKAGE void
gcov_read_summary (struct gcov_summary *summary)
{
  unsigned ix, h_ix, bv_ix, h_cnt = 0;
  struct gcov_ctr_summary *csum;
  unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];
  unsigned cur_bitvector;

  summary->checksum = gcov_read_unsigned ();
  for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
    {
      csum->num = gcov_read_unsigned ();
      csum->runs = gcov_read_unsigned ();
      csum->sum_all = gcov_read_counter ();
      csum->run_max = gcov_read_counter ();
      csum->sum_max = gcov_read_counter ();
      memset (csum->histogram, 0,
              sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
      for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
        {
          histo_bitvector[bv_ix] = gcov_read_unsigned ();
#if IN_LIBGCOV
          /* When building libgcov we don't include system.h, which includes
             hwint.h (where popcount_hwi is declared). However, libgcov.a
             is built by the bootstrapped compiler and therefore the builtins
             are always available.  */
          h_cnt += __builtin_popcount (histo_bitvector[bv_ix]);
#else
          h_cnt += popcount_hwi (histo_bitvector[bv_ix]);
#endif
        }
      bv_ix = 0;
      h_ix = 0;
      cur_bitvector = 0;
      while (h_cnt--)
        {
          /* Find the index corresponding to the next entry we will read in.
             First find the next non-zero bitvector and re-initialize
             the histogram index accordingly, then right shift and increment
             the index until we find a set bit.  */
          while (!cur_bitvector)
            {
              h_ix = bv_ix * 32;
              if (bv_ix >= GCOV_HISTOGRAM_BITVECTOR_SIZE)
                gcov_error ("corrupted profile info: summary histogram "
                            "bitvector is corrupt");
              cur_bitvector = histo_bitvector[bv_ix++];
            }
          while (!(cur_bitvector & 0x1))
            {
              h_ix++;
              cur_bitvector >>= 1;
            }
          if (h_ix >= GCOV_HISTOGRAM_SIZE)
            gcov_error ("corrupted profile info: summary histogram "
                        "index is corrupt");

          csum->histogram[h_ix].num_counters = gcov_read_unsigned ();
          csum->histogram[h_ix].min_value = gcov_read_counter ();
          csum->histogram[h_ix].cum_value = gcov_read_counter ();
          /* Shift off the index we are done with and increment to the
             corresponding next histogram entry.  */
          cur_bitvector >>= 1;
          h_ix++;
        }
    }
}
/* We need to expose the below function when compiling for gcov-tool.  */

#if !IN_LIBGCOV || defined (IN_GCOV_TOOL)
/* Reset to a known position.  BASE should have been obtained from
   gcov_position, LENGTH should be a record length.  */

GCOV_LINKAGE void
gcov_sync (gcov_position_t base, gcov_unsigned_t length)
{
  gcov_nonruntime_assert (gcov_var.mode > 0);

  base += length;
  if (base - gcov_var.start <= gcov_var.length)
    gcov_var.offset = base - gcov_var.start;
  else
    {
      gcov_var.offset = gcov_var.length = 0;
      fseek (gcov_var.file, base << 2, SEEK_SET);
      gcov_var.start = ftell (gcov_var.file) >> 2;
    }
}
#endif
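/* Typical reader pattern (an illustrative sketch, not code from this
   file): after reading a record's tag and length words, remember the
   position of the record body so the whole record can be skipped
   regardless of how much of it was actually consumed:

     gcov_unsigned_t tag = gcov_read_unsigned ();
     gcov_unsigned_t length = gcov_read_unsigned ();
     gcov_position_t base = gcov_position ();
     ...                                  // optionally read the body
     gcov_sync (base, length);            // seek to the end of the record
*/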
/* Move to a given position in a gcov file.  */

GCOV_LINKAGE void
gcov_seek (gcov_position_t base)
{
  if (gcov_var.offset)
    gcov_write_block (gcov_var.offset);
  fseek (gcov_var.file, base << 2, SEEK_SET);
  gcov_var.start = ftell (gcov_var.file) >> 2;
}
/* Return the modification time of the current gcov file.  */

GCOV_LINKAGE time_t
gcov_time (void)
{
  struct stat status;

  if (fstat (fileno (gcov_var.file), &status))
    return 0;
  else
    return status.st_mtime;
}
#if !IN_GCOV
/* Determine the index into histogram for VALUE.  */

GCOV_LINKAGE unsigned
gcov_histo_index (gcov_type value)
{
  gcov_type_unsigned v = (gcov_type_unsigned)value;
  unsigned r = 0;
  unsigned prev2bits = 0;

  /* Find index into log2 scale histogram, where each of the log2
     sized buckets is divided into 4 linear sub-buckets for better
     focus in the higher buckets.  */

  /* Find the place of the most-significant bit set.  */
  if (v > 0)
    {
#if IN_LIBGCOV
      /* When building libgcov we don't include system.h, which includes
         hwint.h (where floor_log2 is declared). However, libgcov.a
         is built by the bootstrapped compiler and therefore the builtins
         are always available.  */
      r = sizeof (long long) * __CHAR_BIT__ - 1 - __builtin_clzll (v);
#else
      /* We use floor_log2 from hwint.c, which takes a HOST_WIDE_INT
         that is 64 bits and gcov_type_unsigned is 64 bits.  */
      r = floor_log2 (v);
#endif
    }

  /* If at most the 2 least significant bits are set (value is
     0 - 3) then that value is our index into the lowest set of
     four buckets.  */
  if (r < 2)
    return (unsigned)value;

  gcov_nonruntime_assert (r < 64);

  /* Find the two next most significant bits to determine which
     of the four linear sub-buckets to select.  */
  prev2bits = (v >> (r - 2)) & 0x3;
  /* Finally, compose the final bucket index from the log2 index and
     the next 2 bits.  The minimum r value at this point is 2, since we
     returned above when r was less than 2, so the minimum bucket at this
     point is 4.  */
  return (r - 1) * 4 + prev2bits;
}
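/* Worked example: for value 100 (binary 1100100) the most significant set
   bit is bit 6, so r = 6; the next two bits are 10, so prev2bits = 2, and
   the bucket index is (6 - 1) * 4 + 2 = 22.  Values 0-3 map directly to
   buckets 0-3, and value 4 (r = 2, prev2bits = 0) maps to bucket 4,
   matching the comment above.  */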
/* Merge SRC_HISTO into TGT_HISTO. The counters are assumed to be in
   the same relative order in both histograms, and are matched up
   and merged in reverse order. Each counter is assigned an equal portion of
   its entry's original cumulative counter value when computing the
   new merged cum_value.  */
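/* Small example: if the target bucket holds 2 counters with cum_value 12
   and the source bucket holds 1 counter with cum_value 7, the first pass
   merges one counter from each side.  The source contributes all of its 7,
   the target contributes half of its 12, and the merged counter lands in
   the bucket selected by gcov_histo_index for the summed min_values.  */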
static void gcov_histogram_merge (gcov_bucket_type *tgt_histo,
                                  gcov_bucket_type *src_histo)
{
  int src_i, tgt_i, tmp_i = 0;
  unsigned src_num, tgt_num, merge_num;
  gcov_type src_cum, tgt_cum, merge_src_cum, merge_tgt_cum, merge_cum;
  gcov_type merge_min;
  gcov_bucket_type tmp_histo[GCOV_HISTOGRAM_SIZE];
  int src_done = 0;

  memset (tmp_histo, 0, sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);

  /* Assume that the counters are in the same relative order in both
     histograms. Walk the histograms from largest to smallest entry,
     matching up and combining counters in order.  */
  src_num = 0;
  src_cum = 0;
  src_i = GCOV_HISTOGRAM_SIZE - 1;
  for (tgt_i = GCOV_HISTOGRAM_SIZE - 1; tgt_i >= 0 && !src_done; tgt_i--)
    {
      tgt_num = tgt_histo[tgt_i].num_counters;
      tgt_cum = tgt_histo[tgt_i].cum_value;
      /* Keep going until all of the target histogram's counters at this
         position have been matched and merged with counters from the
         source histogram.  */
      while (tgt_num > 0 && !src_done)
        {
          /* If this is either the first time through this loop or we just
             exhausted the previous non-zero source histogram entry, look
             for the next non-zero source histogram entry.  */
          if (!src_num)
            {
              /* Locate the next non-zero entry.  */
              while (src_i >= 0 && !src_histo[src_i].num_counters)
                src_i--;

              /* If source histogram has fewer counters, then just copy over the
                 remaining target counters and quit.  */
              if (src_i < 0)
                {
                  tmp_histo[tgt_i].num_counters += tgt_num;
                  tmp_histo[tgt_i].cum_value += tgt_cum;
                  if (!tmp_histo[tgt_i].min_value ||
                      tgt_histo[tgt_i].min_value < tmp_histo[tgt_i].min_value)
                    tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
                  while (--tgt_i >= 0)
                    {
                      tmp_histo[tgt_i].num_counters
                          += tgt_histo[tgt_i].num_counters;
                      tmp_histo[tgt_i].cum_value += tgt_histo[tgt_i].cum_value;
                      if (!tmp_histo[tgt_i].min_value ||
                          tgt_histo[tgt_i].min_value
                          < tmp_histo[tgt_i].min_value)
                        tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
                    }

                  src_done = 1;
                  break;
                }

              src_num = src_histo[src_i].num_counters;
              src_cum = src_histo[src_i].cum_value;
            }

          /* The number of counters to merge on this pass is the minimum
             of the remaining counters from the current target and source
             histogram entries.  */
          merge_num = tgt_num;
          if (src_num < merge_num)
            merge_num = src_num;

          /* The merged min_value is the sum of the min_values from target
             and source.  */
          merge_min = tgt_histo[tgt_i].min_value + src_histo[src_i].min_value;

          /* Compute the portion of source and target entries' cum_value
             that will be apportioned to the counters being merged.
             The total remaining cum_value from each entry is divided
             equally among the counters from that histogram entry if we
             are not merging all of them.  */
          merge_src_cum = src_cum;
          if (merge_num < src_num)
            merge_src_cum = merge_num * src_cum / src_num;
          merge_tgt_cum = tgt_cum;
          if (merge_num < tgt_num)
            merge_tgt_cum = merge_num * tgt_cum / tgt_num;
          /* The merged cum_value is the sum of the source and target
             components.  */
          merge_cum = merge_src_cum + merge_tgt_cum;

          /* Update the remaining number of counters and cum_value left
             to be merged from this source and target entry.  */
          src_cum -= merge_src_cum;
          tgt_cum -= merge_tgt_cum;
          src_num -= merge_num;
          tgt_num -= merge_num;

          /* The merged counters get placed in the new merged histogram
             at the entry for the merged min_value.  */
          tmp_i = gcov_histo_index (merge_min);
          gcov_nonruntime_assert (tmp_i < GCOV_HISTOGRAM_SIZE);
          tmp_histo[tmp_i].num_counters += merge_num;
          tmp_histo[tmp_i].cum_value += merge_cum;
          if (!tmp_histo[tmp_i].min_value ||
              merge_min < tmp_histo[tmp_i].min_value)
            tmp_histo[tmp_i].min_value = merge_min;

          /* Ensure the search for the next non-zero src_histo entry starts
             at the next smallest histogram bucket.  */
          if (!src_num)
            src_i--;
        }
    }

  gcov_nonruntime_assert (tgt_i < 0);

  /* In the case where there were more counters in the source histogram,
     accumulate the remaining unmerged cumulative counter values. Add
     those to the smallest non-zero target histogram entry. Otherwise,
     the total cumulative counter values in the histogram will be smaller
     than the sum_all stored in the summary, which will complicate
     computing the working set information from the histogram later on.  */
  if (src_i >= 0)
    {
      src_cum = 0;
      while (src_i >= 0)
        {
          src_cum += src_histo[src_i].cum_value;
          src_i--;
        }
      /* At this point, tmp_i should be the smallest non-zero entry in the
         tmp_histo.  */
      gcov_nonruntime_assert (tmp_i >= 0 && tmp_i < GCOV_HISTOGRAM_SIZE
                              && tmp_histo[tmp_i].num_counters > 0);
      tmp_histo[tmp_i].cum_value += src_cum;
    }

  /* Finally, copy the merged histogram into tgt_histo.  */
  memcpy (tgt_histo, tmp_histo,
          sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
}
#endif /* !IN_GCOV */

/* This is used by gcov-dump (IN_GCOV == -1) and in the compiler
   (!IN_GCOV && !IN_LIBGCOV).  */
#if IN_GCOV <= 0 && !IN_LIBGCOV
/* Compute the working set information from the counter histogram in
   the profile summary. This is an array of information corresponding to a
   range of percentages of the total execution count (sum_all), and includes
   the number of counters required to cover that working set percentage and
   the minimum counter value in that working set.  */
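/* For example, with sum_all == 1024 and 128 working set entries, entry i
   targets a cumulative count of (i + 1) * 8, except the last entry, which
   is adjusted below to sum_all - sum_all/1024 (roughly 99.9% of sum_all).
   Each filled entry then records how many of the hottest counters, and how
   small a counter value, are needed to reach that cumulative count.  */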
GCOV_LINKAGE void
compute_working_sets (const struct gcov_ctr_summary *summary,
                      gcov_working_set_t *gcov_working_sets)
{
  gcov_type working_set_cum_values[NUM_GCOV_WORKING_SETS];
  gcov_type ws_cum_hotness_incr;
  gcov_type cum, tmp_cum;
  const gcov_bucket_type *histo_bucket;
  unsigned ws_ix, c_num, count;
  int h_ix;

  /* Compute the amount of sum_all that the cumulative hotness grows
     by in each successive working set entry, which depends on the
     number of working set entries.  */
  ws_cum_hotness_incr = summary->sum_all / NUM_GCOV_WORKING_SETS;

  /* Next fill in an array of the cumulative hotness values corresponding
     to each working set summary entry we are going to compute below.
     Skip 0% statistics, which can be extrapolated from the
     rest of the summary data.  */
  cum = ws_cum_hotness_incr;
  for (ws_ix = 0; ws_ix < NUM_GCOV_WORKING_SETS;
       ws_ix++, cum += ws_cum_hotness_incr)
    working_set_cum_values[ws_ix] = cum;
  /* The last summary entry is reserved for (roughly) 99.9% of the
     working set. Divide by 1024 so it becomes a shift, which gives
     almost exactly 99.9%.  */
  working_set_cum_values[NUM_GCOV_WORKING_SETS-1]
      = summary->sum_all - summary->sum_all/1024;

  /* Next, walk through the histogram in descending order of hotness
     and compute the statistics for the working set summary array.
     As histogram entries are accumulated, we check to see which
     working set entries have had their expected cum_value reached
     and fill them in, walking the working set entries in increasing
     size of cum_value.  */
  ws_ix = 0; /* The current entry into the working set array.  */
  cum = 0; /* The current accumulated counter sum.  */
  count = 0; /* The current accumulated count of block counters.  */
  for (h_ix = GCOV_HISTOGRAM_SIZE - 1;
       h_ix >= 0 && ws_ix < NUM_GCOV_WORKING_SETS; h_ix--)
    {
      histo_bucket = &summary->histogram[h_ix];

      /* If we haven't reached the required cumulative counter value for
         the current working set percentage, simply accumulate this histogram
         entry into the running sums and continue to the next histogram
         entry.  */
      if (cum + histo_bucket->cum_value < working_set_cum_values[ws_ix])
        {
          cum += histo_bucket->cum_value;
          count += histo_bucket->num_counters;
          continue;
        }

      /* If adding the current histogram entry's cumulative counter value
         causes us to exceed the current working set size, then estimate
         how many of this histogram entry's counter values are required to
         reach the working set size, and fill in working set entries
         as we reach their expected cumulative value.  */
      for (c_num = 0, tmp_cum = cum;
           c_num < histo_bucket->num_counters && ws_ix < NUM_GCOV_WORKING_SETS;
           c_num++)
        {
          count++;
          /* If we haven't reached the last histogram entry counter, add
             in the minimum value again. This will underestimate the
             cumulative sum so far, because many of the counter values in this
             entry may have been larger than the minimum. We could add in the
             average value every time, but that would require an expensive
             divide operation.  */
          if (c_num + 1 < histo_bucket->num_counters)
            tmp_cum += histo_bucket->min_value;
          /* If we have reached the last histogram entry counter, then add
             in the entire cumulative value.  */
          else
            tmp_cum = cum + histo_bucket->cum_value;

          /* Next walk through successive working set entries and fill in
             the statistics for any whose size we have reached by accumulating
             this histogram counter.  */
          while (ws_ix < NUM_GCOV_WORKING_SETS
                 && tmp_cum >= working_set_cum_values[ws_ix])
            {
              gcov_working_sets[ws_ix].num_counters = count;
              gcov_working_sets[ws_ix].min_counter
                  = histo_bucket->min_value;
              ws_ix++;
            }
        }

      /* Finally, update the running cumulative value since we were
         using a temporary above.  */
      cum += histo_bucket->cum_value;
    }

  gcov_nonruntime_assert (ws_ix == NUM_GCOV_WORKING_SETS);
}
#endif /* IN_GCOV <= 0 && !IN_LIBGCOV */