/* File format for coverage information
   Copyright (C) 1996-2014 Free Software Foundation, Inc.
   Contributed by Bob Manson <manson@cygnus.com>.
   Completely remangled by Nathan Sidwell <nathan@codesourcery.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* Routines declared in gcov-io.h.  This file should be #included by
   another source file, after having #included gcov-io.h.  */
static void gcov_write_block (unsigned);
static gcov_unsigned_t *gcov_write_words (unsigned);
static const gcov_unsigned_t *gcov_read_words (unsigned);
static void gcov_allocate (unsigned);

/* Optimum number of gcov_unsigned_t's read from or written to disk.  */
#define GCOV_BLOCK_SIZE (1 << 10)
GCOV_LINKAGE struct gcov_var
{
  FILE *file;
  gcov_position_t start;	/* Position of first byte of block.  */
  unsigned offset;		/* Read/write position within the block.  */
  unsigned length;		/* Read limit in the block.  */
  unsigned overread;		/* Number of words overread.  */
  int error;			/* < 0 overflow, > 0 disk error.  */
  int mode;			/* < 0 writing, > 0 reading.  */
#if IN_LIBGCOV
  /* Holds one block plus 4 bytes, thus all coverage reads & writes
     fit within this buffer and we always can transfer GCOV_BLOCK_SIZE
     to and from the disk.  libgcov never backtracks and only writes
     4 or 8 byte objects.  */
  gcov_unsigned_t buffer[GCOV_BLOCK_SIZE + 1];
#else
  int endian;			/* Swap endianness.  */
  /* Holds a variable length block, as the compiler can write
     strings and needs to backtrack.  */
  size_t alloc;
  gcov_unsigned_t *buffer;
#endif
} gcov_var;
/* Save the current position in the gcov file.  */

static inline gcov_position_t
gcov_position (void)
{
  gcov_nonruntime_assert (gcov_var.mode > 0);
  return gcov_var.start + gcov_var.offset;
}
/* Return nonzero if the error flag is set.  */

static inline int
gcov_is_error (void)
{
  return gcov_var.file ? gcov_var.error : 1;
}
/* Move to beginning of file and initialize for writing.  */

GCOV_LINKAGE inline void
gcov_rewrite (void)
{
  gcov_var.mode = -1;
  gcov_var.start = 0;
  gcov_var.offset = 0;
  fseek (gcov_var.file, 0L, SEEK_SET);
}
static inline gcov_unsigned_t from_file (gcov_unsigned_t value)
{
#if !IN_LIBGCOV
  if (gcov_var.endian)
    {
      value = (value >> 16) | (value << 16);
      value = ((value & 0xff00ff) << 8) | ((value >> 8) & 0xff00ff);
    }
#endif
  return value;
}
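/* For example, with gcov_var.endian set the two steps above reverse the
   byte order of a word: 0x01020304 becomes 0x03040102 after the halfword
   swap and 0x04030201 after the byte swap within each halfword.  */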
/* Open a gcov file.  NAME is the name of the file to open and MODE
   indicates whether a new file should be created, or an existing file
   opened.  If MODE is >= 0 an existing file will be opened, if
   possible, and if MODE is <= 0, a new file will be created.  Use
   MODE=0 to attempt to reopen an existing file and then fall back on
   creating a new one.  If MODE < 0, the file will be opened in
   read-only mode.  Otherwise it will be opened for modification.
   Return zero on failure, >0 on opening an existing file and <0 on
   creating a new one.  */
GCOV_LINKAGE int
#if IN_LIBGCOV
gcov_open (const char *name)
#else
gcov_open (const char *name, int mode)
#endif
{
#if IN_LIBGCOV
  const int mode = 0;
#endif
#if GCOV_LOCKED
  struct flock s_flock;
  int fd;

  s_flock.l_whence = SEEK_SET;
  s_flock.l_start = 0;
  s_flock.l_len = 0;	/* Until EOF.  */
  s_flock.l_pid = getpid ();
#endif

  gcov_nonruntime_assert (!gcov_var.file);
  gcov_var.start = 0;
  gcov_var.offset = gcov_var.length = 0;
  gcov_var.overread = -1u;
  gcov_var.error = 0;
#if !IN_LIBGCOV
  gcov_var.endian = 0;
#endif
#if GCOV_LOCKED
  if (mode > 0)
    {
      /* Read-only mode - acquire a read-lock.  */
      s_flock.l_type = F_RDLCK;
      /* pass mode (ignored) for compatibility */
      fd = open (name, O_RDONLY, S_IRUSR | S_IWUSR);
    }
  else if (mode < 0)
    {
      /* Write mode - acquire a write-lock.  */
      s_flock.l_type = F_WRLCK;
      fd = open (name, O_RDWR | O_CREAT | O_TRUNC, 0666);
    }
  else /* mode == 0 */
    {
      /* Read-Write mode - acquire a write-lock.  */
      s_flock.l_type = F_WRLCK;
      fd = open (name, O_RDWR | O_CREAT, 0666);
    }
  if (fd < 0)
    return 0;

  while (fcntl (fd, F_SETLKW, &s_flock) && errno == EINTR)
    continue;

  gcov_var.file = fdopen (fd, (mode > 0) ? "rb" : "r+b");

  if (!gcov_var.file)
    {
      close (fd);
      return 0;
    }

  if (mode > 0)
    gcov_var.mode = 1;
  else if (mode == 0)
    {
      struct stat st;

      if (fstat (fd, &st) < 0)
        {
          fclose (gcov_var.file);
          gcov_var.file = 0;
          return 0;
        }
      if (st.st_size != 0)
        gcov_var.mode = 1;
      else
        gcov_var.mode = mode * 2 + 1;
    }
  else
    gcov_var.mode = mode * 2 + 1;
#else
  if (mode >= 0)
    gcov_var.file = fopen (name, (mode > 0) ? "rb" : "r+b");

  if (gcov_var.file)
    gcov_var.mode = 1;
  else if (mode <= 0)
    {
      gcov_var.file = fopen (name, "w+b");
      if (gcov_var.file)
        gcov_var.mode = mode * 2 + 1;
    }
  if (!gcov_var.file)
    return 0;
#endif

  setbuf (gcov_var.file, (char *)0);

  return 1;
}
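/* For example, gcov_open (name, 1) opens an existing file read-only,
   gcov_open (name, -1) creates or truncates the file for writing, and
   gcov_open (name, 0) opens an existing file for modification, falling
   back to creating a new one; in every case a zero return means the open
   failed.  */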
/* Close the current gcov file.  Flushes data to disk.  Returns nonzero
   on failure or error flag set.  */

GCOV_LINKAGE int
gcov_close (void)
{
  if (gcov_var.file)
    {
#if !IN_GCOV
      if (gcov_var.offset && gcov_var.mode < 0)
        gcov_write_block (gcov_var.offset);
#endif
      fclose (gcov_var.file);
      gcov_var.file = 0;
      gcov_var.length = 0;
    }
#if !IN_LIBGCOV
  free (gcov_var.buffer);
  gcov_var.alloc = 0;
  gcov_var.buffer = 0;
#endif
  gcov_var.mode = 0;
  return gcov_var.error;
}
/* Check if MAGIC is EXPECTED.  Use it to determine endianness of the
   file.  Returns +1 for same endian, -1 for other endian and zero for
   not EXPECTED.  */

GCOV_LINKAGE int
gcov_magic (gcov_unsigned_t magic, gcov_unsigned_t expected)
{
  if (magic == expected)
    return 1;
  magic = (magic >> 16) | (magic << 16);
  magic = ((magic & 0xff00ff) << 8) | ((magic >> 8) & 0xff00ff);
  if (magic == expected)
    {
      gcov_var.endian = 1;
      return -1;
    }
  return 0;
}
static void
gcov_allocate (unsigned length)
{
  size_t new_size = gcov_var.alloc;

  if (!new_size)
    new_size = GCOV_BLOCK_SIZE;
  new_size += length;
  new_size *= 2;

  gcov_var.alloc = new_size;
  gcov_var.buffer = XRESIZEVAR (gcov_unsigned_t, gcov_var.buffer, new_size << 2);
}
/* Write out the current block, if needs be.  */

static void
gcov_write_block (unsigned size)
{
  if (fwrite (gcov_var.buffer, size << 2, 1, gcov_var.file) != 1)
    gcov_var.error = -1;
  gcov_var.start += size;
  gcov_var.offset -= size;
}
/* Allocate space to write BYTES bytes to the gcov file.  Return a
   pointer to those bytes, or NULL on failure.  */

static gcov_unsigned_t *
gcov_write_words (unsigned words)
{
  gcov_unsigned_t *result;

  gcov_nonruntime_assert (gcov_var.mode < 0);
#if IN_LIBGCOV
  if (gcov_var.offset >= GCOV_BLOCK_SIZE)
    {
      gcov_write_block (GCOV_BLOCK_SIZE);
      if (gcov_var.offset)
        memcpy (gcov_var.buffer, gcov_var.buffer + GCOV_BLOCK_SIZE, 4);
    }
#else
  if (gcov_var.offset + words > gcov_var.alloc)
    gcov_allocate (gcov_var.offset + words);
#endif
  result = &gcov_var.buffer[gcov_var.offset];
  gcov_var.offset += words;

  return result;
}
/* Write unsigned VALUE to coverage file.  Sets error flag
   appropriately.  */

GCOV_LINKAGE void
gcov_write_unsigned (gcov_unsigned_t value)
{
  gcov_unsigned_t *buffer = gcov_write_words (1);

  buffer[0] = value;
}
/* Write counter VALUE to coverage file.  Sets error flag
   appropriately.  */

#if IN_LIBGCOV
GCOV_LINKAGE void
gcov_write_counter (gcov_type value)
{
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = (gcov_unsigned_t) value;
  if (sizeof (value) > sizeof (gcov_unsigned_t))
    buffer[1] = (gcov_unsigned_t) (value >> 32);
  else
    buffer[1] = 0;
}
#endif /* IN_LIBGCOV */
/* Write STRING to coverage file.  Sets error flag on file
   error, overflow flag on overflow.  */

GCOV_LINKAGE void
gcov_write_string (const char *string)
{
  unsigned length = 0;
  unsigned alloc = 0;
  gcov_unsigned_t *buffer;

  if (string)
    {
      length = strlen (string);
      alloc = (length + 4) >> 2;
    }

  buffer = gcov_write_words (1 + alloc);

  buffer[0] = alloc;
  buffer[alloc] = 0;
  memcpy (&buffer[1], string, length);
}
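/* For example, writing the string "main" stores alloc = (4 + 4) >> 2 = 2,
   so three words go to the file: a word holding the length 2, followed by
   two words containing the bytes 'm' 'a' 'i' 'n' and four NUL padding
   bytes.  Writing a NULL string stores the single word 0.  */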
#if !IN_LIBGCOV
/* Write a tag TAG and reserve space for the record length.  Return a
   value to be used for gcov_write_length.  */

GCOV_LINKAGE gcov_position_t
gcov_write_tag (gcov_unsigned_t tag)
{
  gcov_position_t result = gcov_var.start + gcov_var.offset;
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = tag;
  buffer[1] = 0;

  return result;
}
/* Write a record length using POSITION, which was returned by
   gcov_write_tag.  The current file position is the end of the
   record, and is restored before returning.  Returns nonzero on
   overflow.  */

GCOV_LINKAGE void
gcov_write_length (gcov_position_t position)
{
  unsigned offset;
  gcov_unsigned_t length;
  gcov_unsigned_t *buffer;

  gcov_nonruntime_assert (gcov_var.mode < 0);
  gcov_nonruntime_assert (position + 2 <= gcov_var.start + gcov_var.offset);
  gcov_nonruntime_assert (position >= gcov_var.start);
  offset = position - gcov_var.start;
  length = gcov_var.offset - offset - 2;
  buffer = (gcov_unsigned_t *) &gcov_var.buffer[offset];
  buffer[1] = length;
  if (gcov_var.offset >= GCOV_BLOCK_SIZE)
    gcov_write_block (gcov_var.offset);
}
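/* A typical compiler-side write sequence therefore looks roughly like:

       gcov_position_t pos = gcov_write_tag (GCOV_TAG_FUNCTION);
       gcov_write_unsigned (ident);
       ...
       gcov_write_length (pos);

   gcov_write_tag records the tag with a zero placeholder length, and
   gcov_write_length later backpatches that placeholder with the number of
   words written in between.  */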
#else /* IN_LIBGCOV */
/* Write a tag TAG and length LENGTH.  */

GCOV_LINKAGE void
gcov_write_tag_length (gcov_unsigned_t tag, gcov_unsigned_t length)
{
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = tag;
  buffer[1] = length;
}
/* Write a summary structure to the gcov file.  Return nonzero on
   overflow.  */

GCOV_LINKAGE void
gcov_write_summary (gcov_unsigned_t tag, const struct gcov_summary *summary)
{
  unsigned ix, h_ix, bv_ix, h_cnt = 0;
  const struct gcov_ctr_summary *csum;
  unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];

  /* Count number of non-zero histogram entries, and fill in a bit vector
     of non-zero indices.  The histogram is only currently computed for arc
     counters.  */
  for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
    histo_bitvector[bv_ix] = 0;
  csum = &summary->ctrs[GCOV_COUNTER_ARCS];
  for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
    {
      if (csum->histogram[h_ix].num_counters > 0)
        {
          histo_bitvector[h_ix / 32] |= 1 << (h_ix % 32);
          h_cnt++;
        }
    }
  gcov_write_tag_length (tag, GCOV_TAG_SUMMARY_LENGTH (h_cnt));
  gcov_write_unsigned (summary->checksum);
  for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
    {
      gcov_write_unsigned (csum->num);
      gcov_write_unsigned (csum->runs);
      gcov_write_counter (csum->sum_all);
      gcov_write_counter (csum->run_max);
      gcov_write_counter (csum->sum_max);
      if (ix != GCOV_COUNTER_ARCS)
        {
          /* Only the arc counter has a histogram; write a zero bit vector
             for the other summable counters.  */
          for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
            gcov_write_unsigned (0);
          continue;
        }
      for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
        gcov_write_unsigned (histo_bitvector[bv_ix]);
      for (h_ix = 0; h_ix < GCOV_HISTOGRAM_SIZE; h_ix++)
        {
          if (!csum->histogram[h_ix].num_counters)
            continue;
          gcov_write_unsigned (csum->histogram[h_ix].num_counters);
          gcov_write_counter (csum->histogram[h_ix].min_value);
          gcov_write_counter (csum->histogram[h_ix].cum_value);
        }
    }
}
#endif /* IN_LIBGCOV */
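/* As an illustration of the encoding above: with the usual histogram of
   GCOV_HISTOGRAM_SIZE == 252 entries, the bit vector occupies
   (252 + 31) / 32 = 8 words.  A histogram whose only non-zero entries are
   at indices 3 and 40 is written as histo_bitvector[0] == 0x8,
   histo_bitvector[1] == 0x100, the remaining six words zero, followed by
   just the two (num_counters, min_value, cum_value) triples.  */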
/* Return a pointer to read BYTES bytes from the gcov file.  Returns
   NULL on failure (read past EOF).  */

static const gcov_unsigned_t *
gcov_read_words (unsigned words)
{
  const gcov_unsigned_t *result;
  unsigned excess = gcov_var.length - gcov_var.offset;

  gcov_nonruntime_assert (gcov_var.mode > 0);
  if (excess < words)
    {
      gcov_var.start += gcov_var.offset;
#if IN_LIBGCOV
      if (excess)
        memcpy (gcov_var.buffer, gcov_var.buffer + gcov_var.offset, 4);
#else
      memmove (gcov_var.buffer, gcov_var.buffer + gcov_var.offset, excess * 4);
#endif
      gcov_var.offset = 0;
      gcov_var.length = excess;
#if IN_LIBGCOV
      excess = GCOV_BLOCK_SIZE;
#else
      if (gcov_var.length + words > gcov_var.alloc)
        gcov_allocate (gcov_var.length + words);
      excess = gcov_var.alloc - gcov_var.length;
#endif
      excess = fread (gcov_var.buffer + gcov_var.length,
                      1, excess << 2, gcov_var.file) >> 2;
      gcov_var.length += excess;
      if (gcov_var.length < words)
        {
          gcov_var.overread += words - gcov_var.length;
          gcov_var.length = 0;
          return 0;
        }
    }
  result = &gcov_var.buffer[gcov_var.offset];
  gcov_var.offset += words;

  return result;
}
/* Read unsigned value from a coverage file.  Sets error flag on file
   error, overflow flag on overflow.  */

GCOV_LINKAGE gcov_unsigned_t
gcov_read_unsigned (void)
{
  gcov_unsigned_t value;
  const gcov_unsigned_t *buffer = gcov_read_words (1);

  if (!buffer)
    return 0;
  value = from_file (buffer[0]);
  return value;
}
/* Read counter value from a coverage file.  Sets error flag on file
   error, overflow flag on overflow.  */

GCOV_LINKAGE gcov_type
gcov_read_counter (void)
{
  gcov_type value;
  const gcov_unsigned_t *buffer = gcov_read_words (2);

  if (!buffer)
    return 0;
  value = from_file (buffer[0]);
  if (sizeof (value) > sizeof (gcov_unsigned_t))
    value |= ((gcov_type) from_file (buffer[1])) << 32;
  else if (buffer[1])
    gcov_var.error = -1;

  return value;
}
/* Read string from coverage file.  Returns a pointer to a static
   buffer, or NULL on empty string.  You must copy the string before
   calling another gcov function.  */

GCOV_LINKAGE const char *
gcov_read_string (void)
{
  unsigned length = gcov_read_unsigned ();

  if (!length)
    return 0;

  return (const char *) gcov_read_words (length);
}
/* Read a summary structure from the gcov file into SUMMARY.  */

GCOV_LINKAGE void
gcov_read_summary (struct gcov_summary *summary)
{
  unsigned ix, h_ix, bv_ix, h_cnt = 0;
  struct gcov_ctr_summary *csum;
  unsigned histo_bitvector[GCOV_HISTOGRAM_BITVECTOR_SIZE];
  unsigned cur_bitvector;

  summary->checksum = gcov_read_unsigned ();
  for (csum = summary->ctrs, ix = GCOV_COUNTERS_SUMMABLE; ix--; csum++)
    {
      csum->num = gcov_read_unsigned ();
      csum->runs = gcov_read_unsigned ();
      csum->sum_all = gcov_read_counter ();
      csum->run_max = gcov_read_counter ();
      csum->sum_max = gcov_read_counter ();
      memset (csum->histogram, 0,
              sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
      for (bv_ix = 0; bv_ix < GCOV_HISTOGRAM_BITVECTOR_SIZE; bv_ix++)
        {
          histo_bitvector[bv_ix] = gcov_read_unsigned ();
#if IN_LIBGCOV
          /* When building libgcov we don't include system.h, which includes
             hwint.h (where popcount_hwi is declared).  However, libgcov.a
             is built by the bootstrapped compiler and therefore the builtins
             are always available.  */
          h_cnt += __builtin_popcount (histo_bitvector[bv_ix]);
#else
          h_cnt += popcount_hwi (histo_bitvector[bv_ix]);
#endif
        }
      bv_ix = 0;
      h_ix = 0;
      cur_bitvector = 0;
      while (h_cnt--)
        {
          /* Find the index corresponding to the next entry we will read in.
             First find the next non-zero bitvector and re-initialize
             the histogram index accordingly, then right shift and increment
             the index until we find a set bit.  */
          while (!cur_bitvector)
            {
              h_ix = bv_ix * 32;
              if (bv_ix >= GCOV_HISTOGRAM_BITVECTOR_SIZE)
                gcov_error ("corrupted profile info: summary histogram "
                            "bitvector is corrupt");
              cur_bitvector = histo_bitvector[bv_ix++];
            }
          while (!(cur_bitvector & 0x1))
            {
              h_ix++;
              cur_bitvector >>= 1;
            }
          if (h_ix >= GCOV_HISTOGRAM_SIZE)
            gcov_error ("corrupted profile info: summary histogram "
                        "index is corrupt");

          csum->histogram[h_ix].num_counters = gcov_read_unsigned ();
          csum->histogram[h_ix].min_value = gcov_read_counter ();
          csum->histogram[h_ix].cum_value = gcov_read_counter ();
          /* Shift off the index we are done with and increment to the
             corresponding next histogram entry.  */
          cur_bitvector >>= 1;
          h_ix++;
        }
    }
}
/* Reset to a known position.  BASE should have been obtained from
   gcov_position, LENGTH should be a record length.  */

GCOV_LINKAGE void
gcov_sync (gcov_position_t base, gcov_unsigned_t length)
{
  gcov_nonruntime_assert (gcov_var.mode > 0);
  base += length;
  if (base - gcov_var.start <= gcov_var.length)
    gcov_var.offset = base - gcov_var.start;
  else
    {
      gcov_var.offset = gcov_var.length = 0;
      fseek (gcov_var.file, base << 2, SEEK_SET);
      gcov_var.start = ftell (gcov_var.file) >> 2;
    }
}
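/* Readers typically use gcov_sync to skip or resynchronise after a record,
   along the lines of:

       tag = gcov_read_unsigned ();
       length = gcov_read_unsigned ();
       base = gcov_position ();
       ... read as much of the record body as needed ...
       gcov_sync (base, length);

   Since BASE is taken just after the length word and LENGTH counts only the
   payload words, adding LENGTH words lands exactly on the next tag.  */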
/* Move to a given position in a gcov file.  */

GCOV_LINKAGE void
gcov_seek (gcov_position_t base)
{
  if (gcov_var.offset)
    gcov_write_block (gcov_var.offset);
  fseek (gcov_var.file, base << 2, SEEK_SET);
  gcov_var.start = ftell (gcov_var.file) >> 2;
}
/* Return the modification time of the current gcov file.  */

GCOV_LINKAGE time_t
gcov_time (void)
{
  struct stat status;

  if (fstat (fileno (gcov_var.file), &status))
    return 0;
  else
    return status.st_mtime;
}
#if !IN_GCOV
/* Determine the index into histogram for VALUE.  */

GCOV_LINKAGE unsigned
gcov_histo_index (gcov_type value)
{
  gcov_type_unsigned v = (gcov_type_unsigned)value;
  unsigned r = 0;
  unsigned prev2bits = 0;

  /* Find index into log2 scale histogram, where each of the log2
     sized buckets is divided into 4 linear sub-buckets for better
     focus in the higher buckets.  */

  /* Find the place of the most-significant bit set.  */
  if (v > 0)
    {
#if IN_LIBGCOV
      /* When building libgcov we don't include system.h, which includes
         hwint.h (where floor_log2 is declared).  However, libgcov.a
         is built by the bootstrapped compiler and therefore the builtins
         are always available.  */
      r = sizeof (long long) * __CHAR_BIT__ - 1 - __builtin_clzll (v);
#else
      /* We use floor_log2 from hwint.c, which takes a HOST_WIDE_INT
         that is 64 bits and gcov_type_unsigned is 64 bits.  */
      r = floor_log2 (v);
#endif
    }

  /* If at most the 2 least significant bits are set (value is
     0 - 3) then that value is our index into the lowest set of
     four buckets.  */
  if (r < 2)
    return (unsigned)value;

  gcov_nonruntime_assert (r < 64);

  /* Find the two next most significant bits to determine which
     of the four linear sub-buckets to select.  */
  prev2bits = (v >> (r - 2)) & 0x3;
  /* Finally, compose the final bucket index from the log2 index and
     the next 2 bits.  The minimum r value at this point is 2 since we
     returned above if r was less than 2, so the minimum bucket at this
     point is 4.  */
  return (r - 1) * 4 + prev2bits;
}
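/* Worked examples of the bucketing above: values 0-3 map directly to
   buckets 0-3.  For value 10 (binary 1010), r = 3 and
   prev2bits = (10 >> 1) & 3 = 1, giving bucket (3 - 1) * 4 + 1 = 9.  The
   largest possible 64-bit value has r = 63 and prev2bits = 3, giving
   bucket (63 - 1) * 4 + 3 = 251, which is why the histogram needs
   GCOV_HISTOGRAM_SIZE == 252 entries.  */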
/* Merge SRC_HISTO into TGT_HISTO.  The counters are assumed to be in
   the same relative order in both histograms, and are matched up
   and merged in reverse order.  Each counter is assigned an equal portion of
   its entry's original cumulative counter value when computing the
   new merged cum_value.  */

static void gcov_histogram_merge (gcov_bucket_type *tgt_histo,
                                  gcov_bucket_type *src_histo)
{
  int src_i, tgt_i, tmp_i = 0;
  unsigned src_num, tgt_num, merge_num;
  gcov_type src_cum, tgt_cum, merge_src_cum, merge_tgt_cum, merge_cum;
  gcov_type merge_min;
  gcov_bucket_type tmp_histo[GCOV_HISTOGRAM_SIZE];
  int src_done = 0;

  memset (tmp_histo, 0, sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);

  /* Assume that the counters are in the same relative order in both
     histograms.  Walk the histograms from largest to smallest entry,
     matching up and combining counters in order.  */
  src_num = 0;
  src_cum = 0;
  src_i = GCOV_HISTOGRAM_SIZE - 1;
  for (tgt_i = GCOV_HISTOGRAM_SIZE - 1; tgt_i >= 0 && !src_done; tgt_i--)
    {
      tgt_num = tgt_histo[tgt_i].num_counters;
      tgt_cum = tgt_histo[tgt_i].cum_value;
      /* Keep going until all of the target histogram's counters at this
         position have been matched and merged with counters from the
         source histogram.  */
      while (tgt_num > 0 && !src_done)
        {
          /* If this is either the first time through this loop or we just
             exhausted the previous non-zero source histogram entry, look
             for the next non-zero source histogram entry.  */
          if (!src_num)
            {
              /* Locate the next non-zero entry.  */
              while (src_i >= 0 && !src_histo[src_i].num_counters)
                src_i--;
              /* If source histogram has fewer counters, then just copy over the
                 remaining target counters and quit.  */
              if (src_i < 0)
                {
                  tmp_histo[tgt_i].num_counters += tgt_num;
                  tmp_histo[tgt_i].cum_value += tgt_cum;
                  if (!tmp_histo[tgt_i].min_value ||
                      tgt_histo[tgt_i].min_value < tmp_histo[tgt_i].min_value)
                    tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
                  while (--tgt_i >= 0)
                    {
                      tmp_histo[tgt_i].num_counters
                          += tgt_histo[tgt_i].num_counters;
                      tmp_histo[tgt_i].cum_value += tgt_histo[tgt_i].cum_value;
                      if (!tmp_histo[tgt_i].min_value ||
                          tgt_histo[tgt_i].min_value
                          < tmp_histo[tgt_i].min_value)
                        tmp_histo[tgt_i].min_value = tgt_histo[tgt_i].min_value;
                    }

                  src_done = 1;
                  break;
                }

              src_num = src_histo[src_i].num_counters;
              src_cum = src_histo[src_i].cum_value;
            }

          /* The number of counters to merge on this pass is the minimum
             of the remaining counters from the current target and source
             histogram entries.  */
          merge_num = tgt_num;
          if (src_num < merge_num)
            merge_num = src_num;

          /* The merged min_value is the sum of the min_values from target
             and source.  */
          merge_min = tgt_histo[tgt_i].min_value + src_histo[src_i].min_value;

          /* Compute the portion of source and target entries' cum_value
             that will be apportioned to the counters being merged.
             The total remaining cum_value from each entry is divided
             equally among the counters from that histogram entry if we
             are not merging all of them.  */
          merge_src_cum = src_cum;
          if (merge_num < src_num)
            merge_src_cum = merge_num * src_cum / src_num;
          merge_tgt_cum = tgt_cum;
          if (merge_num < tgt_num)
            merge_tgt_cum = merge_num * tgt_cum / tgt_num;
          /* The merged cum_value is the sum of the source and target
             components.  */
          merge_cum = merge_src_cum + merge_tgt_cum;

          /* Update the remaining number of counters and cum_value left
             to be merged from this source and target entry.  */
          src_cum -= merge_src_cum;
          tgt_cum -= merge_tgt_cum;
          src_num -= merge_num;
          tgt_num -= merge_num;

          /* The merged counters get placed in the new merged histogram
             at the entry for the merged min_value.  */
          tmp_i = gcov_histo_index (merge_min);
          gcov_nonruntime_assert (tmp_i < GCOV_HISTOGRAM_SIZE);
          tmp_histo[tmp_i].num_counters += merge_num;
          tmp_histo[tmp_i].cum_value += merge_cum;
          if (!tmp_histo[tmp_i].min_value ||
              merge_min < tmp_histo[tmp_i].min_value)
            tmp_histo[tmp_i].min_value = merge_min;

          /* Ensure the search for the next non-zero src_histo entry starts
             at the next smallest histogram bucket.  */
          if (!src_num)
            src_i--;
        }
    }

  gcov_nonruntime_assert (tgt_i < 0);

  /* In the case where there were more counters in the source histogram,
     accumulate the remaining unmerged cumulative counter values.  Add
     those to the smallest non-zero target histogram entry.  Otherwise,
     the total cumulative counter values in the histogram will be smaller
     than the sum_all stored in the summary, which will complicate
     computing the working set information from the histogram later on.  */
  if (src_i >= 0)
    {
      /* Accumulate the remaining unmerged source counter values.  */
      while (src_i >= 0)
        {
          src_cum += src_histo[src_i].cum_value;
          src_i--;
        }
      /* At this point, tmp_i should be the smallest non-zero entry in the
         tmp_histo.  */
      gcov_nonruntime_assert (tmp_i >= 0 && tmp_i < GCOV_HISTOGRAM_SIZE
                              && tmp_histo[tmp_i].num_counters > 0);
      tmp_histo[tmp_i].cum_value += src_cum;
    }

  /* Finally, copy the merged histogram into tgt_histo.  */
  memcpy (tgt_histo, tmp_histo,
          sizeof (gcov_bucket_type) * GCOV_HISTOGRAM_SIZE);
}
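/* A small worked example of one merge step: if the current target entry
   holds 3 counters with cum_value 30 and min_value 8, and the current
   source entry holds 1 counter with cum_value 5 and min_value 5, then
   merge_num = 1, merge_min = 13, merge_src_cum = 5,
   merge_tgt_cum = 1 * 30 / 3 = 10 and merge_cum = 15; that single merged
   counter is accumulated into tmp_histo[gcov_histo_index (13)], i.e.
   bucket 10, and 2 counters with cum_value 20 remain in the target entry
   for the next pass.  */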
#endif /* !IN_GCOV */

/* This is used by gcov-dump (IN_GCOV == -1) and in the compiler
   (!IN_GCOV && !IN_LIBGCOV).  */
#if IN_GCOV <= 0 && !IN_LIBGCOV

/* Compute the working set information from the counter histogram in
   the profile summary.  This is an array of information corresponding to a
   range of percentages of the total execution count (sum_all), and includes
   the number of counters required to cover that working set percentage and
   the minimum counter value in that working set.  */
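/* For instance, with sum_all == 1000000 and, say, 128 working set entries,
   the cumulative targets computed below are 7812, 15624, 23436, ...
   (multiples of sum_all / 128), except that the last entry is overridden to
   1000000 - 1000000 / 1024 = 999024, i.e. roughly the 99.9% working set.  */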
GCOV_LINKAGE void
compute_working_sets (const struct gcov_ctr_summary *summary,
                      gcov_working_set_t *gcov_working_sets)
{
  gcov_type working_set_cum_values[NUM_GCOV_WORKING_SETS];
  gcov_type ws_cum_hotness_incr;
  gcov_type cum, tmp_cum;
  const gcov_bucket_type *histo_bucket;
  unsigned ws_ix, c_num, count;
  int h_ix;

  /* Compute the amount of sum_all that the cumulative hotness grows
     by in each successive working set entry, which depends on the
     number of working set entries.  */
  ws_cum_hotness_incr = summary->sum_all / NUM_GCOV_WORKING_SETS;

  /* Next fill in an array of the cumulative hotness values corresponding
     to each working set summary entry we are going to compute below.
     Skip 0% statistics, which can be extrapolated from the
     rest of the summary data.  */
  cum = ws_cum_hotness_incr;
  for (ws_ix = 0; ws_ix < NUM_GCOV_WORKING_SETS;
       ws_ix++, cum += ws_cum_hotness_incr)
    working_set_cum_values[ws_ix] = cum;
  /* The last summary entry is reserved for (roughly) 99.9% of the
     working set.  Divide by 1024 so it becomes a shift, which gives
     almost exactly 99.9%.  */
  working_set_cum_values[NUM_GCOV_WORKING_SETS-1]
      = summary->sum_all - summary->sum_all/1024;

  /* Next, walk through the histogram in descending order of hotness
     and compute the statistics for the working set summary array.
     As histogram entries are accumulated, we check to see which
     working set entries have had their expected cum_value reached
     and fill them in, walking the working set entries in increasing
     size of cum_value.  */
  ws_ix = 0; /* The current entry into the working set array.  */
  cum = 0; /* The current accumulated counter sum.  */
  count = 0; /* The current accumulated count of block counters.  */
  for (h_ix = GCOV_HISTOGRAM_SIZE - 1;
       h_ix >= 0 && ws_ix < NUM_GCOV_WORKING_SETS; h_ix--)
    {
      histo_bucket = &summary->histogram[h_ix];

      /* If we haven't reached the required cumulative counter value for
         the current working set percentage, simply accumulate this histogram
         entry into the running sums and continue to the next histogram
         entry.  */
      if (cum + histo_bucket->cum_value < working_set_cum_values[ws_ix])
        {
          cum += histo_bucket->cum_value;
          count += histo_bucket->num_counters;
          continue;
        }

      /* If adding the current histogram entry's cumulative counter value
         causes us to exceed the current working set size, then estimate
         how many of this histogram entry's counter values are required to
         reach the working set size, and fill in working set entries
         as we reach their expected cumulative value.  */
      for (c_num = 0, tmp_cum = cum;
           c_num < histo_bucket->num_counters && ws_ix < NUM_GCOV_WORKING_SETS;
           c_num++)
        {
          count++;
          /* If we haven't reached the last histogram entry counter, add
             in the minimum value again.  This will underestimate the
             cumulative sum so far, because many of the counter values in this
             entry may have been larger than the minimum.  We could add in the
             average value every time, but that would require an expensive
             divide operation.  */
          if (c_num + 1 < histo_bucket->num_counters)
            tmp_cum += histo_bucket->min_value;
          /* If we have reached the last histogram entry counter, then add
             in the entire cumulative value.  */
          else
            tmp_cum = cum + histo_bucket->cum_value;

          /* Next walk through successive working set entries and fill in
             the statistics for any whose size we have reached by accumulating
             this histogram counter.  */
          while (ws_ix < NUM_GCOV_WORKING_SETS
                 && tmp_cum >= working_set_cum_values[ws_ix])
            {
              gcov_working_sets[ws_ix].num_counters = count;
              gcov_working_sets[ws_ix].min_counter
                  = histo_bucket->min_value;
              ws_ix++;
            }
        }

      /* Finally, update the running cumulative value since we were
         using a temporary above.  */
      cum += histo_bucket->cum_value;
    }
  gcov_nonruntime_assert (ws_ix == NUM_GCOV_WORKING_SETS);
}
#endif /* IN_GCOV <= 0 && !IN_LIBGCOV */