1 /* File format for coverage information
2 Copyright (C) 1996-2014 Free Software Foundation, Inc.
3 Contributed by Bob Manson <manson@cygnus.com>.
4 Completely remangled by Nathan Sidwell <nathan@codesourcery.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
18 Under Section 7 of GPL version 3, you are granted additional
19 permissions described in the GCC Runtime Library Exception, version
20 3.1, as published by the Free Software Foundation.
22 You should have received a copy of the GNU General Public License and
23 a copy of the GCC Runtime Library Exception along with this program;
24 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
25 <http://www.gnu.org/licenses/>. */
27 /* Routines declared in gcov-io.h. This file should be #included by
28 another source file, after having #included gcov-io.h. */
31 static void gcov_write_block (unsigned);
32 static gcov_unsigned_t
*gcov_write_words (unsigned);
34 static const gcov_unsigned_t
*gcov_read_words (unsigned);
36 static void gcov_allocate (unsigned);
39 /* Optimum number of gcov_unsigned_t's read from or written to disk. */
40 #define GCOV_BLOCK_SIZE (1 << 10)
42 GCOV_LINKAGE
struct gcov_var
45 gcov_position_t start
; /* Position of first byte of block */
46 unsigned offset
; /* Read/write position within the block. */
47 unsigned length
; /* Read limit in the block. */
48 unsigned overread
; /* Number of words overread. */
49 int error
; /* < 0 overflow, > 0 disk error. */
50 int mode
; /* < 0 writing, > 0 reading */
52 /* Holds one block plus 4 bytes, thus all coverage reads & writes
53 fit within this buffer and we always can transfer GCOV_BLOCK_SIZE
54 to and from the disk. libgcov never backtracks and only writes 4
56 gcov_unsigned_t buffer
[GCOV_BLOCK_SIZE
+ 1];
58 int endian
; /* Swap endianness. */
59 /* Holds a variable length block, as the compiler can write
60 strings and needs to backtrack. */
62 gcov_unsigned_t
*buffer
;
66 /* Save the current position in the gcov file. */
67 static inline gcov_position_t
70 gcc_assert (gcov_var
.mode
> 0);
71 return gcov_var
.start
+ gcov_var
.offset
;
74 /* Return nonzero if the error flag is set. */
78 return gcov_var
.file
? gcov_var
.error
: 1;
82 /* Move to beginning of file and initialize for writing. */
83 GCOV_LINKAGE
inline void
86 gcc_assert (gcov_var
.mode
> 0);
90 fseek (gcov_var
.file
, 0L, SEEK_SET
);
94 static inline gcov_unsigned_t
from_file (gcov_unsigned_t value
)
99 value
= (value
>> 16) | (value
<< 16);
100 value
= ((value
& 0xff00ff) << 8) | ((value
>> 8) & 0xff00ff);
106 /* Open a gcov file. NAME is the name of the file to open and MODE
107 indicates whether a new file should be created, or an existing file
108 opened. If MODE is >= 0 an existing file will be opened, if
109 possible, and if MODE is <= 0, a new file will be created. Use
110 MODE=0 to attempt to reopen an existing file and then fall back on
111 creating a new one. If MODE < 0, the file will be opened in
112 read-only mode. Otherwise it will be opened for modification.
113 Return zero on failure, >0 on opening an existing file and <0 on
114 creating a new one. */
118 gcov_open (const char *name
)
120 gcov_open (const char *name
, int mode
)
127 struct flock s_flock
;
130 s_flock
.l_whence
= SEEK_SET
;
132 s_flock
.l_len
= 0; /* Until EOF. */
133 s_flock
.l_pid
= getpid ();
136 gcc_assert (!gcov_var
.file
);
138 gcov_var
.offset
= gcov_var
.length
= 0;
139 gcov_var
.overread
= -1u;
147 /* Read-only mode - acquire a read-lock. */
148 s_flock
.l_type
= F_RDLCK
;
149 /* pass mode (ignored) for compatibility */
150 fd
= open (name
, O_RDONLY
, S_IRUSR
| S_IWUSR
);
154 /* Write mode - acquire a write-lock. */
155 s_flock
.l_type
= F_WRLCK
;
156 fd
= open (name
, O_RDWR
| O_CREAT
, 0666);
161 while (fcntl (fd
, F_SETLKW
, &s_flock
) && errno
== EINTR
)
164 gcov_var
.file
= fdopen (fd
, (mode
> 0) ? "rb" : "r+b");
178 if (fstat (fd
, &st
) < 0)
180 fclose (gcov_var
.file
);
187 gcov_var
.mode
= mode
* 2 + 1;
190 gcov_var
.mode
= mode
* 2 + 1;
193 gcov_var
.file
= fopen (name
, (mode
> 0) ? "rb" : "r+b");
199 gcov_var
.file
= fopen (name
, "w+b");
201 gcov_var
.mode
= mode
* 2 + 1;
207 setbuf (gcov_var
.file
, (char *)0);
212 /* Close the current gcov file. Flushes data to disk. Returns nonzero
213 on failure or error flag set. */
221 if (gcov_var
.offset
&& gcov_var
.mode
< 0)
222 gcov_write_block (gcov_var
.offset
);
224 fclose (gcov_var
.file
);
229 free (gcov_var
.buffer
);
234 return gcov_var
.error
;
238 /* Check if MAGIC is EXPECTED. Use it to determine endianness of the
239 file. Returns +1 for same endian, -1 for other endian and zero for
243 gcov_magic (gcov_unsigned_t magic
, gcov_unsigned_t expected
)
245 if (magic
== expected
)
247 magic
= (magic
>> 16) | (magic
<< 16);
248 magic
= ((magic
& 0xff00ff) << 8) | ((magic
>> 8) & 0xff00ff);
249 if (magic
== expected
)
260 gcov_allocate (unsigned length
)
262 size_t new_size
= gcov_var
.alloc
;
265 new_size
= GCOV_BLOCK_SIZE
;
269 gcov_var
.alloc
= new_size
;
270 gcov_var
.buffer
= XRESIZEVAR (gcov_unsigned_t
, gcov_var
.buffer
, new_size
<< 2);
275 /* Write out the current block, if needs be. */
278 gcov_write_block (unsigned size
)
280 if (fwrite (gcov_var
.buffer
, size
<< 2, 1, gcov_var
.file
) != 1)
282 gcov_var
.start
+= size
;
283 gcov_var
.offset
-= size
;
286 /* Allocate space to write BYTES bytes to the gcov file. Return a
287 pointer to those bytes, or NULL on failure. */
289 static gcov_unsigned_t
*
290 gcov_write_words (unsigned words
)
292 gcov_unsigned_t
*result
;
294 gcc_assert (gcov_var
.mode
< 0);
296 if (gcov_var
.offset
>= GCOV_BLOCK_SIZE
)
298 gcov_write_block (GCOV_BLOCK_SIZE
);
301 gcc_assert (gcov_var
.offset
== 1);
302 memcpy (gcov_var
.buffer
, gcov_var
.buffer
+ GCOV_BLOCK_SIZE
, 4);
306 if (gcov_var
.offset
+ words
> gcov_var
.alloc
)
307 gcov_allocate (gcov_var
.offset
+ words
);
309 result
= &gcov_var
.buffer
[gcov_var
.offset
];
310 gcov_var
.offset
+= words
;
315 /* Write unsigned VALUE to coverage file. Sets error flag
319 gcov_write_unsigned (gcov_unsigned_t value
)
321 gcov_unsigned_t
*buffer
= gcov_write_words (1);
/* Write counter VALUE to coverage file.  Sets error flag
   appropriately.  Counters occupy two words on disk: low word first,
   then the high word (zero when gcov_type is only 32 bits).  */

#if IN_LIBGCOV
GCOV_LINKAGE void
gcov_write_counter (gcov_type value)
{
  gcov_unsigned_t *buffer = gcov_write_words (2);

  buffer[0] = (gcov_unsigned_t) value;
  if (sizeof (value) > sizeof (gcov_unsigned_t))
    buffer[1] = (gcov_unsigned_t) (value >> 32);
  else
    buffer[1] = 0;
}
#endif /* IN_LIBGCOV */
344 /* Write STRING to coverage file. Sets error flag on file
345 error, overflow flag on overflow */
348 gcov_write_string (const char *string
)
352 gcov_unsigned_t
*buffer
;
356 length
= strlen (string
);
357 alloc
= (length
+ 4) >> 2;
360 buffer
= gcov_write_words (1 + alloc
);
364 memcpy (&buffer
[1], string
, length
);
369 /* Write a tag TAG and reserve space for the record length. Return a
370 value to be used for gcov_write_length. */
372 GCOV_LINKAGE gcov_position_t
373 gcov_write_tag (gcov_unsigned_t tag
)
375 gcov_position_t result
= gcov_var
.start
+ gcov_var
.offset
;
376 gcov_unsigned_t
*buffer
= gcov_write_words (2);
384 /* Write a record length using POSITION, which was returned by
385 gcov_write_tag. The current file position is the end of the
386 record, and is restored before returning. Returns nonzero on
390 gcov_write_length (gcov_position_t position
)
393 gcov_unsigned_t length
;
394 gcov_unsigned_t
*buffer
;
396 gcc_assert (gcov_var
.mode
< 0);
397 gcc_assert (position
+ 2 <= gcov_var
.start
+ gcov_var
.offset
);
398 gcc_assert (position
>= gcov_var
.start
);
399 offset
= position
- gcov_var
.start
;
400 length
= gcov_var
.offset
- offset
- 2;
401 buffer
= (gcov_unsigned_t
*) &gcov_var
.buffer
[offset
];
403 if (gcov_var
.offset
>= GCOV_BLOCK_SIZE
)
404 gcov_write_block (gcov_var
.offset
);
407 #else /* IN_LIBGCOV */
409 /* Write a tag TAG and length LENGTH. */
412 gcov_write_tag_length (gcov_unsigned_t tag
, gcov_unsigned_t length
)
414 gcov_unsigned_t
*buffer
= gcov_write_words (2);
420 /* Write a summary structure to the gcov file. Return nonzero on
424 gcov_write_summary (gcov_unsigned_t tag
, const struct gcov_summary
*summary
)
426 unsigned ix
, h_ix
, bv_ix
, h_cnt
= 0;
427 const struct gcov_ctr_summary
*csum
;
428 unsigned histo_bitvector
[GCOV_HISTOGRAM_BITVECTOR_SIZE
];
430 /* Count number of non-zero histogram entries, and fill in a bit vector
431 of non-zero indices. The histogram is only currently computed for arc
433 for (bv_ix
= 0; bv_ix
< GCOV_HISTOGRAM_BITVECTOR_SIZE
; bv_ix
++)
434 histo_bitvector
[bv_ix
] = 0;
435 csum
= &summary
->ctrs
[GCOV_COUNTER_ARCS
];
436 for (h_ix
= 0; h_ix
< GCOV_HISTOGRAM_SIZE
; h_ix
++)
438 if (csum
->histogram
[h_ix
].num_counters
> 0)
440 histo_bitvector
[h_ix
/ 32] |= 1 << (h_ix
% 32);
444 gcov_write_tag_length (tag
, GCOV_TAG_SUMMARY_LENGTH (h_cnt
));
445 gcov_write_unsigned (summary
->checksum
);
446 for (csum
= summary
->ctrs
, ix
= GCOV_COUNTERS_SUMMABLE
; ix
--; csum
++)
448 gcov_write_unsigned (csum
->num
);
449 gcov_write_unsigned (csum
->runs
);
450 gcov_write_counter (csum
->sum_all
);
451 gcov_write_counter (csum
->run_max
);
452 gcov_write_counter (csum
->sum_max
);
453 if (ix
!= GCOV_COUNTER_ARCS
)
455 for (bv_ix
= 0; bv_ix
< GCOV_HISTOGRAM_BITVECTOR_SIZE
; bv_ix
++)
456 gcov_write_unsigned (0);
459 for (bv_ix
= 0; bv_ix
< GCOV_HISTOGRAM_BITVECTOR_SIZE
; bv_ix
++)
460 gcov_write_unsigned (histo_bitvector
[bv_ix
]);
461 for (h_ix
= 0; h_ix
< GCOV_HISTOGRAM_SIZE
; h_ix
++)
463 if (!csum
->histogram
[h_ix
].num_counters
)
465 gcov_write_unsigned (csum
->histogram
[h_ix
].num_counters
);
466 gcov_write_counter (csum
->histogram
[h_ix
].min_value
);
467 gcov_write_counter (csum
->histogram
[h_ix
].cum_value
);
471 #endif /* IN_LIBGCOV */
475 /* Return a pointer to read BYTES bytes from the gcov file. Returns
476 NULL on failure (read past EOF). */
478 static const gcov_unsigned_t
*
479 gcov_read_words (unsigned words
)
481 const gcov_unsigned_t
*result
;
482 unsigned excess
= gcov_var
.length
- gcov_var
.offset
;
484 gcc_assert (gcov_var
.mode
> 0);
487 gcov_var
.start
+= gcov_var
.offset
;
491 gcc_assert (excess
== 1);
492 memcpy (gcov_var
.buffer
, gcov_var
.buffer
+ gcov_var
.offset
, 4);
495 memmove (gcov_var
.buffer
, gcov_var
.buffer
+ gcov_var
.offset
, excess
* 4);
498 gcov_var
.length
= excess
;
500 gcc_assert (!gcov_var
.length
|| gcov_var
.length
== 1);
501 excess
= GCOV_BLOCK_SIZE
;
503 if (gcov_var
.length
+ words
> gcov_var
.alloc
)
504 gcov_allocate (gcov_var
.length
+ words
);
505 excess
= gcov_var
.alloc
- gcov_var
.length
;
507 excess
= fread (gcov_var
.buffer
+ gcov_var
.length
,
508 1, excess
<< 2, gcov_var
.file
) >> 2;
509 gcov_var
.length
+= excess
;
510 if (gcov_var
.length
< words
)
512 gcov_var
.overread
+= words
- gcov_var
.length
;
517 result
= &gcov_var
.buffer
[gcov_var
.offset
];
518 gcov_var
.offset
+= words
;
522 /* Read unsigned value from a coverage file. Sets error flag on file
523 error, overflow flag on overflow */
525 GCOV_LINKAGE gcov_unsigned_t
526 gcov_read_unsigned (void)
528 gcov_unsigned_t value
;
529 const gcov_unsigned_t
*buffer
= gcov_read_words (1);
533 value
= from_file (buffer
[0]);
537 /* Read counter value from a coverage file. Sets error flag on file
538 error, overflow flag on overflow */
540 GCOV_LINKAGE gcov_type
541 gcov_read_counter (void)
544 const gcov_unsigned_t
*buffer
= gcov_read_words (2);
548 value
= from_file (buffer
[0]);
549 if (sizeof (value
) > sizeof (gcov_unsigned_t
))
550 value
|= ((gcov_type
) from_file (buffer
[1])) << 32;
557 /* Read string from coverage file. Returns a pointer to a static
558 buffer, or NULL on empty string. You must copy the string before
559 calling another gcov function. */
562 GCOV_LINKAGE
const char *
563 gcov_read_string (void)
565 unsigned length
= gcov_read_unsigned ();
570 return (const char *) gcov_read_words (length
);
575 gcov_read_summary (struct gcov_summary
*summary
)
577 unsigned ix
, h_ix
, bv_ix
, h_cnt
= 0;
578 struct gcov_ctr_summary
*csum
;
579 unsigned histo_bitvector
[GCOV_HISTOGRAM_BITVECTOR_SIZE
];
580 unsigned cur_bitvector
;
582 summary
->checksum
= gcov_read_unsigned ();
583 for (csum
= summary
->ctrs
, ix
= GCOV_COUNTERS_SUMMABLE
; ix
--; csum
++)
585 csum
->num
= gcov_read_unsigned ();
586 csum
->runs
= gcov_read_unsigned ();
587 csum
->sum_all
= gcov_read_counter ();
588 csum
->run_max
= gcov_read_counter ();
589 csum
->sum_max
= gcov_read_counter ();
590 memset (csum
->histogram
, 0,
591 sizeof (gcov_bucket_type
) * GCOV_HISTOGRAM_SIZE
);
592 for (bv_ix
= 0; bv_ix
< GCOV_HISTOGRAM_BITVECTOR_SIZE
; bv_ix
++)
594 histo_bitvector
[bv_ix
] = gcov_read_unsigned ();
596 /* When building libgcov we don't include system.h, which includes
597 hwint.h (where popcount_hwi is declared). However, libgcov.a
598 is built by the bootstrapped compiler and therefore the builtins
599 are always available. */
600 h_cnt
+= __builtin_popcount (histo_bitvector
[bv_ix
]);
602 h_cnt
+= popcount_hwi (histo_bitvector
[bv_ix
]);
610 /* Find the index corresponding to the next entry we will read in.
611 First find the next non-zero bitvector and re-initialize
612 the histogram index accordingly, then right shift and increment
613 the index until we find a set bit. */
614 while (!cur_bitvector
)
617 gcc_assert (bv_ix
< GCOV_HISTOGRAM_BITVECTOR_SIZE
);
618 cur_bitvector
= histo_bitvector
[bv_ix
++];
620 while (!(cur_bitvector
& 0x1))
625 gcc_assert (h_ix
< GCOV_HISTOGRAM_SIZE
);
627 csum
->histogram
[h_ix
].num_counters
= gcov_read_unsigned ();
628 csum
->histogram
[h_ix
].min_value
= gcov_read_counter ();
629 csum
->histogram
[h_ix
].cum_value
= gcov_read_counter ();
630 /* Shift off the index we are done with and increment to the
631 corresponding next histogram entry. */
639 /* Reset to a known position. BASE should have been obtained from
640 gcov_position, LENGTH should be a record length. */
643 gcov_sync (gcov_position_t base
, gcov_unsigned_t length
)
645 gcc_assert (gcov_var
.mode
> 0);
647 if (base
- gcov_var
.start
<= gcov_var
.length
)
648 gcov_var
.offset
= base
- gcov_var
.start
;
651 gcov_var
.offset
= gcov_var
.length
= 0;
652 fseek (gcov_var
.file
, base
<< 2, SEEK_SET
);
653 gcov_var
.start
= ftell (gcov_var
.file
) >> 2;
659 /* Move to a given position in a gcov file. */
662 gcov_seek (gcov_position_t base
)
664 gcc_assert (gcov_var
.mode
< 0);
666 gcov_write_block (gcov_var
.offset
);
667 fseek (gcov_var
.file
, base
<< 2, SEEK_SET
);
668 gcov_var
.start
= ftell (gcov_var
.file
) >> 2;
673 /* Return the modification time of the current gcov file. */
680 if (fstat (fileno (gcov_var
.file
), &status
))
683 return status
.st_mtime
;
688 /* Determine the index into histogram for VALUE. */
693 GCOV_LINKAGE
unsigned
695 gcov_histo_index (gcov_type value
)
697 gcov_type_unsigned v
= (gcov_type_unsigned
)value
;
699 unsigned prev2bits
= 0;
701 /* Find index into log2 scale histogram, where each of the log2
702 sized buckets is divided into 4 linear sub-buckets for better
703 focus in the higher buckets. */
705 /* Find the place of the most-significant bit set. */
709 /* When building libgcov we don't include system.h, which includes
710 hwint.h (where floor_log2 is declared). However, libgcov.a
711 is built by the bootstrapped compiler and therefore the builtins
712 are always available. */
713 r
= sizeof (long long) * __CHAR_BIT__
- 1 - __builtin_clzll (v
);
715 /* We use floor_log2 from hwint.c, which takes a HOST_WIDE_INT
716 that is either 32 or 64 bits, and gcov_type_unsigned may be 64 bits.
717 Need to check for the case where gcov_type_unsigned is 64 bits
718 and HOST_WIDE_INT is 32 bits and handle it specially. */
719 #if HOST_BITS_PER_WIDEST_INT == HOST_BITS_PER_WIDE_INT
721 #elif HOST_BITS_PER_WIDEST_INT == 2 * HOST_BITS_PER_WIDE_INT
722 HOST_WIDE_INT hwi_v
= v
>> HOST_BITS_PER_WIDE_INT
;
724 r
= floor_log2 (hwi_v
) + HOST_BITS_PER_WIDE_INT
;
726 r
= floor_log2 ((HOST_WIDE_INT
)v
);
733 /* If at most the 2 least significant bits are set (value is
734 0 - 3) then that value is our index into the lowest set of
737 return (unsigned)value
;
741 /* Find the two next most significant bits to determine which
742 of the four linear sub-buckets to select. */
743 prev2bits
= (v
>> (r
- 2)) & 0x3;
744 /* Finally, compose the final bucket index from the log2 index and
745 the next 2 bits. The minimum r value at this point is 2 since we
746 returned above if r was 2 or more, so the minimum bucket at this
748 return (r
- 1) * 4 + prev2bits
;
751 /* Merge SRC_HISTO into TGT_HISTO. The counters are assumed to be in
752 the same relative order in both histograms, and are matched up
753 and merged in reverse order. Each counter is assigned an equal portion of
754 its entry's original cumulative counter value when computing the
755 new merged cum_value. */
757 static void gcov_histogram_merge (gcov_bucket_type
*tgt_histo
,
758 gcov_bucket_type
*src_histo
)
760 int src_i
, tgt_i
, tmp_i
= 0;
761 unsigned src_num
, tgt_num
, merge_num
;
762 gcov_type src_cum
, tgt_cum
, merge_src_cum
, merge_tgt_cum
, merge_cum
;
764 gcov_bucket_type tmp_histo
[GCOV_HISTOGRAM_SIZE
];
767 memset (tmp_histo
, 0, sizeof (gcov_bucket_type
) * GCOV_HISTOGRAM_SIZE
);
769 /* Assume that the counters are in the same relative order in both
770 histograms. Walk the histograms from largest to smallest entry,
771 matching up and combining counters in order. */
774 src_i
= GCOV_HISTOGRAM_SIZE
- 1;
775 for (tgt_i
= GCOV_HISTOGRAM_SIZE
- 1; tgt_i
>= 0 && !src_done
; tgt_i
--)
777 tgt_num
= tgt_histo
[tgt_i
].num_counters
;
778 tgt_cum
= tgt_histo
[tgt_i
].cum_value
;
779 /* Keep going until all of the target histogram's counters at this
780 position have been matched and merged with counters from the
782 while (tgt_num
> 0 && !src_done
)
784 /* If this is either the first time through this loop or we just
785 exhausted the previous non-zero source histogram entry, look
786 for the next non-zero source histogram entry. */
789 /* Locate the next non-zero entry. */
790 while (src_i
>= 0 && !src_histo
[src_i
].num_counters
)
792 /* If source histogram has fewer counters, then just copy over the
793 remaining target counters and quit. */
796 tmp_histo
[tgt_i
].num_counters
+= tgt_num
;
797 tmp_histo
[tgt_i
].cum_value
+= tgt_cum
;
798 if (!tmp_histo
[tgt_i
].min_value
||
799 tgt_histo
[tgt_i
].min_value
< tmp_histo
[tgt_i
].min_value
)
800 tmp_histo
[tgt_i
].min_value
= tgt_histo
[tgt_i
].min_value
;
803 tmp_histo
[tgt_i
].num_counters
804 += tgt_histo
[tgt_i
].num_counters
;
805 tmp_histo
[tgt_i
].cum_value
+= tgt_histo
[tgt_i
].cum_value
;
806 if (!tmp_histo
[tgt_i
].min_value
||
807 tgt_histo
[tgt_i
].min_value
808 < tmp_histo
[tgt_i
].min_value
)
809 tmp_histo
[tgt_i
].min_value
= tgt_histo
[tgt_i
].min_value
;
816 src_num
= src_histo
[src_i
].num_counters
;
817 src_cum
= src_histo
[src_i
].cum_value
;
820 /* The number of counters to merge on this pass is the minimum
821 of the remaining counters from the current target and source
822 histogram entries. */
824 if (src_num
< merge_num
)
827 /* The merged min_value is the sum of the min_values from target
829 merge_min
= tgt_histo
[tgt_i
].min_value
+ src_histo
[src_i
].min_value
;
831 /* Compute the portion of source and target entries' cum_value
832 that will be apportioned to the counters being merged.
833 The total remaining cum_value from each entry is divided
834 equally among the counters from that histogram entry if we
835 are not merging all of them. */
836 merge_src_cum
= src_cum
;
837 if (merge_num
< src_num
)
838 merge_src_cum
= merge_num
* src_cum
/ src_num
;
839 merge_tgt_cum
= tgt_cum
;
840 if (merge_num
< tgt_num
)
841 merge_tgt_cum
= merge_num
* tgt_cum
/ tgt_num
;
842 /* The merged cum_value is the sum of the source and target
844 merge_cum
= merge_src_cum
+ merge_tgt_cum
;
846 /* Update the remaining number of counters and cum_value left
847 to be merged from this source and target entry. */
848 src_cum
-= merge_src_cum
;
849 tgt_cum
-= merge_tgt_cum
;
850 src_num
-= merge_num
;
851 tgt_num
-= merge_num
;
853 /* The merged counters get placed in the new merged histogram
854 at the entry for the merged min_value. */
855 tmp_i
= gcov_histo_index (merge_min
);
856 gcc_assert (tmp_i
< GCOV_HISTOGRAM_SIZE
);
857 tmp_histo
[tmp_i
].num_counters
+= merge_num
;
858 tmp_histo
[tmp_i
].cum_value
+= merge_cum
;
859 if (!tmp_histo
[tmp_i
].min_value
||
860 merge_min
< tmp_histo
[tmp_i
].min_value
)
861 tmp_histo
[tmp_i
].min_value
= merge_min
;
863 /* Ensure the search for the next non-zero src_histo entry starts
864 at the next smallest histogram bucket. */
870 gcc_assert (tgt_i
< 0);
872 /* In the case where there were more counters in the source histogram,
873 accumulate the remaining unmerged cumulative counter values. Add
874 those to the smallest non-zero target histogram entry. Otherwise,
875 the total cumulative counter values in the histogram will be smaller
876 than the sum_all stored in the summary, which will complicate
877 computing the working set information from the histogram later on. */
882 src_cum
+= src_histo
[src_i
].cum_value
;
885 /* At this point, tmp_i should be the smallest non-zero entry in the
887 gcc_assert (tmp_i
>= 0 && tmp_i
< GCOV_HISTOGRAM_SIZE
888 && tmp_histo
[tmp_i
].num_counters
> 0);
889 tmp_histo
[tmp_i
].cum_value
+= src_cum
;
891 /* Finally, copy the merged histogram into tgt_histo. */
892 memcpy (tgt_histo
, tmp_histo
,
893 sizeof (gcov_bucket_type
) * GCOV_HISTOGRAM_SIZE
);
895 #endif /* !IN_GCOV */
897 /* This is used by gcov-dump (IN_GCOV == -1) and in the compiler
898 (!IN_GCOV && !IN_LIBGCOV). */
899 #if IN_GCOV <= 0 && !IN_LIBGCOV
900 /* Compute the working set information from the counter histogram in
901 the profile summary. This is an array of information corresponding to a
902 range of percentages of the total execution count (sum_all), and includes
903 the number of counters required to cover that working set percentage and
904 the minimum counter value in that working set. */
907 compute_working_sets (const struct gcov_ctr_summary
*summary
,
908 gcov_working_set_t
*gcov_working_sets
)
910 gcov_type working_set_cum_values
[NUM_GCOV_WORKING_SETS
];
911 gcov_type ws_cum_hotness_incr
;
912 gcov_type cum
, tmp_cum
;
913 const gcov_bucket_type
*histo_bucket
;
914 unsigned ws_ix
, c_num
, count
;
917 /* Compute the amount of sum_all that the cumulative hotness grows
918 by in each successive working set entry, which depends on the
919 number of working set entries. */
920 ws_cum_hotness_incr
= summary
->sum_all
/ NUM_GCOV_WORKING_SETS
;
922 /* Next fill in an array of the cumulative hotness values corresponding
923 to each working set summary entry we are going to compute below.
924 Skip 0% statistics, which can be extrapolated from the
925 rest of the summary data. */
926 cum
= ws_cum_hotness_incr
;
927 for (ws_ix
= 0; ws_ix
< NUM_GCOV_WORKING_SETS
;
928 ws_ix
++, cum
+= ws_cum_hotness_incr
)
929 working_set_cum_values
[ws_ix
] = cum
;
930 /* The last summary entry is reserved for (roughly) 99.9% of the
931 working set. Divide by 1024 so it becomes a shift, which gives
932 almost exactly 99.9%. */
933 working_set_cum_values
[NUM_GCOV_WORKING_SETS
-1]
934 = summary
->sum_all
- summary
->sum_all
/1024;
936 /* Next, walk through the histogram in decending order of hotness
937 and compute the statistics for the working set summary array.
938 As histogram entries are accumulated, we check to see which
939 working set entries have had their expected cum_value reached
940 and fill them in, walking the working set entries in increasing
941 size of cum_value. */
942 ws_ix
= 0; /* The current entry into the working set array. */
943 cum
= 0; /* The current accumulated counter sum. */
944 count
= 0; /* The current accumulated count of block counters. */
945 for (h_ix
= GCOV_HISTOGRAM_SIZE
- 1;
946 h_ix
>= 0 && ws_ix
< NUM_GCOV_WORKING_SETS
; h_ix
--)
948 histo_bucket
= &summary
->histogram
[h_ix
];
950 /* If we haven't reached the required cumulative counter value for
951 the current working set percentage, simply accumulate this histogram
952 entry into the running sums and continue to the next histogram
954 if (cum
+ histo_bucket
->cum_value
< working_set_cum_values
[ws_ix
])
956 cum
+= histo_bucket
->cum_value
;
957 count
+= histo_bucket
->num_counters
;
961 /* If adding the current histogram entry's cumulative counter value
962 causes us to exceed the current working set size, then estimate
963 how many of this histogram entry's counter values are required to
964 reach the working set size, and fill in working set entries
965 as we reach their expected cumulative value. */
966 for (c_num
= 0, tmp_cum
= cum
;
967 c_num
< histo_bucket
->num_counters
&& ws_ix
< NUM_GCOV_WORKING_SETS
;
971 /* If we haven't reached the last histogram entry counter, add
972 in the minimum value again. This will underestimate the
973 cumulative sum so far, because many of the counter values in this
974 entry may have been larger than the minimum. We could add in the
975 average value every time, but that would require an expensive
977 if (c_num
+ 1 < histo_bucket
->num_counters
)
978 tmp_cum
+= histo_bucket
->min_value
;
979 /* If we have reached the last histogram entry counter, then add
980 in the entire cumulative value. */
982 tmp_cum
= cum
+ histo_bucket
->cum_value
;
984 /* Next walk through successive working set entries and fill in
985 the statistics for any whose size we have reached by accumulating
986 this histogram counter. */
987 while (ws_ix
< NUM_GCOV_WORKING_SETS
988 && tmp_cum
>= working_set_cum_values
[ws_ix
])
990 gcov_working_sets
[ws_ix
].num_counters
= count
;
991 gcov_working_sets
[ws_ix
].min_counter
992 = histo_bucket
->min_value
;
996 /* Finally, update the running cumulative value since we were
997 using a temporary above. */
998 cum
+= histo_bucket
->cum_value
;
1000 gcc_assert (ws_ix
== NUM_GCOV_WORKING_SETS
);
1002 #endif /* IN_GCOV <= 0 && !IN_LIBGCOV */