//===-- sanitizer_coverage.cc ---------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage.
// This file implements run-time support for a poor man's coverage tool.
//
// Compiler instrumentation:
// For every interesting basic block the compiler injects the following code:
// if (Guard < 0) {
//    __sanitizer_cov(&Guard);
// }
// At the module start-up time __sanitizer_cov_module_init sets the guards
// to consecutive negative numbers (-1, -2, -3, ...).
// It's fine to call __sanitizer_cov more than once for a given block.
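// For example, a module with three instrumented blocks starts with guards
// {-1, -2, -3}; once a block has executed, its guard is positive and the
// injected check skips the call on later executions.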
//
// Run-time:
//  - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC)
//    and atomically set Guard to -Guard.
//  - __sanitizer_cov_dump: dump the coverage data to disk.
//  For every module of the current process that has coverage data
//  this will create a file module_name.PID.sancov.
//
// The file format is simple: the first 8 bytes is the magic,
// one of 0xC0BFFFFFFFFFFF64 and 0xC0BFFFFFFFFFFF32. The last byte of the
// magic defines the size of the following offsets.
// The rest of the data is the offsets in the module.
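// For example, on a 64-bit target a module with two visited blocks at
// offsets 0x4010 and 0x4060 dumps as three u64 words:
//   0xC0BFFFFFFFFFFF64 0x4010 0x4060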
//
// Eventually, this coverage implementation should be obsoleted by a more
// powerful general purpose Clang/LLVM coverage instrumentation.
// Consider this implementation a prototype.
//
// FIXME: support (or at least test with) dlclose.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_flags.h"
static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL;
static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL;

static atomic_uint32_t dump_once_guard;  // Ensure that CovDump runs only once.

static atomic_uintptr_t coverage_counter;
static atomic_uintptr_t caller_callee_counter;
static void ResetGlobalCounters() {
  atomic_store(&coverage_counter, 0, memory_order_relaxed);
  atomic_store(&caller_callee_counter, 0, memory_order_relaxed);
}
// pc_array is the array containing the covered PCs.
// To make the pc_array thread- and async-signal-safe it has to be large enough.
// 128M counters "ought to be enough for anybody" (4M on 32-bit).
//
// With coverage_direct=1 in ASAN_OPTIONS, pc_array memory is mapped to a file.
// In this mode, __sanitizer_cov_dump does nothing, and CovUpdateMapping()
// dumps the current memory layout to another file.
static bool cov_sandboxed = false;
static fd_t cov_fd = kInvalidFd;
static unsigned int cov_max_block_size = 0;
static bool coverage_enabled = false;
static const char *coverage_dir;
namespace __sanitizer {

class CoverageData {
 public:
  void Init();
  void Enable();
  void Disable();
  void ReInit();
  void BeforeFork();
  void AfterFork(int child_pid);
  void Extend(uptr npcs);
  void Add(uptr pc, u32 *guard);
  void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
                 uptr cache_size);
  void DumpCallerCalleePairs();
  void DumpTrace();
  void DumpAsBitSet();
  void DumpCounters();
  void DumpOffsets();
  void DumpAll();

  void TraceBasicBlock(s32 *id);

  void InitializeGuardArray(s32 *guards);
  void InitializeGuards(s32 *guards, uptr n, const char *module_name,
                        uptr caller_pc);
  void InitializeCounters(u8 *counters, uptr n);
  void ReinitializeGuards();
  uptr GetNumberOf8bitCounters();
  uptr Update8bitCounterBitsetAndClearCounters(u8 *bitset);

  uptr *data();
  uptr size();

 private:
  void DirectOpen();
  void UpdateModuleNameVec(uptr caller_pc, uptr range_beg, uptr range_end);
  // Maximal size pc array may ever grow.
  // We MmapNoReserve this space to ensure that the array is contiguous.
  static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(
      1 << (SANITIZER_ANDROID ? 24 : (SANITIZER_WINDOWS ? 27 : 26)),
      1 << 27);
  // The amount by which the pc array file mapping is grown.
  static const uptr kPcArrayMmapSize = 64 * 1024;

  // pc_array is allocated with MmapNoReserveOrDie and so it uses only as
  // much RAM as it really needs.
  uptr *pc_array;
  // Index of the first available pc_array slot.
  atomic_uintptr_t pc_array_index;
  // Size of the pc array.
  atomic_uintptr_t pc_array_size;
  // Current file mapped size of the pc array.
  uptr pc_array_mapped_size;
  // Descriptor of the file mapped pc array.
  fd_t pc_fd;

  // Vector of coverage guard arrays, protected by mu.
  InternalMmapVectorNoCtor<s32 *> guard_array_vec;
  struct NamedPcRange {
    const char *copied_module_name;
    uptr beg, end;  // elements [beg,end) in pc_array.
  };

  // Vector of module and compilation unit pc ranges.
  InternalMmapVectorNoCtor<NamedPcRange> comp_unit_name_vec;
  InternalMmapVectorNoCtor<NamedPcRange> module_name_vec;

  struct CounterAndSize {
    u8 *counters;
    uptr n;
  };

  InternalMmapVectorNoCtor<CounterAndSize> counters_vec;
  uptr num_8bit_counters;
  // Caller-Callee (cc) array, size and current index.
  static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);
  uptr **cc_array;
  atomic_uintptr_t cc_array_index;
  atomic_uintptr_t cc_array_size;

  // Tracing event array, size and current pointer.
  // We record all events (basic block entries) in a global buffer of u32
  // values. Each such value is the index in pc_array.
  // So far the tracing is highly experimental:
  //  - not thread-safe;
  //  - does not support long traces;
  //  - not tuned for performance.
  static const uptr kTrEventArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 30);
  u32 *tr_event_array;
  uptr tr_event_array_size;
  u32 *tr_event_pointer;
  static const uptr kTrPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);

  StaticSpinMutex mu;
};

static CoverageData coverage_data;
void CovUpdateMapping(const char *path, uptr caller_pc = 0);

void CoverageData::DirectOpen() {
  InternalScopedString path(kMaxPathLength);
  internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw",
                    coverage_dir, internal_getpid());
  pc_fd = OpenFile(path.data(), RdWr);
  if (pc_fd == kInvalidFd) {
    Report("Coverage: failed to open %s for reading/writing\n", path.data());
    Die();
  }

  pc_array_mapped_size = 0;
  CovUpdateMapping(coverage_dir);
}
void CoverageData::Init() {
  pc_fd = kInvalidFd;
}
void CoverageData::Enable() {
  if (pc_array)
    return;
  pc_array = reinterpret_cast<uptr *>(
      MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
  atomic_store(&pc_array_index, 0, memory_order_relaxed);
  if (common_flags()->coverage_direct) {
    atomic_store(&pc_array_size, 0, memory_order_relaxed);
  } else {
    atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
  }

  cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
      sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
  atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
  atomic_store(&cc_array_index, 0, memory_order_relaxed);

  // Allocate tr_event_array with a guard page at the end.
  tr_event_array = reinterpret_cast<u32 *>(MmapNoReserveOrDie(
      sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + GetMmapGranularity(),
      "CovInit::tr_event_array"));
  MprotectNoAccess(
      reinterpret_cast<uptr>(&tr_event_array[kTrEventArrayMaxSize]),
      GetMmapGranularity());
  tr_event_array_size = kTrEventArrayMaxSize;
  tr_event_pointer = tr_event_array;

  num_8bit_counters = 0;
}
void CoverageData::InitializeGuardArray(s32 *guards) {
  Enable();  // Make sure coverage is enabled at this point.
  s32 n = guards[0];
  for (s32 j = 1; j <= n; j++) {
    uptr idx = atomic_load_relaxed(&pc_array_index);
    atomic_store_relaxed(&pc_array_index, idx + 1);
    guards[j] = -static_cast<s32>(idx + 1);
  }
}
void CoverageData::Disable() {
  if (pc_array) {
    UnmapOrDie(pc_array, sizeof(uptr) * kPcArrayMaxSize);
    pc_array = nullptr;
  }
  if (cc_array) {
    UnmapOrDie(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
    cc_array = nullptr;
  }
  if (tr_event_array) {
    UnmapOrDie(tr_event_array,
               sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
                   GetMmapGranularity());
    tr_event_array = nullptr;
    tr_event_pointer = nullptr;
  }
  if (pc_fd != kInvalidFd) {
    CloseFile(pc_fd);
    pc_fd = kInvalidFd;
  }
}
void CoverageData::ReinitializeGuards() {
  // Assuming single thread.
  atomic_store(&pc_array_index, 0, memory_order_relaxed);
  for (uptr i = 0; i < guard_array_vec.size(); i++)
    InitializeGuardArray(guard_array_vec[i]);
}
void CoverageData::ReInit() {
  Disable();
  if (coverage_enabled) {
    if (common_flags()->coverage_direct) {
      // In memory-mapped mode we must extend the new file to the known array
      // size.
      uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
      uptr npcs = size / sizeof(uptr);
      Enable();
      if (size) Extend(npcs);
      if (coverage_enabled) CovUpdateMapping(coverage_dir);
    } else {
      Enable();
    }
  }
  // Re-initialize the guards.
  // We are single-threaded now, no need to grab any lock.
  CHECK_EQ(atomic_load(&pc_array_index, memory_order_relaxed), 0);
  ReinitializeGuards();
}
void CoverageData::BeforeFork() {
  mu.Lock();
}

void CoverageData::AfterFork(int child_pid) {
  // We are single-threaded so it's OK to release the lock early.
  mu.Unlock();
  if (child_pid == 0) ReInit();
}
// Extend coverage PC array to fit additional npcs elements.
void CoverageData::Extend(uptr npcs) {
  if (!common_flags()->coverage_direct) return;
  SpinMutexLock l(&mu);

  uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
  size += npcs * sizeof(uptr);

  if (coverage_enabled && size > pc_array_mapped_size) {
    if (pc_fd == kInvalidFd) DirectOpen();
    CHECK_NE(pc_fd, kInvalidFd);

    uptr new_mapped_size = pc_array_mapped_size;
    while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize;
    CHECK_LE(new_mapped_size, sizeof(uptr) * kPcArrayMaxSize);

    // Extend the file and map the new space at the end of pc_array.
    uptr res = internal_ftruncate(pc_fd, new_mapped_size);
    int err;
    if (internal_iserror(res, &err)) {
      Printf("failed to extend raw coverage file: %d\n", err);
      Die();
    }

    uptr next_map_base = ((uptr)pc_array) + pc_array_mapped_size;
    void *p = MapWritableFileToMemory((void *)next_map_base,
                                      new_mapped_size - pc_array_mapped_size,
                                      pc_fd, pc_array_mapped_size);
    CHECK_EQ((uptr)p, next_map_base);
    pc_array_mapped_size = new_mapped_size;
  }

  atomic_store(&pc_array_size, size, memory_order_release);
}
void CoverageData::InitializeCounters(u8 *counters, uptr n) {
  if (!counters) return;
  CHECK_EQ(reinterpret_cast<uptr>(counters) % 16, 0);
  n = RoundUpTo(n, 16);  // The compiler must ensure that counters is 16-aligned.
  SpinMutexLock l(&mu);
  counters_vec.push_back({counters, n});
  num_8bit_counters += n;
}
void CoverageData::UpdateModuleNameVec(uptr caller_pc, uptr range_beg,
                                       uptr range_end) {
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  const char *module_name = sym->GetModuleNameForPc(caller_pc);
  if (!module_name) return;
  if (module_name_vec.empty() ||
      module_name_vec.back().copied_module_name != module_name)
    module_name_vec.push_back({module_name, range_beg, range_end});
  else
    module_name_vec.back().end = range_end;
}
void CoverageData::InitializeGuards(s32 *guards, uptr n,
                                    const char *comp_unit_name,
                                    uptr caller_pc) {
  // The array 'guards' has n+1 elements, we use the element zero
  // to store 'n'.
  CHECK_LT(n, 1 << 30);
  guards[0] = static_cast<s32>(n);
  InitializeGuardArray(guards);
  SpinMutexLock l(&mu);
  uptr range_end = atomic_load(&pc_array_index, memory_order_relaxed);
  uptr range_beg = range_end - n;
  comp_unit_name_vec.push_back({comp_unit_name, range_beg, range_end});
  guard_array_vec.push_back(guards);
  UpdateModuleNameVec(caller_pc, range_beg, range_end);
}
static const uptr kBundleCounterBits = 16;

// When coverage_order_pcs==true and SANITIZER_WORDSIZE==64
// we insert the global counter into the first 16 bits of the PC.
uptr BundlePcAndCounter(uptr pc, uptr counter) {
  if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
    return pc;
  static const uptr kMaxCounter = (1 << kBundleCounterBits) - 1;
  if (counter > kMaxCounter)
    counter = kMaxCounter;
  CHECK_EQ(0, pc >> (SANITIZER_WORDSIZE - kBundleCounterBits));
  return pc | (counter << (SANITIZER_WORDSIZE - kBundleCounterBits));
}

uptr UnbundlePc(uptr bundle) {
  if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
    return bundle;
  return (bundle << kBundleCounterBits) >> kBundleCounterBits;
}

uptr UnbundleCounter(uptr bundle) {
  if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
    return 0;
  return bundle >> (SANITIZER_WORDSIZE - kBundleCounterBits);
}
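
// For example, with coverage_order_pcs=1 on a 64-bit target:
//   BundlePcAndCounter(0x4005d0, 5) == 0x00050000004005d0
//   UnbundlePc(0x00050000004005d0)  == 0x4005d0
//   UnbundleCounter(0x00050000004005d0) == 5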
// If guard is negative, atomically set it to -guard and store the PC in
// pc_array.
void CoverageData::Add(uptr pc, u32 *guard) {
  atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t *>(guard);
  s32 guard_value = atomic_load(atomic_guard, memory_order_relaxed);
  if (guard_value >= 0) return;

  atomic_store(atomic_guard, -guard_value, memory_order_relaxed);
  if (!pc_array) return;

  uptr idx = -guard_value - 1;
  if (idx >= atomic_load(&pc_array_index, memory_order_acquire))
    return;  // May happen after fork when pc_array_index becomes 0.
  CHECK_LT(idx * sizeof(uptr),
           atomic_load(&pc_array_size, memory_order_acquire));
  uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
  pc_array[idx] = BundlePcAndCounter(pc, counter);
}
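
// For example, a guard holding -7 maps to pc_array slot 6
// (idx == -guard_value - 1); after the store the guard holds 7, so the
// block is never recorded twice.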
// Registers a pair caller=>callee.
// When a given caller is seen for the first time, the callee_cache is added
// to the global array cc_array, callee_cache[0] is set to caller and
// callee_cache[1] is set to cache_size.
// Then we try to add the callee to callee_cache[2,cache_size) if it is
// not there yet.
// If the cache is full we drop the callee (may want to fix this later).
void CoverageData::IndirCall(uptr caller, uptr callee, uptr callee_cache[],
                             uptr cache_size) {
  if (!cc_array) return;
  atomic_uintptr_t *atomic_callee_cache =
      reinterpret_cast<atomic_uintptr_t *>(callee_cache);
  uptr zero = 0;
  if (atomic_compare_exchange_strong(&atomic_callee_cache[0], &zero, caller,
                                     memory_order_seq_cst)) {
    uptr idx = atomic_fetch_add(&cc_array_index, 1, memory_order_relaxed);
    CHECK_LT(idx * sizeof(uptr),
             atomic_load(&cc_array_size, memory_order_acquire));
    callee_cache[1] = cache_size;
    cc_array[idx] = callee_cache;
  }
  CHECK_EQ(atomic_load(&atomic_callee_cache[0], memory_order_relaxed), caller);
  for (uptr i = 2; i < cache_size; i++) {
    uptr was = 0;
    if (atomic_compare_exchange_strong(&atomic_callee_cache[i], &was, callee,
                                       memory_order_seq_cst)) {
      atomic_fetch_add(&caller_callee_counter, 1, memory_order_relaxed);
      return;
    }
    if (was == callee)  // Already have this callee.
      return;
  }
}
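
// For example, after one caller has reached two distinct callees with
// cache_size == 16, the cache looks like:
//   callee_cache == {caller, 16, callee1, callee2, 0, ..., 0}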
uptr CoverageData::GetNumberOf8bitCounters() {
  return num_8bit_counters;
}
// Map every 8-bit counter to an 8-bit bitset and clear the counter.
uptr CoverageData::Update8bitCounterBitsetAndClearCounters(u8 *bitset) {
  uptr num_new_bits = 0;
  uptr cur = 0;
  // For better speed we map 8 counters to 8 bytes of bitset at once.
  static const uptr kBatchSize = 8;
  CHECK_EQ(reinterpret_cast<uptr>(bitset) % kBatchSize, 0);
  for (uptr i = 0, len = counters_vec.size(); i < len; i++) {
    u8 *c = counters_vec[i].counters;
    uptr n = counters_vec[i].n;
    CHECK_EQ(n % 16, 0);
    CHECK_EQ(cur % kBatchSize, 0);
    CHECK_EQ(reinterpret_cast<uptr>(c) % kBatchSize, 0);
    if (!bitset) {
      internal_bzero_aligned16(c, n);
      cur += n;
      continue;
    }
    for (uptr j = 0; j < n; j += kBatchSize, cur += kBatchSize) {
      CHECK_LT(cur, num_8bit_counters);
      u64 *pc64 = reinterpret_cast<u64 *>(c + j);
      u64 *pb64 = reinterpret_cast<u64 *>(bitset + cur);
      u64 c64 = *pc64;
      u64 old_bits_64 = *pb64;
      u64 new_bits_64 = old_bits_64;
      if (c64) {
        *pc64 = 0;
        for (uptr k = 0; k < kBatchSize; k++) {
          u64 x = (c64 >> (8 * k)) & 0xff;
          if (x) {
            u64 bit = 0;
            /**/ if (x >= 128) bit = 128;
            else if (x >= 32) bit = 64;
            else if (x >= 16) bit = 32;
            else if (x >= 8) bit = 16;
            else if (x >= 4) bit = 8;
            else if (x >= 3) bit = 4;
            else if (x >= 2) bit = 2;
            else if (x >= 1) bit = 1;
            u64 mask = bit << (8 * k);
            if (!(new_bits_64 & mask)) {
              num_new_bits++;
              new_bits_64 |= mask;
            }
          }
        }
        *pb64 = new_bits_64;
      }
    }
  }
  CHECK_EQ(cur, num_8bit_counters);
  return num_new_bits;
}
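
// For example, a counter value of 5 falls into the [4,8) range and maps to
// bit 8 (mask 0x08 in that counter's bitset byte); a saturated counter of
// 255 maps to bit 128 (mask 0x80).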
uptr *CoverageData::data() {
  return pc_array;
}

uptr CoverageData::size() {
  return atomic_load(&pc_array_index, memory_order_relaxed);
}
// Block layout for packed file format: header, followed by module name (no
// trailing zero), followed by data blob.
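// For example, pid 1234 with module name "libfoo.so" (9 bytes) and a
// 16-byte blob is laid out as the 12-byte CovHeader {1234, 9, 16},
// followed by the 9 name bytes and then the 16 blob bytes.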
struct CovHeader {
  int pid;
  unsigned int module_name_length;
  unsigned int data_length;
};
static void CovWritePacked(int pid, const char *module, const void *blob,
                           unsigned int blob_size) {
  if (cov_fd == kInvalidFd) return;
  unsigned module_name_length = internal_strlen(module);
  CovHeader header = {pid, module_name_length, blob_size};

  if (cov_max_block_size == 0) {
    // Writing to a file. Just go ahead.
    WriteToFile(cov_fd, &header, sizeof(header));
    WriteToFile(cov_fd, module, module_name_length);
    WriteToFile(cov_fd, blob, blob_size);
  } else {
    // Writing to a socket. We want to split the data into appropriately sized
    // blocks.
    InternalScopedBuffer<char> block(cov_max_block_size);
    CHECK_EQ((uptr)block.data(), (uptr)(CovHeader *)block.data());
    uptr header_size_with_module = sizeof(header) + module_name_length;
    CHECK_LT(header_size_with_module, cov_max_block_size);
    unsigned int max_payload_size =
        cov_max_block_size - header_size_with_module;
    char *block_pos = block.data();
    internal_memcpy(block_pos, &header, sizeof(header));
    block_pos += sizeof(header);
    internal_memcpy(block_pos, module, module_name_length);
    block_pos += module_name_length;
    char *block_data_begin = block_pos;
    const char *blob_pos = (const char *)blob;
    while (blob_size > 0) {
      unsigned int payload_size = Min(blob_size, max_payload_size);
      blob_size -= payload_size;
      internal_memcpy(block_data_begin, blob_pos, payload_size);
      blob_pos += payload_size;
      ((CovHeader *)block.data())->data_length = payload_size;
      WriteToFile(cov_fd, block.data(), header_size_with_module + payload_size);
    }
  }
}
// If packed = false: <name>.<pid>.<sancov> (name = module name).
// If packed = true and name == 0: <pid>.<sancov>.<packed>.
// If packed = true and name != 0: <name>.<sancov>.<packed> (name is
// user-supplied).
static fd_t CovOpenFile(InternalScopedString *path, bool packed,
                        const char *name, const char *extension = "sancov") {
  path->clear();
  if (!packed) {
    CHECK(name);
    path->append("%s/%s.%zd.%s", coverage_dir, name, internal_getpid(),
                 extension);
  } else {
    if (!name)
      path->append("%s/%zd.%s.packed", coverage_dir, internal_getpid(),
                   extension);
    else
      path->append("%s/%s.%s.packed", coverage_dir, name, extension);
  }
  error_t err;
  fd_t fd = OpenFile(path->data(), WrOnly, &err);
  if (fd == kInvalidFd)
    Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n",
           path->data(), err);
  return fd;
}
// Dump trace PCs and trace events into two separate files.
void CoverageData::DumpTrace() {
  uptr max_idx = tr_event_pointer - tr_event_array;
  if (!max_idx) return;
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  InternalScopedString out(32 << 20);
  for (uptr i = 0, n = size(); i < n; i++) {
    const char *module_name = "<unknown>";
    uptr module_address = 0;
    sym->GetModuleNameAndOffsetForPC(UnbundlePc(pc_array[i]), &module_name,
                                     &module_address);
    out.append("%s 0x%zx\n", module_name, module_address);
  }
  InternalScopedString path(kMaxPathLength);
  fd_t fd = CovOpenFile(&path, false, "trace-points");
  if (fd == kInvalidFd) return;
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);

  fd = CovOpenFile(&path, false, "trace-compunits");
  if (fd == kInvalidFd) return;
  out.clear();
  for (uptr i = 0; i < comp_unit_name_vec.size(); i++)
    out.append("%s\n", comp_unit_name_vec[i].copied_module_name);
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);

  fd = CovOpenFile(&path, false, "trace-events");
  if (fd == kInvalidFd) return;
  uptr bytes_to_write = max_idx * sizeof(tr_event_array[0]);
  u8 *event_bytes = reinterpret_cast<u8 *>(tr_event_array);
  // The trace file could be huge, and may not be written with a single syscall.
  while (bytes_to_write) {
    uptr actually_written;
    if (WriteToFile(fd, event_bytes, bytes_to_write, &actually_written) &&
        actually_written <= bytes_to_write) {
      bytes_to_write -= actually_written;
      event_bytes += actually_written;
    } else {
      break;
    }
  }
  CloseFile(fd);
  VReport(1, " CovDump: Trace: %zd PCs written\n", size());
  VReport(1, " CovDump: Trace: %zd Events written\n", max_idx);
}
// This function dumps the caller=>callee pairs into a file as a sequence of
// lines like "module_name offset".
void CoverageData::DumpCallerCalleePairs() {
  uptr max_idx = atomic_load(&cc_array_index, memory_order_relaxed);
  if (!max_idx) return;
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  InternalScopedString out(32 << 20);
  uptr total = 0;
  for (uptr i = 0; i < max_idx; i++) {
    uptr *cc_cache = cc_array[i];
    CHECK(cc_cache);
    uptr caller = cc_cache[0];
    uptr n_callees = cc_cache[1];
    const char *caller_module_name = "<unknown>";
    uptr caller_module_address = 0;
    sym->GetModuleNameAndOffsetForPC(caller, &caller_module_name,
                                     &caller_module_address);
    for (uptr j = 2; j < n_callees; j++) {
      uptr callee = cc_cache[j];
      if (!callee) break;
      total++;
      const char *callee_module_name = "<unknown>";
      uptr callee_module_address = 0;
      sym->GetModuleNameAndOffsetForPC(callee, &callee_module_name,
                                       &callee_module_address);
      out.append("%s 0x%zx\n%s 0x%zx\n", caller_module_name,
                 caller_module_address, callee_module_name,
                 callee_module_address);
    }
  }
  InternalScopedString path(kMaxPathLength);
  fd_t fd = CovOpenFile(&path, false, "caller-callee");
  if (fd == kInvalidFd) return;
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);
  VReport(1, " CovDump: %zd caller-callee pairs written\n", total);
}
// Record the current PC into the event buffer.
// Every event is a u32 value (index in tr_pc_array_index) so we compute
// it once and then cache in the provided 'cache' storage.
//
// This function will eventually be inlined by the compiler.
void CoverageData::TraceBasicBlock(s32 *id) {
  // Will trap here if
  //  1. coverage is not enabled at run-time.
  //  2. The array tr_event_array is full.
  *tr_event_pointer = static_cast<u32>(*id - 1);
  tr_event_pointer++;
}
void CoverageData::DumpCounters() {
  if (!common_flags()->coverage_counters) return;
  uptr n = coverage_data.GetNumberOf8bitCounters();
  if (!n) return;
  InternalScopedBuffer<u8> bitset(n);
  coverage_data.Update8bitCounterBitsetAndClearCounters(bitset.data());
  InternalScopedString path(kMaxPathLength);

  for (uptr m = 0; m < module_name_vec.size(); m++) {
    auto r = module_name_vec[m];
    CHECK(r.copied_module_name);
    CHECK_LE(r.beg, r.end);
    CHECK_LE(r.end, size());
    const char *base_name = StripModuleName(r.copied_module_name);
    fd_t fd =
        CovOpenFile(&path, /* packed */ false, base_name, "counters-sancov");
    if (fd == kInvalidFd) return;
    WriteToFile(fd, bitset.data() + r.beg, r.end - r.beg);
    CloseFile(fd);
    VReport(1, " CovDump: %zd counters written for '%s'\n", r.end - r.beg,
            base_name);
  }
}
void CoverageData::DumpAsBitSet() {
  if (!common_flags()->coverage_bitset) return;
  if (!size()) return;
  InternalScopedBuffer<char> out(size());
  InternalScopedString path(kMaxPathLength);
  for (uptr m = 0; m < module_name_vec.size(); m++) {
    uptr n_set_bits = 0;
    auto r = module_name_vec[m];
    CHECK(r.copied_module_name);
    CHECK_LE(r.beg, r.end);
    CHECK_LE(r.end, size());
    for (uptr i = r.beg; i < r.end; i++) {
      uptr pc = UnbundlePc(pc_array[i]);
      out[i] = pc ? '1' : '0';
      if (pc)
        n_set_bits++;
    }
    const char *base_name = StripModuleName(r.copied_module_name);
    fd_t fd = CovOpenFile(&path, /* packed */ false, base_name, "bitset-sancov");
    if (fd == kInvalidFd) return;
    WriteToFile(fd, out.data() + r.beg, r.end - r.beg);
    CloseFile(fd);
    VReport(1,
            " CovDump: bitset of %zd bits written for '%s', %zd bits are set\n",
            r.end - r.beg, base_name, n_set_bits);
  }
}
void CoverageData::DumpOffsets() {
  auto sym = Symbolizer::GetOrInit();
  if (!common_flags()->coverage_pcs) return;
  CHECK_NE(sym, nullptr);
  InternalMmapVector<uptr> offsets(0);
  InternalScopedString path(kMaxPathLength);
  for (uptr m = 0; m < module_name_vec.size(); m++) {
    offsets.clear();
    uptr num_words_for_magic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
    for (uptr i = 0; i < num_words_for_magic; i++)
      offsets.push_back(0);
    auto r = module_name_vec[m];
    CHECK(r.copied_module_name);
    CHECK_LE(r.beg, r.end);
    CHECK_LE(r.end, size());
    for (uptr i = r.beg; i < r.end; i++) {
      uptr pc = UnbundlePc(pc_array[i]);
      uptr counter = UnbundleCounter(pc_array[i]);
      if (!pc) continue;  // Not visited.
      uptr offset = 0;
      sym->GetModuleNameAndOffsetForPC(pc, nullptr, &offset);
      offsets.push_back(BundlePcAndCounter(offset, counter));
    }

    CHECK_GE(offsets.size(), num_words_for_magic);
    SortArray(offsets.data(), offsets.size());
    for (uptr i = 0; i < offsets.size(); i++)
      offsets[i] = UnbundlePc(offsets[i]);

    uptr num_offsets = offsets.size() - num_words_for_magic;
    u64 *magic_p = reinterpret_cast<u64 *>(offsets.data());
    CHECK_EQ(*magic_p, 0ULL);
    // FIXME: we may want to write 32-bit offsets even in 64-mode
    // if all the offsets are small enough.
    *magic_p = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32;

    const char *module_name = StripModuleName(r.copied_module_name);
    if (cov_sandboxed) {
      if (cov_fd != kInvalidFd) {
        CovWritePacked(internal_getpid(), module_name, offsets.data(),
                       offsets.size() * sizeof(offsets[0]));
        VReport(1, " CovDump: %zd PCs written to packed file\n", num_offsets);
      }
    } else {
      // One file per module per process.
      fd_t fd = CovOpenFile(&path, false /* packed */, module_name);
      if (fd == kInvalidFd) continue;
      WriteToFile(fd, offsets.data(), offsets.size() * sizeof(offsets[0]));
      CloseFile(fd);
      VReport(1, " CovDump: %s: %zd PCs written\n", path.data(), num_offsets);
    }
  }
  if (cov_fd != kInvalidFd)
    CloseFile(cov_fd);
}
void CoverageData::DumpAll() {
  if (!coverage_enabled || common_flags()->coverage_direct) return;
  if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
    return;
  DumpAsBitSet();
  DumpCounters();
  DumpTrace();
  DumpOffsets();
  DumpCallerCalleePairs();
}
void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
  if (!args) return;
  if (!coverage_enabled) return;
  cov_sandboxed = args->coverage_sandboxed;
  if (!cov_sandboxed) return;
  cov_max_block_size = args->coverage_max_block_size;
  if (args->coverage_fd >= 0) {
    cov_fd = (fd_t)args->coverage_fd;
  } else {
    InternalScopedString path(kMaxPathLength);
    // Pre-open the file now. The sandbox won't allow us to do it later.
    cov_fd = CovOpenFile(&path, true /* packed */, nullptr);
  }
}
fd_t MaybeOpenCovFile(const char *name) {
  CHECK(name);
  if (!coverage_enabled) return kInvalidFd;
  InternalScopedString path(kMaxPathLength);
  return CovOpenFile(&path, true /* packed */, name);
}
void CovBeforeFork() {
  coverage_data.BeforeFork();
}

void CovAfterFork(int child_pid) {
  coverage_data.AfterFork(child_pid);
}

static void MaybeDumpCoverage() {
  if (common_flags()->coverage)
    __sanitizer_cov_dump();
}
void InitializeCoverage(bool enabled, const char *dir) {
  if (coverage_enabled)
    return;  // May happen if two sanitizers enable coverage in the same process.
  coverage_enabled = enabled;
  coverage_dir = dir;
  coverage_data.Init();
  if (enabled) coverage_data.Enable();
  if (!common_flags()->coverage_direct) Atexit(__sanitizer_cov_dump);
  AddDieCallback(MaybeDumpCoverage);
}
void ReInitializeCoverage(bool enabled, const char *dir) {
  coverage_enabled = enabled;
  coverage_dir = dir;
  coverage_data.ReInit();
}

void CoverageUpdateMapping() {
  if (coverage_enabled)
    CovUpdateMapping(coverage_dir);
}

}  // namespace __sanitizer

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(u32 *guard) {
  coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
                    guard);
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) {
  atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t *>(guard);
  if (static_cast<s32>(
          __sanitizer::atomic_load(atomic_guard, memory_order_relaxed)) < 0)
    __sanitizer_cov(guard);
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
  coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
                          callee, callee_cache16, 16);
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
  coverage_enabled = true;
  coverage_dir = common_flags()->coverage_dir;
  coverage_data.Init();
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
  coverage_data.DumpAll();
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters,
                            const char *comp_unit_name) {
  coverage_data.InitializeGuards(guards, npcs, comp_unit_name, GET_CALLER_PC());
  coverage_data.InitializeCounters(counters, npcs);
  if (!common_flags()->coverage_direct) return;
  if (SANITIZER_ANDROID && coverage_enabled) {
    // dlopen/dlclose interceptors do not work on Android, so we rely on
    // Extend() calls to update .sancov.map.
    CovUpdateMapping(coverage_dir, GET_CALLER_PC());
  }
  coverage_data.Extend(npcs);
}
SANITIZER_INTERFACE_ATTRIBUTE
sptr __sanitizer_maybe_open_cov_file(const char *name) {
  return (sptr)MaybeOpenCovFile(name);
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_total_unique_coverage() {
  return atomic_load(&coverage_counter, memory_order_relaxed);
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_total_unique_caller_callee_pairs() {
  return atomic_load(&caller_callee_counter, memory_order_relaxed);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_func_enter(s32 *id) {
  coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_basic_block(s32 *id) {
  coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_reset_coverage() {
  ResetGlobalCounters();
  coverage_data.ReinitializeGuards();
  internal_bzero_aligned16(
      coverage_data.data(),
      RoundUpTo(coverage_data.size() * sizeof(coverage_data.data()[0]), 16));
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_coverage_guards(uptr **data) {
  *data = coverage_data.data();
  return coverage_data.size();
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_number_of_counters() {
  return coverage_data.GetNumberOf8bitCounters();
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
  return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
}

// Default empty implementations (weak). Users should redefine them.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_switch() {}
}  // extern "C"