//===-- hwasan_report.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Error reporting.
//===----------------------------------------------------------------------===//
14 #include "hwasan_report.h"
19 #include "hwasan_allocator.h"
20 #include "hwasan_globals.h"
21 #include "hwasan_mapping.h"
22 #include "hwasan_thread.h"
23 #include "hwasan_thread_list.h"
24 #include "sanitizer_common/sanitizer_allocator_internal.h"
25 #include "sanitizer_common/sanitizer_common.h"
26 #include "sanitizer_common/sanitizer_flags.h"
27 #include "sanitizer_common/sanitizer_mutex.h"
28 #include "sanitizer_common/sanitizer_report_decorator.h"
29 #include "sanitizer_common/sanitizer_stackdepot.h"
30 #include "sanitizer_common/sanitizer_stacktrace_printer.h"
31 #include "sanitizer_common/sanitizer_symbolizer.h"
33 using namespace __sanitizer
;
39 ScopedReport(bool fatal
= false) : error_message_(1), fatal(fatal
) {
40 Lock
lock(&error_message_lock_
);
41 error_message_ptr_
= fatal
? &error_message_
: nullptr;
42 ++hwasan_report_count
;
46 void (*report_cb
)(const char *);
48 Lock
lock(&error_message_lock_
);
49 report_cb
= error_report_callback_
;
50 error_message_ptr_
= nullptr;
53 report_cb(error_message_
.data());
55 SetAbortMessage(error_message_
.data());
56 if (common_flags()->print_module_map
>= 2 ||
57 (fatal
&& common_flags()->print_module_map
))
63 static void MaybeAppendToErrorMessage(const char *msg
) {
64 Lock
lock(&error_message_lock_
);
65 if (!error_message_ptr_
)
67 uptr len
= internal_strlen(msg
);
68 uptr old_size
= error_message_ptr_
->size();
69 error_message_ptr_
->resize(old_size
+ len
);
70 // overwrite old trailing '\0', keep new trailing '\0' untouched.
71 internal_memcpy(&(*error_message_ptr_
)[old_size
- 1], msg
, len
);
74 static void SetErrorReportCallback(void (*callback
)(const char *)) {
75 Lock
lock(&error_message_lock_
);
76 error_report_callback_
= callback
;
80 ScopedErrorReportLock error_report_lock_
;
81 InternalMmapVector
<char> error_message_
;
84 static InternalMmapVector
<char> *error_message_ptr_
;
85 static Mutex error_message_lock_
;
86 static void (*error_report_callback_
)(const char *);
89 InternalMmapVector
<char> *ScopedReport::error_message_ptr_
;
90 Mutex
ScopedReport::error_message_lock_
;
91 void (*ScopedReport::error_report_callback_
)(const char *);
93 // If there is an active ScopedReport, append to its error message.
94 void AppendToErrorMessageBuffer(const char *buffer
) {
95 ScopedReport::MaybeAppendToErrorMessage(buffer
);
98 static StackTrace
GetStackTraceFromId(u32 id
) {
100 StackTrace res
= StackDepotGet(id
);
// On Android, point users at the public documentation for HWASan reports.
// No-op elsewhere.
static void MaybePrintAndroidHelpUrl() {
#if SANITIZER_ANDROID
  Printf(
      "Learn more about HWASan reports: "
      "https://source.android.com/docs/security/test/memory-safety/"
      "hwasan-reports\n");
#endif
}
114 // A RAII object that holds a copy of the current thread stack ring buffer.
115 // The actual stack buffer may change while we are iterating over it (for
116 // example, Printf may call syslog() which can itself be built with hwasan).
117 class SavedStackAllocations
{
119 SavedStackAllocations(StackAllocationsRingBuffer
*rb
) {
120 uptr size
= rb
->size() * sizeof(uptr
);
122 MmapAlignedOrDieOnFatalError(size
, size
* 2, "saved stack allocations");
123 new (&rb_
) StackAllocationsRingBuffer(*rb
, storage
);
126 ~SavedStackAllocations() {
127 StackAllocationsRingBuffer
*rb
= get();
128 UnmapOrDie(rb
->StartOfStorage(), rb
->size() * sizeof(uptr
));
131 StackAllocationsRingBuffer
*get() {
132 return (StackAllocationsRingBuffer
*)&rb_
;
139 class Decorator
: public __sanitizer::SanitizerCommonDecorator
{
141 Decorator() : SanitizerCommonDecorator() { }
142 const char *Access() { return Blue(); }
143 const char *Allocation() const { return Magenta(); }
144 const char *Origin() const { return Magenta(); }
145 const char *Name() const { return Green(); }
146 const char *Location() { return Green(); }
147 const char *Thread() { return Green(); }
150 static bool FindHeapAllocation(HeapAllocationsRingBuffer
*rb
, uptr tagged_addr
,
151 HeapAllocationRecord
*har
, uptr
*ring_index
,
152 uptr
*num_matching_addrs
,
153 uptr
*num_matching_addrs_4b
) {
154 if (!rb
) return false;
156 *num_matching_addrs
= 0;
157 *num_matching_addrs_4b
= 0;
158 for (uptr i
= 0, size
= rb
->size(); i
< size
; i
++) {
160 if (h
.tagged_addr
<= tagged_addr
&&
161 h
.tagged_addr
+ h
.requested_size
> tagged_addr
) {
167 // Measure the number of heap ring buffer entries that would have matched
168 // if we had only one entry per address (e.g. if the ring buffer data was
169 // stored at the address itself). This will help us tune the allocator
170 // implementation for MTE.
171 if (UntagAddr(h
.tagged_addr
) <= UntagAddr(tagged_addr
) &&
172 UntagAddr(h
.tagged_addr
) + h
.requested_size
> UntagAddr(tagged_addr
)) {
173 ++*num_matching_addrs
;
176 // Measure the number of heap ring buffer entries that would have matched
177 // if we only had 4 tag bits, which is the case for MTE.
178 auto untag_4b
= [](uptr p
) {
179 return p
& ((1ULL << 60) - 1);
181 if (untag_4b(h
.tagged_addr
) <= untag_4b(tagged_addr
) &&
182 untag_4b(h
.tagged_addr
) + h
.requested_size
> untag_4b(tagged_addr
)) {
183 ++*num_matching_addrs_4b
;
189 static void PrintStackAllocations(StackAllocationsRingBuffer
*sa
,
190 tag_t addr_tag
, uptr untagged_addr
) {
191 uptr frames
= Min((uptr
)flags()->stack_history_size
, sa
->size());
192 bool found_local
= false;
193 for (uptr i
= 0; i
< frames
; i
++) {
194 const uptr
*record_addr
= &(*sa
)[i
];
195 uptr record
= *record_addr
;
199 reinterpret_cast<uptr
>(record_addr
) >> kRecordAddrBaseTagShift
;
200 uptr fp
= (record
>> kRecordFPShift
) << kRecordFPLShift
;
201 uptr pc_mask
= (1ULL << kRecordFPShift
) - 1;
202 uptr pc
= record
& pc_mask
;
204 if (Symbolizer::GetOrInit()->SymbolizeFrame(pc
, &frame
)) {
205 for (LocalInfo
&local
: frame
.locals
) {
206 if (!local
.has_frame_offset
|| !local
.has_size
|| !local
.has_tag_offset
)
208 tag_t obj_tag
= base_tag
^ local
.tag_offset
;
209 if (obj_tag
!= addr_tag
)
211 // Calculate the offset from the object address to the faulting
212 // address. Because we only store bits 4-19 of FP (bits 0-3 are
213 // guaranteed to be zero), the calculation is performed mod 2^20 and may
214 // harmlessly underflow if the address mod 2^20 is below the object
217 (untagged_addr
- fp
- local
.frame_offset
) & (kRecordFPModulus
- 1);
218 if (obj_offset
>= local
.size
)
221 Printf("Potentially referenced stack objects:\n");
224 Printf(" %s in %s %s:%d\n", local
.name
, local
.function_name
,
225 local
.decl_file
, local
.decl_line
);
234 // We didn't find any locals. Most likely we don't have symbols, so dump
235 // the information that we have for offline analysis.
236 InternalScopedString frame_desc
;
237 Printf("Previously allocated frames:\n");
238 for (uptr i
= 0; i
< frames
; i
++) {
239 const uptr
*record_addr
= &(*sa
)[i
];
240 uptr record
= *record_addr
;
243 uptr pc_mask
= (1ULL << 48) - 1;
244 uptr pc
= record
& pc_mask
;
245 frame_desc
.append(" record_addr:0x%zx record:0x%zx",
246 reinterpret_cast<uptr
>(record_addr
), record
);
247 if (SymbolizedStack
*frame
= Symbolizer::GetOrInit()->SymbolizePC(pc
)) {
248 RenderFrame(&frame_desc
, " %F %L", 0, frame
->info
.address
, &frame
->info
,
249 common_flags()->symbolize_vs_style
,
250 common_flags()->strip_path_prefix
);
253 Printf("%s\n", frame_desc
.data());
258 // Returns true if tag == *tag_ptr, reading tags from short granules if
259 // necessary. This may return a false positive if tags 1-15 are used as a
260 // regular tag rather than a short granule marker.
261 static bool TagsEqual(tag_t tag
, tag_t
*tag_ptr
) {
264 if (*tag_ptr
== 0 || *tag_ptr
> kShadowAlignment
- 1)
266 uptr mem
= ShadowToMem(reinterpret_cast<uptr
>(tag_ptr
));
267 tag_t inline_tag
= *reinterpret_cast<tag_t
*>(mem
+ kShadowAlignment
- 1);
268 return tag
== inline_tag
;
271 // HWASan globals store the size of the global in the descriptor. In cases where
272 // we don't have a binary with symbols, we can't grab the size of the global
273 // from the debug info - but we might be able to retrieve it from the
274 // descriptor. Returns zero if the lookup failed.
275 static uptr
GetGlobalSizeFromDescriptor(uptr ptr
) {
276 // Find the ELF object that this global resides in.
278 if (dladdr(reinterpret_cast<void *>(ptr
), &info
) == 0)
280 auto *ehdr
= reinterpret_cast<const ElfW(Ehdr
) *>(info
.dli_fbase
);
281 auto *phdr_begin
= reinterpret_cast<const ElfW(Phdr
) *>(
282 reinterpret_cast<const u8
*>(ehdr
) + ehdr
->e_phoff
);
284 // Get the load bias. This is normally the same as the dli_fbase address on
285 // position-independent code, but can be different on non-PIE executables,
286 // binaries using LLD's partitioning feature, or binaries compiled with a
288 ElfW(Addr
) load_bias
= 0;
289 for (const auto &phdr
:
290 ArrayRef
<const ElfW(Phdr
)>(phdr_begin
, phdr_begin
+ ehdr
->e_phnum
)) {
291 if (phdr
.p_type
!= PT_LOAD
|| phdr
.p_offset
!= 0)
293 load_bias
= reinterpret_cast<ElfW(Addr
)>(ehdr
) - phdr
.p_vaddr
;
297 // Walk all globals in this ELF object, looking for the one we're interested
298 // in. Once we find it, we can stop iterating and return the size of the
299 // global we're interested in.
300 for (const hwasan_global
&global
:
301 HwasanGlobalsFor(load_bias
, phdr_begin
, ehdr
->e_phnum
))
302 if (global
.addr() <= ptr
&& ptr
< global
.addr() + global
.size())
303 return global
.size();
308 static void ShowHeapOrGlobalCandidate(uptr untagged_addr
, tag_t
*candidate
,
309 tag_t
*left
, tag_t
*right
) {
311 uptr mem
= ShadowToMem(reinterpret_cast<uptr
>(candidate
));
312 HwasanChunkView chunk
= FindHeapChunkByAddress(mem
);
313 if (chunk
.IsAllocated()) {
316 if (untagged_addr
< chunk
.End() && untagged_addr
>= chunk
.Beg()) {
317 offset
= untagged_addr
- chunk
.Beg();
319 } else if (candidate
== left
) {
320 offset
= untagged_addr
- chunk
.End();
323 offset
= chunk
.Beg() - untagged_addr
;
326 Printf("%s", d
.Error());
327 Printf("\nCause: heap-buffer-overflow\n");
328 Printf("%s", d
.Default());
329 Printf("%s", d
.Location());
330 Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
331 untagged_addr
, offset
, whence
, chunk
.UsedSize(), chunk
.Beg(),
333 Printf("%s", d
.Allocation());
334 Printf("allocated by thread T%u here:\n", chunk
.GetAllocThreadId());
335 Printf("%s", d
.Default());
336 GetStackTraceFromId(chunk
.GetAllocStackId()).Print();
339 // Check whether the address points into a loaded library. If so, this is
340 // most likely a global variable.
341 const char *module_name
;
343 Symbolizer
*sym
= Symbolizer::GetOrInit();
344 if (sym
->GetModuleNameAndOffsetForPC(mem
, &module_name
, &module_address
)) {
345 Printf("%s", d
.Error());
346 Printf("\nCause: global-overflow\n");
347 Printf("%s", d
.Default());
349 Printf("%s", d
.Location());
350 if (sym
->SymbolizeData(mem
, &info
) && info
.start
) {
352 "%p is located %zd bytes %s a %zd-byte global variable "
353 "%s [%p,%p) in %s\n",
355 candidate
== left
? untagged_addr
- (info
.start
+ info
.size
)
356 : info
.start
- untagged_addr
,
357 candidate
== left
? "after" : "before", info
.size
, info
.name
,
358 info
.start
, info
.start
+ info
.size
, module_name
);
360 uptr size
= GetGlobalSizeFromDescriptor(mem
);
362 // We couldn't find the size of the global from the descriptors.
364 "%p is located %s a global variable in "
365 "\n #0 0x%x (%s+0x%x)\n",
366 untagged_addr
, candidate
== left
? "after" : "before", mem
,
367 module_name
, module_address
);
370 "%p is located %s a %zd-byte global variable in "
371 "\n #0 0x%x (%s+0x%x)\n",
372 untagged_addr
, candidate
== left
? "after" : "before", size
, mem
,
373 module_name
, module_address
);
375 Printf("%s", d
.Default());
379 void PrintAddressDescription(
380 uptr tagged_addr
, uptr access_size
,
381 StackAllocationsRingBuffer
*current_stack_allocations
) {
383 int num_descriptions_printed
= 0;
384 uptr untagged_addr
= UntagAddr(tagged_addr
);
386 if (MemIsShadow(untagged_addr
)) {
387 Printf("%s%p is HWAsan shadow memory.\n%s", d
.Location(), untagged_addr
,
392 // Print some very basic information about the address, if it's a heap.
393 HwasanChunkView chunk
= FindHeapChunkByAddress(untagged_addr
);
394 if (uptr beg
= chunk
.Beg()) {
395 uptr size
= chunk
.ActualSize();
396 Printf("%s[%p,%p) is a %s %s heap chunk; "
397 "size: %zd offset: %zd\n%s",
400 chunk
.FromSmallHeap() ? "small" : "large",
401 chunk
.IsAllocated() ? "allocated" : "unallocated",
402 size
, untagged_addr
- beg
,
406 tag_t addr_tag
= GetTagFromPointer(tagged_addr
);
408 bool on_stack
= false;
409 // Check stack first. If the address is on the stack of a live thread, we
410 // know it cannot be a heap / global overflow.
411 hwasanThreadList().VisitAllLiveThreads([&](Thread
*t
) {
412 if (t
->AddrIsInStack(untagged_addr
)) {
414 // TODO(fmayer): figure out how to distinguish use-after-return and
415 // stack-buffer-overflow.
416 Printf("%s", d
.Error());
417 Printf("\nCause: stack tag-mismatch\n");
418 Printf("%s", d
.Location());
419 Printf("Address %p is located in stack of thread T%zd\n", untagged_addr
,
421 Printf("%s", d
.Default());
424 auto *sa
= (t
== GetCurrentThread() && current_stack_allocations
)
425 ? current_stack_allocations
426 : t
->stack_allocations();
427 PrintStackAllocations(sa
, addr_tag
, untagged_addr
);
428 num_descriptions_printed
++;
432 // Check if this looks like a heap buffer overflow by scanning
433 // the shadow left and right and looking for the first adjacent
434 // object with a different memory tag. If that tag matches addr_tag,
435 // check the allocator if it has a live chunk there.
436 tag_t
*tag_ptr
= reinterpret_cast<tag_t
*>(MemToShadow(untagged_addr
));
437 tag_t
*candidate
= nullptr, *left
= tag_ptr
, *right
= tag_ptr
;
438 uptr candidate_distance
= 0;
439 for (; candidate_distance
< 1000; candidate_distance
++) {
440 if (MemIsShadow(reinterpret_cast<uptr
>(left
)) &&
441 TagsEqual(addr_tag
, left
)) {
446 if (MemIsShadow(reinterpret_cast<uptr
>(right
)) &&
447 TagsEqual(addr_tag
, right
)) {
454 constexpr auto kCloseCandidateDistance
= 1;
456 if (!on_stack
&& candidate
&& candidate_distance
<= kCloseCandidateDistance
) {
457 ShowHeapOrGlobalCandidate(untagged_addr
, candidate
, left
, right
);
458 num_descriptions_printed
++;
461 hwasanThreadList().VisitAllLiveThreads([&](Thread
*t
) {
462 // Scan all threads' ring buffers to find if it's a heap-use-after-free.
463 HeapAllocationRecord har
;
464 uptr ring_index
, num_matching_addrs
, num_matching_addrs_4b
;
465 if (FindHeapAllocation(t
->heap_allocations(), tagged_addr
, &har
,
466 &ring_index
, &num_matching_addrs
,
467 &num_matching_addrs_4b
)) {
468 Printf("%s", d
.Error());
469 Printf("\nCause: use-after-free\n");
470 Printf("%s", d
.Location());
471 Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
472 untagged_addr
, untagged_addr
- UntagAddr(har
.tagged_addr
),
473 har
.requested_size
, UntagAddr(har
.tagged_addr
),
474 UntagAddr(har
.tagged_addr
) + har
.requested_size
);
475 Printf("%s", d
.Allocation());
476 Printf("freed by thread T%u here:\n", t
->unique_id());
477 Printf("%s", d
.Default());
478 GetStackTraceFromId(har
.free_context_id
).Print();
480 Printf("%s", d
.Allocation());
481 Printf("previously allocated by thread T%u here:\n", har
.alloc_thread_id
);
482 Printf("%s", d
.Default());
483 GetStackTraceFromId(har
.alloc_context_id
).Print();
485 // Print a developer note: the index of this heap object
486 // in the thread's deallocation ring buffer.
487 Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index
+ 1,
488 flags()->heap_history_size
);
489 Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs
);
490 Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
491 num_matching_addrs_4b
);
494 num_descriptions_printed
++;
498 if (candidate
&& num_descriptions_printed
== 0) {
499 ShowHeapOrGlobalCandidate(untagged_addr
, candidate
, left
, right
);
500 num_descriptions_printed
++;
503 // Print the remaining threads, as an extra information, 1 line per thread.
504 if (flags()->print_live_threads_info
)
505 hwasanThreadList().VisitAllLiveThreads([&](Thread
*t
) { t
->Announce(); });
507 if (!num_descriptions_printed
)
508 // We exhausted our possibilities. Bail out.
509 Printf("HWAddressSanitizer can not describe address in more detail.\n");
510 if (num_descriptions_printed
> 1) {
512 "There are %d potential causes, printed above in order "
514 num_descriptions_printed
);
// Statistics reporting is intentionally a no-op for HWASan.
void ReportStats() {}
520 static void PrintTagInfoAroundAddr(tag_t
*tag_ptr
, uptr num_rows
,
521 void (*print_tag
)(InternalScopedString
&s
,
523 const uptr row_len
= 16; // better be power of two.
524 tag_t
*center_row_beg
= reinterpret_cast<tag_t
*>(
525 RoundDownTo(reinterpret_cast<uptr
>(tag_ptr
), row_len
));
526 tag_t
*beg_row
= center_row_beg
- row_len
* (num_rows
/ 2);
527 tag_t
*end_row
= center_row_beg
+ row_len
* ((num_rows
+ 1) / 2);
528 InternalScopedString s
;
529 for (tag_t
*row
= beg_row
; row
< end_row
; row
+= row_len
) {
530 s
.append("%s", row
== center_row_beg
? "=>" : " ");
531 s
.append("%p:", (void *)ShadowToMem(reinterpret_cast<uptr
>(row
)));
532 for (uptr i
= 0; i
< row_len
; i
++) {
533 s
.append("%s", row
+ i
== tag_ptr
? "[" : " ");
534 print_tag(s
, &row
[i
]);
535 s
.append("%s", row
+ i
== tag_ptr
? "]" : " ");
539 Printf("%s", s
.data());
542 static void PrintTagsAroundAddr(tag_t
*tag_ptr
) {
544 "Memory tags around the buggy address (one tag corresponds to %zd "
545 "bytes):\n", kShadowAlignment
);
546 PrintTagInfoAroundAddr(tag_ptr
, 17, [](InternalScopedString
&s
, tag_t
*tag
) {
547 s
.append("%02x", *tag
);
551 "Tags for short granules around the buggy address (one tag corresponds "
554 PrintTagInfoAroundAddr(tag_ptr
, 3, [](InternalScopedString
&s
, tag_t
*tag
) {
555 if (*tag
>= 1 && *tag
<= kShadowAlignment
) {
556 uptr granule_addr
= ShadowToMem(reinterpret_cast<uptr
>(tag
));
558 *reinterpret_cast<u8
*>(granule_addr
+ kShadowAlignment
- 1));
565 "https://clang.llvm.org/docs/"
566 "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
567 "description of short granule tags\n");
570 uptr
GetTopPc(StackTrace
*stack
) {
571 return stack
->size
? StackTrace::GetPreviousInstructionPc(stack
->trace
[0])
575 void ReportInvalidFree(StackTrace
*stack
, uptr tagged_addr
) {
576 ScopedReport
R(flags()->halt_on_error
);
578 uptr untagged_addr
= UntagAddr(tagged_addr
);
579 tag_t ptr_tag
= GetTagFromPointer(tagged_addr
);
580 tag_t
*tag_ptr
= nullptr;
582 if (MemIsApp(untagged_addr
)) {
583 tag_ptr
= reinterpret_cast<tag_t
*>(MemToShadow(untagged_addr
));
584 if (MemIsShadow(reinterpret_cast<uptr
>(tag_ptr
)))
590 Printf("%s", d
.Error());
591 uptr pc
= GetTopPc(stack
);
592 const char *bug_type
= "invalid-free";
593 const Thread
*thread
= GetCurrentThread();
595 Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
596 SanitizerToolName
, bug_type
, untagged_addr
, pc
, thread
->unique_id());
598 Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
599 SanitizerToolName
, bug_type
, untagged_addr
, pc
);
601 Printf("%s", d
.Access());
603 Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag
, mem_tag
);
604 Printf("%s", d
.Default());
608 PrintAddressDescription(tagged_addr
, 0, nullptr);
611 PrintTagsAroundAddr(tag_ptr
);
613 MaybePrintAndroidHelpUrl();
614 ReportErrorSummary(bug_type
, stack
);
617 void ReportTailOverwritten(StackTrace
*stack
, uptr tagged_addr
, uptr orig_size
,
618 const u8
*expected
) {
619 uptr tail_size
= kShadowAlignment
- (orig_size
% kShadowAlignment
);
620 u8 actual_expected
[kShadowAlignment
];
621 internal_memcpy(actual_expected
, expected
, tail_size
);
622 tag_t ptr_tag
= GetTagFromPointer(tagged_addr
);
623 // Short granule is stashed in the last byte of the magic string. To avoid
624 // confusion, make the expected magic string contain the short granule tag.
625 if (orig_size
% kShadowAlignment
!= 0) {
626 actual_expected
[tail_size
- 1] = ptr_tag
;
629 ScopedReport
R(flags()->halt_on_error
);
631 uptr untagged_addr
= UntagAddr(tagged_addr
);
632 Printf("%s", d
.Error());
633 const char *bug_type
= "allocation-tail-overwritten";
634 Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName
,
635 bug_type
, untagged_addr
, untagged_addr
+ orig_size
, orig_size
);
636 Printf("\n%s", d
.Default());
638 "Stack of invalid access unknown. Issue detected at deallocation "
640 Printf("%s", d
.Allocation());
641 Printf("deallocated here:\n");
642 Printf("%s", d
.Default());
644 HwasanChunkView chunk
= FindHeapChunkByAddress(untagged_addr
);
646 Printf("%s", d
.Allocation());
647 Printf("allocated here:\n");
648 Printf("%s", d
.Default());
649 GetStackTraceFromId(chunk
.GetAllocStackId()).Print();
652 InternalScopedString s
;
653 CHECK_GT(tail_size
, 0U);
654 CHECK_LT(tail_size
, kShadowAlignment
);
655 u8
*tail
= reinterpret_cast<u8
*>(untagged_addr
+ orig_size
);
656 s
.append("Tail contains: ");
657 for (uptr i
= 0; i
< kShadowAlignment
- tail_size
; i
++)
659 for (uptr i
= 0; i
< tail_size
; i
++)
660 s
.append("%02x ", tail
[i
]);
662 s
.append("Expected: ");
663 for (uptr i
= 0; i
< kShadowAlignment
- tail_size
; i
++)
665 for (uptr i
= 0; i
< tail_size
; i
++) s
.append("%02x ", actual_expected
[i
]);
668 for (uptr i
= 0; i
< kShadowAlignment
- tail_size
; i
++)
670 for (uptr i
= 0; i
< tail_size
; i
++)
671 s
.append("%s ", actual_expected
[i
] != tail
[i
] ? "^^" : " ");
673 s
.append("\nThis error occurs when a buffer overflow overwrites memory\n"
674 "after a heap object, but within the %zd-byte granule, e.g.\n"
675 " char *x = new char[20];\n"
677 "%s does not detect such bugs in uninstrumented code at the time of write,"
678 "\nbut can detect them at the time of free/delete.\n"
679 "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
680 kShadowAlignment
, SanitizerToolName
);
681 Printf("%s", s
.data());
682 GetCurrentThread()->Announce();
684 tag_t
*tag_ptr
= reinterpret_cast<tag_t
*>(MemToShadow(untagged_addr
));
685 PrintTagsAroundAddr(tag_ptr
);
687 MaybePrintAndroidHelpUrl();
688 ReportErrorSummary(bug_type
, stack
);
691 void ReportTagMismatch(StackTrace
*stack
, uptr tagged_addr
, uptr access_size
,
692 bool is_store
, bool fatal
, uptr
*registers_frame
) {
693 ScopedReport
R(fatal
);
694 SavedStackAllocations
current_stack_allocations(
695 GetCurrentThread()->stack_allocations());
698 uptr untagged_addr
= UntagAddr(tagged_addr
);
699 // TODO: when possible, try to print heap-use-after-free, etc.
700 const char *bug_type
= "tag-mismatch";
701 uptr pc
= GetTopPc(stack
);
702 Printf("%s", d
.Error());
703 Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName
, bug_type
,
706 Thread
*t
= GetCurrentThread();
709 __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr
), access_size
);
710 CHECK(offset
>= 0 && offset
< static_cast<sptr
>(access_size
));
711 tag_t ptr_tag
= GetTagFromPointer(tagged_addr
);
713 reinterpret_cast<tag_t
*>(MemToShadow(untagged_addr
+ offset
));
714 tag_t mem_tag
= *tag_ptr
;
716 Printf("%s", d
.Access());
717 if (mem_tag
&& mem_tag
< kShadowAlignment
) {
718 tag_t
*granule_ptr
= reinterpret_cast<tag_t
*>((untagged_addr
+ offset
) &
719 ~(kShadowAlignment
- 1));
720 // If offset is 0, (untagged_addr + offset) is not aligned to granules.
721 // This is the offset of the leftmost accessed byte within the bad granule.
722 u8 in_granule_offset
= (untagged_addr
+ offset
) & (kShadowAlignment
- 1);
723 tag_t short_tag
= granule_ptr
[kShadowAlignment
- 1];
724 // The first mismatch was a short granule that matched the ptr_tag.
725 if (short_tag
== ptr_tag
) {
726 // If the access starts after the end of the short granule, then the first
727 // bad byte is the first byte of the access; otherwise it is the first
728 // byte past the end of the short granule
729 if (mem_tag
> in_granule_offset
) {
730 offset
+= mem_tag
- in_granule_offset
;
734 "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
735 is_store
? "WRITE" : "READ", access_size
, untagged_addr
, ptr_tag
,
736 mem_tag
, short_tag
, t
->unique_id());
738 Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
739 is_store
? "WRITE" : "READ", access_size
, untagged_addr
, ptr_tag
,
740 mem_tag
, t
->unique_id());
743 Printf("Invalid access starting at offset %zu\n", offset
);
744 Printf("%s", d
.Default());
748 PrintAddressDescription(tagged_addr
, access_size
,
749 current_stack_allocations
.get());
752 PrintTagsAroundAddr(tag_ptr
);
755 ReportRegisters(registers_frame
, pc
);
757 MaybePrintAndroidHelpUrl();
758 ReportErrorSummary(bug_type
, stack
);
761 // See the frame breakdown defined in __hwasan_tag_mismatch (from
762 // hwasan_tag_mismatch_{aarch64,riscv64}.S).
763 void ReportRegisters(uptr
*frame
, uptr pc
) {
764 Printf("Registers where the failure occurred (pc %p):\n", pc
);
766 // We explicitly print a single line (4 registers/line) each iteration to
767 // reduce the amount of logcat error messages printed. Each Printf() will
768 // result in a new logcat line, irrespective of whether a newline is present,
769 // and so we wish to reduce the number of Printf() calls we have to make.
770 #if defined(__aarch64__)
771 Printf(" x0 %016llx x1 %016llx x2 %016llx x3 %016llx\n",
772 frame
[0], frame
[1], frame
[2], frame
[3]);
773 #elif SANITIZER_RISCV64
774 Printf(" sp %016llx x1 %016llx x2 %016llx x3 %016llx\n",
775 reinterpret_cast<u8
*>(frame
) + 256, frame
[1], frame
[2], frame
[3]);
777 Printf(" x4 %016llx x5 %016llx x6 %016llx x7 %016llx\n",
778 frame
[4], frame
[5], frame
[6], frame
[7]);
779 Printf(" x8 %016llx x9 %016llx x10 %016llx x11 %016llx\n",
780 frame
[8], frame
[9], frame
[10], frame
[11]);
781 Printf(" x12 %016llx x13 %016llx x14 %016llx x15 %016llx\n",
782 frame
[12], frame
[13], frame
[14], frame
[15]);
783 Printf(" x16 %016llx x17 %016llx x18 %016llx x19 %016llx\n",
784 frame
[16], frame
[17], frame
[18], frame
[19]);
785 Printf(" x20 %016llx x21 %016llx x22 %016llx x23 %016llx\n",
786 frame
[20], frame
[21], frame
[22], frame
[23]);
787 Printf(" x24 %016llx x25 %016llx x26 %016llx x27 %016llx\n",
788 frame
[24], frame
[25], frame
[26], frame
[27]);
789 // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
790 // passes it to this function.
791 #if defined(__aarch64__)
792 Printf(" x28 %016llx x29 %016llx x30 %016llx sp %016llx\n", frame
[28],
793 frame
[29], frame
[30], reinterpret_cast<u8
*>(frame
) + 256);
794 #elif SANITIZER_RISCV64
795 Printf(" x28 %016llx x29 %016llx x30 %016llx x31 %016llx\n", frame
[28],
796 frame
[29], frame
[30], frame
[31]);
801 } // namespace __hwasan
803 void __hwasan_set_error_report_callback(void (*callback
)(const char *)) {
804 __hwasan::ScopedReport::SetErrorReportCallback(callback
);