Bit shift the bmAttributes field to interpret isochronous endpoint.
[chromium-blink-merge.git] / base / debug / trace_event_memory.cc
blob28318654750950df5f179aec0f7fa25c26165f7e
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/debug/trace_event_memory.h"
7 #include "base/debug/leak_annotations.h"
8 #include "base/debug/trace_event.h"
9 #include "base/lazy_instance.h"
10 #include "base/logging.h"
11 #include "base/memory/scoped_ptr.h"
12 #include "base/message_loop/message_loop.h"
13 #include "base/strings/string_number_conversions.h"
14 #include "base/strings/string_util.h"
15 #include "base/threading/thread_local_storage.h"
17 namespace base {
18 namespace debug {
20 namespace {
22 // Maximum number of nested TRACE_EVENT scopes to record. Must be less than
23 // or equal to HeapProfileTable::kMaxStackDepth / 2 because we record two
24 // entries on the pseudo-stack per scope.
25 const size_t kMaxScopeDepth = 16;
/////////////////////////////////////////////////////////////////////////////
// Holds a memory dump until the tracing system needs to serialize it.
class MemoryDumpHolder : public base::debug::ConvertableToTraceFormat {
 public:
  // Takes ownership of |dump|, which must be a JSON string, allocated with
  // malloc() and NULL terminated.
  explicit MemoryDumpHolder(char* dump) : dump_(dump) {}

  // base::debug::ConvertableToTraceFormat overrides:
  void AppendAsTraceFormat(std::string* out) const override {
    AppendHeapProfileAsTraceFormat(dump_, out);
  }

 private:
  // |dump_| was malloc()-allocated (by tcmalloc's GetHeapProfile()), so it
  // must be released with free(), not delete.
  ~MemoryDumpHolder() override { free(dump_); }

  char* dump_;

  DISALLOW_COPY_AND_ASSIGN(MemoryDumpHolder);
};
/////////////////////////////////////////////////////////////////////////////
// Records a stack of TRACE_MEMORY events. One per thread is required.
struct TraceMemoryStack {
  TraceMemoryStack() : scope_depth(0) {
    // Zero the entries so unused slots hold NULL category/name pointers.
    memset(scope_data, 0, kMaxScopeDepth * sizeof(scope_data[0]));
  }

  // Depth of the currently nested TRACE_EVENT scopes. Allowed to be greater
  // than kMaxScopeDepth so we can match scope pushes and pops even if we don't
  // have enough space to store the EventData.
  size_t scope_depth;

  // Stack of categories and names.
  ScopedTraceMemory::ScopeData scope_data[kMaxScopeDepth];
};
// Pointer to a TraceMemoryStack per thread. TLS_INITIALIZER gives the slot
// constant (zero) initialization, so no dynamic initializer runs for this
// file-scope global.
base::ThreadLocalStorage::StaticSlot tls_trace_memory_stack = TLS_INITIALIZER;
67 // Clean up memory pointed to by our thread-local storage.
68 void DeleteStackOnThreadCleanup(void* value) {
69 TraceMemoryStack* stack = static_cast<TraceMemoryStack*>(value);
70 delete stack;
73 // Initializes the thread-local TraceMemoryStack pointer. Returns true on
74 // success or if it is already initialized.
75 bool InitThreadLocalStorage() {
76 if (tls_trace_memory_stack.initialized())
77 return true;
78 // Initialize the thread-local storage key, returning true on success.
79 return tls_trace_memory_stack.Initialize(&DeleteStackOnThreadCleanup);
82 // Clean up thread-local-storage in the main thread.
83 void CleanupThreadLocalStorage() {
84 if (!tls_trace_memory_stack.initialized())
85 return;
86 TraceMemoryStack* stack =
87 static_cast<TraceMemoryStack*>(tls_trace_memory_stack.Get());
88 delete stack;
89 tls_trace_memory_stack.Set(NULL);
90 // Intentionally do not release the thread-local-storage key here, that is,
91 // do not call tls_trace_memory_stack.Free(). Other threads have lazily
92 // created pointers in thread-local-storage via GetTraceMemoryStack() below.
93 // Those threads need to run the DeleteStack() destructor function when they
94 // exit. If we release the key the destructor will not be called and those
95 // threads will not clean up their memory.
98 // Returns the thread-local trace memory stack for the current thread, creating
99 // one if needed. Returns NULL if the thread-local storage key isn't
100 // initialized, which indicates that heap profiling isn't running.
101 TraceMemoryStack* GetTraceMemoryStack() {
102 TraceMemoryStack* stack =
103 static_cast<TraceMemoryStack*>(tls_trace_memory_stack.Get());
104 // Lazily initialize TraceMemoryStack objects for new threads.
105 if (!stack) {
106 stack = new TraceMemoryStack;
107 tls_trace_memory_stack.Set(stack);
109 return stack;
// Returns a "pseudo-stack" of pointers to trace event categories and names.
// Because tcmalloc stores one pointer per stack frame this converts N nested
// trace events into N * 2 pseudo-stack entries. Thus this macro invocation:
//
//   TRACE_EVENT0("category1", "name1");
//   TRACE_EVENT0("category2", "name2");
//
// becomes this pseudo-stack:
//
//   stack_out[0] = "category1"
//   stack_out[1] = "name1"
//   stack_out[2] = "category2"
//   stack_out[3] = "name2"
//
// Returns int instead of size_t to match the signature required by tcmalloc.
int GetPseudoStack(int skip_count_ignored, void** stack_out) {
  // If the tracing system isn't fully initialized, just skip this allocation.
  // Attempting to initialize will allocate memory, causing this function to
  // be called recursively from inside the allocator.
  if (!tls_trace_memory_stack.initialized() || !tls_trace_memory_stack.Get())
    return 0;
  TraceMemoryStack* stack =
      static_cast<TraceMemoryStack*>(tls_trace_memory_stack.Get());
  // Copy at most kMaxScopeDepth scope entries; |scope_depth| may exceed the
  // array size when scopes nest deeply (extra pushes are counted but their
  // data is not stored).
  const size_t count = std::min(stack->scope_depth, kMaxScopeDepth);
  // Note that memcpy() works for zero bytes.
  memcpy(stack_out,
         stack->scope_data,
         count * sizeof(stack->scope_data[0]));
  // Each item in the trace event stack contains both name and category so tell
  // tcmalloc that we have returned |count| * 2 stack frames.
  return static_cast<int>(count * 2);
}
142 } // namespace
//////////////////////////////////////////////////////////////////////////////

// Hooks this controller into the tracing system. The heap profiler entry
// points are injected as function pointers so this file does not depend
// directly on tcmalloc.
TraceMemoryController::TraceMemoryController(
    scoped_refptr<MessageLoopProxy> message_loop_proxy,
    HeapProfilerStartFunction heap_profiler_start_function,
    HeapProfilerStopFunction heap_profiler_stop_function,
    GetHeapProfileFunction get_heap_profile_function)
    : message_loop_proxy_(message_loop_proxy),
      heap_profiler_start_function_(heap_profiler_start_function),
      heap_profiler_stop_function_(heap_profiler_stop_function),
      get_heap_profile_function_(get_heap_profile_function),
      weak_factory_(this) {
  // Force the "memory" category to show up in the trace viewer.
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("memory"), "init");
  // Watch for the tracing system being enabled.
  TraceLog::GetInstance()->AddEnabledStateObserver(this);
}
// Stops profiling (if the periodic dump timer is still running) and
// unregisters from trace-log state notifications.
TraceMemoryController::~TraceMemoryController() {
  if (dump_timer_.IsRunning())
    StopProfiling();
  TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
}
// base::debug::TraceLog::EnabledStateChangedObserver overrides:
void TraceMemoryController::OnTraceLogEnabled() {
  // Check to see if tracing is enabled for the memory category.
  bool enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("memory"),
                                     &enabled);
  if (!enabled)
    return;
  DVLOG(1) << "OnTraceLogEnabled";
  // Post to |message_loop_proxy_| so profiling starts on the controller's
  // own thread; this observer callback may fire on a different thread.
  message_loop_proxy_->PostTask(
      FROM_HERE,
      base::Bind(&TraceMemoryController::StartProfiling,
                 weak_factory_.GetWeakPtr()));
}
void TraceMemoryController::OnTraceLogDisabled() {
  // The memory category is always disabled before OnTraceLogDisabled() is
  // called, so we cannot tell if it was enabled before. Always try to turn
  // off profiling; StopProfiling() is a no-op when the timer isn't running.
  DVLOG(1) << "OnTraceLogDisabled";
  message_loop_proxy_->PostTask(
      FROM_HERE,
      base::Bind(&TraceMemoryController::StopProfiling,
                 weak_factory_.GetWeakPtr()));
}
void TraceMemoryController::StartProfiling() {
  // Watch for the tracing framework sending enabling more than once.
  if (dump_timer_.IsRunning())
    return;
  DVLOG(1) << "Starting trace memory";
  if (!InitThreadLocalStorage())
    return;
  ScopedTraceMemory::set_enabled(true);
  // Call ::HeapProfilerWithPseudoStackStart(), passing our pseudo-stack
  // callback so allocations are attributed to trace-event scopes.
  heap_profiler_start_function_(&GetPseudoStack);
  const int kDumpIntervalSeconds = 5;
  // Snapshot the heap periodically so the trace records allocation changes
  // over time rather than a single final state.
  dump_timer_.Start(FROM_HERE,
                    TimeDelta::FromSeconds(kDumpIntervalSeconds),
                    base::Bind(&TraceMemoryController::DumpMemoryProfile,
                               weak_factory_.GetWeakPtr()));
}
void TraceMemoryController::DumpMemoryProfile() {
  // Don't trace allocations here in the memory tracing system.
  INTERNAL_TRACE_MEMORY(TRACE_DISABLED_BY_DEFAULT("memory"),
                        TRACE_MEMORY_IGNORE);

  DVLOG(1) << "DumpMemoryProfile";
  // MemoryDumpHolder takes ownership of this string. See GetHeapProfile() in
  // tcmalloc for details.
  char* dump = get_heap_profile_function_();
  // Every dump uses the same snapshot id, so successive dumps appear as
  // snapshots of the same "memory::Heap" trace object.
  const int kSnapshotId = 1;
  TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("memory"),
      "memory::Heap",
      kSnapshotId,
      scoped_refptr<ConvertableToTraceFormat>(new MemoryDumpHolder(dump)));
}
void TraceMemoryController::StopProfiling() {
  // Watch for the tracing framework sending disabled more than once.
  if (!dump_timer_.IsRunning())
    return;
  DVLOG(1) << "Stopping trace memory";
  dump_timer_.Stop();
  // Disable scope recording before tearing down TLS so in-flight
  // ScopedTraceMemory objects stop touching the stacks.
  ScopedTraceMemory::set_enabled(false);
  CleanupThreadLocalStorage();
  // Call ::HeapProfilerStop().
  heap_profiler_stop_function_();
}
// Test-only accessor: reports whether the periodic dump timer is active,
// i.e. whether profiling is currently running.
bool TraceMemoryController::IsTimerRunningForTest() const {
  return dump_timer_.IsRunning();
}
/////////////////////////////////////////////////////////////////////////////

// static
// Flipped by TraceMemoryController::StartProfiling()/StopProfiling() via
// set_enabled(); DCHECKed in Initialize()/Destroy() below.
bool ScopedTraceMemory::enabled_ = false;
// Pushes one (category, name) scope entry onto this thread's stack.
void ScopedTraceMemory::Initialize(const char* category, const char* name) {
  DCHECK(enabled_);
  // Get our thread's copy of the stack.
  TraceMemoryStack* trace_memory_stack = GetTraceMemoryStack();
  const size_t index = trace_memory_stack->scope_depth;
  // Don't record data for deeply nested scopes, but continue to increment
  // |scope_depth| so we can match scope pushes and pops.
  if (index < kMaxScopeDepth) {
    ScopeData& event = trace_memory_stack->scope_data[index];
    event.category = category;
    event.name = name;
  }
  trace_memory_stack->scope_depth++;
}
// Pops one scope entry off this thread's stack (decrement only; the stored
// data is left in place and simply overwritten by the next push).
void ScopedTraceMemory::Destroy() {
  DCHECK(enabled_);
  // Get our thread's copy of the stack.
  TraceMemoryStack* trace_memory_stack = GetTraceMemoryStack();
  // The tracing system can be turned on with ScopedTraceMemory objects
  // allocated on the stack, so avoid potential underflow as they are
  // destroyed.
  if (trace_memory_stack->scope_depth > 0)
    trace_memory_stack->scope_depth--;
}
// static
// Test-only: enables scope recording without a running TraceMemoryController.
void ScopedTraceMemory::InitForTest() {
  InitThreadLocalStorage();
  enabled_ = true;
}
// static
// Test-only counterpart of InitForTest(): disables recording, then frees
// this thread's stack.
void ScopedTraceMemory::CleanupForTest() {
  enabled_ = false;
  CleanupThreadLocalStorage();
}
// static
// Test-only: returns the current scope nesting depth for this thread (may
// exceed kMaxScopeDepth; see TraceMemoryStack::scope_depth).
int ScopedTraceMemory::GetStackDepthForTest() {
  TraceMemoryStack* stack = GetTraceMemoryStack();
  return static_cast<int>(stack->scope_depth);
}
// static
// Test-only: returns the recorded scope entry at |stack_index|. No bounds
// check is performed; callers must pass a valid index (< kMaxScopeDepth).
ScopedTraceMemory::ScopeData ScopedTraceMemory::GetScopeDataForTest(
    int stack_index) {
  TraceMemoryStack* stack = GetTraceMemoryStack();
  return stack->scope_data[stack_index];
}
/////////////////////////////////////////////////////////////////////////////

// Converts a raw tcmalloc heap-profile dump into the JSON array format the
// trace viewer expects, appending it to |output|.
void AppendHeapProfileAsTraceFormat(const char* input, std::string* output) {
  // Heap profile output has a header total line, then a list of stacks with
  // memory totals, like this:
  //
  // heap profile:    357:  55227 [ 14653:  2624014] @ heapprofile
  //    95:  40940 [   649:   114260] @ 0x7fa7f4b3be13
  //    77:  32546 [   742:   106234] @
  //    68:   4195 [  1087:    98009] @ 0x7fa7fa9b9ba0 0x7fa7f4b3be13
  //
  // MAPPED_LIBRARIES:
  //   1be411fc1000-1be4139e4000 rw-p 00000000 00:00 0
  //   1be4139e4000-1be4139e5000 ---p 00000000 00:00 0
  //   ...
  //
  // Skip input after MAPPED_LIBRARIES.
  std::string input_string;
  const char* mapped_libraries = strstr(input, "MAPPED_LIBRARIES");
  if (mapped_libraries) {
    input_string.assign(input, mapped_libraries - input);
  } else {
    input_string.assign(input);
  }

  std::vector<std::string> lines;
  size_t line_count = Tokenize(input_string, "\n", &lines);
  if (line_count == 0) {
    DLOG(WARNING) << "No lines found";
    return;
  }

  // Handle the initial summary line.
  output->append("[");
  AppendHeapProfileTotalsAsTraceFormat(lines[0], output);

  // Handle the following stack trace lines.
  for (size_t i = 1; i < line_count; ++i) {
    const std::string& line = lines[i];
    AppendHeapProfileLineAsTraceFormat(line, output);
  }
  output->append("]\n");
}
// Converts the heap profile's summary line into a JSON object appended to
// |output|. Only the outstanding totals are emitted; the cumulative numbers
// in the brackets are dropped.
void AppendHeapProfileTotalsAsTraceFormat(const std::string& line,
                                          std::string* output) {
  // This is what a line looks like:
  // heap profile:    357:  55227 [ 14653:  2624014] @ heapprofile
  //
  // The numbers represent total allocations since profiling was enabled.
  // From the example above:
  //     357 = Outstanding allocations (mallocs - frees)
  //   55227 = Outstanding bytes (malloc bytes - free bytes)
  //   14653 = Total allocations (mallocs)
  // 2624014 = Total bytes (malloc bytes)
  std::vector<std::string> tokens;
  Tokenize(line, " :[]@", &tokens);
  if (tokens.size() < 4) {
    DLOG(WARNING) << "Invalid totals line " << line;
    return;
  }
  DCHECK_EQ(tokens[0], "heap");
  DCHECK_EQ(tokens[1], "profile");
  output->append("{\"current_allocs\": ");
  output->append(tokens[2]);
  output->append(", \"current_bytes\": ");
  output->append(tokens[3]);
  output->append(", \"trace\": \"\"}");
}
369 bool AppendHeapProfileLineAsTraceFormat(const std::string& line,
370 std::string* output) {
371 // This is what a line looks like:
372 // 68: 4195 [ 1087: 98009] @ 0x7fa7fa9b9ba0 0x7fa7f4b3be13
374 // The numbers represent allocations for a particular stack trace since
375 // profiling was enabled. From the example above:
376 // 68 = Outstanding allocations (mallocs - frees)
377 // 4195 = Outstanding bytes (malloc bytes - free bytes)
378 // 1087 = Total allocations (mallocs)
379 // 98009 = Total bytes (malloc bytes)
381 // 0x7fa7fa9b9ba0 0x7fa7f4b3be13 = Stack trace represented as pointers to
382 // static strings from trace event categories
383 // and names.
384 std::vector<std::string> tokens;
385 Tokenize(line, " :[]@", &tokens);
386 // It's valid to have no stack addresses, so only require 4 tokens.
387 if (tokens.size() < 4) {
388 DLOG(WARNING) << "Invalid line " << line;
389 return false;
391 // Don't bother with stacks that have no current allocations.
392 if (tokens[0] == "0")
393 return false;
394 output->append(",\n");
395 output->append("{\"current_allocs\": ");
396 output->append(tokens[0]);
397 output->append(", \"current_bytes\": ");
398 output->append(tokens[1]);
399 output->append(", \"trace\": \"");
401 // Convert pairs of "stack addresses" into category and name strings.
402 const std::string kSingleQuote = "'";
403 for (size_t t = 4; t < tokens.size(); t += 2) {
404 // Casting strings into pointers is ugly but otherwise tcmalloc would need
405 // to gain a special output serializer just for pseudo-stacks.
406 const char* trace_category = StringFromHexAddress(tokens[t]);
407 DCHECK_LT(t + 1, tokens.size());
408 const char* trace_name = StringFromHexAddress(tokens[t + 1]);
410 // TODO(jamescook): Report the trace category and name separately to the
411 // trace viewer and allow it to decide what decorations to apply. For now
412 // just hard-code a decoration for posted tasks (toplevel).
413 std::string trace_string(trace_name);
414 if (!strcmp(trace_category, "toplevel"))
415 trace_string.append("->PostTask");
417 // Some trace name strings have double quotes, convert them to single.
418 ReplaceChars(trace_string, "\"", kSingleQuote, &trace_string);
420 output->append(trace_string);
422 // Trace viewer expects a trailing space.
423 output->append(" ");
425 output->append("\"}");
426 return true;
// Maps a hex "address" token from the heap profile back to the static string
// it points at. The pseudo-stack entries are pointers to trace-event category
// and name string literals, so the round-trip through a pointer value is safe
// for well-formed dumps.
const char* StringFromHexAddress(const std::string& hex_address) {
  uint64 address = 0;
  if (!base::HexStringToUInt64(hex_address, &address))
    return "error";
  if (!address)
    return "null";
  // Note that this cast handles 64-bit to 32-bit conversion if necessary.
  return reinterpret_cast<const char*>(address);
}
439 } // namespace debug
440 } // namespace base