// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_dump_manager.h"

#include <algorithm>
#include <limits>

#include "base/atomic_sequence_num.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_session_state.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"

#if !defined(OS_NACL)
#include "base/trace_event/process_memory_totals_dump_provider.h"
#endif

#if defined(OS_LINUX) || defined(OS_ANDROID)
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/process_memory_maps_dump_provider.h"
#endif

#if defined(OS_ANDROID)
#include "base/trace_event/java_heap_dump_provider_android.h"
#endif

#if defined(OS_WIN)
#include "base/trace_event/winheap_dump_provider_win.h"
#endif

namespace base {
namespace trace_event {

namespace {

const int kTraceEventNumArgs = 1;
const char* kTraceEventArgNames[] = {"dumps"};
const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};

StaticAtomicSequenceNumber g_next_guid;
uint32 g_periodic_dumps_count = 0;
uint32 g_heavy_dumps_rate = 0;
MemoryDumpManager* g_instance_for_testing = nullptr;
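
// Cadence sketch (illustrative): when g_heavy_dumps_rate is non-zero, the
// function below emits a HIGH-detail dump on the tick where
// g_periodic_dumps_count has wrapped to zero and LOW-detail dumps on all
// other ticks; when the rate is zero, every periodic dump is LOW-detail.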
void RequestPeriodicGlobalDump() {
  MemoryDumpArgs::LevelOfDetail dump_level_of_detail;
  if (g_heavy_dumps_rate == 0) {
    dump_level_of_detail = MemoryDumpArgs::LevelOfDetail::LOW;
  } else {
    dump_level_of_detail = g_periodic_dumps_count == 0
                               ? MemoryDumpArgs::LevelOfDetail::HIGH
                               : MemoryDumpArgs::LevelOfDetail::LOW;

    if (++g_periodic_dumps_count == g_heavy_dumps_rate - 1)
      g_periodic_dumps_count = 0;
  }

  MemoryDumpArgs dump_args = {dump_level_of_detail};
  MemoryDumpManager::GetInstance()->RequestGlobalDump(
      MemoryDumpType::PERIODIC_INTERVAL, dump_args);
}

}  // namespace

// static
const char* const MemoryDumpManager::kTraceCategory =
    TRACE_DISABLED_BY_DEFAULT("memory-infra");

// static
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;

// static
const uint64 MemoryDumpManager::kInvalidTracingProcessId = 0;

// static
const char* const MemoryDumpManager::kSystemAllocatorPoolName =
#if defined(OS_LINUX) || defined(OS_ANDROID)
    MallocDumpProvider::kAllocatedObjects;
#elif defined(OS_WIN)
    WinHeapDumpProvider::kAllocatedObjects;
#else
    nullptr;
#endif

// static
MemoryDumpManager* MemoryDumpManager::GetInstance() {
  if (g_instance_for_testing)
    return g_instance_for_testing;

  return Singleton<MemoryDumpManager,
                   LeakySingletonTraits<MemoryDumpManager>>::get();
}

// static
void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
  if (instance)
    instance->skip_core_dumpers_auto_registration_for_testing_ = true;
  g_instance_for_testing = instance;
}

MemoryDumpManager::MemoryDumpManager()
    : delegate_(nullptr),
      is_coordinator_(false),
      memory_tracing_enabled_(0),
      tracing_process_id_(kInvalidTracingProcessId),
      skip_core_dumpers_auto_registration_for_testing_(false),
      disable_periodic_dumps_for_testing_(false) {
  g_next_guid.GetNext();  // Make sure that first guid is not zero.
}

MemoryDumpManager::~MemoryDumpManager() {
  TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
}

void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
                                   bool is_coordinator) {
  {
    AutoLock lock(lock_);
    DCHECK(delegate);
    DCHECK(!delegate_);
    delegate_ = delegate;
    is_coordinator_ = is_coordinator;
  }

  // Enable the core dump providers.
  if (!skip_core_dumpers_auto_registration_for_testing_) {
#if !defined(OS_NACL)
    RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance());
#endif

#if defined(OS_LINUX) || defined(OS_ANDROID)
    RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance());
    RegisterDumpProvider(MallocDumpProvider::GetInstance());
#endif

#if defined(OS_ANDROID)
    RegisterDumpProvider(JavaHeapDumpProvider::GetInstance());
#endif

#if defined(OS_WIN)
    RegisterDumpProvider(WinHeapDumpProvider::GetInstance());
#endif
  }  // !skip_core_dumpers_auto_registration_for_testing_

  // If tracing was enabled before initializing MemoryDumpManager, we missed
  // the OnTraceLogEnabled() event. Synthesize it so we can late-join the
  // party.
  bool is_tracing_already_enabled = TraceLog::GetInstance()->IsEnabled();
  TRACE_EVENT0(kTraceCategory, "init");  // Add to trace-viewer category list.
  TraceLog::GetInstance()->AddEnabledStateObserver(this);
  if (is_tracing_already_enabled)
    OnTraceLogEnabled();
}

void MemoryDumpManager::RegisterDumpProvider(
    MemoryDumpProvider* mdp,
    const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
  MemoryDumpProviderInfo mdp_info(mdp, task_runner);
  AutoLock lock(lock_);
  auto iter_new = dump_providers_.insert(mdp_info);

  // If there was a previous entry, replace it with the new one. This is to
  // deal with the case where a dump provider unregisters itself and then
  // re-registers before a memory dump happens, so its entry was still in the
  // collection but flagged |unregistered|.
  if (!iter_new.second) {
    dump_providers_.erase(iter_new.first);
    dump_providers_.insert(mdp_info);
  }
}

void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider* mdp) {
  RegisterDumpProvider(mdp, nullptr);
}
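
// Usage sketch (illustrative; FooDumpProvider and |foo_task_runner| are
// hypothetical, not part of this file): a provider whose state lives on a
// specific thread registers together with that thread's task runner, while a
// thread-neutral provider may use the single-argument overload above:
//
//   MemoryDumpManager::GetInstance()->RegisterDumpProvider(
//       FooDumpProvider::GetInstance(), foo_task_runner);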

void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
  AutoLock lock(lock_);

  auto mdp_iter = dump_providers_.begin();
  for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
    if (mdp_iter->dump_provider == mdp)
      break;
  }

  if (mdp_iter == dump_providers_.end())
    return;

  // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe
  // only if the MDP has specified a thread affinity (via task_runner()) AND
  // the unregistration happens on the same thread (so the MDP cannot
  // unregister and OnMemoryDump() at the same time).
  // Otherwise, it is not possible to guarantee that its unregistration is
  // race-free. If you hit this DCHECK, your MDP has a bug.
  DCHECK_IMPLIES(
      subtle::NoBarrier_Load(&memory_tracing_enabled_),
      mdp_iter->task_runner && mdp_iter->task_runner->BelongsToCurrentThread())
      << "The MemoryDumpProvider attempted to unregister itself in a racy "
      << "way. Please file a crbug.";

  mdp_iter->unregistered = true;
}
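
// Example (illustrative, continuing the sketch above): a provider registered
// with |foo_task_runner| must perform its unregistration on that same thread
// while tracing is enabled, e.g. by posting UnregisterDumpProvider() to
// |foo_task_runner|, so unregistration can never race its own OnMemoryDump().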

void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type,
                                          const MemoryDumpArgs& dump_args,
                                          const MemoryDumpCallback& callback) {
  // Bail out immediately if tracing is not enabled at all.
  if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) {
    if (!callback.is_null())
      callback.Run(0u /* guid */, false /* success */);
    return;
  }

  const uint64 guid =
      TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext());

  // Technically there is no need to grab the |lock_| here as the delegate is
  // long-lived and can only be set by Initialize(), which is locked and
  // necessarily happens before memory_tracing_enabled_ == true.
  // Not taking the |lock_|, though, is likely to make TSan barf and, at this
  // point (memory-infra is enabled) we're not in the fast-path anymore.
  MemoryDumpManagerDelegate* delegate;
  {
    AutoLock lock(lock_);
    delegate = delegate_;
  }

  // The delegate will coordinate the IPC broadcast and at some point invoke
  // CreateProcessDump() to get a dump for the current process.
  MemoryDumpRequestArgs args = {guid, dump_type, dump_args};
  delegate->RequestGlobalMemoryDump(args, callback);
}

void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type,
                                          const MemoryDumpArgs& dump_args) {
  RequestGlobalDump(dump_type, dump_args, MemoryDumpCallback());
}
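
// Usage sketch (illustrative): besides the periodic timer, a one-off detailed
// dump can be requested with the EXPLICITLY_TRIGGERED dump type declared in
// memory_dump_request_args.h:
//
//   MemoryDumpArgs args = {MemoryDumpArgs::LevelOfDetail::HIGH};
//   MemoryDumpManager::GetInstance()->RequestGlobalDump(
//       MemoryDumpType::EXPLICITLY_TRIGGERED, args);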

void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
                                          const MemoryDumpCallback& callback) {
  scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
  {
    AutoLock lock(lock_);
    pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
        args, dump_providers_.begin(), session_state_, callback));
  }

  // Start the thread hop. |dump_providers_| are kept sorted by thread, so
  // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread
  // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()).
  ContinueAsyncProcessDump(pmd_async_state.Pass());
}

// At most one ContinueAsyncProcessDump() can be active at any time for a given
// PMD, regardless of status of the |lock_|. |lock_| is used here purely to
// ensure consistency w.r.t. (un)registrations of |dump_providers_|.
// The linearization of dump providers' OnMemoryDump invocations is achieved by
// means of subsequent PostTask(s).
//
// 1) Prologue:
//   - Check if the dump provider is disabled, if so skip the dump.
//   - Check if we are on the right thread. If not hop and continue there.
// 2) Invoke the dump provider's OnMemoryDump() (unless skipped).
// 3) Epilogue:
//   - Unregister the dump provider if it failed too many times consecutively.
//   - Advance the |next_dump_provider| iterator to the next dump provider.
//   - If this was the last hop, create a trace event, add it to the trace
//     and finalize (invoke callback).
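//
// Walk-through (illustrative, assuming the set happens to order thread T1's
// entries first): with providers {A, B} bound to T1 and {C} bound to T2, a
// dump running on T1 invokes OnMemoryDump() on A, then B, then hops once to
// T2 via PostTask, dumps C and, C being the last entry, finalizes.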
void MemoryDumpManager::ContinueAsyncProcessDump(
    scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
  // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
  // in the PostTask below don't end up registering their own dump providers
  // (for discounting trace memory overhead) while holding the |lock_|.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  // DO NOT put any LOG() statement in the locked sections, as in some contexts
  // (GPU process) LOG() ends up performing PostTask/IPCs.
  MemoryDumpProvider* mdp;
  bool skip_dump = false;
  {
    AutoLock lock(lock_);

    auto mdp_info = pmd_async_state->next_dump_provider;
    mdp = mdp_info->dump_provider;
    if (mdp_info->disabled || mdp_info->unregistered) {
      skip_dump = true;
    } else if (mdp_info->task_runner &&
               !mdp_info->task_runner->BelongsToCurrentThread()) {
      // It's time to hop onto another thread.

      // Copy the callback + arguments just for the unlikely case in which
      // PostTask fails. In such case the Bind helper will destroy the
      // pmd_async_state and we must keep a copy of the fields to notify the
      // abort.
      MemoryDumpCallback callback = pmd_async_state->callback;
      scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
          pmd_async_state->task_runner;
      const uint64 dump_guid = pmd_async_state->req_args.dump_guid;

      const bool did_post_task = mdp_info->task_runner->PostTask(
          FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
                          Unretained(this), Passed(pmd_async_state.Pass())));
      if (did_post_task)
        return;

      // The thread is gone. At this point the best thing we can do is to
      // disable the dump provider and abort this dump.
      mdp_info->disabled = true;
      return AbortDumpLocked(callback, callback_task_runner, dump_guid);
    }
  }  // AutoLock(lock_)

  // Invoke the dump provider without holding the |lock_|.
  bool finalize = false;
  bool dump_successful = false;

  if (!skip_dump) {
    dump_successful = mdp->OnMemoryDump(pmd_async_state->req_args.dump_args,
                                        &pmd_async_state->process_memory_dump);
  }

  {
    AutoLock lock(lock_);
    auto mdp_info = pmd_async_state->next_dump_provider;
    if (dump_successful) {
      mdp_info->consecutive_failures = 0;
    } else if (!skip_dump) {
      ++mdp_info->consecutive_failures;
      if (mdp_info->consecutive_failures >= kMaxConsecutiveFailuresCount) {
        mdp_info->disabled = true;
      }
    }
    ++pmd_async_state->next_dump_provider;
    finalize = pmd_async_state->next_dump_provider == dump_providers_.end();

    if (mdp_info->unregistered)
      dump_providers_.erase(mdp_info);
  }

  if (!skip_dump && !dump_successful) {
    LOG(ERROR) << "A memory dumper failed, possibly due to sandboxing "
                  "(crbug.com/461788). Disabling dumper for current process. "
                  "Try restarting chrome with the --no-sandbox switch.";
  }

  if (finalize)
    return FinalizeDumpAndAddToTrace(pmd_async_state.Pass());

  ContinueAsyncProcessDump(pmd_async_state.Pass());
}

void MemoryDumpManager::FinalizeDumpAndAddToTrace(
    scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
  if (!pmd_async_state->task_runner->BelongsToCurrentThread()) {
    scoped_refptr<SingleThreadTaskRunner> task_runner =
        pmd_async_state->task_runner;
    task_runner->PostTask(FROM_HERE,
                          Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace,
                               Passed(pmd_async_state.Pass())));
    return;
  }

  scoped_refptr<ConvertableToTraceFormat> event_value(new TracedValue());
  pmd_async_state->process_memory_dump.AsValueInto(
      static_cast<TracedValue*>(event_value.get()));
  const char* const event_name =
      MemoryDumpTypeToString(pmd_async_state->req_args.dump_type);

  TRACE_EVENT_API_ADD_TRACE_EVENT(
      TRACE_EVENT_PHASE_MEMORY_DUMP,
      TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name,
      pmd_async_state->req_args.dump_guid, kTraceEventNumArgs,
      kTraceEventArgNames, kTraceEventArgTypes, nullptr /* arg_values */,
      &event_value, TRACE_EVENT_FLAG_HAS_ID);

  if (!pmd_async_state->callback.is_null()) {
    pmd_async_state->callback.Run(pmd_async_state->req_args.dump_guid,
                                  true /* success */);
    pmd_async_state->callback.Reset();
  }
}

void MemoryDumpManager::AbortDumpLocked(
    MemoryDumpCallback callback,
    scoped_refptr<SingleThreadTaskRunner> task_runner,
    uint64 dump_guid) {
  if (callback.is_null())
    return;  // There is nothing to NACK.

  // Post the callback even if we are already on the right thread to avoid
  // invoking the callback while holding the lock_.
  task_runner->PostTask(FROM_HERE,
                        Bind(callback, dump_guid, false /* success */));
}

void MemoryDumpManager::OnTraceLogEnabled() {
  bool enabled;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
  if (!enabled)
    return;

  // Initialize the TraceLog for the current thread. This is to avoid that the
  // TraceLog memory dump provider is registered lazily in the PostTask() below
  // while the |lock_| is taken.
  TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();

  AutoLock lock(lock_);

  DCHECK(delegate_);  // At this point we must have a delegate.

  session_state_ = new MemoryDumpSessionState();
  for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) {
    it->disabled = false;
    it->consecutive_failures = 0;
  }

  subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);

  // TODO(primiano): This is a temporary hack to disable periodic memory dumps
  // when running memory benchmarks until telemetry uses TraceConfig to
  // enable/disable periodic dumps. See crbug.com/529184 .
  // The same mechanism should be used to disable periodic dumps in tests.
  if (!is_coordinator_ ||
      CommandLine::ForCurrentProcess()->HasSwitch(
          "enable-memory-benchmarking") ||
      disable_periodic_dumps_for_testing_) {
    return;
  }

  // Enable periodic dumps. At the moment the periodic support is limited to at
  // most one low-detail periodic dump and at most one high-detail periodic
  // dump. If both are specified the high-detail period must be an integer
  // multiple of the low-detail one.
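  //
  // For example (illustrative): a config requesting LOW dumps every 250 ms
  // and HIGH dumps every 1000 ms yields min_timer_period_ms == 250 and
  // g_heavy_dumps_rate == 4 (1000 / 250), i.e. a single timer firing every
  // 250 ms with roughly every fourth dump requested at HIGH detail.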
  g_periodic_dumps_count = 0;
  const TraceConfig trace_config =
      TraceLog::GetInstance()->GetCurrentTraceConfig();
  const TraceConfig::MemoryDumpConfig& config_list =
      trace_config.memory_dump_config();
  if (config_list.empty())
    return;

  uint32 min_timer_period_ms = std::numeric_limits<uint32>::max();
  uint32 heavy_dump_period_ms = 0;
  DCHECK_LE(config_list.size(), 2u);
  for (const TraceConfig::MemoryDumpTriggerConfig& config : config_list) {
    DCHECK(config.periodic_interval_ms);
    if (config.level_of_detail == MemoryDumpArgs::LevelOfDetail::HIGH)
      heavy_dump_period_ms = config.periodic_interval_ms;
    min_timer_period_ms =
        std::min(min_timer_period_ms, config.periodic_interval_ms);
  }
  DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
  g_heavy_dumps_rate = heavy_dump_period_ms / min_timer_period_ms;

  periodic_dump_timer_.Start(FROM_HERE,
                             TimeDelta::FromMilliseconds(min_timer_period_ms),
                             base::Bind(&RequestPeriodicGlobalDump));
}

void MemoryDumpManager::OnTraceLogDisabled() {
  AutoLock lock(lock_);
  periodic_dump_timer_.Stop();
  subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
  session_state_ = nullptr;
}

uint64 MemoryDumpManager::GetTracingProcessId() const {
  return delegate_->GetTracingProcessId();
}

MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
    MemoryDumpProvider* dump_provider,
    const scoped_refptr<SingleThreadTaskRunner>& task_runner)
    : dump_provider(dump_provider),
      task_runner(task_runner),
      consecutive_failures(0),
      disabled(false),
      unregistered(false) {}

MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {
}

bool MemoryDumpManager::MemoryDumpProviderInfo::operator<(
    const MemoryDumpProviderInfo& other) const {
  if (task_runner == other.task_runner)
    return dump_provider < other.dump_provider;
  return task_runner < other.task_runner;
}
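
// Ordering note (illustrative): because entries compare by |task_runner|
// first, providers sharing a task runner sit adjacent in |dump_providers_|,
// which is what lets ContinueAsyncProcessDump() hop to each thread at most
// once, e.g. {T1, A}, {T1, B}, {T2, C}.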

MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
    MemoryDumpRequestArgs req_args,
    MemoryDumpProviderInfoSet::iterator next_dump_provider,
    const scoped_refptr<MemoryDumpSessionState>& session_state,
    MemoryDumpCallback callback)
    : process_memory_dump(session_state),
      req_args(req_args),
      next_dump_provider(next_dump_provider),
      callback(callback),
      task_runner(MessageLoop::current()->task_runner()) {
}

MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
}

}  // namespace trace_event
}  // namespace base