// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/tracked_objects.h"

#include <limits.h>

#include "base/compiler_specific.h"
#include "base/debug/leak_annotations.h"
#include "base/format_macros.h"
#include "base/memory/scoped_ptr.h"
#include "base/port.h"
#include "base/process_util.h"
#include "base/profiler/alternate_timer.h"
#include "base/strings/stringprintf.h"
#include "base/third_party/valgrind/memcheck.h"
#include "base/threading/thread_restrictions.h"

using base::TimeDelta;

namespace tracked_objects {

// Flag to compile out almost all of the task tracking code.
const bool kTrackAllTaskObjects = true;

// TODO(jar): Evaluate the perf impact of enabling this. If the perf impact is
// negligible, enable by default.
// Flag to compile out parent-child link recording.
const bool kTrackParentChildLinks = false;

// When ThreadData is first initialized, should we start in an ACTIVE state to
// record all of the startup-time tasks, or should we start up DEACTIVATED, so
// that we only record after parsing the command line flag --enable-tracking?
// Note that the flag may force either state, so this really controls only the
// period of time up until that flag is parsed. If no flag is seen, then this
// state may prevail for much or all of the process lifetime.
const ThreadData::Status kInitialStartupState =
    ThreadData::PROFILING_CHILDREN_ACTIVE;

// Control whether an alternate time source (Now() function) is supported by
// the ThreadData class. This compile-time flag should be set to true if we
// want other modules (such as a memory allocator, or a thread-specific CPU
// time clock) to be able to provide a thread-specific Now() function. Without
// this compile-time flag, the code will only support the wall-clock time.
// This flag can be flipped to efficiently disable this path (if there is a
// performance problem with its presence).
static const bool kAllowAlternateTimeSourceHandling = true;

//------------------------------------------------------------------------------
// DeathData tallies durations when a death takes place.

DeathData::DeathData() {
  Clear();
}

DeathData::DeathData(int count) {
  Clear();
  count_ = count;
}

// TODO(jar): I need to see if this macro to optimize branching is worth using.
//
// This macro has no branching, so it is surely fast, and is equivalent to:
//   if (assign_it)
//     target = source;
// We use a macro rather than a template to force this to inline.
// Related code for calculating max is discussed on the web.
#define CONDITIONAL_ASSIGN(assign_it, target, source) \
  ((target) ^= ((target) ^ (source)) & -static_cast<int32>(assign_it))
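
// For example, the branchy max update in RecordDeath() below could be written
// branch-free as:
//   CONDITIONAL_ASSIGN(run_duration_max_ < run_duration,
//                      run_duration_max_, run_duration);
// When assign_it is true, -static_cast<int32>(true) is all ones, so the mask
// keeps (target ^ source) and the xor rewrites target to source; when false,
// the mask is zero and target is left unchanged.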

void DeathData::RecordDeath(const int32 queue_duration,
                            const int32 run_duration,
                            int32 random_number) {
  // We'll just clamp at INT_MAX, but we should note this in the UI as such.
  if (count_ < INT_MAX)
    ++count_;
  queue_duration_sum_ += queue_duration;
  run_duration_sum_ += run_duration;

  if (queue_duration_max_ < queue_duration)
    queue_duration_max_ = queue_duration;
  if (run_duration_max_ < run_duration)
    run_duration_max_ = run_duration;

  // Take a uniformly distributed sample over all durations ever supplied.
  // The probability that we (instead) use this new sample is 1/count_. This
  // results in a completely uniform selection of the sample (at least when we
  // don't clamp count_... but that should be inconsequentially likely).
  // We ignore the fact that we correlated our selection of a sample to the run
  // and queue times (i.e., we used them to generate random_number).
  if (0 == (random_number % count_)) {
    queue_duration_sample_ = queue_duration;
    run_duration_sample_ = run_duration;
  }
}
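
// A note on the sampling above: it is the classic single-element reservoir
// sample. The i-th death overwrites the stored sample with probability 1/i
// (given a uniformly distributed random_number), so after N deaths the chance
// that death i is the one retained is
//   (1/i) * Product[j = i+1..N] (1 - 1/j) = 1/N,
// i.e. the retained sample is uniform over all deaths seen so far.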

int DeathData::count() const { return count_; }

int32 DeathData::run_duration_sum() const { return run_duration_sum_; }

int32 DeathData::run_duration_max() const { return run_duration_max_; }

int32 DeathData::run_duration_sample() const {
  return run_duration_sample_;
}

int32 DeathData::queue_duration_sum() const {
  return queue_duration_sum_;
}

int32 DeathData::queue_duration_max() const {
  return queue_duration_max_;
}

int32 DeathData::queue_duration_sample() const {
  return queue_duration_sample_;
}

void DeathData::ResetMax() {
  run_duration_max_ = 0;
  queue_duration_max_ = 0;
}

void DeathData::Clear() {
  count_ = 0;
  run_duration_sum_ = 0;
  run_duration_max_ = 0;
  run_duration_sample_ = 0;
  queue_duration_sum_ = 0;
  queue_duration_max_ = 0;
  queue_duration_sample_ = 0;
}

//------------------------------------------------------------------------------
DeathDataSnapshot::DeathDataSnapshot()
    : count(-1),
      run_duration_sum(-1),
      run_duration_max(-1),
      run_duration_sample(-1),
      queue_duration_sum(-1),
      queue_duration_max(-1),
      queue_duration_sample(-1) {
}

DeathDataSnapshot::DeathDataSnapshot(
    const tracked_objects::DeathData& death_data)
    : count(death_data.count()),
      run_duration_sum(death_data.run_duration_sum()),
      run_duration_max(death_data.run_duration_max()),
      run_duration_sample(death_data.run_duration_sample()),
      queue_duration_sum(death_data.queue_duration_sum()),
      queue_duration_max(death_data.queue_duration_max()),
      queue_duration_sample(death_data.queue_duration_sample()) {
}

DeathDataSnapshot::~DeathDataSnapshot() {
}

//------------------------------------------------------------------------------
BirthOnThread::BirthOnThread(const Location& location,
                             const ThreadData& current)
    : location_(location),
      birth_thread_(&current) {
}

//------------------------------------------------------------------------------
BirthOnThreadSnapshot::BirthOnThreadSnapshot() {
}

BirthOnThreadSnapshot::BirthOnThreadSnapshot(
    const tracked_objects::BirthOnThread& birth)
    : location(birth.location()),
      thread_name(birth.birth_thread()->thread_name()) {
}

BirthOnThreadSnapshot::~BirthOnThreadSnapshot() {
}

//------------------------------------------------------------------------------
Births::Births(const Location& location, const ThreadData& current)
    : BirthOnThread(location, current),
      birth_count_(1) {
}

int Births::birth_count() const { return birth_count_; }

void Births::RecordBirth() { ++birth_count_; }

void Births::ForgetBirth() { --birth_count_; }

void Births::Clear() { birth_count_ = 0; }

//------------------------------------------------------------------------------
// ThreadData maintains the central data for all births and deaths on a single
// thread.

// TODO(jar): We should pull all these static vars together, into a struct, and
// optimize layout so that we benefit from locality of reference during accesses
// to them.

// static
NowFunction* ThreadData::now_function_ = NULL;

// A TLS slot which points to the ThreadData instance for the current thread. We
// do a fake initialization here (zeroing out data), and then the real in-place
// construction happens when we call tls_index_.Initialize().
// static
base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER;

// static
int ThreadData::worker_thread_data_creation_count_ = 0;

// static
int ThreadData::cleanup_count_ = 0;

// static
int ThreadData::incarnation_counter_ = 0;

// static
ThreadData* ThreadData::all_thread_data_list_head_ = NULL;

// static
ThreadData* ThreadData::first_retired_worker_ = NULL;

// static
base::LazyInstance<base::Lock>::Leaky
    ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER;

// static
ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED;

ThreadData::ThreadData(const std::string& suggested_name)
    : next_(NULL),
      next_retired_worker_(NULL),
      worker_thread_number_(0),
      incarnation_count_for_pool_(-1) {
  DCHECK_GE(suggested_name.size(), 0u);
  thread_name_ = suggested_name;
  PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
}

ThreadData::ThreadData(int thread_number)
    : next_(NULL),
      next_retired_worker_(NULL),
      worker_thread_number_(thread_number),
      incarnation_count_for_pool_(-1) {
  CHECK_GT(thread_number, 0);
  base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number);
  PushToHeadOfList();  // Which sets real incarnation_count_for_pool_.
}

ThreadData::~ThreadData() {}

void ThreadData::PushToHeadOfList() {
  // Toss in a hint of randomness (atop the uninitialized value).
  (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_,
                                                 sizeof(random_number_));
  MSAN_UNPOISON(&random_number_, sizeof(random_number_));
  random_number_ += static_cast<int32>(this - static_cast<ThreadData*>(0));
  random_number_ ^= (Now() - TrackedTime()).InMilliseconds();

  DCHECK(!next_);
  base::AutoLock lock(*list_lock_.Pointer());
  incarnation_count_for_pool_ = incarnation_counter_;
  next_ = all_thread_data_list_head_;
  all_thread_data_list_head_ = this;
}

// static
ThreadData* ThreadData::first() {
  base::AutoLock lock(*list_lock_.Pointer());
  return all_thread_data_list_head_;
}

ThreadData* ThreadData::next() const { return next_; }
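
// Since all_thread_data_list_head_ is only ever prepended to, and ThreadData
// instances are never freed outside of tests, a reader may walk the list
// without holding list_lock_ once it has grabbed the head, e.g.:
//   for (ThreadData* data = ThreadData::first(); data; data = data->next())
//     ...  // Inspect each thread's data.
// (This is exactly what SnapshotAllExecutedTasks() does below.)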

// static
void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
  if (!Initialize())  // Always initialize if needed.
    return;
  ThreadData* current_thread_data =
      reinterpret_cast<ThreadData*>(tls_index_.Get());
  if (current_thread_data)
    return;  // Browser tests instigate this.
  current_thread_data = new ThreadData(suggested_name);
  tls_index_.Set(current_thread_data);
}

// static
ThreadData* ThreadData::Get() {
  if (!tls_index_.initialized())
    return NULL;  // For unittests only.
  ThreadData* registered = reinterpret_cast<ThreadData*>(tls_index_.Get());
  if (registered)
    return registered;

  // We must be a worker thread, since we didn't pre-register.
  ThreadData* worker_thread_data = NULL;
  int worker_thread_number = 0;
  {
    base::AutoLock lock(*list_lock_.Pointer());
    if (first_retired_worker_) {
      worker_thread_data = first_retired_worker_;
      first_retired_worker_ = first_retired_worker_->next_retired_worker_;
      worker_thread_data->next_retired_worker_ = NULL;
    } else {
      worker_thread_number = ++worker_thread_data_creation_count_;
    }
  }

  // If we can't find a previously used instance, then we have to create one.
  if (!worker_thread_data) {
    DCHECK_GT(worker_thread_number, 0);
    worker_thread_data = new ThreadData(worker_thread_number);
  }
  DCHECK_GT(worker_thread_data->worker_thread_number_, 0);

  tls_index_.Set(worker_thread_data);
  return worker_thread_data;
}

// static
void ThreadData::OnThreadTermination(void* thread_data) {
  DCHECK(thread_data);  // TLS should *never* call us with a NULL.
  // We must NOT do any allocations during this callback. There is a chance
  // that the allocator is no longer active on this thread.
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.
  reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup();
}

void ThreadData::OnThreadTerminationCleanup() {
  // The list_lock_ was created when we registered the callback, so it won't be
  // allocated here despite the lazy reference.
  base::AutoLock lock(*list_lock_.Pointer());
  if (incarnation_counter_ != incarnation_count_for_pool_)
    return;  // ThreadData was constructed in an earlier unit test.

  // Only worker threads need to be retired and reused.
  if (!worker_thread_number_) {
    delete this;
    return;
  }
  // We must NOT do any allocations during this callback.
  // Using the simple linked lists avoids all allocations.
  DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL));
  this->next_retired_worker_ = first_retired_worker_;
  first_retired_worker_ = this;
}

// static
void ThreadData::Snapshot(bool reset_max, ProcessDataSnapshot* process_data) {
  // Add births that have run to completion to |process_data|.
  // |birth_counts| tracks the total number of births recorded at each location
  // for which we have not seen a death count.
  BirthCountMap birth_counts;
  ThreadData::SnapshotAllExecutedTasks(reset_max, process_data, &birth_counts);

  // Add births that are still active -- i.e. objects that have tallied a
  // birth, but have not yet tallied a matching death, and hence must be either
  // running, queued up, or being held in limbo for future posting.
  for (BirthCountMap::const_iterator it = birth_counts.begin();
       it != birth_counts.end(); ++it) {
    if (it->second > 0) {
      process_data->tasks.push_back(
          TaskSnapshot(*it->first, DeathData(it->second), "Still_Alive"));
    }
  }
}

Births* ThreadData::TallyABirth(const Location& location) {
  BirthMap::iterator it = birth_map_.find(location);
  Births* child;
  if (it != birth_map_.end()) {
    child = it->second;
    child->RecordBirth();
  } else {
    child = new Births(location, *this);  // Leak this.
    // Lock since the map may get relocated now, and other threads sometimes
    // snapshot it (but they lock before copying it).
    base::AutoLock lock(map_lock_);
    birth_map_[location] = child;
  }

  if (kTrackParentChildLinks && status_ > PROFILING_ACTIVE &&
      !parent_stack_.empty()) {
    const Births* parent = parent_stack_.top();
    ParentChildPair pair(parent, child);
    if (parent_child_set_.find(pair) == parent_child_set_.end()) {
      // Lock since the map may get relocated now, and other threads sometimes
      // snapshot it (but they lock before copying it).
      base::AutoLock lock(map_lock_);
      parent_child_set_.insert(pair);
    }
  }

  return child;
}

void ThreadData::TallyADeath(const Births& birth,
                             int32 queue_duration,
                             int32 run_duration) {
  // Stir in some randomness, plus add a constant in case durations are zero.
  const int32 kSomePrimeNumber = 2147483647;
  random_number_ += queue_duration + run_duration + kSomePrimeNumber;
  // An address is going to have some randomness to it as well ;-).
  random_number_ ^= static_cast<int32>(&birth - reinterpret_cast<Births*>(0));

  // We don't have queue durations without the OS timer. The OS timer is
  // automatically used for task-post-timing, so the use of an alternate timer
  // implies all queue times are invalid.
  if (kAllowAlternateTimeSourceHandling && now_function_)
    queue_duration = 0;

  DeathMap::iterator it = death_map_.find(&birth);
  DeathData* death_data;
  if (it != death_map_.end()) {
    death_data = &it->second;
  } else {
    base::AutoLock lock(map_lock_);  // Lock as the map may get relocated now.
    death_data = &death_map_[&birth];
  }  // Release lock ASAP.
  death_data->RecordDeath(queue_duration, run_duration, random_number_);

  if (!kTrackParentChildLinks)
    return;
  if (!parent_stack_.empty()) {  // We might get turned off.
    DCHECK_EQ(parent_stack_.top(), &birth);
    parent_stack_.pop();
  }
}

// static
Births* ThreadData::TallyABirthIfActive(const Location& location) {
  if (!kTrackAllTaskObjects)
    return NULL;  // Not compiled in.

  if (!TrackingStatus())
    return NULL;
  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return NULL;
  return current_thread_data->TallyABirth(location);
}
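
// A sketch of the expected calling convention (the real call sites live in
// the message-loop and worker-pool code; the locals below are illustrative):
// tally a birth when the task is posted, and a matching death once it runs.
//   const Births* birth = ThreadData::TallyABirthIfActive(from_here);
//   ...                                // Task is queued, then later runs.
//   TrackedTime start = ThreadData::NowForStartOfRun(birth);
//   task.Run();
//   TrackedTime end = ThreadData::NowForEndOfRun();
//   ThreadData::TallyRunOnWorkerThreadIfTracking(birth, time_posted,
//                                                start, end);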

// static
void ThreadData::TallyRunOnNamedThreadIfTracking(
    const base::TrackingInfo& completed_task,
    const TrackedTime& start_of_run,
    const TrackedTime& end_of_run) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  const Births* birth = completed_task.birth_tally;
  if (!birth)
    return;
  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return;

  // Watch out for a race where status_ is changing, and hence one or both
  // of start_of_run or end_of_run is zero. In that case, we didn't bother to
  // get a time value since we "weren't tracking" and we were trying to be
  // efficient by not calling for a genuine time value. For simplicity, we'll
  // use a default zero duration when we can't calculate a true value.
  int32 queue_duration = 0;
  int32 run_duration = 0;
  if (!start_of_run.is_null()) {
    queue_duration = (start_of_run - completed_task.EffectiveTimePosted())
        .InMilliseconds();
    if (!end_of_run.is_null())
      run_duration = (end_of_run - start_of_run).InMilliseconds();
  }
  current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
}

// static
void ThreadData::TallyRunOnWorkerThreadIfTracking(
    const Births* birth,
    const TrackedTime& time_posted,
    const TrackedTime& start_of_run,
    const TrackedTime& end_of_run) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!birth)
    return;

  // TODO(jar): Support the option to coalesce all worker-thread activity under
  // one ThreadData instance that uses locks to protect *all* access. This will
  // reduce memory (making it provably bounded), but run incrementally slower
  // (since we'll use locks on TallyABirth and TallyADeath). The good news is
  // that the locks on TallyADeath will be *after* the worker thread has run,
  // and hence nothing will be waiting for the completion (... besides some
  // other thread that might like to run). Also, worker-thread tasks are
  // generally longer, and hence the cost of the lock may perchance be
  // amortized over the long task's lifetime.
  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return;

  int32 queue_duration = 0;
  int32 run_duration = 0;
  if (!start_of_run.is_null()) {
    queue_duration = (start_of_run - time_posted).InMilliseconds();
    if (!end_of_run.is_null())
      run_duration = (end_of_run - start_of_run).InMilliseconds();
  }
  current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
}

// static
void ThreadData::TallyRunInAScopedRegionIfTracking(
    const Births* birth,
    const TrackedTime& start_of_run,
    const TrackedTime& end_of_run) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Even if we have been DEACTIVATED, we will process any pending births so
  // that our data structures (which counted the outstanding births) remain
  // consistent.
  if (!birth)
    return;

  ThreadData* current_thread_data = Get();
  if (!current_thread_data)
    return;

  int32 queue_duration = 0;
  int32 run_duration = 0;
  if (!start_of_run.is_null() && !end_of_run.is_null())
    run_duration = (end_of_run - start_of_run).InMilliseconds();
  current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
}

// static
void ThreadData::SnapshotAllExecutedTasks(bool reset_max,
                                          ProcessDataSnapshot* process_data,
                                          BirthCountMap* birth_counts) {
  if (!kTrackAllTaskObjects)
    return;  // Not compiled in.

  // Get an unchanging copy of a ThreadData list.
  ThreadData* my_list = ThreadData::first();

  // Gather data serially.
  // This hackish approach *can* get some slightly corrupt tallies, as we are
  // grabbing values without the protection of a lock, but it has the advantage
  // of working even with threads that don't have message loops. If a user
  // sees any strangeness, they can always just run their stats gathering a
  // second time, and ignore the questionable data.
  for (ThreadData* thread_data = my_list;
       thread_data;
       thread_data = thread_data->next()) {
    thread_data->SnapshotExecutedTasks(reset_max, process_data, birth_counts);
  }
}

void ThreadData::SnapshotExecutedTasks(bool reset_max,
                                       ProcessDataSnapshot* process_data,
                                       BirthCountMap* birth_counts) {
  // Get a copy of the data, so that the data will not change during the
  // iterations and processing.
  ThreadData::BirthMap birth_map;
  ThreadData::DeathMap death_map;
  ThreadData::ParentChildSet parent_child_set;
  SnapshotMaps(reset_max, &birth_map, &death_map, &parent_child_set);

  for (ThreadData::DeathMap::const_iterator it = death_map.begin();
       it != death_map.end(); ++it) {
    process_data->tasks.push_back(
        TaskSnapshot(*it->first, it->second, thread_name()));
    (*birth_counts)[it->first] -= it->first->birth_count();
  }

  for (ThreadData::BirthMap::const_iterator it = birth_map.begin();
       it != birth_map.end(); ++it) {
    (*birth_counts)[it->second] += it->second->birth_count();
  }

  if (!kTrackParentChildLinks)
    return;

  for (ThreadData::ParentChildSet::const_iterator it =
           parent_child_set.begin();
       it != parent_child_set.end(); ++it) {
    process_data->descendants.push_back(ParentChildPairSnapshot(*it));
  }
}

// This may be called from another thread.
void ThreadData::SnapshotMaps(bool reset_max,
                              BirthMap* birth_map,
                              DeathMap* death_map,
                              ParentChildSet* parent_child_set) {
  base::AutoLock lock(map_lock_);
  for (BirthMap::const_iterator it = birth_map_.begin();
       it != birth_map_.end(); ++it)
    (*birth_map)[it->first] = it->second;
  for (DeathMap::iterator it = death_map_.begin();
       it != death_map_.end(); ++it) {
    (*death_map)[it->first] = it->second;
    if (reset_max)
      it->second.ResetMax();
  }

  if (!kTrackParentChildLinks)
    return;

  for (ParentChildSet::iterator it = parent_child_set_.begin();
       it != parent_child_set_.end(); ++it)
    parent_child_set->insert(*it);
}

// static
void ThreadData::ResetAllThreadData() {
  ThreadData* my_list = first();

  for (ThreadData* thread_data = my_list;
       thread_data;
       thread_data = thread_data->next())
    thread_data->Reset();
}

void ThreadData::Reset() {
  base::AutoLock lock(map_lock_);
  for (DeathMap::iterator it = death_map_.begin();
       it != death_map_.end(); ++it)
    it->second.Clear();
  for (BirthMap::iterator it = birth_map_.begin();
       it != birth_map_.end(); ++it)
    it->second->Clear();
}

static void OptionallyInitializeAlternateTimer() {
  NowFunction* alternate_time_source = GetAlternateTimeSource();
  if (alternate_time_source)
    ThreadData::SetAlternateTimeSource(alternate_time_source);
}

bool ThreadData::Initialize() {
  if (!kTrackAllTaskObjects)
    return false;  // Not compiled in.
  if (status_ >= DEACTIVATED)
    return true;  // Someone else did the initialization.
  // Due to racy lazy initialization in tests, we'll need to recheck status_
  // after we acquire the lock.

  // Ensure that we don't double initialize TLS. We are called when single
  // threaded in the product, but some tests may be racy and lazy about our
  // initialization.
  base::AutoLock lock(*list_lock_.Pointer());
  if (status_ >= DEACTIVATED)
    return true;  // Someone raced in here and beat us.

  // Put an alternate timer in place if the environment calls for it, such as
  // for tracking TCMalloc allocations. This insertion is idempotent, so we
  // don't mind if there is a race, and we'd prefer not to be in a lock while
  // doing this work.
  if (kAllowAlternateTimeSourceHandling)
    OptionallyInitializeAlternateTimer();

  // Perform the "real" TLS initialization now, and leave it intact through
  // process termination.
  if (!tls_index_.initialized()) {  // Testing may have initialized this.
    DCHECK_EQ(status_, UNINITIALIZED);
    tls_index_.Initialize(&ThreadData::OnThreadTermination);
    if (!tls_index_.initialized())
      return false;
  } else {
    // TLS was initialized for us earlier.
    DCHECK_EQ(status_, DORMANT_DURING_TESTS);
  }

  // The incarnation counter is only significant to testing, as it otherwise
  // will never again change in this process.
  ++incarnation_counter_;

  // The lock is not critical for setting status_, but it doesn't hurt. It also
  // ensures that if we have a racy initialization, we'll bail as soon as we
  // get the lock earlier in this method.
  status_ = kInitialStartupState;
  if (!kTrackParentChildLinks &&
      kInitialStartupState == PROFILING_CHILDREN_ACTIVE)
    status_ = PROFILING_ACTIVE;
  DCHECK(status_ != UNINITIALIZED);
  return true;
}

// static
bool ThreadData::InitializeAndSetTrackingStatus(Status status) {
  DCHECK_GE(status, DEACTIVATED);
  DCHECK_LE(status, PROFILING_CHILDREN_ACTIVE);

  if (!Initialize())  // No-op if already initialized.
    return false;  // Not compiled in.

  if (!kTrackParentChildLinks && status > DEACTIVATED)
    status = PROFILING_ACTIVE;
  status_ = status;
  return true;
}

// static
ThreadData::Status ThreadData::status() {
  return status_;
}

// static
bool ThreadData::TrackingStatus() {
  return status_ > DEACTIVATED;
}

// static
bool ThreadData::TrackingParentChildStatus() {
  return status_ >= PROFILING_CHILDREN_ACTIVE;
}

// static
TrackedTime ThreadData::NowForStartOfRun(const Births* parent) {
  if (kTrackParentChildLinks && parent && status_ > PROFILING_ACTIVE) {
    ThreadData* current_thread_data = Get();
    if (current_thread_data)
      current_thread_data->parent_stack_.push(parent);
  }
  return Now();
}

// static
TrackedTime ThreadData::NowForEndOfRun() {
  return Now();
}

// static
void ThreadData::SetAlternateTimeSource(NowFunction* now_function) {
  DCHECK(now_function);
  if (kAllowAlternateTimeSourceHandling)
    now_function_ = now_function;
}

// static
TrackedTime ThreadData::Now() {
  if (kAllowAlternateTimeSourceHandling && now_function_)
    return TrackedTime::FromMilliseconds((*now_function_)());
  if (kTrackAllTaskObjects && TrackingStatus())
    return TrackedTime::Now();
  return TrackedTime();  // Super fast when disabled, or not compiled.
}
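
// A sketch of how an embedder could install an alternate per-thread clock
// (e.g. an allocation tally rather than wall-clock time). MyAllocatorNowMs is
// hypothetical; it just has to match the NowFunction type declared in
// base/profiler/alternate_timer.h (a millisecond-granularity counter):
//   unsigned int MyAllocatorNowMs();  // Hypothetical per-thread counter.
//   ...
//   ThreadData::SetAlternateTimeSource(&MyAllocatorNowMs);
// Once installed, all "durations" reported by this file are deltas of that
// counter, and queue durations are zeroed (see TallyADeath() above).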

// static
void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) {
  base::AutoLock lock(*list_lock_.Pointer());
  if (worker_thread_data_creation_count_ == 0)
    return;  // We haven't really run much, and couldn't have leaked.
  // Verify that we've at least shut down/cleaned up the major named threads.
  // The caller should tell us how many thread shutdowns should have taken
  // place by now.
  return;  // TODO(jar): until this is working on XP, don't run the real test.
  CHECK_GT(cleanup_count_, major_threads_shutdown_count);
}

// static
void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
  // This is only called from test code, where we need to clean up so that
  // additional tests can be run.
  // We must be single threaded... but be careful anyway.
  if (!InitializeAndSetTrackingStatus(DEACTIVATED))
    return;
  ThreadData* thread_data_list;
  {
    base::AutoLock lock(*list_lock_.Pointer());
    thread_data_list = all_thread_data_list_head_;
    all_thread_data_list_head_ = NULL;
    ++incarnation_counter_;
    // To be clean, break apart the retired worker list (though we leak them).
    while (first_retired_worker_) {
      ThreadData* worker = first_retired_worker_;
      CHECK_GT(worker->worker_thread_number_, 0);
      first_retired_worker_ = worker->next_retired_worker_;
      worker->next_retired_worker_ = NULL;
    }
  }

  // Put most global statics back in pristine shape.
  worker_thread_data_creation_count_ = 0;
  cleanup_count_ = 0;
  tls_index_.Set(NULL);
  status_ = DORMANT_DURING_TESTS;  // Almost UNINITIALIZED.

  // To avoid any chance of racing in unit tests, which is the only place we
  // call this function, we may sometimes leak all the data structures we
  // recovered, as they may still be in use on threads from prior tests!
  if (leak) {
    ThreadData* thread_data = thread_data_list;
    while (thread_data) {
      ANNOTATE_LEAKING_OBJECT_PTR(thread_data);
      thread_data = thread_data->next();
    }
    return;
  }

  // When we want to clean up (on a single thread), here is what we do.

  // Do actual recursive delete in all ThreadData instances.
  while (thread_data_list) {
    ThreadData* next_thread_data = thread_data_list;
    thread_data_list = thread_data_list->next();

    for (BirthMap::iterator it = next_thread_data->birth_map_.begin();
         next_thread_data->birth_map_.end() != it; ++it)
      delete it->second;  // Delete the Birth Records.
    delete next_thread_data;  // Includes all Death Records.
  }
}

//------------------------------------------------------------------------------
TaskSnapshot::TaskSnapshot() {
}

TaskSnapshot::TaskSnapshot(const BirthOnThread& birth,
                           const DeathData& death_data,
                           const std::string& death_thread_name)
    : birth(birth),
      death_data(death_data),
      death_thread_name(death_thread_name) {
}

TaskSnapshot::~TaskSnapshot() {
}

//------------------------------------------------------------------------------
// ParentChildPairSnapshot

ParentChildPairSnapshot::ParentChildPairSnapshot() {
}

ParentChildPairSnapshot::ParentChildPairSnapshot(
    const ThreadData::ParentChildPair& parent_child)
    : parent(*parent_child.first),
      child(*parent_child.second) {
}

ParentChildPairSnapshot::~ParentChildPairSnapshot() {
}

//------------------------------------------------------------------------------
// ProcessDataSnapshot

ProcessDataSnapshot::ProcessDataSnapshot()
#if !defined(OS_NACL)
    : process_id(base::GetCurrentProcId()) {
#else
    : process_id(0) {
#endif
}

ProcessDataSnapshot::~ProcessDataSnapshot() {
}

}  // namespace tracked_objects