// Origin note (extraction residue, preserved as a comment):
// Commit: "Add partial pre-read functionality to browser startup (Windows)."
// Repo:   chromium-blink-merge.git / base / tracked_objects.cc
// Blob:   e7d7bd165b6f77c517bd0ecf4d8460f4e43d1b2b
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/tracked_objects.h"
7 #include <math.h>
9 #include "base/format_macros.h"
10 #include "base/message_loop.h"
11 #include "base/stringprintf.h"
12 #include "base/third_party/valgrind/memcheck.h"
13 #include "base/threading/thread_restrictions.h"
14 #include "build/build_config.h"
15 #include "base/port.h"
17 using base::TimeDelta;
19 namespace tracked_objects {
21 namespace {
23 // Flag to compile out almost all of the task tracking code.
24 const bool kTrackAllTaskObjects = true;
26 // Flag to compile out parent-child link recording.
27 const bool kTrackParentChildLinks = false;
29 // When ThreadData is first initialized, should we start in an ACTIVE state to
30 // record all of the startup-time tasks, or should we start up DEACTIVATED, so
31 // that we only record after parsing the command line flag --enable-tracking.
32 // Note that the flag may force either state, so this really controls only the
33 // period of time up until that flag is parsed. If there is no flag seen, then
34 // this state may prevail for much or all of the process lifetime.
35 const ThreadData::Status kInitialStartupState =
36 ThreadData::PROFILING_CHILDREN_ACTIVE;
38 } // namespace
40 //------------------------------------------------------------------------------
41 // DeathData tallies durations when a death takes place.
43 DeathData::DeathData() {
44 Clear();
47 DeathData::DeathData(int count) {
48 Clear();
49 count_ = count;
52 // TODO(jar): I need to see if this macro to optimize branching is worth using.
54 // This macro has no branching, so it is surely fast, and is equivalent to:
55 // if (assign_it)
56 // target = source;
57 // We use a macro rather than a template to force this to inline.
58 // Related code for calculating max is discussed on the web.
59 #define CONDITIONAL_ASSIGN(assign_it, target, source) \
60 ((target) ^= ((target) ^ (source)) & -static_cast<DurationInt>(assign_it))
62 void DeathData::RecordDeath(const DurationInt queue_duration,
63 const DurationInt run_duration,
64 int32 random_number) {
65 ++count_;
66 queue_duration_sum_ += queue_duration;
67 run_duration_sum_ += run_duration;
69 if (queue_duration_max_ < queue_duration)
70 queue_duration_max_ = queue_duration;
71 if (run_duration_max_ < run_duration)
72 run_duration_max_ = run_duration;
74 // Take a uniformly distributed sample over all durations ever supplied.
75 // The probability that we (instead) use this new sample is 1/count_. This
76 // results in a completely uniform selection of the sample.
77 // We ignore the fact that we correlated our selection of a sample of run
78 // and queue times.
79 if (0 == (random_number % count_)) {
80 queue_duration_sample_ = queue_duration;
81 run_duration_sample_ = run_duration;
85 int DeathData::count() const { return count_; }
87 DurationInt DeathData::run_duration_sum() const { return run_duration_sum_; }
89 DurationInt DeathData::run_duration_max() const { return run_duration_max_; }
91 DurationInt DeathData::run_duration_sample() const {
92 return run_duration_sample_;
95 DurationInt DeathData::queue_duration_sum() const {
96 return queue_duration_sum_;
99 DurationInt DeathData::queue_duration_max() const {
100 return queue_duration_max_;
103 DurationInt DeathData::queue_duration_sample() const {
104 return queue_duration_sample_;
108 base::DictionaryValue* DeathData::ToValue() const {
109 base::DictionaryValue* dictionary = new base::DictionaryValue;
110 dictionary->Set("count", base::Value::CreateIntegerValue(count_));
111 dictionary->Set("run_ms",
112 base::Value::CreateIntegerValue(run_duration_sum()));
113 dictionary->Set("run_ms_max",
114 base::Value::CreateIntegerValue(run_duration_max()));
115 dictionary->Set("run_ms_sample",
116 base::Value::CreateIntegerValue(run_duration_sample()));
117 dictionary->Set("queue_ms",
118 base::Value::CreateIntegerValue(queue_duration_sum()));
119 dictionary->Set("queue_ms_max",
120 base::Value::CreateIntegerValue(queue_duration_max()));
121 dictionary->Set("queue_ms_sample",
122 base::Value::CreateIntegerValue(queue_duration_sample()));
123 return dictionary;
126 void DeathData::ResetMax() {
127 run_duration_max_ = 0;
128 queue_duration_max_ = 0;
131 void DeathData::Clear() {
132 count_ = 0;
133 run_duration_sum_ = 0;
134 run_duration_max_ = 0;
135 run_duration_sample_ = 0;
136 queue_duration_sum_ = 0;
137 queue_duration_max_ = 0;
138 queue_duration_sample_ = 0;
141 //------------------------------------------------------------------------------
142 BirthOnThread::BirthOnThread(const Location& location,
143 const ThreadData& current)
144 : location_(location),
145 birth_thread_(&current) {
148 const Location BirthOnThread::location() const { return location_; }
149 const ThreadData* BirthOnThread::birth_thread() const { return birth_thread_; }
151 void BirthOnThread::ToValue(const std::string& prefix,
152 base::DictionaryValue* dictionary) const {
153 dictionary->Set(prefix + "_location", location_.ToValue());
154 dictionary->Set(prefix + "_thread",
155 base::Value::CreateStringValue(birth_thread_->thread_name()));
158 //------------------------------------------------------------------------------
159 Births::Births(const Location& location, const ThreadData& current)
160 : BirthOnThread(location, current),
161 birth_count_(1) { }
163 int Births::birth_count() const { return birth_count_; }
165 void Births::RecordBirth() { ++birth_count_; }
167 void Births::ForgetBirth() { --birth_count_; }
169 void Births::Clear() { birth_count_ = 0; }
171 //------------------------------------------------------------------------------
172 // ThreadData maintains the central data for all births and deaths on a single
173 // thread.
175 // TODO(jar): We should pull all these static vars together, into a struct, and
176 // optimize layout so that we benefit from locality of reference during accesses
177 // to them.
179 // A TLS slot which points to the ThreadData instance for the current thread. We
180 // do a fake initialization here (zeroing out data), and then the real in-place
181 // construction happens when we call tls_index_.Initialize().
182 // static
183 base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER;
185 // static
186 int ThreadData::worker_thread_data_creation_count_ = 0;
188 // static
189 int ThreadData::cleanup_count_ = 0;
191 // static
192 int ThreadData::incarnation_counter_ = 0;
194 // static
195 ThreadData* ThreadData::all_thread_data_list_head_ = NULL;
197 // static
198 ThreadData* ThreadData::first_retired_worker_ = NULL;
200 // static
201 base::LazyInstance<base::Lock>::Leaky
202 ThreadData::list_lock_ = LAZY_INSTANCE_INITIALIZER;
204 // static
205 ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED;
207 ThreadData::ThreadData(const std::string& suggested_name)
208 : next_(NULL),
209 next_retired_worker_(NULL),
210 worker_thread_number_(0),
211 incarnation_count_for_pool_(-1) {
212 DCHECK_GE(suggested_name.size(), 0u);
213 thread_name_ = suggested_name;
214 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_.
217 ThreadData::ThreadData(int thread_number)
218 : next_(NULL),
219 next_retired_worker_(NULL),
220 worker_thread_number_(thread_number),
221 incarnation_count_for_pool_(-1) {
222 CHECK_GT(thread_number, 0);
223 base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number);
224 PushToHeadOfList(); // Which sets real incarnation_count_for_pool_.
227 ThreadData::~ThreadData() {}
229 void ThreadData::PushToHeadOfList() {
230 // Toss in a hint of randomness (atop the uniniitalized value).
231 (void)VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(&random_number_,
232 sizeof(random_number_));
233 random_number_ += static_cast<int32>(this - static_cast<ThreadData*>(0));
234 random_number_ ^= (Now() - TrackedTime()).InMilliseconds();
236 DCHECK(!next_);
237 base::AutoLock lock(*list_lock_.Pointer());
238 incarnation_count_for_pool_ = incarnation_counter_;
239 next_ = all_thread_data_list_head_;
240 all_thread_data_list_head_ = this;
243 // static
244 ThreadData* ThreadData::first() {
245 base::AutoLock lock(*list_lock_.Pointer());
246 return all_thread_data_list_head_;
249 ThreadData* ThreadData::next() const { return next_; }
251 // static
252 void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
253 if (!Initialize()) // Always initialize if needed.
254 return;
255 ThreadData* current_thread_data =
256 reinterpret_cast<ThreadData*>(tls_index_.Get());
257 if (current_thread_data)
258 return; // Browser tests instigate this.
259 current_thread_data = new ThreadData(suggested_name);
260 tls_index_.Set(current_thread_data);
263 // static
264 ThreadData* ThreadData::Get() {
265 if (!tls_index_.initialized())
266 return NULL; // For unittests only.
267 ThreadData* registered = reinterpret_cast<ThreadData*>(tls_index_.Get());
268 if (registered)
269 return registered;
271 // We must be a worker thread, since we didn't pre-register.
272 ThreadData* worker_thread_data = NULL;
273 int worker_thread_number = 0;
275 base::AutoLock lock(*list_lock_.Pointer());
276 if (first_retired_worker_) {
277 worker_thread_data = first_retired_worker_;
278 first_retired_worker_ = first_retired_worker_->next_retired_worker_;
279 worker_thread_data->next_retired_worker_ = NULL;
280 } else {
281 worker_thread_number = ++worker_thread_data_creation_count_;
285 // If we can't find a previously used instance, then we have to create one.
286 if (!worker_thread_data) {
287 DCHECK_GT(worker_thread_number, 0);
288 worker_thread_data = new ThreadData(worker_thread_number);
290 DCHECK_GT(worker_thread_data->worker_thread_number_, 0);
292 tls_index_.Set(worker_thread_data);
293 return worker_thread_data;
296 // static
297 void ThreadData::OnThreadTermination(void* thread_data) {
298 DCHECK(thread_data); // TLS should *never* call us with a NULL.
299 // We must NOT do any allocations during this callback. There is a chance
300 // that the allocator is no longer active on this thread.
301 if (!kTrackAllTaskObjects)
302 return; // Not compiled in.
303 reinterpret_cast<ThreadData*>(thread_data)->OnThreadTerminationCleanup();
306 void ThreadData::OnThreadTerminationCleanup() {
307 // The list_lock_ was created when we registered the callback, so it won't be
308 // allocated here despite the lazy reference.
309 base::AutoLock lock(*list_lock_.Pointer());
310 if (incarnation_counter_ != incarnation_count_for_pool_)
311 return; // ThreadData was constructed in an earlier unit test.
312 ++cleanup_count_;
313 // Only worker threads need to be retired and reused.
314 if (!worker_thread_number_) {
315 return;
317 // We must NOT do any allocations during this callback.
318 // Using the simple linked lists avoids all allocations.
319 DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL));
320 this->next_retired_worker_ = first_retired_worker_;
321 first_retired_worker_ = this;
324 // static
325 base::DictionaryValue* ThreadData::ToValue(bool reset_max) {
326 DataCollector collected_data; // Gather data.
327 // Request multiple calls to collected_data.Append() for all threads.
328 SendAllMaps(reset_max, &collected_data);
329 collected_data.AddListOfLivingObjects(); // Add births that are still alive.
330 base::DictionaryValue* dictionary = new base::DictionaryValue();
331 collected_data.ToValue(dictionary);
332 return dictionary;
335 Births* ThreadData::TallyABirth(const Location& location) {
336 BirthMap::iterator it = birth_map_.find(location);
337 Births* child;
338 if (it != birth_map_.end()) {
339 child = it->second;
340 child->RecordBirth();
341 } else {
342 child = new Births(location, *this); // Leak this.
343 // Lock since the map may get relocated now, and other threads sometimes
344 // snapshot it (but they lock before copying it).
345 base::AutoLock lock(map_lock_);
346 birth_map_[location] = child;
349 if (kTrackParentChildLinks && status_ > PROFILING_ACTIVE &&
350 !parent_stack_.empty()) {
351 const Births* parent = parent_stack_.top();
352 ParentChildPair pair(parent, child);
353 if (parent_child_set_.find(pair) == parent_child_set_.end()) {
354 // Lock since the map may get relocated now, and other threads sometimes
355 // snapshot it (but they lock before copying it).
356 base::AutoLock lock(map_lock_);
357 parent_child_set_.insert(pair);
361 return child;
364 void ThreadData::TallyADeath(const Births& birth,
365 DurationInt queue_duration,
366 DurationInt run_duration) {
367 // Stir in some randomness, plus add constant in case durations are zero.
368 const DurationInt kSomePrimeNumber = 2147483647;
369 random_number_ += queue_duration + run_duration + kSomePrimeNumber;
370 // An address is going to have some randomness to it as well ;-).
371 random_number_ ^= static_cast<int32>(&birth - reinterpret_cast<Births*>(0));
373 DeathMap::iterator it = death_map_.find(&birth);
374 DeathData* death_data;
375 if (it != death_map_.end()) {
376 death_data = &it->second;
377 } else {
378 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now.
379 death_data = &death_map_[&birth];
380 } // Release lock ASAP.
381 death_data->RecordDeath(queue_duration, run_duration, random_number_);
383 if (!kTrackParentChildLinks)
384 return;
385 if (!parent_stack_.empty()) { // We might get turned off.
386 DCHECK_EQ(parent_stack_.top(), &birth);
387 parent_stack_.pop();
391 // static
392 Births* ThreadData::TallyABirthIfActive(const Location& location) {
393 if (!kTrackAllTaskObjects)
394 return NULL; // Not compiled in.
396 if (!tracking_status())
397 return NULL;
398 ThreadData* current_thread_data = Get();
399 if (!current_thread_data)
400 return NULL;
401 return current_thread_data->TallyABirth(location);
404 // static
405 void ThreadData::TallyRunOnNamedThreadIfTracking(
406 const base::TrackingInfo& completed_task,
407 const TrackedTime& start_of_run,
408 const TrackedTime& end_of_run) {
409 if (!kTrackAllTaskObjects)
410 return; // Not compiled in.
412 // Even if we have been DEACTIVATED, we will process any pending births so
413 // that our data structures (which counted the outstanding births) remain
414 // consistent.
415 const Births* birth = completed_task.birth_tally;
416 if (!birth)
417 return;
418 ThreadData* current_thread_data = Get();
419 if (!current_thread_data)
420 return;
422 // To avoid conflating our stats with the delay duration in a PostDelayedTask,
423 // we identify such tasks, and replace their post_time with the time they
424 // were scheduled (requested?) to emerge from the delayed task queue. This
425 // means that queueing delay for such tasks will show how long they went
426 // unserviced, after they *could* be serviced. This is the same stat as we
427 // have for non-delayed tasks, and we consistently call it queueing delay.
428 TrackedTime effective_post_time = completed_task.delayed_run_time.is_null()
429 ? tracked_objects::TrackedTime(completed_task.time_posted)
430 : tracked_objects::TrackedTime(completed_task.delayed_run_time);
432 // Watch out for a race where status_ is changing, and hence one or both
433 // of start_of_run or end_of_run is zero. In that case, we didn't bother to
434 // get a time value since we "weren't tracking" and we were trying to be
435 // efficient by not calling for a genuine time value. For simplicity, we'll
436 // use a default zero duration when we can't calculate a true value.
437 DurationInt queue_duration = 0;
438 DurationInt run_duration = 0;
439 if (!start_of_run.is_null()) {
440 queue_duration = (start_of_run - effective_post_time).InMilliseconds();
441 if (!end_of_run.is_null())
442 run_duration = (end_of_run - start_of_run).InMilliseconds();
444 current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
447 // static
448 void ThreadData::TallyRunOnWorkerThreadIfTracking(
449 const Births* birth,
450 const TrackedTime& time_posted,
451 const TrackedTime& start_of_run,
452 const TrackedTime& end_of_run) {
453 if (!kTrackAllTaskObjects)
454 return; // Not compiled in.
456 // Even if we have been DEACTIVATED, we will process any pending births so
457 // that our data structures (which counted the outstanding births) remain
458 // consistent.
459 if (!birth)
460 return;
462 // TODO(jar): Support the option to coalesce all worker-thread activity under
463 // one ThreadData instance that uses locks to protect *all* access. This will
464 // reduce memory (making it provably bounded), but run incrementally slower
465 // (since we'll use locks on TallyBirth and TallyDeath). The good news is
466 // that the locks on TallyDeath will be *after* the worker thread has run, and
467 // hence nothing will be waiting for the completion (... besides some other
468 // thread that might like to run). Also, the worker threads tasks are
469 // generally longer, and hence the cost of the lock may perchance be amortized
470 // over the long task's lifetime.
471 ThreadData* current_thread_data = Get();
472 if (!current_thread_data)
473 return;
475 DurationInt queue_duration = 0;
476 DurationInt run_duration = 0;
477 if (!start_of_run.is_null()) {
478 queue_duration = (start_of_run - time_posted).InMilliseconds();
479 if (!end_of_run.is_null())
480 run_duration = (end_of_run - start_of_run).InMilliseconds();
482 current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
485 // static
486 void ThreadData::TallyRunInAScopedRegionIfTracking(
487 const Births* birth,
488 const TrackedTime& start_of_run,
489 const TrackedTime& end_of_run) {
490 if (!kTrackAllTaskObjects)
491 return; // Not compiled in.
493 // Even if we have been DEACTIVATED, we will process any pending births so
494 // that our data structures (which counted the outstanding births) remain
495 // consistent.
496 if (!birth)
497 return;
499 ThreadData* current_thread_data = Get();
500 if (!current_thread_data)
501 return;
503 DurationInt queue_duration = 0;
504 DurationInt run_duration = 0;
505 if (!start_of_run.is_null() && !end_of_run.is_null())
506 run_duration = (end_of_run - start_of_run).InMilliseconds();
507 current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
510 const std::string ThreadData::thread_name() const { return thread_name_; }
512 // This may be called from another thread.
513 void ThreadData::SnapshotMaps(bool reset_max,
514 BirthMap* birth_map,
515 DeathMap* death_map,
516 ParentChildSet* parent_child_set) {
517 base::AutoLock lock(map_lock_);
518 for (BirthMap::const_iterator it = birth_map_.begin();
519 it != birth_map_.end(); ++it)
520 (*birth_map)[it->first] = it->second;
521 for (DeathMap::iterator it = death_map_.begin();
522 it != death_map_.end(); ++it) {
523 (*death_map)[it->first] = it->second;
524 if (reset_max)
525 it->second.ResetMax();
528 if (!kTrackParentChildLinks)
529 return;
531 for (ParentChildSet::iterator it = parent_child_set_.begin();
532 it != parent_child_set_.end(); ++it)
533 parent_child_set->insert(*it);
536 // static
537 void ThreadData::SendAllMaps(bool reset_max, class DataCollector* target) {
538 if (!kTrackAllTaskObjects)
539 return; // Not compiled in.
540 // Get an unchanging copy of a ThreadData list.
541 ThreadData* my_list = ThreadData::first();
543 // Gather data serially.
544 // This hackish approach *can* get some slighly corrupt tallies, as we are
545 // grabbing values without the protection of a lock, but it has the advantage
546 // of working even with threads that don't have message loops. If a user
547 // sees any strangeness, they can always just run their stats gathering a
548 // second time.
549 for (ThreadData* thread_data = my_list;
550 thread_data;
551 thread_data = thread_data->next()) {
552 // Get copy of data.
553 ThreadData::BirthMap birth_map;
554 ThreadData::DeathMap death_map;
555 ThreadData::ParentChildSet parent_child_set;
556 thread_data->SnapshotMaps(reset_max, &birth_map, &death_map,
557 &parent_child_set);
558 target->Append(*thread_data, birth_map, death_map, parent_child_set);
562 // static
563 void ThreadData::ResetAllThreadData() {
564 ThreadData* my_list = first();
566 for (ThreadData* thread_data = my_list;
567 thread_data;
568 thread_data = thread_data->next())
569 thread_data->Reset();
572 void ThreadData::Reset() {
573 base::AutoLock lock(map_lock_);
574 for (DeathMap::iterator it = death_map_.begin();
575 it != death_map_.end(); ++it)
576 it->second.Clear();
577 for (BirthMap::iterator it = birth_map_.begin();
578 it != birth_map_.end(); ++it)
579 it->second->Clear();
582 bool ThreadData::Initialize() {
583 if (!kTrackAllTaskObjects)
584 return false; // Not compiled in.
585 if (status_ >= DEACTIVATED)
586 return true; // Someone else did the initialization.
587 // Due to racy lazy initialization in tests, we'll need to recheck status_
588 // after we acquire the lock.
590 // Ensure that we don't double initialize tls. We are called when single
591 // threaded in the product, but some tests may be racy and lazy about our
592 // initialization.
593 base::AutoLock lock(*list_lock_.Pointer());
594 if (status_ >= DEACTIVATED)
595 return true; // Someone raced in here and beat us.
597 // Perform the "real" TLS initialization now, and leave it intact through
598 // process termination.
599 if (!tls_index_.initialized()) { // Testing may have initialized this.
600 DCHECK_EQ(status_, UNINITIALIZED);
601 tls_index_.Initialize(&ThreadData::OnThreadTermination);
602 if (!tls_index_.initialized())
603 return false;
604 } else {
605 // TLS was initialzed for us earlier.
606 DCHECK_EQ(status_, DORMANT_DURING_TESTS);
609 // Incarnation counter is only significant to testing, as it otherwise will
610 // never again change in this process.
611 ++incarnation_counter_;
613 // The lock is not critical for setting status_, but it doesn't hurt. It also
614 // ensures that if we have a racy initialization, that we'll bail as soon as
615 // we get the lock earlier in this method.
616 status_ = kInitialStartupState;
617 if (!kTrackParentChildLinks &&
618 kInitialStartupState == PROFILING_CHILDREN_ACTIVE)
619 status_ = PROFILING_ACTIVE;
620 DCHECK(status_ != UNINITIALIZED);
621 return true;
624 // static
625 bool ThreadData::InitializeAndSetTrackingStatus(bool status) {
626 if (!Initialize()) // No-op if already initialized.
627 return false; // Not compiled in.
629 if (!status) {
630 status_ = DEACTIVATED;
631 } else {
632 if (kTrackParentChildLinks)
633 status_ = PROFILING_CHILDREN_ACTIVE;
634 else
635 status_ = PROFILING_ACTIVE;
637 return true;
640 // static
641 bool ThreadData::tracking_status() {
642 return status_ > DEACTIVATED;
645 // static
646 bool ThreadData::tracking_parent_child_status() {
647 return status_ >= PROFILING_CHILDREN_ACTIVE;
650 // static
651 TrackedTime ThreadData::NowForStartOfRun(const Births* parent) {
652 if (kTrackParentChildLinks && parent && status_ > PROFILING_ACTIVE) {
653 ThreadData* current_thread_data = Get();
654 if (current_thread_data)
655 current_thread_data->parent_stack_.push(parent);
657 return Now();
660 // static
661 TrackedTime ThreadData::NowForEndOfRun() {
662 return Now();
665 // static
666 TrackedTime ThreadData::Now() {
667 if (kTrackAllTaskObjects && tracking_status())
668 return TrackedTime::Now();
669 return TrackedTime(); // Super fast when disabled, or not compiled.
672 // static
673 void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) {
674 base::AutoLock lock(*list_lock_.Pointer());
675 if (worker_thread_data_creation_count_ == 0)
676 return; // We haven't really run much, and couldn't have leaked.
677 // Verify that we've at least shutdown/cleanup the major namesd threads. The
678 // caller should tell us how many thread shutdowns should have taken place by
679 // now.
680 return; // TODO(jar): until this is working on XP, don't run the real test.
681 CHECK_GT(cleanup_count_, major_threads_shutdown_count);
684 // static
685 void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
686 // This is only called from test code, where we need to cleanup so that
687 // additional tests can be run.
688 // We must be single threaded... but be careful anyway.
689 if (!InitializeAndSetTrackingStatus(false))
690 return;
691 ThreadData* thread_data_list;
693 base::AutoLock lock(*list_lock_.Pointer());
694 thread_data_list = all_thread_data_list_head_;
695 all_thread_data_list_head_ = NULL;
696 ++incarnation_counter_;
697 // To be clean, break apart the retired worker list (though we leak them).
698 while (first_retired_worker_) {
699 ThreadData* worker = first_retired_worker_;
700 CHECK_GT(worker->worker_thread_number_, 0);
701 first_retired_worker_ = worker->next_retired_worker_;
702 worker->next_retired_worker_ = NULL;
706 // Put most global static back in pristine shape.
707 worker_thread_data_creation_count_ = 0;
708 cleanup_count_ = 0;
709 tls_index_.Set(NULL);
710 status_ = DORMANT_DURING_TESTS; // Almost UNINITIALIZED.
712 // To avoid any chance of racing in unit tests, which is the only place we
713 // call this function, we may sometimes leak all the data structures we
714 // recovered, as they may still be in use on threads from prior tests!
715 if (leak)
716 return;
718 // When we want to cleanup (on a single thread), here is what we do.
720 // Do actual recursive delete in all ThreadData instances.
721 while (thread_data_list) {
722 ThreadData* next_thread_data = thread_data_list;
723 thread_data_list = thread_data_list->next();
725 for (BirthMap::iterator it = next_thread_data->birth_map_.begin();
726 next_thread_data->birth_map_.end() != it; ++it)
727 delete it->second; // Delete the Birth Records.
728 delete next_thread_data; // Includes all Death Records.
732 //------------------------------------------------------------------------------
733 // Individual 3-tuple of birth (place and thread) along with death thread, and
734 // the accumulated stats for instances (DeathData).
736 Snapshot::Snapshot(const BirthOnThread& birth_on_thread,
737 const ThreadData& death_thread,
738 const DeathData& death_data)
739 : birth_(&birth_on_thread),
740 death_thread_(&death_thread),
741 death_data_(death_data) {
744 Snapshot::Snapshot(const BirthOnThread& birth_on_thread, int count)
745 : birth_(&birth_on_thread),
746 death_thread_(NULL),
747 death_data_(DeathData(count)) {
750 const std::string Snapshot::DeathThreadName() const {
751 if (death_thread_)
752 return death_thread_->thread_name();
753 return "Still_Alive";
756 base::DictionaryValue* Snapshot::ToValue() const {
757 base::DictionaryValue* dictionary = new base::DictionaryValue;
758 // TODO(jar): Switch the next two lines to:
759 // birth_->ToValue("birth", dictionary);
760 // ...but that will require fixing unit tests, and JS to take
761 // "birth_location" rather than "location"
762 dictionary->Set("birth_thread",
763 base::Value::CreateStringValue(birth_->birth_thread()->thread_name()));
764 dictionary->Set("location", birth_->location().ToValue());
766 dictionary->Set("death_data", death_data_.ToValue());
767 dictionary->Set("death_thread",
768 base::Value::CreateStringValue(DeathThreadName()));
769 return dictionary;
772 //------------------------------------------------------------------------------
773 // DataCollector
775 DataCollector::DataCollector() {}
777 DataCollector::~DataCollector() {
780 void DataCollector::Append(const ThreadData& thread_data,
781 const ThreadData::BirthMap& birth_map,
782 const ThreadData::DeathMap& death_map,
783 const ThreadData::ParentChildSet& parent_child_set) {
784 for (ThreadData::DeathMap::const_iterator it = death_map.begin();
785 it != death_map.end(); ++it) {
786 collection_.push_back(Snapshot(*it->first, thread_data, it->second));
787 global_birth_count_[it->first] -= it->first->birth_count();
790 for (ThreadData::BirthMap::const_iterator it = birth_map.begin();
791 it != birth_map.end(); ++it) {
792 global_birth_count_[it->second] += it->second->birth_count();
795 if (!kTrackParentChildLinks)
796 return;
798 for (ThreadData::ParentChildSet::const_iterator it = parent_child_set.begin();
799 it != parent_child_set.end(); ++it) {
800 parent_child_set_.insert(*it);
804 DataCollector::Collection* DataCollector::collection() {
805 return &collection_;
808 void DataCollector::AddListOfLivingObjects() {
809 for (BirthCount::iterator it = global_birth_count_.begin();
810 it != global_birth_count_.end(); ++it) {
811 if (it->second > 0)
812 collection_.push_back(Snapshot(*it->first, it->second));
816 void DataCollector::ToValue(base::DictionaryValue* dictionary) const {
817 base::ListValue* list = new base::ListValue;
818 for (size_t i = 0; i < collection_.size(); ++i) {
819 list->Append(collection_[i].ToValue());
821 dictionary->Set("list", list);
823 base::ListValue* descendants = new base::ListValue;
824 for (ThreadData::ParentChildSet::const_iterator it =
825 parent_child_set_.begin();
826 it != parent_child_set_.end();
827 ++it) {
828 base::DictionaryValue* parent_child = new base::DictionaryValue;
829 it->first->ToValue("parent", parent_child);
830 it->second->ToValue("child", parent_child);
831 descendants->Append(parent_child);
833 dictionary->Set("descendants", descendants);
836 } // namespace tracked_objects