// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/statistics_recorder.h"

#include <list>
#include <string>
#include <vector>

#include "base/at_exit.h"
#include "base/debug/leak_annotations.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/metrics/histogram.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
19 // Initialize histogram statistics gathering system.
20 base::LazyInstance
<base::StatisticsRecorder
>::Leaky g_statistics_recorder_
=
21 LAZY_INSTANCE_INITIALIZER
;
27 void StatisticsRecorder::Initialize() {
28 // Ensure that an instance of the StatisticsRecorder object is created.
29 g_statistics_recorder_
.Get();
34 bool StatisticsRecorder::IsActive() {
37 base::AutoLock
auto_lock(*lock_
);
38 return NULL
!= histograms_
;
42 HistogramBase
* StatisticsRecorder::RegisterOrDeleteDuplicate(
43 HistogramBase
* histogram
) {
44 // As per crbug.com/79322 the histograms are intentionally leaked, so we need
45 // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once
46 // for an object, the duplicates should not be annotated.
47 // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
48 // twice if (lock_ == NULL) || (!histograms_).
50 ANNOTATE_LEAKING_OBJECT_PTR(histogram
); // see crbug.com/79322
54 HistogramBase
* histogram_to_delete
= NULL
;
55 HistogramBase
* histogram_to_return
= NULL
;
57 base::AutoLock
auto_lock(*lock_
);
58 if (histograms_
== NULL
) {
59 histogram_to_return
= histogram
;
61 const string
& name
= histogram
->histogram_name();
62 HistogramMap::iterator it
= histograms_
->find(name
);
63 if (histograms_
->end() == it
) {
64 (*histograms_
)[name
] = histogram
;
65 ANNOTATE_LEAKING_OBJECT_PTR(histogram
); // see crbug.com/79322
66 histogram_to_return
= histogram
;
67 } else if (histogram
== it
->second
) {
68 // The histogram was registered before.
69 histogram_to_return
= histogram
;
71 // We already have one histogram with this name.
72 histogram_to_return
= it
->second
;
73 histogram_to_delete
= histogram
;
77 delete histogram_to_delete
;
78 return histogram_to_return
;
82 const BucketRanges
* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
83 const BucketRanges
* ranges
) {
84 DCHECK(ranges
->HasValidChecksum());
85 scoped_ptr
<const BucketRanges
> ranges_deleter
;
88 ANNOTATE_LEAKING_OBJECT_PTR(ranges
);
92 base::AutoLock
auto_lock(*lock_
);
93 if (ranges_
== NULL
) {
94 ANNOTATE_LEAKING_OBJECT_PTR(ranges
);
98 list
<const BucketRanges
*>* checksum_matching_list
;
99 RangesMap::iterator ranges_it
= ranges_
->find(ranges
->checksum());
100 if (ranges_
->end() == ranges_it
) {
101 // Add a new matching list to map.
102 checksum_matching_list
= new list
<const BucketRanges
*>();
103 ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list
);
104 (*ranges_
)[ranges
->checksum()] = checksum_matching_list
;
106 checksum_matching_list
= ranges_it
->second
;
109 list
<const BucketRanges
*>::iterator checksum_matching_list_it
;
110 for (checksum_matching_list_it
= checksum_matching_list
->begin();
111 checksum_matching_list_it
!= checksum_matching_list
->end();
112 ++checksum_matching_list_it
) {
113 const BucketRanges
* existing_ranges
= *checksum_matching_list_it
;
114 if (existing_ranges
->Equals(ranges
)) {
115 if (existing_ranges
== ranges
) {
118 ranges_deleter
.reset(ranges
);
119 return existing_ranges
;
123 // We haven't found a BucketRanges which has the same ranges. Register the
125 checksum_matching_list
->push_front(ranges
);
130 void StatisticsRecorder::WriteHTMLGraph(const std::string
& query
,
131 std::string
* output
) {
136 GetSnapshot(query
, &snapshot
);
137 for (Histograms::iterator it
= snapshot
.begin();
138 it
!= snapshot
.end();
140 (*it
)->WriteHTMLGraph(output
);
141 output
->append("<br><hr><br>");
146 void StatisticsRecorder::WriteGraph(const std::string
& query
,
147 std::string
* output
) {
151 StringAppendF(output
, "Collections of histograms for %s\n", query
.c_str());
153 output
->append("Collections of all histograms\n");
156 GetSnapshot(query
, &snapshot
);
157 for (Histograms::iterator it
= snapshot
.begin();
158 it
!= snapshot
.end();
160 (*it
)->WriteAscii(output
);
161 output
->append("\n");
166 void StatisticsRecorder::GetHistograms(Histograms
* output
) {
169 base::AutoLock
auto_lock(*lock_
);
170 if (histograms_
== NULL
)
173 for (HistogramMap::iterator it
= histograms_
->begin();
174 histograms_
->end() != it
;
176 DCHECK_EQ(it
->first
, it
->second
->histogram_name());
177 output
->push_back(it
->second
);
182 void StatisticsRecorder::GetBucketRanges(
183 std::vector
<const BucketRanges
*>* output
) {
186 base::AutoLock
auto_lock(*lock_
);
190 for (RangesMap::iterator it
= ranges_
->begin();
191 ranges_
->end() != it
;
193 list
<const BucketRanges
*>* ranges_list
= it
->second
;
194 list
<const BucketRanges
*>::iterator ranges_list_it
;
195 for (ranges_list_it
= ranges_list
->begin();
196 ranges_list_it
!= ranges_list
->end();
198 output
->push_back(*ranges_list_it
);
204 HistogramBase
* StatisticsRecorder::FindHistogram(const std::string
& name
) {
207 base::AutoLock
auto_lock(*lock_
);
208 if (histograms_
== NULL
)
211 HistogramMap::iterator it
= histograms_
->find(name
);
212 if (histograms_
->end() == it
)
218 void StatisticsRecorder::GetSnapshot(const std::string
& query
,
219 Histograms
* snapshot
) {
222 base::AutoLock
auto_lock(*lock_
);
223 if (histograms_
== NULL
)
226 for (HistogramMap::iterator it
= histograms_
->begin();
227 histograms_
->end() != it
;
229 if (it
->first
.find(query
) != std::string::npos
)
230 snapshot
->push_back(it
->second
);
234 // This singleton instance should be started during the single threaded portion
235 // of main(), and hence it is not thread safe. It initializes globals to
236 // provide support for all future calls.
237 StatisticsRecorder::StatisticsRecorder() {
238 DCHECK(!histograms_
);
240 // This will leak on purpose. It's the only way to make sure we won't race
241 // against the static uninitialization of the module while one of our
242 // static methods relying on the lock get called at an inappropriate time
243 // during the termination phase. Since it's a static data member, we will
244 // leak one per process, which would be similar to the instance allocated
245 // during static initialization and released only on process termination.
246 lock_
= new base::Lock
;
248 base::AutoLock
auto_lock(*lock_
);
249 histograms_
= new HistogramMap
;
250 ranges_
= new RangesMap
;
253 AtExitManager::RegisterCallback(&DumpHistogramsToVlog
, this);
257 void StatisticsRecorder::DumpHistogramsToVlog(void* instance
) {
258 DCHECK(VLOG_IS_ON(1));
260 StatisticsRecorder
* me
= reinterpret_cast<StatisticsRecorder
*>(instance
);
262 me
->WriteGraph(std::string(), &output
);
266 StatisticsRecorder::~StatisticsRecorder() {
267 DCHECK(histograms_
&& ranges_
&& lock_
);
270 scoped_ptr
<HistogramMap
> histograms_deleter
;
271 scoped_ptr
<RangesMap
> ranges_deleter
;
272 // We don't delete lock_ on purpose to avoid having to properly protect
273 // against it going away after we checked for NULL in the static methods.
275 base::AutoLock
auto_lock(*lock_
);
276 histograms_deleter
.reset(histograms_
);
277 ranges_deleter
.reset(ranges_
);
281 // We are going to leak the histograms and the ranges.
286 StatisticsRecorder::HistogramMap
* StatisticsRecorder::histograms_
= NULL
;
288 StatisticsRecorder::RangesMap
* StatisticsRecorder::ranges_
= NULL
;
290 base::Lock
* StatisticsRecorder::lock_
= NULL
;