base/metrics/statistics_recorder.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/statistics_recorder.h"

#include "base/at_exit.h"
#include "base/debug/leak_annotations.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/metrics/histogram.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
using std::list;
using std::string;

namespace {
// Initialize histogram statistics gathering system.
base::LazyInstance<base::StatisticsRecorder>::Leaky g_statistics_recorder_ =
    LAZY_INSTANCE_INITIALIZER;
}  // namespace
namespace base {

// static
void StatisticsRecorder::Initialize() {
  // Ensure that an instance of the StatisticsRecorder object is created.
  g_statistics_recorder_.Get();
}
// static
bool StatisticsRecorder::IsActive() {
  if (lock_ == NULL)
    return false;
  base::AutoLock auto_lock(*lock_);
  return NULL != histograms_;
}
// static
HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
    HistogramBase* histogram) {
  // As per crbug.com/79322 the histograms are intentionally leaked, so we need
  // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once
  // for an object, the duplicates should not be annotated.
  // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
  // twice if (lock_ == NULL) || (!histograms_).
  if (lock_ == NULL) {
    ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
    return histogram;
  }

  HistogramBase* histogram_to_delete = NULL;
  HistogramBase* histogram_to_return = NULL;
  {
    base::AutoLock auto_lock(*lock_);
    if (histograms_ == NULL) {
      histogram_to_return = histogram;
    } else {
      const string& name = histogram->histogram_name();
      HistogramMap::iterator it = histograms_->find(name);
      if (histograms_->end() == it) {
        (*histograms_)[name] = histogram;
        ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
        histogram_to_return = histogram;
      } else if (histogram == it->second) {
        // The histogram was registered before.
        histogram_to_return = histogram;
      } else {
        // We already have one histogram with this name.
        histogram_to_return = it->second;
        histogram_to_delete = histogram;
      }
    }
  }
  delete histogram_to_delete;
  return histogram_to_return;
}
// static
const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
    const BucketRanges* ranges) {
  DCHECK(ranges->HasValidChecksum());
  scoped_ptr<const BucketRanges> ranges_deleter;

  if (lock_ == NULL) {
    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
    return ranges;
  }

  base::AutoLock auto_lock(*lock_);
  if (ranges_ == NULL) {
    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
    return ranges;
  }

  list<const BucketRanges*>* checksum_matching_list;
  RangesMap::iterator ranges_it = ranges_->find(ranges->checksum());
  if (ranges_->end() == ranges_it) {
    // Add a new matching list to map.
    checksum_matching_list = new list<const BucketRanges*>();
    ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list);
    (*ranges_)[ranges->checksum()] = checksum_matching_list;
  } else {
    checksum_matching_list = ranges_it->second;
  }

  list<const BucketRanges*>::iterator checksum_matching_list_it;
  for (checksum_matching_list_it = checksum_matching_list->begin();
       checksum_matching_list_it != checksum_matching_list->end();
       ++checksum_matching_list_it) {
    const BucketRanges* existing_ranges = *checksum_matching_list_it;
    if (existing_ranges->Equals(ranges)) {
      if (existing_ranges == ranges) {
        return ranges;
      } else {
        ranges_deleter.reset(ranges);
        return existing_ranges;
      }
    }
  }
  // We haven't found a BucketRanges which has the same ranges. Register the
  // new BucketRanges.
  checksum_matching_list->push_front(ranges);
  return ranges;
}
// static
void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
                                        std::string* output) {
  if (!IsActive())
    return;

  Histograms snapshot;
  GetSnapshot(query, &snapshot);
  for (Histograms::iterator it = snapshot.begin();
       it != snapshot.end();
       ++it) {
    (*it)->WriteHTMLGraph(output);
    output->append("<br><hr><br>");
  }
}
// static
void StatisticsRecorder::WriteGraph(const std::string& query,
                                    std::string* output) {
  if (!IsActive())
    return;
  if (query.length())
    StringAppendF(output, "Collections of histograms for %s\n", query.c_str());
  else
    output->append("Collections of all histograms\n");

  Histograms snapshot;
  GetSnapshot(query, &snapshot);
  for (Histograms::iterator it = snapshot.begin();
       it != snapshot.end();
       ++it) {
    (*it)->WriteAscii(output);
    output->append("\n");
  }
}
// static
void StatisticsRecorder::GetHistograms(Histograms* output) {
  if (lock_ == NULL)
    return;
  base::AutoLock auto_lock(*lock_);
  if (histograms_ == NULL)
    return;

  for (HistogramMap::iterator it = histograms_->begin();
       histograms_->end() != it;
       ++it) {
    DCHECK_EQ(it->first, it->second->histogram_name());
    output->push_back(it->second);
  }
}
// static
void StatisticsRecorder::GetBucketRanges(
    std::vector<const BucketRanges*>* output) {
  if (lock_ == NULL)
    return;
  base::AutoLock auto_lock(*lock_);
  if (ranges_ == NULL)
    return;

  for (RangesMap::iterator it = ranges_->begin();
       ranges_->end() != it;
       ++it) {
    list<const BucketRanges*>* ranges_list = it->second;
    list<const BucketRanges*>::iterator ranges_list_it;
    for (ranges_list_it = ranges_list->begin();
         ranges_list_it != ranges_list->end();
         ++ranges_list_it) {
      output->push_back(*ranges_list_it);
    }
  }
}
// static
HistogramBase* StatisticsRecorder::FindHistogram(const std::string& name) {
  if (lock_ == NULL)
    return NULL;
  base::AutoLock auto_lock(*lock_);
  if (histograms_ == NULL)
    return NULL;

  HistogramMap::iterator it = histograms_->find(name);
  if (histograms_->end() == it)
    return NULL;
  return it->second;
}
// private static
void StatisticsRecorder::GetSnapshot(const std::string& query,
                                     Histograms* snapshot) {
  if (lock_ == NULL)
    return;
  base::AutoLock auto_lock(*lock_);
  if (histograms_ == NULL)
    return;

  for (HistogramMap::iterator it = histograms_->begin();
       histograms_->end() != it;
       ++it) {
    if (it->first.find(query) != std::string::npos)
      snapshot->push_back(it->second);
  }
}
// This singleton instance should be started during the single threaded portion
// of main(), and hence it is not thread safe. It initializes globals to
// provide support for all future calls.
StatisticsRecorder::StatisticsRecorder() {
  DCHECK(!histograms_);
  if (lock_ == NULL) {
    // This will leak on purpose. It's the only way to make sure we won't race
    // against the static uninitialization of the module while one of our
    // static methods relying on the lock gets called at an inappropriate time
    // during the termination phase. Since it's a static data member, we will
    // leak one per process, which would be similar to the instance allocated
    // during static initialization and released only on process termination.
    lock_ = new base::Lock;
  }
  base::AutoLock auto_lock(*lock_);
  histograms_ = new HistogramMap;
  ranges_ = new RangesMap;

  if (VLOG_IS_ON(1))
    AtExitManager::RegisterCallback(&DumpHistogramsToVlog, this);
}
// static
void StatisticsRecorder::DumpHistogramsToVlog(void* instance) {
  DCHECK(VLOG_IS_ON(1));

  StatisticsRecorder* me = reinterpret_cast<StatisticsRecorder*>(instance);
  string output;
  me->WriteGraph(std::string(), &output);
  VLOG(1) << output;
}
StatisticsRecorder::~StatisticsRecorder() {
  DCHECK(histograms_ && ranges_ && lock_);

  // Clean up.
  scoped_ptr<HistogramMap> histograms_deleter;
  scoped_ptr<RangesMap> ranges_deleter;
  // We don't delete lock_ on purpose to avoid having to properly protect
  // against it going away after we checked for NULL in the static methods.
  {
    base::AutoLock auto_lock(*lock_);
    histograms_deleter.reset(histograms_);
    ranges_deleter.reset(ranges_);
    histograms_ = NULL;
    ranges_ = NULL;
  }
  // We are going to leak the histograms and the ranges.
}

// static
StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
// static
StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL;
// static
base::Lock* StatisticsRecorder::lock_ = NULL;

}  // namespace base
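
For context, here is a minimal sketch of how callers typically drive this recorder; it is not part of statistics_recorder.cc and assumes a Chromium-style build that links against base. StatisticsRecorder::Initialize() is meant to run early in main(), on a single thread (as the constructor comment above notes); histograms created afterwards register themselves through RegisterOrDeleteDuplicate(), and the static accessors can then be used to look up or dump what was recorded. The histogram name "Example.DecodedFrames" is a made-up illustration.

#include <string>

#include "base/at_exit.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/metrics/statistics_recorder.h"

int main() {
  // An AtExitManager so the VLOG dump callback registered by the
  // StatisticsRecorder constructor has a manager to run under (assumes the
  // embedder does not already provide one).
  base::AtExitManager at_exit;

  // Create the recorder's global histogram and ranges maps while still
  // single threaded.
  base::StatisticsRecorder::Initialize();

  // Standard histogram macros from base/metrics/histogram.h register their
  // histogram with the recorder via RegisterOrDeleteDuplicate() on first use.
  UMA_HISTOGRAM_COUNTS("Example.DecodedFrames", 42);  // hypothetical name

  // The recorder guarantees one HistogramBase instance per name, so lookups
  // return the same object the macro recorded into (or NULL if unknown).
  base::HistogramBase* h =
      base::StatisticsRecorder::FindHistogram("Example.DecodedFrames");
  CHECK(h);

  // Dump an ASCII report; an empty query matches every histogram, so
  // `output` starts with "Collections of all histograms\n" followed by one
  // graph per registered histogram.
  std::string output;
  base::StatisticsRecorder::WriteGraph(std::string(), &output);
  return 0;
}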