components/web_cache/browser/web_cache_manager.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/web_cache/browser/web_cache_manager.h"

#include <algorithm>

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/prefs/pref_registry_simple.h"
#include "base/prefs/pref_service.h"
#include "base/sys_info.h"
#include "base/time/time.h"
#include "components/web_cache/common/web_cache_messages.h"
#include "content/public/browser/notification_service.h"
#include "content/public/browser/notification_types.h"
#include "content/public/browser/render_process_host.h"

using base::Time;
using base::TimeDelta;
using blink::WebCache;

namespace web_cache {
static const int kReviseAllocationDelayMS = 200;

// The default size limit of the in-memory cache is 8 MB
static const int kDefaultMemoryCacheSize = 8 * 1024 * 1024;

namespace {
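
// Returns a default cache size that scales with the amount of physical
// memory: 8 MB at the low end, 16 MB with at least 512 MB of RAM, and
// 32 MB with 1 GB or more.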
int GetDefaultCacheSize() {
  // Start off with a modest default
  int default_cache_size = kDefaultMemoryCacheSize;

  // Check how much physical memory the OS has
  int mem_size_mb = base::SysInfo::AmountOfPhysicalMemoryMB();
  if (mem_size_mb >= 1000)  // If we have a GB of memory, set a larger default.
    default_cache_size *= 4;
  else if (mem_size_mb >= 512)  // With 512 MB, set a slightly larger default.
    default_cache_size *= 2;

  UMA_HISTOGRAM_MEMORY_MB("Cache.MaxCacheSizeMB",
                          default_cache_size / 1024 / 1024);

  return default_cache_size;
}

}  // anonymous namespace
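
// WebCacheManager is a browser-side singleton. A typical call site (a
// sketch, not taken from this file) looks like:
//   web_cache::WebCacheManager::GetInstance()->ClearCacheOnNavigation();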
// static
WebCacheManager* WebCacheManager::GetInstance() {
  return Singleton<WebCacheManager>::get();
}

WebCacheManager::WebCacheManager()
    : global_size_limit_(GetDefaultGlobalSizeLimit()),
      weak_factory_(this) {
  registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_CREATED,
                 content::NotificationService::AllBrowserContextsAndSources());
  registrar_.Add(this, content::NOTIFICATION_RENDERER_PROCESS_TERMINATED,
                 content::NotificationService::AllBrowserContextsAndSources());
}

WebCacheManager::~WebCacheManager() {
}
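
// Registers a newly created renderer process as active and schedules a
// deferred reallocation of the global cache budget.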
void WebCacheManager::Add(int renderer_id) {
  DCHECK(inactive_renderers_.count(renderer_id) == 0);

  // It is tempting to make the following DCHECK here, but it fails when a new
  // tab is created as we observe activity from that tab because the
  // RenderProcessHost is recreated and adds itself.
  //
  //   DCHECK(active_renderers_.count(renderer_id) == 0);
  //
  // However, there doesn't seem to be much harm in receiving the calls in this
  // order.

  active_renderers_.insert(renderer_id);

  RendererInfo* stats = &(stats_[renderer_id]);
  memset(stats, 0, sizeof(*stats));
  stats->access = Time::Now();

  // Revise our allocation strategy to account for this new renderer.
  ReviseAllocationStrategyLater();
}
void WebCacheManager::Remove(int renderer_id) {
  // Erase all knowledge of this renderer
  active_renderers_.erase(renderer_id);
  inactive_renderers_.erase(renderer_id);
  stats_.erase(renderer_id);

  // Reallocate the resources used by this renderer
  ReviseAllocationStrategyLater();
}
void WebCacheManager::ObserveActivity(int renderer_id) {
  StatsMap::iterator item = stats_.find(renderer_id);
  if (item == stats_.end())
    return;  // We might see stats for a renderer that has been destroyed.

  // Record activity.
  active_renderers_.insert(renderer_id);
  item->second.access = Time::Now();

  std::set<int>::iterator elmt = inactive_renderers_.find(renderer_id);
  if (elmt != inactive_renderers_.end()) {
    inactive_renderers_.erase(elmt);

    // A renderer that was inactive just became active. We should make sure
    // it is given a fair cache allocation, but we defer this for a bit in
    // order to make this function call cheap.
    ReviseAllocationStrategyLater();
  }
}
void WebCacheManager::ObserveStats(int renderer_id,
                                   const WebCache::UsageStats& stats) {
  StatsMap::iterator entry = stats_.find(renderer_id);
  if (entry == stats_.end())
    return;  // We might see stats for a renderer that has been destroyed.

  // Record the updated stats.
  entry->second.capacity = stats.capacity;
  entry->second.deadSize = stats.deadSize;
  entry->second.liveSize = stats.liveSize;
  entry->second.maxDeadCapacity = stats.maxDeadCapacity;
  entry->second.minDeadCapacity = stats.minDeadCapacity;
}
void WebCacheManager::SetGlobalSizeLimit(size_t bytes) {
  global_size_limit_ = bytes;
  ReviseAllocationStrategyLater();
}

void WebCacheManager::ClearCache() {
  // Tell each renderer process to clear the cache.
  ClearRendererCache(active_renderers_, INSTANTLY);
  ClearRendererCache(inactive_renderers_, INSTANTLY);
}

void WebCacheManager::ClearCacheOnNavigation() {
  // Tell each renderer process to clear the cache when a tab is reloaded or
  // the user navigates to a new website.
  ClearRendererCache(active_renderers_, ON_NAVIGATION);
  ClearRendererCache(inactive_renderers_, ON_NAVIGATION);
}
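
// Translates renderer-process lifecycle notifications into Add()/Remove()
// calls so the manager tracks exactly the set of live renderer processes.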
void WebCacheManager::Observe(int type,
                              const content::NotificationSource& source,
                              const content::NotificationDetails& details) {
  switch (type) {
    case content::NOTIFICATION_RENDERER_PROCESS_CREATED: {
      content::RenderProcessHost* process =
          content::Source<content::RenderProcessHost>(source).ptr();
      Add(process->GetID());
      break;
    }
    case content::NOTIFICATION_RENDERER_PROCESS_TERMINATED: {
      content::RenderProcessHost* process =
          content::Source<content::RenderProcessHost>(source).ptr();
      Remove(process->GetID());
      break;
    }
    default:
      NOTREACHED();
      break;
  }
}
// static
size_t WebCacheManager::GetDefaultGlobalSizeLimit() {
  return GetDefaultCacheSize();
}
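
// Sums the most recently reported usage stats of every renderer in
// |renderers| into |stats|. Renderers with no recorded stats contribute
// nothing.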
void WebCacheManager::GatherStats(const std::set<int>& renderers,
                                  WebCache::UsageStats* stats) {
  DCHECK(stats);

  memset(stats, 0, sizeof(WebCache::UsageStats));

  std::set<int>::const_iterator iter = renderers.begin();
  while (iter != renderers.end()) {
    StatsMap::iterator elmt = stats_.find(*iter);
    if (elmt != stats_.end()) {
      stats->minDeadCapacity += elmt->second.minDeadCapacity;
      stats->maxDeadCapacity += elmt->second.maxDeadCapacity;
      stats->capacity += elmt->second.capacity;
      stats->liveSize += elmt->second.liveSize;
      stats->deadSize += elmt->second.deadSize;
    }
    ++iter;
  }
}
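
// Returns the number of bytes that must be reserved under |tactic| for a
// renderer (or group of renderers) whose usage is |stats|. The
// *_WITH_HEADROOM tactics reserve an extra 50% of headroom.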
// static
size_t WebCacheManager::GetSize(AllocationTactic tactic,
                                const WebCache::UsageStats& stats) {
  switch (tactic) {
    case DIVIDE_EVENLY:
      // We aren't going to reserve any space for existing objects.
      return 0;
    case KEEP_CURRENT_WITH_HEADROOM:
      // We need enough space for our current objects, plus some headroom.
      return 3 * GetSize(KEEP_CURRENT, stats) / 2;
    case KEEP_CURRENT:
      // We need enough space to keep our current objects.
      return stats.liveSize + stats.deadSize;
    case KEEP_LIVE_WITH_HEADROOM:
      // We need enough space to keep our live resources, plus some headroom.
      return 3 * GetSize(KEEP_LIVE, stats) / 2;
    case KEEP_LIVE:
      // We need enough space to keep our live resources.
      return stats.liveSize;
    default:
      NOTREACHED() << "Unknown cache allocation tactic";
      return 0;
  }
}
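
// Attempts to fit |active_tactic| for the active renderers and
// |inactive_tactic| for the inactive ones into |global_size_limit_|. On
// success, fills |strategy| with a per-renderer allocation and returns true;
// returns false if the tactics do not fit.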
bool WebCacheManager::AttemptTactic(
    AllocationTactic active_tactic,
    const WebCache::UsageStats& active_stats,
    AllocationTactic inactive_tactic,
    const WebCache::UsageStats& inactive_stats,
    AllocationStrategy* strategy) {
  DCHECK(strategy);

  size_t active_size = GetSize(active_tactic, active_stats);
  size_t inactive_size = GetSize(inactive_tactic, inactive_stats);

  // Give up if we don't have enough space to use this tactic.
  if (global_size_limit_ < active_size + inactive_size)
    return false;

  // Compute the unreserved space available.
  size_t total_extra = global_size_limit_ - (active_size + inactive_size);

  // The plan for the extra space is to divide it evenly among the active
  // renderers.
  size_t shares = active_renderers_.size();

  // The inactive renderers get one share of the extra memory to be divided
  // among themselves.
  size_t inactive_extra = 0;
  if (!inactive_renderers_.empty()) {
    ++shares;
    inactive_extra = total_extra / shares;
  }

  // The remaining memory is allocated to the active renderers.
  size_t active_extra = total_extra - inactive_extra;

  // Actually compute the allocations for each renderer.
  AddToStrategy(active_renderers_, active_tactic, active_extra, strategy);
  AddToStrategy(inactive_renderers_, inactive_tactic, inactive_extra, strategy);

  // We succeeded in computing an allocation strategy.
  return true;
}
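
// Divides |extra_bytes_to_allocate| evenly among |renderers| and appends one
// Allocation per renderer to |strategy|, on top of whatever |tactic| requires
// for that renderer's current contents.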
void WebCacheManager::AddToStrategy(const std::set<int>& renderers,
                                    AllocationTactic tactic,
                                    size_t extra_bytes_to_allocate,
                                    AllocationStrategy* strategy) {
  DCHECK(strategy);

  // Nothing to do if there are no renderers. It is common for there to be no
  // inactive renderers if there is a single active tab.
  if (renderers.empty())
    return;

  // Divide the extra memory evenly among the renderers.
  size_t extra_each = extra_bytes_to_allocate / renderers.size();

  std::set<int>::const_iterator iter = renderers.begin();
  while (iter != renderers.end()) {
    size_t cache_size = extra_each;

    // Add in the space required to implement |tactic|.
    StatsMap::iterator elmt = stats_.find(*iter);
    if (elmt != stats_.end())
      cache_size += GetSize(tactic, elmt->second);

    // Record the allocation in our strategy.
    strategy->push_back(Allocation(*iter, cache_size));
    ++iter;
  }
}
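
// Sends each renderer in |strategy| its new cache capacity. Dead (cached but
// unreferenced) objects may use at most half of that capacity, and at most
// 512 KB on low-end devices; no space is reserved exclusively for them.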
void WebCacheManager::EnactStrategy(const AllocationStrategy& strategy) {
  // Inform each render process of its cache allocation.
  AllocationStrategy::const_iterator allocation = strategy.begin();
  while (allocation != strategy.end()) {
    content::RenderProcessHost* host =
        content::RenderProcessHost::FromID(allocation->first);
    if (host) {
      // This is the capacity this renderer has been allocated.
      size_t capacity = allocation->second;

      // We don't reserve any space for dead objects in the cache. Instead, we
      // prefer to keep live objects around. There is probably some performance
      // tuning to be done here.
      size_t min_dead_capacity = 0;

      // We allow the dead objects to consume up to half of the cache capacity.
      size_t max_dead_capacity = capacity / 2;
      if (base::SysInfo::IsLowEndDevice()) {
        max_dead_capacity = std::min(static_cast<size_t>(512 * 1024),
                                     max_dead_capacity);
      }
      host->Send(new WebCacheMsg_SetCacheCapacities(min_dead_capacity,
                                                    max_dead_capacity,
                                                    capacity));
    }
    ++allocation;
  }
}
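
// Asks every renderer in |renderers| to clear its in-memory cache, either
// immediately or on its next navigation depending on |occasion|.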
void WebCacheManager::ClearRendererCache(
    const std::set<int>& renderers,
    WebCacheManager::ClearCacheOccasion occasion) {
  std::set<int>::const_iterator iter = renderers.begin();
  for (; iter != renderers.end(); ++iter) {
    content::RenderProcessHost* host =
        content::RenderProcessHost::FromID(*iter);
    if (host)
      host->Send(new WebCacheMsg_ClearCache(occasion == ON_NAVIGATION));
  }
}
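
// Recomputes the per-renderer cache allocation. Tactics are attempted from
// most to least generous, and the first one that fits within
// |global_size_limit_| is enacted.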
void WebCacheManager::ReviseAllocationStrategy() {
  DCHECK(stats_.size() <=
      active_renderers_.size() + inactive_renderers_.size());

  // Check if renderers have gone inactive.
  FindInactiveRenderers();

  // Gather statistics
  WebCache::UsageStats active;
  WebCache::UsageStats inactive;
  GatherStats(active_renderers_, &active);
  GatherStats(inactive_renderers_, &inactive);

  UMA_HISTOGRAM_COUNTS_100("Cache.ActiveTabs", active_renderers_.size());
  UMA_HISTOGRAM_COUNTS_100("Cache.InactiveTabs", inactive_renderers_.size());
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveCapacityMB",
                          active.capacity / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveDeadSizeMB",
                          active.deadSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.ActiveLiveSizeMB",
                          active.liveSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveCapacityMB",
                          inactive.capacity / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveDeadSizeMB",
                          inactive.deadSize / 1024 / 1024);
  UMA_HISTOGRAM_MEMORY_MB("Cache.InactiveLiveSizeMB",
                          inactive.liveSize / 1024 / 1024);

  // Compute an allocation strategy.
  //
  // We attempt various tactics in order of preference. Our first preference
  // is not to evict any objects. If we don't have enough resources, we'll
  // first try to evict dead data only. If that fails, we'll just divide the
  // resources we have evenly.
  //
  // We always try to give the active renderers some head room in their
  // allocations so they can take memory away from an inactive renderer with
  // a large cache allocation.
  //
  // Notice the early exit will prevent attempting less desirable tactics once
  // we've found a workable strategy.
  AllocationStrategy strategy;
  if (  // Ideally, we'd like to give the active renderers some headroom and
        // keep all our current objects.
      AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active,
                    KEEP_CURRENT, inactive, &strategy) ||
      // If we can't have that, then we first try to evict the dead objects in
      // the caches of inactive renderers.
      AttemptTactic(KEEP_CURRENT_WITH_HEADROOM, active,
                    KEEP_LIVE, inactive, &strategy) ||
      // Next, we try to keep the live objects in the active renderers (with
      // some room for new objects) and give whatever is left to the inactive
      // renderers.
      AttemptTactic(KEEP_LIVE_WITH_HEADROOM, active,
                    DIVIDE_EVENLY, inactive, &strategy) ||
      // If we've gotten this far, then we are very tight on memory. Let's try
      // to at least keep around the live objects for the active renderers.
      AttemptTactic(KEEP_LIVE, active, DIVIDE_EVENLY, inactive, &strategy) ||
      // We're basically out of memory. The best we can do is just divide up
      // what we have and soldier on.
      AttemptTactic(DIVIDE_EVENLY, active, DIVIDE_EVENLY, inactive,
                    &strategy)) {
    // Having found a workable strategy, we enact it.
    EnactStrategy(strategy);
  } else {
    // DIVIDE_EVENLY / DIVIDE_EVENLY should always succeed.
    NOTREACHED() << "Unable to find a cache allocation";
  }
}
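
// Schedules ReviseAllocationStrategy() to run after a short delay so that
// bursts of renderer activity are coalesced into a single reallocation.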
void WebCacheManager::ReviseAllocationStrategyLater() {
  // Ask to be called back in a few milliseconds to actually recompute our
  // allocation.
  base::MessageLoop::current()->PostDelayedTask(FROM_HERE,
      base::Bind(
          &WebCacheManager::ReviseAllocationStrategy,
          weak_factory_.GetWeakPtr()),
      base::TimeDelta::FromMilliseconds(kReviseAllocationDelayMS));
}
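
// Moves renderers that have shown no activity for
// |kRendererInactiveThresholdMinutes| from the active to the inactive set.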
void WebCacheManager::FindInactiveRenderers() {
  std::set<int>::const_iterator iter = active_renderers_.begin();
  while (iter != active_renderers_.end()) {
    StatsMap::iterator elmt = stats_.find(*iter);
    DCHECK(elmt != stats_.end());
    TimeDelta idle = Time::Now() - elmt->second.access;
    if (idle >= TimeDelta::FromMinutes(kRendererInactiveThresholdMinutes)) {
      // Moved to inactive status. This invalidates our iterator.
      inactive_renderers_.insert(*iter);
      active_renderers_.erase(*iter);
      iter = active_renderers_.begin();
      continue;
    }
    ++iter;
  }
}

}  // namespace web_cache