// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/backend_impl.h"

#include "base/bind_helpers.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/hash.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/metrics/stats_counters.h"
#include "base/rand_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/disk_format.h"
#include "net/disk_cache/entry_impl.h"
#include "net/disk_cache/errors.h"
#include "net/disk_cache/experiments.h"
#include "net/disk_cache/file.h"

// This has to be defined before including histogram_macros.h from this file.
#define NET_DISK_CACHE_BACKEND_IMPL_CC_
#include "net/disk_cache/histogram_macros.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

const char* kIndexName = "index";

// Seems like ~240 MB correspond to less than 50k entries for 99% of the people.
// Note that the actual target is to keep the index table load factor under 55%
// for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;
const int kDefaultCacheSize = 80 * 1024 * 1024;

// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;

int DesiredIndexTableLen(int32 storage_size) {
  if (storage_size <= k64kEntriesStore)
    return kBaseTableLen;
  if (storage_size <= k64kEntriesStore * 2)
    return kBaseTableLen * 2;
  if (storage_size <= k64kEntriesStore * 4)
    return kBaseTableLen * 4;
  if (storage_size <= k64kEntriesStore * 8)
    return kBaseTableLen * 8;

  // The biggest storage_size for int32 requires a 4 MB table.
  return kBaseTableLen * 16;
}

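// Worked example (illustrative, not part of the original file): a 500 MB
// cache exceeds k64kEntriesStore * 2 (480 MB) but not k64kEntriesStore * 4,
// so it gets a table of kBaseTableLen * 4 = 256k buckets:
//
//   int len = DesiredIndexTableLen(500 * 1000 * 1000);  // 64 * 1024 * 4
//
// At the top tier, kBaseTableLen * 16 is 1M buckets of 4 bytes each, i.e. the
// 4 MB table mentioned above.
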
int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}

size_t GetIndexSize(int table_len) {
  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
  return sizeof(disk_cache::IndexHeader) + table_size;
}

// ------------------------------------------------------------------------

// Sets group for the current experiment. Returns false if the files should be
// discarded.
bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
  if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
      header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
    // Discard current cache.
    return false;
  }

  if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
          "ExperimentControl") {
    if (cache_created) {
      header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
      return true;
    }
    return header->experiment == disk_cache::EXPERIMENT_SIMPLE_CONTROL;
  }

  header->experiment = disk_cache::NO_EXPERIMENT;
  return true;
}

// A callback to perform final cleanup on the background thread.
void FinalCleanupCallback(disk_cache::BackendImpl* backend) {
  backend->CleanupCache();
}

}  // namespace

// ------------------------------------------------------------------------

namespace disk_cache {

// Returns the preferred maximum number of bytes for the cache given the
// number of available bytes.
int PreferedCacheSize(int64 available) {
  // Return 80% of the available space if there is not enough space to use
  // kDefaultCacheSize.
  if (available < kDefaultCacheSize * 10 / 8)
    return static_cast<int32>(available * 8 / 10);

  // Return kDefaultCacheSize if it uses 80% to 10% of the available space.
  if (available < kDefaultCacheSize * 10)
    return kDefaultCacheSize;

  // Return 10% of the available space if the target size
  // (2.5 * kDefaultCacheSize) is more than 10%.
  if (available < static_cast<int64>(kDefaultCacheSize) * 25)
    return static_cast<int32>(available / 10);

  // Return the target size (2.5 * kDefaultCacheSize) if it uses 10% to 1%
  // of the available space.
  if (available < static_cast<int64>(kDefaultCacheSize) * 250)
    return kDefaultCacheSize * 5 / 2;

  // Return 1% of the available space if it does not exceed kint32max.
  if (available < static_cast<int64>(kint32max) * 100)
    return static_cast<int32>(available / 100);

  return kint32max;
}

// ------------------------------------------------------------------------

BackendImpl::BackendImpl(const base::FilePath& path,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(0),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(0),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::BackendImpl(const base::FilePath& path,
                         uint32 mask,
                         base::MessageLoopProxy* cache_thread,
                         net::NetLog* net_log)
    : background_queue_(this, cache_thread),
      path_(path),
      block_files_(path),
      mask_(mask),
      max_size_(0),
      up_ticks_(0),
      cache_type_(net::DISK_CACHE),
      uma_report_(0),
      user_flags_(kMask),
      init_(false),
      restarted_(false),
      unit_test_(false),
      read_only_(false),
      new_eviction_(false),
      first_timer_(true),
      user_load_(false),
      net_log_(net_log),
      done_(true, false),
      ptr_factory_(this) {
}

BackendImpl::~BackendImpl() {
  if (user_flags_ & kNoRandom) {
    // This is a unit test, so we want to be strict about not leaking entries
    // and completing all the work.
    background_queue_.WaitForPendingIO();
  } else {
    // This is most likely not a test, so we want to do as little work as
    // possible at this time, at the price of leaving dirty entries behind.
    background_queue_.DropPendingIO();
  }

  if (background_queue_.BackgroundIsCurrentThread()) {
    // Unit tests may use the same thread for everything.
    CleanupCache();
  } else {
    background_queue_.background_thread()->PostTask(
        FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
    // http://crbug.com/74623
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    done_.Wait();
  }
}

int BackendImpl::Init(const CompletionCallback& callback) {
  background_queue_.Init(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncInit() {
#if defined(NET_BUILD_STRESS_CACHE)
  // Start evictions right away.
  up_ticks_ = kTrimDelay * 2;
#endif
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  if (!restarted_) {
    buffer_bytes_ = 0;
    trace_object_ = TraceObject::GetTraceObject();
    // Create a recurrent timer of 30 secs.
    int timer_delay = unit_test_ ? 1000 : 30000;
    timer_.reset(new base::RepeatingTimer<BackendImpl>());
    timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
                  &BackendImpl::OnStatsTimer);
  }

  init_ = true;
  Trace("Init");

  if (data_->header.experiment != NO_EXPERIMENT &&
      cache_type_ != net::DISK_CACHE) {
    // No experiment for other caches.
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & kNoRandom)) {
    // The unit test controls directly what to test.
    new_eviction_ = (cache_type_ == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;
  }

  if (!restarted_ && (create_files || !data_->header.num_entries))
    ReportError(ERR_CACHE_CREATED);

  if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
      !InitExperiment(&data_->header, create_files)) {
    return net::ERR_FAILED;
  }

  // We don't care if the value overflows. The only thing we care about is that
  // the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

  bool previous_crash = (data_->header.crash != 0);
  data_->header.crash = 1;

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to cache for an AppCache.
  if (cache_type() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
    read_only_ = true;
  } else if (cache_type() == net::SHADER_CACHE) {
    DCHECK(!new_eviction_);
  }

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us so we better be enabled.
  disabled_ = false;
  if (!InitStats())
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
  trace_object_->EnableTracing(false);
  int sc = SelfCheck();
  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
    NOTREACHED();
  trace_object_->EnableTracing(true);
#endif

  if (previous_crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else if (!restarted_) {
    ReportError(ERR_NO_ERROR);
  }

  FlushIndex();

  return disabled_ ? net::ERR_FAILED : net::OK;
}

void BackendImpl::CleanupCache() {
  Trace("Backend Cleanup");
  eviction_.Stop();
  timer_.reset();

  if (init_) {
    StoreStats();
    if (data_)
      data_->header.crash = 0;

    if (user_flags_ & kNoRandom) {
      // This is a net_unittest, verify that we are not 'leaking' entries.
      File::WaitForPendingIO(&num_pending_io_);
      DCHECK(!num_refs_);
    }
  }
  block_files_.CloseFiles();
  FlushIndex();
  index_ = NULL;
  ptr_factory_.InvalidateWeakPtrs();
  done_.Signal();
}

// ------------------------------------------------------------------------

int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenPrevEntry(iter, prev_entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = OpenEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
  DCHECK(entry);
  *entry = CreateEntryImpl(key);
  return (*entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncDoomEntry(const std::string& key) {
  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* entry = OpenEntryImpl(key);
  if (!entry)
    return net::ERR_FAILED;

  entry->DoomImpl();
  entry->Release();
  return net::OK;
}

int BackendImpl::SyncDoomAllEntries() {
  // This is not really an error, but it is an interesting condition.
  ReportError(ERR_CACHE_DOOMED);
  stats_.OnEvent(Stats::DOOM_CACHE);
  if (!num_refs_) {
    RestartCache(false);
    return disabled_ ? net::ERR_FAILED : net::OK;
  } else {
    if (disabled_)
      return net::ERR_FAILED;

    eviction_.TrimCache(true);
    return net::OK;
  }
}

int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
                                        const base::Time end_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (end_time.is_null())
    return SyncDoomEntriesSince(initial_time);

  DCHECK(end_time >= initial_time);

  if (disabled_)
    return net::ERR_FAILED;

  EntryImpl* node;
  void* iter = NULL;
  EntryImpl* next = OpenNextEntryImpl(&iter);
  if (!next)
    return net::OK;

  while (next) {
    node = next;
    next = OpenNextEntryImpl(&iter);

    if (node->GetLastUsed() >= initial_time &&
        node->GetLastUsed() < end_time) {
      node->DoomImpl();
    } else if (node->GetLastUsed() < initial_time) {
      if (next)
        next->Release();
      next = NULL;
      SyncEndEnumeration(iter);
    }

    node->Release();
  }

  return net::OK;
}

// We use OpenNextEntryImpl to retrieve elements from the cache, until we get
// entries that are too old.
int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
  DCHECK_NE(net::APP_CACHE, cache_type_);
  if (disabled_)
    return net::ERR_FAILED;

  stats_.OnEvent(Stats::DOOM_RECENT);
  for (;;) {
    void* iter = NULL;
    EntryImpl* entry = OpenNextEntryImpl(&iter);
    if (!entry)
      return net::OK;

    if (initial_time > entry->GetLastUsed()) {
      entry->Release();
      SyncEndEnumeration(iter);
      return net::OK;
    }

    entry->DoomImpl();
    entry->Release();
    SyncEndEnumeration(iter);  // Dooming the entry invalidates the iterator.
  }
}

int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) {
  *next_entry = OpenNextEntryImpl(iter);
  return (*next_entry) ? net::OK : net::ERR_FAILED;
}

int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) {
  *prev_entry = OpenPrevEntryImpl(iter);
  return (*prev_entry) ? net::OK : net::ERR_FAILED;
}

void BackendImpl::SyncEndEnumeration(void* iter) {
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(iter));
}

void BackendImpl::SyncOnExternalCacheHit(const std::string& key) {
  if (disabled_)
    return;

  uint32 hash = base::Hash(key);
  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry) {
    if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
      UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
    }
    cache_entry->Release();
  }
}

EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
  if (disabled_)
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Open hash 0x%x", hash);

  bool error;
  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
  if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
    // The entry was already evicted.
    cache_entry->Release();
    cache_entry = NULL;
  }

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  int64 use_hours = total_hours - no_use_hours;

  if (!cache_entry) {
    CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
    CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
    CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours);
    CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours);
    stats_.OnEvent(Stats::OPEN_MISS);
    return NULL;
  }

  eviction_.OnOpenEntry(cache_entry);
  entry_count_++;

  Trace("Open hash 0x%x end: 0x%x", hash,
        cache_entry->entry()->address().value());
  CACHE_UMA(AGE_MS, "OpenTime", 0, start);
  CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
  CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours);
  CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours);
  stats_.OnEvent(Stats::OPEN_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.hit");
  return cache_entry;
}

EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
  if (disabled_ || key.empty())
    return NULL;

  TimeTicks start = TimeTicks::Now();
  uint32 hash = base::Hash(key);
  Trace("Create hash 0x%x", hash);

  scoped_refptr<EntryImpl> parent;
  Addr entry_address(data_->table[hash & mask_]);
  if (entry_address.is_initialized()) {
    // We have an entry already. It could be the one we are looking for, or just
    // a hash conflict.
    bool error;
    EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
    if (old_entry)
      return ResurrectEntry(old_entry);

    EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
    DCHECK(!error);
    if (parent_entry) {
      parent.swap(&parent_entry);
    } else if (data_->table[hash & mask_]) {
      // We should have corrected the problem.
      NOTREACHED();
      return NULL;
    }
  }

  // The general flow is to allocate disk space and initialize the entry data,
  // followed by saving that to disk, then linking the entry through the index
  // and finally through the lists. If there is a crash in this process, we may
  // end up with:
  // a. Used, unreferenced empty blocks on disk (basically just garbage).
  // b. Used, unreferenced but meaningful data on disk (more garbage).
  // c. A fully formed entry, reachable only through the index.
  // d. A fully formed entry, also reachable through the lists, but still dirty.
  //
  // Anything after (b) can be automatically cleaned up. We may consider saving
  // the current operation (as we do while manipulating the lists) so that we
  // can detect and cleanup (a) and (b).

  int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
  if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  Addr node_address(0);
  if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
    block_files_.DeleteBlock(entry_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, entry_address, false));
  IncreaseNumRefs();

  if (!cache_entry->CreateEntry(node_address, key, hash)) {
    block_files_.DeleteBlock(entry_address, false);
    block_files_.DeleteBlock(node_address, false);
    LOG(ERROR) << "Create entry failed " << key.c_str();
    stats_.OnEvent(Stats::CREATE_ERROR);
    return NULL;
  }

  cache_entry->BeginLogging(net_log_, true);

  // We are not failing the operation; let's add this to the map.
  open_entries_[entry_address.value()] = cache_entry.get();

  // Save the entry.
  cache_entry->entry()->Store();
  cache_entry->rankings()->Store();
  IncreaseNumEntries();
  entry_count_++;

  // Link this entry through the index.
  if (parent.get()) {
    parent->SetNextAddress(entry_address);
  } else {
    data_->table[hash & mask_] = entry_address.value();
  }

  // Link this entry through the lists.
  eviction_.OnCreateEntry(cache_entry.get());

  CACHE_UMA(AGE_MS, "CreateTime", 0, start);
  stats_.OnEvent(Stats::CREATE_HIT);
  SIMPLE_STATS_COUNTER("disk_cache.miss");
  Trace("create entry hit ");
  FlushIndex();
  cache_entry->AddRef();
  return cache_entry.get();
}

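// A note on collision handling (summary, not original code): the index is an
// open hash table, so data_->table[hash & mask_] holds the address of the
// first entry of a bucket and each entry chains to the next one. A new entry
// is linked either by writing the table slot (empty bucket) or by patching
// the last entry's next pointer via parent->SetNextAddress().
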
EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) {
  return OpenFollowingEntry(true, iter);
}

EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) {
  return OpenFollowingEntry(false, iter);
}

bool BackendImpl::SetMaxSize(int max_bytes) {
  COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
  if (max_bytes < 0)
    return false;

  // Zero size means use the default.
  if (!max_bytes)
    return true;

  // Avoid a DCHECK later on.
  if (max_bytes >= kint32max - kint32max / 10)
    max_bytes = kint32max - kint32max / 10 - 1;

  user_flags_ |= kMaxSize;
  max_size_ = max_bytes;
  return true;
}

void BackendImpl::SetType(net::CacheType type) {
  DCHECK_NE(net::MEMORY_CACHE, type);
  cache_type_ = type;
}

base::FilePath BackendImpl::GetFileName(Addr address) const {
  if (!address.is_separate_file() || !address.is_initialized()) {
    NOTREACHED();
    return base::FilePath();
  }

  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
  return path_.AppendASCII(tmp);
}

MappedFile* BackendImpl::File(Addr address) {
  if (disabled_)
    return NULL;
  return block_files_.GetFile(address);
}

base::WeakPtr<InFlightBackendIO> BackendImpl::GetBackgroundQueue() {
  return background_queue_.GetWeakPtr();
}

bool BackendImpl::CreateExternalFile(Addr* address) {
  int file_number = data_->header.last_file + 1;
  Addr file_address(0);
  bool success = false;
  for (int i = 0; i < 0x0fffffff; i++, file_number++) {
    if (!file_address.SetFileNumber(file_number)) {
      file_number = 1;
      continue;
    }
    base::FilePath name = GetFileName(file_address);
    int flags = base::PLATFORM_FILE_READ |
                base::PLATFORM_FILE_WRITE |
                base::PLATFORM_FILE_CREATE |
                base::PLATFORM_FILE_EXCLUSIVE_WRITE;
    base::PlatformFileError error;
    scoped_refptr<disk_cache::File> file(new disk_cache::File(
        base::CreatePlatformFile(name, flags, NULL, &error)));
    if (!file->IsValid()) {
      if (error != base::PLATFORM_FILE_ERROR_EXISTS) {
        LOG(ERROR) << "Unable to create file: " << error;
        return false;
      }
      continue;
    }

    success = true;
    break;
  }

  DCHECK(success);
  if (!success)
    return false;

  data_->header.last_file = file_number;
  address->set_value(file_address.value());
  return true;
}

bool BackendImpl::CreateBlock(FileType block_type, int block_count,
                              Addr* block_address) {
  return block_files_.CreateBlock(block_type, block_count, block_address);
}

void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
  block_files_.DeleteBlock(block_address, deep);
}

LruData* BackendImpl::GetLruData() {
  return &data_->header.lru;
}

void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
  if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
    return;
  eviction_.UpdateRank(entry, modified);
}

void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
  Addr address(rankings->Data()->contents);
  EntryImpl* cache_entry = NULL;
  if (NewEntry(address, &cache_entry)) {
    STRESS_NOTREACHED();
    return;
  }

  uint32 hash = cache_entry->GetHash();
  cache_entry->Release();

  // Anything on the table means that this entry is there.
  if (data_->table[hash & mask_])
    return;

  data_->table[hash & mask_] = address.value();
  FlushIndex();
}

void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
  uint32 hash = entry->GetHash();
  std::string key = entry->GetKey();
  Addr entry_addr = entry->entry()->address();
  bool error;
  EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
  CacheAddr child(entry->GetNextAddress());

  Trace("Doom entry 0x%p", entry);

  if (!entry->doomed()) {
    // We may have doomed this entry from within MatchEntry.
    eviction_.OnDoomEntry(entry);
    entry->InternalDoom();
    if (!new_eviction_) {
      DecreaseNumEntries();
    }
    stats_.OnEvent(Stats::DOOM_ENTRY);
  }

  if (parent_entry) {
    parent_entry->SetNextAddress(Addr(child));
    parent_entry->Release();
  } else if (!error) {
    data_->table[hash & mask_] = child;
  }

  FlushIndex();
}

#if defined(NET_BUILD_STRESS_CACHE)

CacheAddr BackendImpl::GetNextAddr(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    EntryImpl* this_entry = it->second;
    return this_entry->GetNextAddress();
  }
  DCHECK(block_files_.IsValid(address));
  DCHECK(!address.is_separate_file() && address.file_type() == BLOCK_256);

  CacheEntryBlock entry(File(address), address);
  CHECK(entry.Load());
  return entry.Data()->next;
}

void BackendImpl::NotLinked(EntryImpl* entry) {
  Addr entry_addr = entry->entry()->address();
  uint32 i = entry->GetHash() & mask_;
  Addr address(data_->table[i]);
  if (!address.is_initialized())
    return;

  for (;;) {
    DCHECK(entry_addr.value() != address.value());
    address.set_value(GetNextAddr(address));
    if (!address.is_initialized())
      break;
  }
}

#endif  // NET_BUILD_STRESS_CACHE

// An entry may be linked on the DELETED list for a while after being doomed.
// This function is called when we want to remove it.
void BackendImpl::RemoveEntry(EntryImpl* entry) {
#if defined(NET_BUILD_STRESS_CACHE)
  NotLinked(entry);
#endif
  if (!new_eviction_)
    return;

  DCHECK_NE(ENTRY_NORMAL, entry->entry()->Data()->state);

  Trace("Remove entry 0x%p", entry);
  eviction_.OnDestroyEntry(entry);
  DecreaseNumEntries();
}

void BackendImpl::OnEntryDestroyBegin(Addr address) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end())
    open_entries_.erase(it);
}

void BackendImpl::OnEntryDestroyEnd() {
  DecreaseNumRefs();
  if (data_->header.num_bytes > max_size_ && !read_only_ &&
      (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
    eviction_.TrimCache(false);
}

EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
  DCHECK(rankings->HasData());
  EntriesMap::const_iterator it =
      open_entries_.find(rankings->Data()->contents);
  if (it != open_entries_.end()) {
    // We have this entry in memory.
    return it->second;
  }

  return NULL;
}

BackendImpl::GetCurrentEntryId() const {
866 return data_
->header
.this_id
;
int BackendImpl::MaxFileSize() const {
  return cache_type() == net::PNACL_CACHE ? max_size_ : max_size_ / 8;
}

void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
  if (disabled_ || old_size == new_size)
    return;
  if (old_size > new_size)
    SubstractStorageSize(old_size - new_size);
  else
    AddStorageSize(new_size - old_size);

  FlushIndex();

  // Update the usage statistics.
  stats_.ModifyStorageStats(old_size, new_size);
}

) {
888 stats_
.ModifyStorageStats(0, size
);
bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
  DCHECK_GT(new_size, current_size);
  if (user_flags_ & kNoBuffering)
    return false;

  int to_add = new_size - current_size;
  if (buffer_bytes_ + to_add > MaxBuffersSize())
    return false;

  buffer_bytes_ += to_add;
  CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
  return true;
}

void BackendImpl::BufferDeleted(int size) {
  buffer_bytes_ -= size;
  DCHECK_GE(buffer_bytes_, 0);
}

bool BackendImpl::IsLoaded() const {
  CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_);
  if (user_flags_ & kNoLoadProtection)
    return false;

  return (num_pending_io_ > 5 || user_load_);
}

std::string BackendImpl::HistogramName(const char* name, int experiment) const {
  if (!experiment)
    return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
  return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
                            name, experiment);
}

base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
  return ptr_factory_.GetWeakPtr();
}

// We want to remove biases from some histograms so we only send data once per
// week.
bool BackendImpl::ShouldReportAgain() {
  if (uma_report_)
    return uma_report_ == 2;

  uma_report_++;
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
  Time last_time = Time::FromInternalValue(last_report);
  if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
    stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
    uma_report_++;
    return true;
  }

  return false;
}

void BackendImpl::FirstEviction() {
  DCHECK(data_->header.create_time);
  if (!GetEntryCount())
    return;  // This is just for unit tests.

  Time create_time = Time::FromInternalValue(data_->header.create_time);
  CACHE_UMA(AGE, "FillupAge", 0, create_time);

  int64 use_time = stats_.GetCounter(Stats::TIMER);
  CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
  CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());

  if (!use_time)
    use_time = 1;
  CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
            static_cast<int>(data_->header.num_entries / use_time));
  CACHE_UMA(COUNTS, "FirstByteIORate", 0,
            static_cast<int>((data_->header.num_bytes / 1024) / use_time));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
}

void BackendImpl::CriticalError(int error) {
  STRESS_NOTREACHED();
  LOG(ERROR) << "Critical error found " << error;
  if (disabled_)
    return;

  stats_.OnEvent(Stats::FATAL_ERROR);
  LogStats();
  ReportError(error);

  // Setting the index table length to an invalid value will force re-creation
  // of the cache files.
  data_->header.table_len = 1;
  disabled_ = true;

  if (!num_refs_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::ReportError(int error) {
  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
                error == ERR_CACHE_CREATED);

  // We transmit positive numbers, instead of direct error codes.
  DCHECK_LE(error, 0);
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}

void BackendImpl::OnEvent(Stats::Counters an_event) {
  stats_.OnEvent(an_event);
}

void BackendImpl::OnRead(int32 bytes) {
  DCHECK_GE(bytes, 0);
  byte_count_ += bytes;
  if (byte_count_ < 0)
    byte_count_ = kint32max;
}

void BackendImpl::OnWrite(int32 bytes) {
  // We use the same implementation as OnRead... just log the number of bytes.
  OnRead(bytes);
}

void BackendImpl::OnStatsTimer() {
  stats_.OnEvent(Stats::TIMER);
  int64 time = stats_.GetCounter(Stats::TIMER);
  int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);

  // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
  // the bias towards 0.
  if (num_refs_ && (current != num_refs_)) {
    int64 diff = (num_refs_ - current) / 50;
    if (!diff)
      diff = num_refs_ > current ? 1 : -1;
    current = current + diff;
    stats_.SetCounter(Stats::OPEN_ENTRIES, current);
    stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
  }

  CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);

  CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
  CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);

  // These values cover about 99.5% of the population (Oct 2011).
  user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
  entry_count_ = 0;
  byte_count_ = 0;
  up_ticks_++;

  if (!data_)
    first_timer_ = false;
  if (first_timer_) {
    first_timer_ = false;
    if (ShouldReportAgain())
      ReportStats();
  }

  // Save stats to disk at 5 min intervals.
  if (time % 10 == 0)
    StoreStats();
}

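// Sampling note (illustrative numbers): OPEN_ENTRIES moves 1/50 of the gap
// toward the live count on each 30-second tick. With current == 0 and
// num_refs_ == 100 the counter advances by 2 per tick, so brief spikes barely
// register while sustained load shows through.
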
void BackendImpl::IncrementIoCount() {
  num_pending_io_++;
}

void BackendImpl::DecrementIoCount() {
  num_pending_io_--;
}

void BackendImpl::SetUnitTestMode() {
  user_flags_ |= kUnitTestMode;
  unit_test_ = true;
}

void BackendImpl::SetUpgradeMode() {
  user_flags_ |= kUpgradeMode;
  read_only_ = true;
}

void BackendImpl::SetNewEviction() {
  user_flags_ |= kNewEviction;
  new_eviction_ = true;
}

void BackendImpl::SetFlags(uint32 flags) {
  user_flags_ |= flags;
}

void BackendImpl::ClearRefCountForTest() {
  num_refs_ = 0;
}

int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) {
  background_queue_.FlushQueue(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::RunTaskForTest(const base::Closure& task,
                                const CompletionCallback& callback) {
  background_queue_.RunTask(task, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::TrimForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimCache(empty);
}

void BackendImpl::TrimDeletedListForTest(bool empty) {
  eviction_.SetTestMode();
  eviction_.TrimDeletedList(empty);
}

int BackendImpl::SelfCheck() {
  if (!init_) {
    LOG(ERROR) << "Init failed";
    return ERR_INIT_FAILED;
  }

  int num_entries = rankings_.SelfCheck();
  if (num_entries < 0) {
    LOG(ERROR) << "Invalid rankings list, error " << num_entries;
#if !defined(NET_BUILD_STRESS_CACHE)
    return num_entries;
#endif
  }

  if (num_entries != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries mismatch";
#if !defined(NET_BUILD_STRESS_CACHE)
    return ERR_NUM_ENTRIES_MISMATCH;
#endif
  }

  return CheckAllEntries();
}

void BackendImpl::FlushIndex() {
  if (index_.get() && !disabled_)
    index_->Flush();
}

// ------------------------------------------------------------------------

net::CacheType BackendImpl::GetCacheType() const {
  return cache_type_;
}

BackendImpl::GetEntryCount() const {
1158 if (!index_
.get() || disabled_
)
1160 // num_entries includes entries already evicted.
1161 int32 not_deleted
= data_
->header
.num_entries
-
1162 data_
->header
.lru
.sizes
[Rankings::DELETED
];
1164 if (not_deleted
< 0) {
int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
                             const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.CreateEntry(key, entry, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntry(const std::string& key,
                           const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntry(key, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomAllEntries(callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
                                    const base::Time end_time,
                                    const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::DoomEntriesSince(const base::Time initial_time,
                                  const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.DoomEntriesSince(initial_time, callback);
  return net::ERR_IO_PENDING;
}

int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
                               const CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  background_queue_.OpenNextEntry(iter, next_entry, callback);
  return net::ERR_IO_PENDING;
}

void BackendImpl::EndEnumeration(void** iter) {
  background_queue_.EndEnumeration(*iter);
  *iter = NULL;
}

* stats
) {
1230 std::pair
<std::string
, std::string
> item
;
1232 item
.first
= "Entries";
1233 item
.second
= base::StringPrintf("%d", data_
->header
.num_entries
);
1234 stats
->push_back(item
);
1236 item
.first
= "Pending IO";
1237 item
.second
= base::StringPrintf("%d", num_pending_io_
);
1238 stats
->push_back(item
);
1240 item
.first
= "Max size";
1241 item
.second
= base::StringPrintf("%d", max_size_
);
1242 stats
->push_back(item
);
1244 item
.first
= "Current size";
1245 item
.second
= base::StringPrintf("%d", data_
->header
.num_bytes
);
1246 stats
->push_back(item
);
1248 item
.first
= "Cache type";
1249 item
.second
= "Blockfile Cache";
1250 stats
->push_back(item
);
1252 stats_
.GetItems(stats
);
void BackendImpl::OnExternalCacheHit(const std::string& key) {
  background_queue_.OnExternalCacheHit(key);
}

// ------------------------------------------------------------------------

// We just created a new file so we're going to write the header and set the
// file length to include the hash table (zero filled).
bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
  AdjustMaxCacheSize(0);

  IndexHeader header;
  header.table_len = DesiredIndexTableLen(max_size_);

  // We need file version 2.1 for the new eviction algorithm.
  if (new_eviction_)
    header.version = 0x20001;

  header.create_time = Time::Now().ToInternalValue();

  if (!file->Write(&header, sizeof(header), 0))
    return false;

  return file->SetLength(GetIndexSize(header.table_len));
}

bool BackendImpl::InitBackingStore(bool* file_created) {
  if (!file_util::CreateDirectory(path_))
    return false;

  base::FilePath index_name = path_.AppendASCII(kIndexName);

  int flags = base::PLATFORM_FILE_READ |
              base::PLATFORM_FILE_WRITE |
              base::PLATFORM_FILE_OPEN_ALWAYS |
              base::PLATFORM_FILE_EXCLUSIVE_WRITE;
  scoped_refptr<disk_cache::File> file(new disk_cache::File(
      base::CreatePlatformFile(index_name, flags, file_created, NULL)));

  if (!file->IsValid())
    return false;

  bool ret = true;
  if (*file_created)
    ret = CreateBackingStore(file.get());

  file = NULL;
  if (!ret)
    return false;

  index_ = new MappedFile();
  data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
  if (!data_) {
    LOG(ERROR) << "Unable to map Index file";
    return false;
  }

  if (index_->GetLength() < sizeof(Index)) {
    // We verify this again on CheckIndex() but it's easier to make sure now
    // that the header is there.
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  return true;
}

// The maximum cache size will be either set explicitly by the caller, or
// calculated by this code.
void BackendImpl::AdjustMaxCacheSize(int table_len) {
  if (max_size_)
    return;

  // If table_len is provided, the index file exists.
  DCHECK(!table_len || data_->header.magic);

  // The user is not setting the size, let's figure it out.
  int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
  if (available < 0) {
    max_size_ = kDefaultCacheSize;
    return;
  }

  if (table_len)
    available += data_->header.num_bytes;

  max_size_ = PreferedCacheSize(available);

  // Let's not use more than the default size while we tune-up the performance
  // of bigger caches. TODO(rvargas): remove this limit.
  if (max_size_ > kDefaultCacheSize * 4)
    max_size_ = kDefaultCacheSize * 4;

  if (!table_len)
    return;

  // If we already have a table, adjust the size to it.
  int current_max_size = MaxStorageSizeForTable(table_len);
  if (max_size_ > current_max_size)
    max_size_ = current_max_size;
}

bool BackendImpl::InitStats() {
  Addr address(data_->header.stats);
  int size = stats_.StorageSize();

  if (!address.is_initialized()) {
    FileType file_type = Addr::RequiredFileType(size);
    DCHECK_NE(file_type, EXTERNAL);
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!CreateBlock(file_type, num_blocks, &address))
      return false;

    data_->header.stats = address.value();
    return stats_.Init(NULL, 0, address);
  }

  if (!address.is_block_file()) {
    NOTREACHED();
    return false;
  }

  // Load the required data.
  size = address.num_blocks() * address.BlockSize();
  MappedFile* file = File(address);
  if (!file)
    return false;

  scoped_ptr<char[]> data(new char[size]);
  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  if (!file->Read(data.get(), size, offset))
    return false;

  if (!stats_.Init(data.get(), size, address))
    return false;
  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    stats_.InitSizeHistogram();
  return true;
}

1398 int size
= stats_
.StorageSize();
1399 scoped_ptr
<char[]> data(new char[size
]);
1401 size
= stats_
.SerializeStats(data
.get(), size
, &address
);
1403 if (!address
.is_initialized())
1406 MappedFile
* file
= File(address
);
1410 size_t offset
= address
.start_block() * address
.BlockSize() +
1412 file
->Write(data
.get(), size
, offset
); // ignore result.
void BackendImpl::RestartCache(bool failure) {
  int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
  int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
  int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);

  PrepareForRestart();
  if (failure) {
    DCHECK(!num_refs_);
    DCHECK(!open_entries_.size());
    DelayedCacheCleanup(path_);
  } else {
    DeleteCache(path_, false);
  }

  // Don't call Init() if directed by the unit test: we are simulating a failure
  // trying to re-enable the cache.
  if (unit_test_)
    init_ = true;  // Let the destructor do proper cleanup.
  else if (SyncInit() == net::OK) {
    stats_.SetCounter(Stats::FATAL_ERROR, errors);
    stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
    stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
    stats_.SetCounter(Stats::LAST_REPORT, last_report);
  }
}

void BackendImpl::PrepareForRestart() {
  // Reset the mask_ if it was not given by the user.
  if (!(user_flags_ & kMask))
    mask_ = 0;

  if (!(user_flags_ & kNewEviction))
    new_eviction_ = false;

  disabled_ = true;
  data_->header.crash = 0;
  index_->Flush();
  index_ = NULL;
  data_ = NULL;
  block_files_.CloseFiles();
  rankings_.Reset();
  init_ = false;
  restarted_ = true;
}

int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
  EntriesMap::iterator it = open_entries_.find(address.value());
  if (it != open_entries_.end()) {
    // Easy job. This entry is already in memory.
    EntryImpl* this_entry = it->second;
    this_entry->AddRef();
    *entry = this_entry;
    return 0;
  }

  STRESS_DCHECK(block_files_.IsValid(address));

  if (!address.SanityCheckForEntryV2()) {
    LOG(WARNING) << "Wrong entry address.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ADDRESS;
  }

  scoped_refptr<EntryImpl> cache_entry(
      new EntryImpl(this, address, read_only_));
  IncreaseNumRefs();
  *entry = NULL;

  TimeTicks start = TimeTicks::Now();
  if (!cache_entry->entry()->Load())
    return ERR_READ_FAILURE;

  if (IsLoaded()) {
    CACHE_UMA(AGE_MS, "LoadTime", 0, start);
  }

  if (!cache_entry->SanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    STRESS_NOTREACHED();
    return ERR_INVALID_ENTRY;
  }

  STRESS_DCHECK(block_files_.IsValid(
                    Addr(cache_entry->entry()->Data()->rankings_node)));

  if (!cache_entry->LoadNodeAddress())
    return ERR_READ_FAILURE;

  if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    // Don't remove this from the list (it is not linked properly). Instead,
    // break the link back to the entry because it is going away, and leave the
    // rankings node to be deleted if we find it through a list.
    rankings_.SetContents(cache_entry->rankings(), 0);
  } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
    STRESS_NOTREACHED();
    cache_entry->SetDirtyFlag(0);
    rankings_.SetContents(cache_entry->rankings(), address.value());
  }

  if (!cache_entry->DataSanityCheck()) {
    LOG(WARNING) << "Messed up entry found.";
    cache_entry->SetDirtyFlag(0);
    cache_entry->FixForDelete();
  }

  // Prevent overwriting the dirty flag on the destructor.
  cache_entry->SetDirtyFlag(GetCurrentEntryId());

  if (cache_entry->dirty()) {
    Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
          address.value());
  }

  open_entries_[address.value()] = cache_entry.get();

  cache_entry->BeginLogging(net_log_, false);
  cache_entry.swap(entry);
  return 0;
}

EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
                                   bool find_parent, Addr entry_addr,
                                   bool* match_error) {
  Addr address(data_->table[hash & mask_]);
  scoped_refptr<EntryImpl> cache_entry, parent_entry;
  EntryImpl* tmp = NULL;
  bool found = false;
  std::set<CacheAddr> visited;
  *match_error = false;

  for (;;) {
    if (disabled_)
      break;

    if (visited.find(address.value()) != visited.end()) {
      // It's possible for a buggy version of the code to write a loop. Just
      // break it.
      Trace("Hash collision loop 0x%x", address.value());
      address.set_value(0);
      parent_entry->SetNextAddress(address);
    }
    visited.insert(address.value());

    if (!address.is_initialized()) {
      if (find_parent)
        found = true;
      break;
    }

    int error = NewEntry(address, &tmp);
    cache_entry.swap(&tmp);

    if (error || cache_entry->dirty()) {
      // This entry is dirty on disk (it was not properly closed): we cannot
      // trust it.
      Addr child(0);
      if (!error)
        child.set_value(cache_entry->GetNextAddress());

      if (parent_entry.get()) {
        parent_entry->SetNextAddress(child);
        parent_entry = NULL;
      } else {
        data_->table[hash & mask_] = child.value();
      }

      Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(),
            address.value());

      if (!error) {
        // It is important to call DestroyInvalidEntry after removing this
        // entry from the table.
        DestroyInvalidEntry(cache_entry.get());
        cache_entry = NULL;
      } else {
        Trace("NewEntry failed on MatchEntry 0x%x", address.value());
      }

      // Restart the search.
      address.set_value(data_->table[hash & mask_]);
      visited.clear();
      continue;
    }

    DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_);
    if (cache_entry->IsSameEntry(key, hash)) {
      if (!cache_entry->Update())
        cache_entry = NULL;
      found = true;
      if (find_parent && entry_addr.value() != address.value()) {
        Trace("Entry not on the index 0x%x", address.value());
        *match_error = true;
        parent_entry = NULL;
      }
      break;
    }
    if (!cache_entry->Update())
      cache_entry = NULL;
    parent_entry = cache_entry;
    cache_entry = NULL;
    if (!parent_entry.get())
      break;

    address.set_value(parent_entry->GetNextAddress());
  }

  if (parent_entry.get() && (!find_parent || !found))
    parent_entry = NULL;

  if (find_parent && entry_addr.is_initialized() && !cache_entry.get()) {
    *match_error = true;
    parent_entry = NULL;
  }

  if (cache_entry.get() && (find_parent || !found))
    cache_entry = NULL;

  find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
  FlushIndex();
  return tmp;
}

// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
  if (disabled_)
    return NULL;

  DCHECK(iter);

  const int kListsToSearch = 3;
  scoped_refptr<EntryImpl> entries[kListsToSearch];
  scoped_ptr<Rankings::Iterator> iterator(
      reinterpret_cast<Rankings::Iterator*>(*iter));
  *iter = NULL;

  if (!iterator.get()) {
    iterator.reset(new Rankings::Iterator(&rankings_));
    bool ret = false;

    // Get an entry from each list.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
                                        &iterator->nodes[i], &temp);
      entries[i].swap(&temp);  // The entry was already addref'd.
    }
    if (!ret)
      return NULL;
  } else {
    // Get the next entry from the last list, and the actual entries for the
    // elements on the other lists.
    for (int i = 0; i < kListsToSearch; i++) {
      EntryImpl* temp = NULL;
      if (iterator->list == i) {
        OpenFollowingEntryFromList(forward, iterator->list,
                                   &iterator->nodes[i], &temp);
      } else {
        temp = GetEnumeratedEntry(iterator->nodes[i],
                                  static_cast<Rankings::List>(i));
      }

      entries[i].swap(&temp);  // The entry was already addref'd.
    }
  }

  int newest = -1;
  int oldest = -1;
  Time access_times[kListsToSearch];
  for (int i = 0; i < kListsToSearch; i++) {
    if (entries[i].get()) {
      access_times[i] = entries[i]->GetLastUsed();
      if (newest < 0) {
        DCHECK_LT(oldest, 0);
        newest = oldest = i;
        continue;
      }
      if (access_times[i] > access_times[newest])
        newest = i;
      if (access_times[i] < access_times[oldest])
        oldest = i;
    }
  }

  if (newest < 0 || oldest < 0)
    return NULL;

  EntryImpl* next_entry;
  if (forward) {
    next_entry = entries[newest].get();
    iterator->list = static_cast<Rankings::List>(newest);
  } else {
    next_entry = entries[oldest].get();
    iterator->list = static_cast<Rankings::List>(oldest);
  }

  *iter = iterator.release();
  next_entry->AddRef();
  return next_entry;
}

bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
                                             CacheRankingsBlock** from_entry,
                                             EntryImpl** next_entry) {
  if (disabled_)
    return false;

  if (!new_eviction_ && Rankings::NO_USE != list)
    return false;

  Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
  CacheRankingsBlock* next_block = forward ?
      rankings_.GetNext(rankings.get(), list) :
      rankings_.GetPrev(rankings.get(), list);
  Rankings::ScopedRankingsBlock next(&rankings_, next_block);
  *from_entry = NULL;

  *next_entry = GetEnumeratedEntry(next.get(), list);
  if (!*next_entry)
    return false;

  *from_entry = next.release();
  return true;
}

EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
                                           Rankings::List list) {
  if (!next || disabled_)
    return NULL;

  EntryImpl* entry;
  int rv = NewEntry(Addr(next->Data()->contents), &entry);
  if (rv) {
    STRESS_NOTREACHED();
    rankings_.Remove(next, list, false);
    if (rv == ERR_INVALID_ADDRESS) {
      // There is nothing linked from the index. Delete the rankings node.
      DeleteBlock(next->address(), true);
    }
    return NULL;
  }

  if (entry->dirty()) {
    // We cannot trust this entry.
    InternalDoomEntry(entry);
    entry->Release();
    return NULL;
  }

  if (!entry->Update()) {
    STRESS_NOTREACHED();
    entry->Release();
    return NULL;
  }

  // Note that it is unfortunate (but possible) for this entry to be clean, but
  // not actually the real entry. In other words, we could have lost this entry
  // from the index, and it could have been replaced with a newer one. It's not
  // worth checking that this entry is "the real one", so we just return it and
  // let the enumeration continue; this entry will be evicted at some point, and
  // the regular path will work with the real entry. With time, this problem
  // will disappear because this scenario is just a bug.

  // Make sure that we save the key for later.
  entry->GetKey();

  return entry;
}

EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) {
  if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
    deleted_entry->Release();
    stats_.OnEvent(Stats::CREATE_MISS);
    Trace("create entry miss ");
    return NULL;
  }

  // We are attempting to create an entry and found out that the entry was
  // previously deleted.

  eviction_.OnCreateEntry(deleted_entry);
  entry_count_++;

  stats_.OnEvent(Stats::RESURRECT_HIT);
  Trace("Resurrect entry hit ");
  return deleted_entry;
}

void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
  LOG(WARNING) << "Destroying invalid entry.";
  Trace("Destroying invalid entry 0x%p", entry);

  entry->SetPointerForInvalidEntry(GetCurrentEntryId());

  eviction_.OnDoomEntry(entry);
  entry->InternalDoom();

  if (!new_eviction_)
    DecreaseNumEntries();
  stats_.OnEvent(Stats::INVALID_ENTRY);
}

void BackendImpl::AddStorageSize(int32 bytes) {
  data_->header.num_bytes += bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::SubstractStorageSize(int32 bytes) {
  data_->header.num_bytes -= bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::IncreaseNumRefs() {
  num_refs_++;
  if (max_refs_ < num_refs_)
    max_refs_ = num_refs_;
}

void BackendImpl::DecreaseNumRefs() {
  DCHECK(num_refs_);
  num_refs_--;

  if (!num_refs_ && disabled_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::IncreaseNumEntries() {
  data_->header.num_entries++;
  DCHECK_GT(data_->header.num_entries, 0);
}

void BackendImpl::DecreaseNumEntries() {
  data_->header.num_entries--;
  if (data_->header.num_entries < 0) {
    NOTREACHED();
    data_->header.num_entries = 0;
  }
}

void BackendImpl::LogStats() {
  StatsItems stats;
  GetStats(&stats);

  for (size_t index = 0; index < stats.size(); index++)
    VLOG(1) << stats[index].first << ": " << stats[index].second;
}

void BackendImpl::ReportStats() {
  CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int max_size = max_size_ / (1024 * 1024);
  int hit_ratio_as_percentage = stats_.GetHitRatio();

  CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
  // For any bin in HitRatioBySize2, the hit ratio of caches of that size is the
  // ratio of that bin's total count to the count in the same bin in the Size2
  // histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(COUNTS_10000, "HitRatioBySize2", 0, current_size);
  CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
  if (!max_size)
    max_size++;
  CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);

  CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
  CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
  stats_.SetCounter(Stats::MAX_ENTRIES, 0);

  CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
            static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
  CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
  CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
  stats_.SetCounter(Stats::FATAL_ERROR, 0);
  stats_.SetCounter(Stats::DOOM_CACHE, 0);
  stats_.SetCounter(Stats::DOOM_RECENT, 0);

  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  if (!data_->header.create_time || !data_->header.lru.filled) {
    int cause = data_->header.create_time ? 0 : 1;
    if (!data_->header.lru.filled)
      cause |= 2;
    CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
    CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
    return;
  }

  // This is an up to date client that will report FirstEviction() data. After
  // that event, start reporting this:

  CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
  // For any bin in HitRatioByTotalTime, the hit ratio of caches of that total
  // time is the ratio of that bin's total count to the count in the same bin in
  // the TotalTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, implicit_cast<int>(total_hours));

  int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));

  // We may see users with no use_hours at this point if this is the first time
  // we are running this code.
  if (use_hours)
    use_hours = total_hours - use_hours;

  if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
    return;

  CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
  // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time
  // is the ratio of that bin's total count to the count in the same bin in the
  // UseTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByUseTime", 0, implicit_cast<int>(use_hours));
  CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage);

  int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
  CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
  CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);

  CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
            data_->header.num_entries * 100 / (mask_ + 1));

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
              data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
  stats_.SetCounter(Stats::TRIM_ENTRY, 0);

  if (cache_type_ == net::DISK_CACHE)
    block_files_.ReportStats();
}

void BackendImpl::UpgradeTo2_1() {
  // 2.1 is basically the same as 2.0, except that new fields are actually
  // updated by the new eviction algorithm.
  DCHECK(0x20000 == data_->header.version);
  data_->header.version = 0x20001;
  data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
}

bool BackendImpl::CheckIndex() {
  DCHECK(data_);

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (new_eviction_) {
    // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion >> 16 != data_->header.version >> 16) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
    if (kCurrentVersion == data_->header.version) {
      // We need file version 2.1 for the new eviction algorithm.
      UpgradeTo2_1();
    }
  } else {
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion != data_->header.version) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (data_->header.num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = data_->header.table_len - 1;

  // Load the table into memory with a single read.
  scoped_ptr<char[]> buf(new char[current_size]);
  return index_->Read(buf.get(), current_size, 0);
}

int BackendImpl::CheckAllEntries() {
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < kuint32max);
  for (unsigned int i = 0; i <= mask_; i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
    for (;;) {
      EntryImpl* tmp;
      int ret = NewEntry(address, &tmp);
      if (ret) {
        STRESS_NOTREACHED();
        return ret;
      }
      scoped_refptr<EntryImpl> cache_entry;
      cache_entry.swap(&tmp);

      if (cache_entry->dirty())
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  Trace("CheckAllEntries End");
  if (num_entries + num_dirty != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
                  " " << data_->header.num_entries;
    DCHECK_LT(num_entries, data_->header.num_entries);
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_entries + num_dirty;
}

* cache_entry
) {
2086 bool ok
= block_files_
.IsValid(cache_entry
->entry()->address());
2087 ok
= ok
&& block_files_
.IsValid(cache_entry
->rankings()->address());
2088 EntryStore
* data
= cache_entry
->entry()->Data();
2089 for (size_t i
= 0; i
< arraysize(data
->data_addr
); i
++) {
2090 if (data
->data_addr
[i
]) {
2091 Addr
address(data
->data_addr
[i
]);
2092 if (address
.is_block_file())
2093 ok
= ok
&& block_files_
.IsValid(address
);
2097 return ok
&& cache_entry
->rankings()->VerifyHash();
int BackendImpl::MaxBuffersSize() {
  static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
  static bool done = false;

  if (!done) {
    const int kMaxBuffersSize = 30 * 1024 * 1024;

    // We want to use up to 2% of the computer's memory.
    total_memory = total_memory * 2 / 100;
    if (total_memory > kMaxBuffersSize || total_memory <= 0)
      total_memory = kMaxBuffersSize;

    done = true;
  }

  return static_cast<int>(total_memory);
}

}  // namespace disk_cache