// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/http/mock_http_cache.h"

#include "base/bind.h"
#include "base/message_loop/message_loop.h"
#include "net/base/completion_callback.h"
#include "net/base/net_errors.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {

// We can override the test mode for a given operation by setting this global
// variable.
int g_test_mode = 0;

int GetTestModeForEntry(const std::string& key) {
  // 'key' is prefixed with an identifier if it corresponds to a cached POST.
  // Skip past that to locate the actual URL.
  //
  // TODO(darin): It breaks the abstraction a bit that we assume 'key' is an
  // URL corresponding to a registered MockTransaction.  It would be good to
  // have another way to access the test_mode.
  GURL url;
  if (isdigit(key[0])) {
    size_t slash = key.find('/');
    DCHECK(slash != std::string::npos);
    url = GURL(key.substr(slash + 1));
  } else {
    url = GURL(key);
  }
  const MockTransaction* t = FindMockTransaction(url);
  DCHECK(t);
  return t->test_mode;
}
void CallbackForwader(const net::CompletionCallback& callback, int result) {
  callback.Run(result);
}

}  // namespace

//-----------------------------------------------------------------------------
struct MockDiskEntry::CallbackInfo {
  scoped_refptr<MockDiskEntry> entry;
  net::CompletionCallback callback;
  int result;
};
MockDiskEntry::MockDiskEntry(const std::string& key)
    : key_(key), doomed_(false), sparse_(false),
      fail_requests_(false), fail_sparse_requests_(false), busy_(false),
      delayed_(false) {
  test_mode_ = GetTestModeForEntry(key);
}
void MockDiskEntry::Doom() {
  doomed_ = true;
}

void MockDiskEntry::Close() {
  Release();
}

std::string MockDiskEntry::GetKey() const {
  return key_;
}

base::Time MockDiskEntry::GetLastUsed() const {
  return base::Time::FromInternalValue(0);
}

base::Time MockDiskEntry::GetLastModified() const {
  return base::Time::FromInternalValue(0);
}
int32 MockDiskEntry::GetDataSize(int index) const {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  return static_cast<int32>(data_[index].size());
}
int MockDiskEntry::ReadData(
    int index, int offset, net::IOBuffer* buf, int buf_len,
    const net::CompletionCallback& callback) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());

  if (fail_requests_)
    return net::ERR_CACHE_READ_FAILURE;

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return net::ERR_FAILED;
  if (static_cast<size_t>(offset) == data_[index].size())
    return 0;

  int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset);
  memcpy(buf->data(), &data_[index][offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  CallbackLater(callback, num);
  return net::ERR_IO_PENDING;
}
int MockDiskEntry::WriteData(
    int index, int offset, net::IOBuffer* buf, int buf_len,
    const net::CompletionCallback& callback, bool truncate) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());
  DCHECK(truncate);

  if (fail_requests_) {
    CallbackLater(callback, net::ERR_CACHE_READ_FAILURE);
    return net::ERR_IO_PENDING;
  }

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return net::ERR_FAILED;

  data_[index].resize(offset + buf_len);
  if (buf_len)
    memcpy(&data_[index][offset], buf->data(), buf_len);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return net::ERR_IO_PENDING;
}
int MockDiskEntry::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
                                  const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return net::ERR_NOT_IMPLEMENTED;
  if (!sparse_ || busy_)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return net::ERR_FAILED;

  if (fail_requests_)
    return net::ERR_CACHE_READ_FAILURE;

  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (!buf_len)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset,
                     buf_len);
  memcpy(buf->data(), &data_[1][real_offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  CallbackLater(callback, num);
  busy_ = true;
  delayed_ = false;
  return net::ERR_IO_PENDING;
}
int MockDiskEntry::WriteSparseData(int64 offset, net::IOBuffer* buf,
                                   int buf_len,
                                   const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return net::ERR_NOT_IMPLEMENTED;
  if (busy_)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (!sparse_) {
    if (data_[1].size())
      return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
    sparse_ = true;
  }
  if (offset < 0)
    return net::ERR_FAILED;
  if (!buf_len)
    return 0;

  if (fail_requests_)
    return net::ERR_CACHE_READ_FAILURE;

  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);

  if (static_cast<int>(data_[1].size()) < real_offset + buf_len)
    data_[1].resize(real_offset + buf_len);

  memcpy(&data_[1][real_offset], buf->data(), buf_len);
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return net::ERR_IO_PENDING;
}
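// GetAvailableRange reports the first contiguous run of non-zero bytes in
// stream 1 at or after |offset|: |*start| receives the beginning of the run
// and the result is its length in bytes (0 if the requested range holds no
// data). The mock keeps "sparse" data in one flat buffer, so availability is
// simply a question of which bytes are non-zero.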
int MockDiskEntry::GetAvailableRange(int64 offset, int len, int64* start,
                                     const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (!sparse_ || busy_)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return net::ERR_FAILED;

  if (fail_requests_)
    return net::ERR_CACHE_READ_FAILURE;

  *start = offset;
  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (static_cast<int>(data_[1].size()) < real_offset)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
  int count = 0;
  for (; num > 0; num--, real_offset++) {
    if (!count) {
      if (data_[1][real_offset]) {
        count++;
        *start = real_offset;
      }
    } else {
      if (!data_[1][real_offset])
        break;
      count++;
    }
  }
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return count;

  CallbackLater(callback, count);
  return net::ERR_IO_PENDING;
}
bool MockDiskEntry::CouldBeSparse() const {
  if (fail_sparse_requests_)
    return false;
  return sparse_;
}

void MockDiskEntry::CancelSparseIO() {
  cancel_ = true;
}
int MockDiskEntry::ReadyForSparseIO(const net::CompletionCallback& callback) {
  if (fail_sparse_requests_)
    return net::ERR_NOT_IMPLEMENTED;
  if (!cancel_)
    return net::OK;

  cancel_ = false;
  DCHECK(!callback.is_null());
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return net::OK;

  // The pending operation is already in the message loop (and hopefully
  // already in the second pass).  Just notify the caller that it finished.
  CallbackLater(callback, 0);
  return net::ERR_IO_PENDING;
}
// If |value| is true, don't deliver any completion callbacks until called
// again with |value| set to false.  Caution: remember to enable callbacks
// again or all subsequent tests will fail.
// Static.
void MockDiskEntry::IgnoreCallbacks(bool value) {
  if (ignore_callbacks_ == value)
    return;
  ignore_callbacks_ = value;
  if (!value)
    StoreAndDeliverCallbacks(false, NULL, net::CompletionCallback(), 0);
}

MockDiskEntry::~MockDiskEntry() {
}
// Unlike the callbacks for MockHttpTransaction, we want this one to run even
// if the consumer called Close on the MockDiskEntry.  We achieve that by
// leveraging the fact that this class is reference counted.
void MockDiskEntry::CallbackLater(const net::CompletionCallback& callback,
                                  int result) {
  if (ignore_callbacks_)
    return StoreAndDeliverCallbacks(true, this, callback, result);
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&MockDiskEntry::RunCallback, this, callback, result));
}
void MockDiskEntry::RunCallback(
    const net::CompletionCallback& callback, int result) {
  if (busy_) {
    // This is kind of hacky, but controlling the behavior of just this entry
    // from a test is sort of complicated.  What we really want to do is
    // delay the delivery of a sparse IO operation a little more so that the
    // request start operation (async) will finish without seeing the end of
    // this operation (already posted to the message loop)... and without
    // just delaying for n mS (which may cause trouble with slow bots).  So
    // we re-post this operation (all async sparse IO operations will take two
    // trips through the message loop instead of one).
    if (!delayed_) {
      delayed_ = true;
      return CallbackLater(callback, result);
    }
  }
  busy_ = false;
  callback.Run(result);
}
// When |store| is true, stores the callback to be delivered later; otherwise
// delivers any callback previously stored.
// Static.
void MockDiskEntry::StoreAndDeliverCallbacks(
    bool store, MockDiskEntry* entry, const net::CompletionCallback& callback,
    int result) {
  static std::vector<CallbackInfo> callback_list;
  if (store) {
    CallbackInfo c = {entry, callback, result};
    callback_list.push_back(c);
  } else {
    for (size_t i = 0; i < callback_list.size(); i++) {
      CallbackInfo& c = callback_list[i];
      c.entry->CallbackLater(c.callback, c.result);
    }
    callback_list.clear();
  }
}

// Statics.
bool MockDiskEntry::cancel_ = false;
bool MockDiskEntry::ignore_callbacks_ = false;
//-----------------------------------------------------------------------------

MockDiskCache::MockDiskCache()
    : open_count_(0), create_count_(0), fail_requests_(false),
      soft_failures_(false), double_create_check_(true),
      fail_sparse_requests_(false) {
}

MockDiskCache::~MockDiskCache() {
  ReleaseAll();
}
net::CacheType MockDiskCache::GetCacheType() const {
  return net::DISK_CACHE;
}

int32 MockDiskCache::GetEntryCount() const {
  return static_cast<int32>(entries_.size());
}
int MockDiskCache::OpenEntry(const std::string& key, disk_cache::Entry** entry,
                             const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return net::ERR_CACHE_OPEN_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it == entries_.end())
    return net::ERR_CACHE_OPEN_FAILURE;

  if (it->second->is_doomed()) {
    it->second->Release();
    entries_.erase(it);
    return net::ERR_CACHE_OPEN_FAILURE;
  }

  open_count_++;

  it->second->AddRef();
  *entry = it->second;

  if (soft_failures_)
    it->second->set_fail_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return net::OK;

  CallbackLater(callback, net::OK);
  return net::ERR_IO_PENDING;
}
int MockDiskCache::CreateEntry(const std::string& key,
                               disk_cache::Entry** entry,
                               const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return net::ERR_CACHE_CREATE_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    if (!it->second->is_doomed()) {
      if (double_create_check_)
        NOTREACHED();
      else
        return net::ERR_CACHE_CREATE_FAILURE;
    }
    it->second->Release();
    entries_.erase(it);
  }

  create_count_++;

  MockDiskEntry* new_entry = new MockDiskEntry(key);

  new_entry->AddRef();
  entries_[key] = new_entry;

  new_entry->AddRef();
  *entry = new_entry;

  if (soft_failures_)
    new_entry->set_fail_requests();

  if (fail_sparse_requests_)
    new_entry->set_fail_sparse_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return net::OK;

  CallbackLater(callback, net::OK);
  return net::ERR_IO_PENDING;
}
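// Ownership note: CreateEntry takes two references on the new MockDiskEntry,
// one held by |entries_| (dropped by DoomEntry/ReleaseAll) and one handed to
// the caller through |*entry| (dropped by Close, which simply calls Release).
// This mirrors the real disk cache, where an entry stays alive while either
// the backend or a consumer still holds it.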
int MockDiskCache::DoomEntry(const std::string& key,
                             const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    it->second->Release();
    entries_.erase(it);
  }

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return net::OK;

  CallbackLater(callback, net::OK);
  return net::ERR_IO_PENDING;
}
int MockDiskCache::DoomAllEntries(const net::CompletionCallback& callback) {
  return net::ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesBetween(const base::Time initial_time,
                                      const base::Time end_time,
                                      const net::CompletionCallback& callback) {
  return net::ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesSince(const base::Time initial_time,
                                    const net::CompletionCallback& callback) {
  return net::ERR_NOT_IMPLEMENTED;
}
class MockDiskCache::NotImplementedIterator : public Iterator {
 public:
  int OpenNextEntry(disk_cache::Entry** next_entry,
                    const net::CompletionCallback& callback) override {
    return net::ERR_NOT_IMPLEMENTED;
  }
};

scoped_ptr<disk_cache::Backend::Iterator> MockDiskCache::CreateIterator() {
  return scoped_ptr<Iterator>(new NotImplementedIterator());
}
void MockDiskCache::GetStats(
    std::vector<std::pair<std::string, std::string> >* stats) {
}

void MockDiskCache::OnExternalCacheHit(const std::string& key) {
}
void MockDiskCache::ReleaseAll() {
  EntryMap::iterator it = entries_.begin();
  for (; it != entries_.end(); ++it)
    it->second->Release();
  entries_.clear();
}
void MockDiskCache::CallbackLater(const net::CompletionCallback& callback,
                                  int result) {
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&CallbackForwader, callback, result));
}
//-----------------------------------------------------------------------------

int MockBackendFactory::CreateBackend(net::NetLog* net_log,
                                      scoped_ptr<disk_cache::Backend>* backend,
                                      const net::CompletionCallback& callback) {
  backend->reset(new MockDiskCache());
  return net::OK;
}
//-----------------------------------------------------------------------------

MockHttpCache::MockHttpCache()
    : http_cache_(new MockNetworkLayer(), NULL, new MockBackendFactory()) {
}

MockHttpCache::MockHttpCache(net::HttpCache::BackendFactory* disk_cache_factory)
    : http_cache_(new MockNetworkLayer(), NULL, disk_cache_factory) {
}
disk_cache::Backend* MockHttpCache::backend() {
  net::TestCompletionCallback cb;
  disk_cache::Backend* backend;
  int rv = http_cache_.GetBackend(&backend, cb.callback());
  rv = cb.GetResult(rv);
  return (rv == net::OK) ? backend : NULL;
}

MockDiskCache* MockHttpCache::disk_cache() {
  return static_cast<MockDiskCache*>(backend());
}
int MockHttpCache::CreateTransaction(scoped_ptr<net::HttpTransaction>* trans) {
  return http_cache_.CreateTransaction(net::DEFAULT_PRIORITY, trans);
}

void MockHttpCache::BypassCacheLock() {
  http_cache_.BypassLockForTest();
}

void MockHttpCache::FailConditionalizations() {
  http_cache_.FailConditionalizationForTest();
}
bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry,
                                     net::HttpResponseInfo* response_info,
                                     bool* response_truncated) {
  int size = disk_entry->GetDataSize(0);

  net::TestCompletionCallback cb;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
  int rv = disk_entry->ReadData(0, 0, buffer.get(), size, cb.callback());
  rv = cb.GetResult(rv);
  EXPECT_EQ(size, rv);

  return net::HttpCache::ParseResponseInfo(buffer->data(), size,
                                           response_info,
                                           response_truncated);
}
bool MockHttpCache::WriteResponseInfo(
    disk_cache::Entry* disk_entry, const net::HttpResponseInfo* response_info,
    bool skip_transient_headers, bool response_truncated) {
  Pickle pickle;
  response_info->Persist(
      &pickle, skip_transient_headers, response_truncated);

  net::TestCompletionCallback cb;
  scoped_refptr<net::WrappedIOBuffer> data(new net::WrappedIOBuffer(
      reinterpret_cast<const char*>(pickle.data())));
  int len = static_cast<int>(pickle.size());

  int rv = disk_entry->WriteData(0, 0, data.get(), len, cb.callback(), true);
  rv = cb.GetResult(rv);
  return (rv == len);
}
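// A typical round trip in a test might look like this (sketch only; |cache|,
// |response| and the key are illustrative, and the entry usually comes from
// CreateBackendEntry/OpenBackendEntry below):
//
//   disk_cache::Entry* entry = NULL;
//   ASSERT_TRUE(cache.CreateBackendEntry("http://www.example.com/", &entry,
//                                        NULL));
//   EXPECT_TRUE(MockHttpCache::WriteResponseInfo(entry, &response, true,
//                                                false));
//   net::HttpResponseInfo read_back;
//   bool truncated = false;
//   EXPECT_TRUE(MockHttpCache::ReadResponseInfo(entry, &read_back,
//                                               &truncated));
//   entry->Close();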
bool MockHttpCache::OpenBackendEntry(const std::string& key,
                                     disk_cache::Entry** entry) {
  net::TestCompletionCallback cb;
  int rv = backend()->OpenEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == net::OK);
}

bool MockHttpCache::CreateBackendEntry(const std::string& key,
                                       disk_cache::Entry** entry,
                                       net::NetLog* net_log) {
  net::TestCompletionCallback cb;
  int rv = backend()->CreateEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == net::OK);
}
// Static.
int MockHttpCache::GetTestMode(int test_mode) {
  if (!g_test_mode)
    return test_mode;

  return g_test_mode;
}

// Static.
void MockHttpCache::SetTestMode(int test_mode) {
  g_test_mode = test_mode;
}
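// Tests that want cache operations to complete synchronously can install a
// global override for the duration of the test, e.g. (sketch):
//
//   MockHttpCache::SetTestMode(TEST_MODE_SYNC_CACHE_START |
//                              TEST_MODE_SYNC_CACHE_READ |
//                              TEST_MODE_SYNC_CACHE_WRITE);
//   ...
//   MockHttpCache::SetTestMode(0);  // Restore per-transaction test modes.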
//-----------------------------------------------------------------------------
int MockDiskCacheNoCB::CreateEntry(const std::string& key,
                                   disk_cache::Entry** entry,
                                   const net::CompletionCallback& callback) {
  return net::ERR_IO_PENDING;
}

//-----------------------------------------------------------------------------
int MockBackendNoCbFactory::CreateBackend(
    net::NetLog* net_log, scoped_ptr<disk_cache::Backend>* backend,
    const net::CompletionCallback& callback) {
  backend->reset(new MockDiskCacheNoCB());
  return net::OK;
}

//-----------------------------------------------------------------------------
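// MockBlockingBackendFactory keeps backend creation pending: CreateBackend
// returns ERR_IO_PENDING while blocking is enabled, and the test later calls
// FinishCreation() to build the MockDiskCache (or report failure) and run the
// stored callback.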
MockBlockingBackendFactory::MockBlockingBackendFactory()
    : backend_(NULL),
      block_(true),
      fail_(false) {
}

MockBlockingBackendFactory::~MockBlockingBackendFactory() {
}
int MockBlockingBackendFactory::CreateBackend(
    net::NetLog* net_log, scoped_ptr<disk_cache::Backend>* backend,
    const net::CompletionCallback& callback) {
  if (!block_) {
    if (!fail_)
      backend->reset(new MockDiskCache());
    return Result();
  }

  backend_ = backend;
  callback_ = callback;
  return net::ERR_IO_PENDING;
}
void MockBlockingBackendFactory::FinishCreation() {
  block_ = false;
  if (!callback_.is_null()) {
    if (!fail_)
      backend_->reset(new MockDiskCache());
    net::CompletionCallback cb = callback_;
    callback_.Reset();
    cb.Run(Result());  // This object can be deleted here.
  }
}