Bug 1866777 - Disable test_race_cache_with_network.js on windows opt for frequent...
[gecko.git] / netwerk / cache2 / CacheFile.cpp
blob02a9f3bce7bbaa210e92d69b03ba601822849de9
1 /* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
5 #include "CacheFile.h"
7 #include <algorithm>
8 #include <utility>
10 #include "CacheFileChunk.h"
11 #include "CacheFileInputStream.h"
12 #include "CacheFileOutputStream.h"
13 #include "CacheFileUtils.h"
14 #include "CacheIndex.h"
15 #include "CacheLog.h"
16 #include "mozilla/DebugOnly.h"
17 #include "mozilla/Telemetry.h"
18 #include "mozilla/TelemetryHistogramEnums.h"
19 #include "nsComponentManagerUtils.h"
20 #include "nsICacheEntry.h"
21 #include "nsProxyRelease.h"
22 #include "nsThreadUtils.h"
24 // When CACHE_CHUNKS is defined we always cache unused chunks in mCacheChunks.
25 // When it is not defined, we always release the chunks ASAP, i.e. we cache
26 // unused chunks only when:
27 // - CacheFile is memory-only
28 // - CacheFile is still waiting for the handle
29 // - the chunk is preloaded
31 // #define CACHE_CHUNKS
33 namespace mozilla::net {
35 using CacheFileUtils::CacheFileLock;
37 class NotifyCacheFileListenerEvent : public Runnable {
38 public:
39 NotifyCacheFileListenerEvent(CacheFileListener* aCallback, nsresult aResult,
40 bool aIsNew)
41 : Runnable("net::NotifyCacheFileListenerEvent"),
42 mCallback(aCallback),
43 mRV(aResult),
44 mIsNew(aIsNew) {
45 LOG(
46 ("NotifyCacheFileListenerEvent::NotifyCacheFileListenerEvent() "
47 "[this=%p]",
48 this));
51 protected:
52 ~NotifyCacheFileListenerEvent() {
53 LOG(
54 ("NotifyCacheFileListenerEvent::~NotifyCacheFileListenerEvent() "
55 "[this=%p]",
56 this));
59 public:
60 NS_IMETHOD Run() override {
61 LOG(("NotifyCacheFileListenerEvent::Run() [this=%p]", this));
63 mCallback->OnFileReady(mRV, mIsNew);
64 return NS_OK;
67 protected:
68 nsCOMPtr<CacheFileListener> mCallback;
69 nsresult mRV;
70 bool mIsNew;
73 class NotifyChunkListenerEvent : public Runnable {
74 public:
75 NotifyChunkListenerEvent(CacheFileChunkListener* aCallback, nsresult aResult,
76 uint32_t aChunkIdx, CacheFileChunk* aChunk)
77 : Runnable("net::NotifyChunkListenerEvent"),
78 mCallback(aCallback),
79 mRV(aResult),
80 mChunkIdx(aChunkIdx),
81 mChunk(aChunk) {
82 LOG(("NotifyChunkListenerEvent::NotifyChunkListenerEvent() [this=%p]",
83 this));
86 protected:
87 ~NotifyChunkListenerEvent() {
88 LOG(("NotifyChunkListenerEvent::~NotifyChunkListenerEvent() [this=%p]",
89 this));
92 public:
93 NS_IMETHOD Run() override {
94 LOG(("NotifyChunkListenerEvent::Run() [this=%p]", this));
96 mCallback->OnChunkAvailable(mRV, mChunkIdx, mChunk);
97 return NS_OK;
100 protected:
101 nsCOMPtr<CacheFileChunkListener> mCallback;
102 nsresult mRV;
103 uint32_t mChunkIdx;
104 RefPtr<CacheFileChunk> mChunk;
107 class DoomFileHelper : public CacheFileIOListener {
108 public:
109 NS_DECL_THREADSAFE_ISUPPORTS
111 explicit DoomFileHelper(CacheFileListener* aListener)
112 : mListener(aListener) {}
114 NS_IMETHOD OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) override {
115 MOZ_CRASH("DoomFileHelper::OnFileOpened should not be called!");
116 return NS_ERROR_UNEXPECTED;
119 NS_IMETHOD OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
120 nsresult aResult) override {
121 MOZ_CRASH("DoomFileHelper::OnDataWritten should not be called!");
122 return NS_ERROR_UNEXPECTED;
125 NS_IMETHOD OnDataRead(CacheFileHandle* aHandle, char* aBuf,
126 nsresult aResult) override {
127 MOZ_CRASH("DoomFileHelper::OnDataRead should not be called!");
128 return NS_ERROR_UNEXPECTED;
131 NS_IMETHOD OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) override {
132 if (mListener) mListener->OnFileDoomed(aResult);
133 return NS_OK;
136 NS_IMETHOD OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) override {
137 MOZ_CRASH("DoomFileHelper::OnEOFSet should not be called!");
138 return NS_ERROR_UNEXPECTED;
141 NS_IMETHOD OnFileRenamed(CacheFileHandle* aHandle,
142 nsresult aResult) override {
143 MOZ_CRASH("DoomFileHelper::OnFileRenamed should not be called!");
144 return NS_ERROR_UNEXPECTED;
147 private:
148 virtual ~DoomFileHelper() = default;
150 nsCOMPtr<CacheFileListener> mListener;
153 NS_IMPL_ISUPPORTS(DoomFileHelper, CacheFileIOListener)
155 NS_IMPL_ADDREF(CacheFile)
156 NS_IMPL_RELEASE(CacheFile)
157 NS_INTERFACE_MAP_BEGIN(CacheFile)
158 NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileChunkListener)
159 NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileIOListener)
160 NS_INTERFACE_MAP_ENTRY(mozilla::net::CacheFileMetadataListener)
161 NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports,
162 mozilla::net::CacheFileChunkListener)
163 NS_INTERFACE_MAP_END
165 CacheFile::CacheFile() : mLock(new CacheFileLock()) {
166 LOG(("CacheFile::CacheFile() [this=%p]", this));
169 CacheFile::~CacheFile() {
170 LOG(("CacheFile::~CacheFile() [this=%p]", this));
172 MutexAutoLock lock(mLock->Lock());
173 if (!mMemoryOnly && mReady && !mKill) {
174 // mReady flag indicates we have metadata plus in a valid state.
175 WriteMetadataIfNeededLocked(true);
179 nsresult CacheFile::Init(const nsACString& aKey, bool aCreateNew,
180 bool aMemoryOnly, bool aSkipSizeCheck, bool aPriority,
181 bool aPinned, CacheFileListener* aCallback)
182 MOZ_NO_THREAD_SAFETY_ANALYSIS {
183 MOZ_ASSERT(!mListener);
184 MOZ_ASSERT(!mHandle);
186 MOZ_ASSERT(!(aMemoryOnly && aPinned));
188 nsresult rv;
190 mKey = aKey;
191 mOpenAsMemoryOnly = mMemoryOnly = aMemoryOnly;
192 mSkipSizeCheck = aSkipSizeCheck;
193 mPriority = aPriority;
194 mPinned = aPinned;
196 // Some consumers (at least nsHTTPCompressConv) assume that Read() can read
197 // such amount of data that was announced by Available().
198 // CacheFileInputStream::Available() uses also preloaded chunks to compute
199 // number of available bytes in the input stream, so we have to make sure the
200 // preloadChunkCount won't change during CacheFile's lifetime since otherwise
201 // we could potentially release some cached chunks that was used to calculate
202 // available bytes but would not be available later during call to
203 // CacheFileInputStream::Read().
204 mPreloadChunkCount = CacheObserver::PreloadChunkCount();
206 LOG(
207 ("CacheFile::Init() [this=%p, key=%s, createNew=%d, memoryOnly=%d, "
208 "priority=%d, listener=%p]",
209 this, mKey.get(), aCreateNew, aMemoryOnly, aPriority, aCallback));
211 if (mMemoryOnly) {
212 MOZ_ASSERT(!aCallback);
214 mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, false, mKey,
215 WrapNotNull(mLock));
216 mReady = true;
217 mDataSize = mMetadata->Offset();
218 return NS_OK;
220 uint32_t flags;
221 if (aCreateNew) {
222 MOZ_ASSERT(!aCallback);
223 flags = CacheFileIOManager::CREATE_NEW;
225 // make sure we can use this entry immediately
226 mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey,
227 WrapNotNull(mLock));
228 mReady = true;
229 mDataSize = mMetadata->Offset();
230 } else {
231 flags = CacheFileIOManager::CREATE;
234 if (mPriority) {
235 flags |= CacheFileIOManager::PRIORITY;
238 if (mPinned) {
239 flags |= CacheFileIOManager::PINNED;
242 mOpeningFile = true;
243 mListener = aCallback;
244 rv = CacheFileIOManager::OpenFile(mKey, flags, this);
245 if (NS_FAILED(rv)) {
246 mListener = nullptr;
247 mOpeningFile = false;
249 if (mPinned) {
250 LOG(
251 ("CacheFile::Init() - CacheFileIOManager::OpenFile() failed "
252 "but we want to pin, fail the file opening. [this=%p]",
253 this));
254 return NS_ERROR_NOT_AVAILABLE;
257 if (aCreateNew) {
258 NS_WARNING("Forcing memory-only entry since OpenFile failed");
259 LOG(
260 ("CacheFile::Init() - CacheFileIOManager::OpenFile() failed "
261 "synchronously. We can continue in memory-only mode since "
262 "aCreateNew == true. [this=%p]",
263 this));
265 mMemoryOnly = true;
266 } else if (rv == NS_ERROR_NOT_INITIALIZED) {
267 NS_WARNING(
268 "Forcing memory-only entry since CacheIOManager isn't "
269 "initialized.");
270 LOG(
271 ("CacheFile::Init() - CacheFileIOManager isn't initialized, "
272 "initializing entry as memory-only. [this=%p]",
273 this));
275 mMemoryOnly = true;
276 mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey,
277 WrapNotNull(mLock));
278 mReady = true;
279 mDataSize = mMetadata->Offset();
281 RefPtr<NotifyCacheFileListenerEvent> ev;
282 ev = new NotifyCacheFileListenerEvent(aCallback, NS_OK, true);
283 rv = NS_DispatchToCurrentThread(ev);
284 NS_ENSURE_SUCCESS(rv, rv);
285 } else {
286 NS_ENSURE_SUCCESS(rv, rv);
290 return NS_OK;
293 void CacheFile::Key(nsACString& aKey) {
294 CacheFileAutoLock lock(this);
295 aKey = mKey;
298 bool CacheFile::IsPinned() {
299 CacheFileAutoLock lock(this);
300 return mPinned;
303 nsresult CacheFile::OnChunkRead(nsresult aResult, CacheFileChunk* aChunk) {
304 CacheFileAutoLock lock(this);
306 nsresult rv;
308 uint32_t index = aChunk->Index();
310 LOG(("CacheFile::OnChunkRead() [this=%p, rv=0x%08" PRIx32
311 ", chunk=%p, idx=%u]",
312 this, static_cast<uint32_t>(aResult), aChunk, index));
314 if (aChunk->mDiscardedChunk) {
315 // We discard only unused chunks, so it must be still unused when reading
316 // data finishes.
317 MOZ_ASSERT(aChunk->mRefCnt == 2);
318 aChunk->mActiveChunk = false;
319 ReleaseOutsideLock(
320 RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));
322 DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
323 MOZ_ASSERT(removed);
324 return NS_OK;
327 if (NS_FAILED(aResult)) {
328 SetError(aResult);
331 if (HaveChunkListeners(index)) {
332 rv = NotifyChunkListeners(index, aResult, aChunk);
333 NS_ENSURE_SUCCESS(rv, rv);
336 return NS_OK;
339 nsresult CacheFile::OnChunkWritten(nsresult aResult, CacheFileChunk* aChunk) {
340 // In case the chunk was reused, made dirty and released between calls to
341 // CacheFileChunk::Write() and CacheFile::OnChunkWritten(), we must write
342 // the chunk to the disk again. When the chunk is unused and is dirty simply
343 // addref and release (outside the lock) the chunk which ensures that
344 // CacheFile::DeactivateChunk() will be called again.
345 RefPtr<CacheFileChunk> deactivateChunkAgain;
347 CacheFileAutoLock lock(this);
349 nsresult rv;
351 LOG(("CacheFile::OnChunkWritten() [this=%p, rv=0x%08" PRIx32
352 ", chunk=%p, idx=%u]",
353 this, static_cast<uint32_t>(aResult), aChunk, aChunk->Index()));
355 MOZ_ASSERT(!mMemoryOnly);
356 MOZ_ASSERT(!mOpeningFile);
357 MOZ_ASSERT(mHandle);
359 if (aChunk->mDiscardedChunk) {
360 // We discard only unused chunks, so it must be still unused when writing
361 // data finishes.
362 MOZ_ASSERT(aChunk->mRefCnt == 2);
363 aChunk->mActiveChunk = false;
364 ReleaseOutsideLock(
365 RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));
367 DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
368 MOZ_ASSERT(removed);
369 return NS_OK;
372 if (NS_FAILED(aResult)) {
373 SetError(aResult);
376 if (NS_SUCCEEDED(aResult) && !aChunk->IsDirty()) {
377 // update hash value in metadata
378 mMetadata->SetHash(aChunk->Index(), aChunk->Hash());
381 // notify listeners if there is any
382 if (HaveChunkListeners(aChunk->Index())) {
383 // don't release the chunk since there are some listeners queued
384 rv = NotifyChunkListeners(aChunk->Index(), aResult, aChunk);
385 if (NS_SUCCEEDED(rv)) {
386 MOZ_ASSERT(aChunk->mRefCnt != 2);
387 return NS_OK;
391 if (aChunk->mRefCnt != 2) {
392 LOG(
393 ("CacheFile::OnChunkWritten() - Chunk is still used [this=%p, chunk=%p,"
394 " refcnt=%" PRIuPTR "]",
395 this, aChunk, aChunk->mRefCnt.get()));
397 return NS_OK;
400 if (aChunk->IsDirty()) {
401 LOG(
402 ("CacheFile::OnChunkWritten() - Unused chunk is dirty. We must go "
403 "through deactivation again. [this=%p, chunk=%p]",
404 this, aChunk));
406 deactivateChunkAgain = aChunk;
407 return NS_OK;
410 bool keepChunk = false;
411 if (NS_SUCCEEDED(aResult)) {
412 keepChunk = ShouldCacheChunk(aChunk->Index());
413 LOG(("CacheFile::OnChunkWritten() - %s unused chunk [this=%p, chunk=%p]",
414 keepChunk ? "Caching" : "Releasing", this, aChunk));
415 } else {
416 LOG(
417 ("CacheFile::OnChunkWritten() - Releasing failed chunk [this=%p, "
418 "chunk=%p]",
419 this, aChunk));
422 RemoveChunkInternal(aChunk, keepChunk);
424 WriteMetadataIfNeededLocked();
426 return NS_OK;
429 nsresult CacheFile::OnChunkAvailable(nsresult aResult, uint32_t aChunkIdx,
430 CacheFileChunk* aChunk) {
431 MOZ_CRASH("CacheFile::OnChunkAvailable should not be called!");
432 return NS_ERROR_UNEXPECTED;
435 nsresult CacheFile::OnChunkUpdated(CacheFileChunk* aChunk) {
436 MOZ_CRASH("CacheFile::OnChunkUpdated should not be called!");
437 return NS_ERROR_UNEXPECTED;
440 nsresult CacheFile::OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) {
441 // Using an 'auto' class to perform doom or fail the listener
442 // outside the CacheFile's lock.
443 class AutoFailDoomListener {
444 public:
445 explicit AutoFailDoomListener(CacheFileHandle* aHandle)
446 : mHandle(aHandle), mAlreadyDoomed(false) {}
447 ~AutoFailDoomListener() {
448 if (!mListener) return;
450 if (mHandle) {
451 if (mAlreadyDoomed) {
452 mListener->OnFileDoomed(mHandle, NS_OK);
453 } else {
454 CacheFileIOManager::DoomFile(mHandle, mListener);
456 } else {
457 mListener->OnFileDoomed(nullptr, NS_ERROR_NOT_AVAILABLE);
461 CacheFileHandle* mHandle;
462 nsCOMPtr<CacheFileIOListener> mListener;
463 bool mAlreadyDoomed;
464 } autoDoom(aHandle);
466 RefPtr<CacheFileMetadata> metadata;
467 nsCOMPtr<CacheFileListener> listener;
468 bool isNew = false;
469 nsresult retval = NS_OK;
472 CacheFileAutoLock lock(this);
474 MOZ_ASSERT(mOpeningFile);
475 MOZ_ASSERT((NS_SUCCEEDED(aResult) && aHandle) ||
476 (NS_FAILED(aResult) && !aHandle));
477 MOZ_ASSERT((mListener && !mMetadata) || // !createNew
478 (!mListener && mMetadata)); // createNew
479 MOZ_ASSERT(!mMemoryOnly || mMetadata); // memory-only was set on new entry
481 LOG(("CacheFile::OnFileOpened() [this=%p, rv=0x%08" PRIx32 ", handle=%p]",
482 this, static_cast<uint32_t>(aResult), aHandle));
484 mOpeningFile = false;
486 autoDoom.mListener.swap(mDoomAfterOpenListener);
488 if (mMemoryOnly) {
489 // We can be here only in case the entry was initilized as createNew and
490 // SetMemoryOnly() was called.
492 // Just don't store the handle into mHandle and exit
493 autoDoom.mAlreadyDoomed = true;
494 return NS_OK;
497 if (NS_FAILED(aResult)) {
498 if (mMetadata) {
499 // This entry was initialized as createNew, just switch to memory-only
500 // mode.
501 NS_WARNING("Forcing memory-only entry since OpenFile failed");
502 LOG(
503 ("CacheFile::OnFileOpened() - CacheFileIOManager::OpenFile() "
504 "failed asynchronously. We can continue in memory-only mode since "
505 "aCreateNew == true. [this=%p]",
506 this));
508 mMemoryOnly = true;
509 return NS_OK;
512 if (aResult == NS_ERROR_FILE_INVALID_PATH) {
513 // CacheFileIOManager doesn't have mCacheDirectory, switch to
514 // memory-only mode.
515 NS_WARNING(
516 "Forcing memory-only entry since CacheFileIOManager doesn't "
517 "have mCacheDirectory.");
518 LOG(
519 ("CacheFile::OnFileOpened() - CacheFileIOManager doesn't have "
520 "mCacheDirectory, initializing entry as memory-only. [this=%p]",
521 this));
523 mMemoryOnly = true;
524 mMetadata = new CacheFileMetadata(mOpenAsMemoryOnly, mPinned, mKey,
525 WrapNotNull(mLock));
526 mReady = true;
527 mDataSize = mMetadata->Offset();
529 isNew = true;
530 retval = NS_OK;
531 } else {
532 // CacheFileIOManager::OpenFile() failed for another reason.
533 isNew = false;
534 retval = aResult;
537 mListener.swap(listener);
538 } else {
539 mHandle = aHandle;
540 if (NS_FAILED(mStatus)) {
541 CacheFileIOManager::DoomFile(mHandle, nullptr);
544 if (mMetadata) {
545 InitIndexEntry();
547 // The entry was initialized as createNew, don't try to read metadata.
548 mMetadata->SetHandle(mHandle);
550 // Write all cached chunks, otherwise they may stay unwritten.
551 for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
552 uint32_t idx = iter.Key();
553 RefPtr<CacheFileChunk>& chunk = iter.Data();
555 LOG(("CacheFile::OnFileOpened() - write [this=%p, idx=%u, chunk=%p]",
556 this, idx, chunk.get()));
558 mChunks.InsertOrUpdate(idx, RefPtr{chunk});
559 chunk->mFile = this;
560 chunk->mActiveChunk = true;
562 MOZ_ASSERT(chunk->IsReady());
564 // This would be cleaner if we had an nsRefPtr constructor that took
565 // a RefPtr<Derived>.
566 ReleaseOutsideLock(std::move(chunk));
568 iter.Remove();
571 return NS_OK;
574 if (listener) {
575 lock.Unlock();
576 listener->OnFileReady(retval, isNew);
577 return NS_OK;
580 MOZ_ASSERT(NS_SUCCEEDED(aResult));
581 MOZ_ASSERT(!mMetadata);
582 MOZ_ASSERT(mListener);
584 // mMetaData is protected by a lock, but ReadMetaData has to be called
585 // without the lock. Alternatively we could make a
586 // "ReadMetaDataLocked", and temporarily unlock to call OnFileReady
587 metadata = mMetadata =
588 new CacheFileMetadata(mHandle, mKey, WrapNotNull(mLock));
590 metadata->ReadMetadata(this);
591 return NS_OK;
594 nsresult CacheFile::OnDataWritten(CacheFileHandle* aHandle, const char* aBuf,
595 nsresult aResult) {
596 MOZ_CRASH("CacheFile::OnDataWritten should not be called!");
597 return NS_ERROR_UNEXPECTED;
600 nsresult CacheFile::OnDataRead(CacheFileHandle* aHandle, char* aBuf,
601 nsresult aResult) {
602 MOZ_CRASH("CacheFile::OnDataRead should not be called!");
603 return NS_ERROR_UNEXPECTED;
606 nsresult CacheFile::OnMetadataRead(nsresult aResult) {
607 nsCOMPtr<CacheFileListener> listener;
608 bool isNew = false;
610 CacheFileAutoLock lock(this);
611 MOZ_ASSERT(mListener);
613 LOG(("CacheFile::OnMetadataRead() [this=%p, rv=0x%08" PRIx32 "]", this,
614 static_cast<uint32_t>(aResult)));
616 if (NS_SUCCEEDED(aResult)) {
617 mPinned = mMetadata->Pinned();
618 mReady = true;
619 mDataSize = mMetadata->Offset();
620 if (mDataSize == 0 && mMetadata->ElementsSize() == 0) {
621 isNew = true;
622 mMetadata->MarkDirty();
623 } else {
624 const char* altData =
625 mMetadata->GetElement(CacheFileUtils::kAltDataKey);
626 if (altData && (NS_FAILED(CacheFileUtils::ParseAlternativeDataInfo(
627 altData, &mAltDataOffset, &mAltDataType)) ||
628 (mAltDataOffset > mDataSize))) {
629 // alt-metadata cannot be parsed or alt-data offset is invalid
630 mMetadata->InitEmptyMetadata();
631 isNew = true;
632 mAltDataOffset = -1;
633 mAltDataType.Truncate();
634 mDataSize = 0;
635 } else {
636 PreloadChunks(0);
640 InitIndexEntry();
643 mListener.swap(listener);
645 listener->OnFileReady(aResult, isNew);
646 return NS_OK;
649 nsresult CacheFile::OnMetadataWritten(nsresult aResult) {
650 CacheFileAutoLock lock(this);
652 LOG(("CacheFile::OnMetadataWritten() [this=%p, rv=0x%08" PRIx32 "]", this,
653 static_cast<uint32_t>(aResult)));
655 MOZ_ASSERT(mWritingMetadata);
656 mWritingMetadata = false;
658 MOZ_ASSERT(!mMemoryOnly);
659 MOZ_ASSERT(!mOpeningFile);
661 if (NS_WARN_IF(NS_FAILED(aResult))) {
662 // TODO close streams with an error ???
663 SetError(aResult);
666 if (mOutput || mInputs.Length() || mChunks.Count()) return NS_OK;
668 if (IsDirty()) WriteMetadataIfNeededLocked();
670 if (!mWritingMetadata) {
671 LOG(("CacheFile::OnMetadataWritten() - Releasing file handle [this=%p]",
672 this));
673 CacheFileIOManager::ReleaseNSPRHandle(mHandle);
676 return NS_OK;
679 nsresult CacheFile::OnFileDoomed(CacheFileHandle* aHandle, nsresult aResult) {
680 nsCOMPtr<CacheFileListener> listener;
683 CacheFileAutoLock lock(this);
685 MOZ_ASSERT(mListener);
687 LOG(("CacheFile::OnFileDoomed() [this=%p, rv=0x%08" PRIx32 ", handle=%p]",
688 this, static_cast<uint32_t>(aResult), aHandle));
690 mListener.swap(listener);
693 listener->OnFileDoomed(aResult);
694 return NS_OK;
697 nsresult CacheFile::OnEOFSet(CacheFileHandle* aHandle, nsresult aResult) {
698 MOZ_CRASH("CacheFile::OnEOFSet should not be called!");
699 return NS_ERROR_UNEXPECTED;
702 nsresult CacheFile::OnFileRenamed(CacheFileHandle* aHandle, nsresult aResult) {
703 MOZ_CRASH("CacheFile::OnFileRenamed should not be called!");
704 return NS_ERROR_UNEXPECTED;
707 bool CacheFile::IsKilled() {
708 bool killed = mKill;
709 if (killed) {
710 LOG(("CacheFile is killed, this=%p", this));
713 return killed;
716 nsresult CacheFile::OpenInputStream(nsICacheEntry* aEntryHandle,
717 nsIInputStream** _retval) {
718 CacheFileAutoLock lock(this);
720 MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
722 if (!mReady) {
723 LOG(("CacheFile::OpenInputStream() - CacheFile is not ready [this=%p]",
724 this));
726 return NS_ERROR_NOT_AVAILABLE;
729 if (NS_FAILED(mStatus)) {
730 LOG(
731 ("CacheFile::OpenInputStream() - CacheFile is in a failure state "
732 "[this=%p, status=0x%08" PRIx32 "]",
733 this, static_cast<uint32_t>(mStatus)));
735 // Don't allow opening the input stream when this CacheFile is in
736 // a failed state. This is the only way to protect consumers correctly
737 // from reading a broken entry. When the file is in the failed state,
738 // it's also doomed, so reopening the entry won't make any difference -
739 // data will still be inaccessible anymore. Note that for just doomed
740 // files, we must allow reading the data.
741 return mStatus;
744 // Once we open input stream we no longer allow preloading of chunks without
745 // input stream, i.e. we will no longer keep first few chunks preloaded when
746 // the last input stream is closed.
747 mPreloadWithoutInputStreams = false;
749 CacheFileInputStream* input =
750 new CacheFileInputStream(this, aEntryHandle, false);
751 LOG(("CacheFile::OpenInputStream() - Creating new input stream %p [this=%p]",
752 input, this));
754 mInputs.AppendElement(input);
755 NS_ADDREF(input);
757 mDataAccessed = true;
758 *_retval = do_AddRef(input).take();
759 return NS_OK;
762 nsresult CacheFile::OpenAlternativeInputStream(nsICacheEntry* aEntryHandle,
763 const char* aAltDataType,
764 nsIInputStream** _retval) {
765 CacheFileAutoLock lock(this);
767 MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
769 if (NS_WARN_IF(!mReady)) {
770 LOG(
771 ("CacheFile::OpenAlternativeInputStream() - CacheFile is not ready "
772 "[this=%p]",
773 this));
774 return NS_ERROR_NOT_AVAILABLE;
777 if (mAltDataOffset == -1) {
778 LOG(
779 ("CacheFile::OpenAlternativeInputStream() - Alternative data is not "
780 "available [this=%p]",
781 this));
782 return NS_ERROR_NOT_AVAILABLE;
785 if (NS_FAILED(mStatus)) {
786 LOG(
787 ("CacheFile::OpenAlternativeInputStream() - CacheFile is in a failure "
788 "state [this=%p, status=0x%08" PRIx32 "]",
789 this, static_cast<uint32_t>(mStatus)));
791 // Don't allow opening the input stream when this CacheFile is in
792 // a failed state. This is the only way to protect consumers correctly
793 // from reading a broken entry. When the file is in the failed state,
794 // it's also doomed, so reopening the entry won't make any difference -
795 // data will still be inaccessible anymore. Note that for just doomed
796 // files, we must allow reading the data.
797 return mStatus;
800 if (mAltDataType != aAltDataType) {
801 LOG(
802 ("CacheFile::OpenAlternativeInputStream() - Alternative data is of a "
803 "different type than requested [this=%p, availableType=%s, "
804 "requestedType=%s]",
805 this, mAltDataType.get(), aAltDataType));
806 return NS_ERROR_NOT_AVAILABLE;
809 // Once we open input stream we no longer allow preloading of chunks without
810 // input stream, i.e. we will no longer keep first few chunks preloaded when
811 // the last input stream is closed.
812 mPreloadWithoutInputStreams = false;
814 CacheFileInputStream* input =
815 new CacheFileInputStream(this, aEntryHandle, true);
817 LOG(
818 ("CacheFile::OpenAlternativeInputStream() - Creating new input stream %p "
819 "[this=%p]",
820 input, this));
822 mInputs.AppendElement(input);
823 NS_ADDREF(input);
825 mDataAccessed = true;
826 *_retval = do_AddRef(input).take();
828 return NS_OK;
831 nsresult CacheFile::OpenOutputStream(CacheOutputCloseListener* aCloseListener,
832 nsIOutputStream** _retval) {
833 CacheFileAutoLock lock(this);
835 MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
837 nsresult rv;
839 if (!mReady) {
840 LOG(("CacheFile::OpenOutputStream() - CacheFile is not ready [this=%p]",
841 this));
843 return NS_ERROR_NOT_AVAILABLE;
846 if (mOutput) {
847 LOG(
848 ("CacheFile::OpenOutputStream() - We already have output stream %p "
849 "[this=%p]",
850 mOutput, this));
852 return NS_ERROR_NOT_AVAILABLE;
855 if (NS_FAILED(mStatus)) {
856 LOG(
857 ("CacheFile::OpenOutputStream() - CacheFile is in a failure state "
858 "[this=%p, status=0x%08" PRIx32 "]",
859 this, static_cast<uint32_t>(mStatus)));
861 // The CacheFile is already doomed. It make no sense to allow to write any
862 // data to such entry.
863 return mStatus;
866 // Fail if there is any input stream opened for alternative data
867 for (uint32_t i = 0; i < mInputs.Length(); ++i) {
868 if (mInputs[i]->IsAlternativeData()) {
869 return NS_ERROR_NOT_AVAILABLE;
873 if (mAltDataOffset != -1) {
874 // Remove alt-data
875 rv = Truncate(mAltDataOffset);
876 if (NS_FAILED(rv)) {
877 LOG(
878 ("CacheFile::OpenOutputStream() - Truncating alt-data failed "
879 "[rv=0x%08" PRIx32 "]",
880 static_cast<uint32_t>(rv)));
881 return rv;
883 SetAltMetadata(nullptr);
884 mAltDataOffset = -1;
885 mAltDataType.Truncate();
888 // Once we open output stream we no longer allow preloading of chunks without
889 // input stream. There is no reason to believe that some input stream will be
890 // opened soon. Otherwise we would cache unused chunks of all newly created
891 // entries until the CacheFile is destroyed.
892 mPreloadWithoutInputStreams = false;
894 mOutput = new CacheFileOutputStream(this, aCloseListener, false);
896 LOG(
897 ("CacheFile::OpenOutputStream() - Creating new output stream %p "
898 "[this=%p]",
899 mOutput, this));
901 mDataAccessed = true;
902 *_retval = do_AddRef(mOutput).take();
903 return NS_OK;
906 nsresult CacheFile::OpenAlternativeOutputStream(
907 CacheOutputCloseListener* aCloseListener, const char* aAltDataType,
908 nsIAsyncOutputStream** _retval) {
909 CacheFileAutoLock lock(this);
911 MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
913 if (!mReady) {
914 LOG(
915 ("CacheFile::OpenAlternativeOutputStream() - CacheFile is not ready "
916 "[this=%p]",
917 this));
919 return NS_ERROR_NOT_AVAILABLE;
922 if (mOutput) {
923 LOG(
924 ("CacheFile::OpenAlternativeOutputStream() - We already have output "
925 "stream %p [this=%p]",
926 mOutput, this));
928 return NS_ERROR_NOT_AVAILABLE;
931 if (NS_FAILED(mStatus)) {
932 LOG(
933 ("CacheFile::OpenAlternativeOutputStream() - CacheFile is in a failure "
934 "state [this=%p, status=0x%08" PRIx32 "]",
935 this, static_cast<uint32_t>(mStatus)));
937 // The CacheFile is already doomed. It make no sense to allow to write any
938 // data to such entry.
939 return mStatus;
942 // Fail if there is any input stream opened for alternative data
943 for (uint32_t i = 0; i < mInputs.Length(); ++i) {
944 if (mInputs[i]->IsAlternativeData()) {
945 return NS_ERROR_NOT_AVAILABLE;
949 nsresult rv;
951 if (mAltDataOffset != -1) {
952 // Truncate old alt-data
953 rv = Truncate(mAltDataOffset);
954 if (NS_FAILED(rv)) {
955 LOG(
956 ("CacheFile::OpenAlternativeOutputStream() - Truncating old alt-data "
957 "failed [rv=0x%08" PRIx32 "]",
958 static_cast<uint32_t>(rv)));
959 return rv;
961 } else {
962 mAltDataOffset = mDataSize;
965 nsAutoCString altMetadata;
966 CacheFileUtils::BuildAlternativeDataInfo(aAltDataType, mAltDataOffset,
967 altMetadata);
968 rv = SetAltMetadata(altMetadata.get());
969 if (NS_FAILED(rv)) {
970 LOG(
971 ("CacheFile::OpenAlternativeOutputStream() - Set Metadata for alt-data"
972 "failed [rv=0x%08" PRIx32 "]",
973 static_cast<uint32_t>(rv)));
974 return rv;
977 // Once we open output stream we no longer allow preloading of chunks without
978 // input stream. There is no reason to believe that some input stream will be
979 // opened soon. Otherwise we would cache unused chunks of all newly created
980 // entries until the CacheFile is destroyed.
981 mPreloadWithoutInputStreams = false;
983 mOutput = new CacheFileOutputStream(this, aCloseListener, true);
985 LOG(
986 ("CacheFile::OpenAlternativeOutputStream() - Creating new output stream "
987 "%p [this=%p]",
988 mOutput, this));
990 mDataAccessed = true;
991 mAltDataType = aAltDataType;
992 *_retval = do_AddRef(mOutput).take();
993 return NS_OK;
996 nsresult CacheFile::SetMemoryOnly() {
997 CacheFileAutoLock lock(this);
999 LOG(("CacheFile::SetMemoryOnly() mMemoryOnly=%d [this=%p]", mMemoryOnly,
1000 this));
1002 if (mMemoryOnly) return NS_OK;
1004 MOZ_ASSERT(mReady);
1006 if (!mReady) {
1007 LOG(("CacheFile::SetMemoryOnly() - CacheFile is not ready [this=%p]",
1008 this));
1010 return NS_ERROR_NOT_AVAILABLE;
1013 if (mDataAccessed) {
1014 LOG(("CacheFile::SetMemoryOnly() - Data was already accessed [this=%p]",
1015 this));
1016 return NS_ERROR_NOT_AVAILABLE;
1019 // TODO what to do when this isn't a new entry and has an existing metadata???
1020 mMemoryOnly = true;
1021 return NS_OK;
1024 nsresult CacheFile::Doom(CacheFileListener* aCallback) {
1025 LOG(("CacheFile::Doom() [this=%p, listener=%p]", this, aCallback));
1027 CacheFileAutoLock lock(this);
1029 return DoomLocked(aCallback);
1032 nsresult CacheFile::DoomLocked(CacheFileListener* aCallback) {
1033 AssertOwnsLock();
1034 MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
1036 LOG(("CacheFile::DoomLocked() [this=%p, listener=%p]", this, aCallback));
1038 nsresult rv = NS_OK;
1040 if (mMemoryOnly) {
1041 return NS_ERROR_FILE_NOT_FOUND;
1044 if (mHandle && mHandle->IsDoomed()) {
1045 return NS_ERROR_FILE_NOT_FOUND;
1048 nsCOMPtr<CacheFileIOListener> listener;
1049 if (aCallback || !mHandle) {
1050 listener = new DoomFileHelper(aCallback);
1052 if (mHandle) {
1053 rv = CacheFileIOManager::DoomFile(mHandle, listener);
1054 } else if (mOpeningFile) {
1055 mDoomAfterOpenListener = listener;
1058 return rv;
1061 nsresult CacheFile::ThrowMemoryCachedData() {
1062 CacheFileAutoLock lock(this);
1064 LOG(("CacheFile::ThrowMemoryCachedData() [this=%p]", this));
1066 if (mMemoryOnly) {
1067 // This method should not be called when the CacheFile was initialized as
1068 // memory-only, but it can be called when CacheFile end up as memory-only
1069 // due to e.g. IO failure since CacheEntry doesn't know it.
1070 LOG(
1071 ("CacheFile::ThrowMemoryCachedData() - Ignoring request because the "
1072 "entry is memory-only. [this=%p]",
1073 this));
1075 return NS_ERROR_NOT_AVAILABLE;
1078 if (mOpeningFile) {
1079 // mayhemer, note: we shouldn't get here, since CacheEntry prevents loading
1080 // entries from being purged.
1082 LOG(
1083 ("CacheFile::ThrowMemoryCachedData() - Ignoring request because the "
1084 "entry is still opening the file [this=%p]",
1085 this));
1087 return NS_ERROR_ABORT;
1090 // We cannot release all cached chunks since we need to keep preloaded chunks
1091 // in memory. See initialization of mPreloadChunkCount for explanation.
1092 CleanUpCachedChunks();
1094 return NS_OK;
1097 nsresult CacheFile::GetElement(const char* aKey, char** _retval) {
1098 CacheFileAutoLock lock(this);
1099 MOZ_ASSERT(mMetadata);
1100 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1102 const char* value;
1103 value = mMetadata->GetElement(aKey);
1104 if (!value) return NS_ERROR_NOT_AVAILABLE;
1106 *_retval = NS_xstrdup(value);
1107 return NS_OK;
1110 nsresult CacheFile::SetElement(const char* aKey, const char* aValue) {
1111 CacheFileAutoLock lock(this);
1113 LOG(("CacheFile::SetElement() this=%p", this));
1115 MOZ_ASSERT(mMetadata);
1116 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1118 if (!strcmp(aKey, CacheFileUtils::kAltDataKey)) {
1119 NS_ERROR(
1120 "alt-data element is reserved for internal use and must not be "
1121 "changed via CacheFile::SetElement()");
1122 return NS_ERROR_FAILURE;
1125 PostWriteTimer();
1126 return mMetadata->SetElement(aKey, aValue);
1129 nsresult CacheFile::VisitMetaData(nsICacheEntryMetaDataVisitor* aVisitor) {
1130 CacheFileAutoLock lock(this);
1131 MOZ_ASSERT(mMetadata);
1132 MOZ_ASSERT(mReady);
1133 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1135 mMetadata->Visit(aVisitor);
1136 return NS_OK;
1139 nsresult CacheFile::ElementsSize(uint32_t* _retval) {
1140 CacheFileAutoLock lock(this);
1142 if (!mMetadata) return NS_ERROR_NOT_AVAILABLE;
1144 *_retval = mMetadata->ElementsSize();
1145 return NS_OK;
1148 nsresult CacheFile::SetExpirationTime(uint32_t aExpirationTime) {
1149 CacheFileAutoLock lock(this);
1151 LOG(("CacheFile::SetExpirationTime() this=%p, expiration=%u", this,
1152 aExpirationTime));
1154 MOZ_ASSERT(mMetadata);
1155 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1157 PostWriteTimer();
1158 mMetadata->SetExpirationTime(aExpirationTime);
1159 return NS_OK;
1162 nsresult CacheFile::GetExpirationTime(uint32_t* _retval) {
1163 CacheFileAutoLock lock(this);
1164 MOZ_ASSERT(mMetadata);
1165 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1167 *_retval = mMetadata->GetExpirationTime();
1168 return NS_OK;
1171 nsresult CacheFile::SetFrecency(uint32_t aFrecency) {
1172 CacheFileAutoLock lock(this);
1174 LOG(("CacheFile::SetFrecency() this=%p, frecency=%u", this, aFrecency));
1176 MOZ_ASSERT(mMetadata);
1177 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1179 PostWriteTimer();
1181 if (mHandle && !mHandle->IsDoomed()) {
1182 CacheFileIOManager::UpdateIndexEntry(mHandle, &aFrecency, nullptr, nullptr,
1183 nullptr, nullptr);
1186 mMetadata->SetFrecency(aFrecency);
1187 return NS_OK;
1190 nsresult CacheFile::GetFrecency(uint32_t* _retval) {
1191 CacheFileAutoLock lock(this);
1192 MOZ_ASSERT(mMetadata);
1193 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1194 *_retval = mMetadata->GetFrecency();
1195 return NS_OK;
// Stores the network response timings (OnStartRequest/OnStopRequest, in ms)
// both in the entry metadata (full 64-bit precision, as decimal strings) and
// in the cache index (clamped to 16 bits).
nsresult CacheFile::SetNetworkTimes(uint64_t aOnStartTime,
                                    uint64_t aOnStopTime) {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::SetNetworkTimes() this=%p, aOnStartTime=%" PRIu64
       ", aOnStopTime=%" PRIu64 "",
       this, aOnStartTime, aOnStopTime));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  // Schedule the metadata to be written to disk.
  PostWriteTimer();

  // Both timings are serialized as decimal strings in the metadata elements.
  nsAutoCString onStartTime;
  onStartTime.AppendInt(aOnStartTime);
  nsresult rv =
      mMetadata->SetElement("net-response-time-onstart", onStartTime.get());
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  nsAutoCString onStopTime;
  onStopTime.AppendInt(aOnStopTime);
  rv = mMetadata->SetElement("net-response-time-onstop", onStopTime.get());
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  // The index stores only 16-bit values; clamp anything larger to
  // kIndexTimeOutOfBound so it still sorts as "slow".
  uint16_t onStartTime16 = aOnStartTime <= kIndexTimeOutOfBound
                               ? aOnStartTime
                               : kIndexTimeOutOfBound;
  uint16_t onStopTime16 =
      aOnStopTime <= kIndexTimeOutOfBound ? aOnStopTime : kIndexTimeOutOfBound;

  // Update the index only for live disk-backed entries.
  if (mHandle && !mHandle->IsDoomed()) {
    CacheFileIOManager::UpdateIndexEntry(
        mHandle, nullptr, nullptr, &onStartTime16, &onStopTime16, nullptr);
  }
  return NS_OK;
}
1239 nsresult CacheFile::GetOnStartTime(uint64_t* _retval) {
1240 CacheFileAutoLock lock(this);
1242 MOZ_ASSERT(mMetadata);
1243 const char* onStartTimeStr =
1244 mMetadata->GetElement("net-response-time-onstart");
1245 if (!onStartTimeStr) {
1246 return NS_ERROR_NOT_AVAILABLE;
1248 nsresult rv;
1249 *_retval = nsDependentCString(onStartTimeStr).ToInteger64(&rv);
1250 MOZ_ASSERT(NS_SUCCEEDED(rv));
1251 return NS_OK;
1254 nsresult CacheFile::GetOnStopTime(uint64_t* _retval) {
1255 CacheFileAutoLock lock(this);
1257 MOZ_ASSERT(mMetadata);
1258 const char* onStopTimeStr = mMetadata->GetElement("net-response-time-onstop");
1259 if (!onStopTimeStr) {
1260 return NS_ERROR_NOT_AVAILABLE;
1262 nsresult rv;
1263 *_retval = nsDependentCString(onStopTimeStr).ToInteger64(&rv);
1264 MOZ_ASSERT(NS_SUCCEEDED(rv));
1265 return NS_OK;
// Records the content-type classification of this entry, both in the
// metadata (element "ctid", used when the index must be rebuilt) and in the
// cache index itself.
nsresult CacheFile::SetContentType(uint8_t aContentType) {
  CacheFileAutoLock lock(this);

  LOG(("CacheFile::SetContentType() this=%p, contentType=%u", this,
       aContentType));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  // Schedule the metadata to be written to disk.
  PostWriteTimer();

  // Save the content type to metadata for case we need to rebuild the index.
  nsAutoCString contentType;
  contentType.AppendInt(aContentType);
  nsresult rv = mMetadata->SetElement("ctid", contentType.get());
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  // Mirror the value into the index for live disk-backed entries.
  if (mHandle && !mHandle->IsDoomed()) {
    CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, nullptr, nullptr,
                                         nullptr, &aContentType);
  }
  return NS_OK;
}
// Sets (or clears, when aAltMetadata is null) the reserved alt-data metadata
// element. Caller must hold the CacheFile lock. On failure the alt-data
// state of the entry is fully reset so the entry never advertises alt-data
// it cannot describe.
nsresult CacheFile::SetAltMetadata(const char* aAltMetadata) {
  AssertOwnsLock();
  LOG(("CacheFile::SetAltMetadata() this=%p, aAltMetadata=%s", this,
       aAltMetadata ? aAltMetadata : ""));

  MOZ_ASSERT(mMetadata);
  NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);

  // Schedule the metadata to be written to disk.
  PostWriteTimer();

  nsresult rv =
      mMetadata->SetElement(CacheFileUtils::kAltDataKey, aAltMetadata);

  bool hasAltData = !!aAltMetadata;

  if (NS_FAILED(rv)) {
    // Removing element shouldn't fail because it doesn't allocate memory.
    mMetadata->SetElement(CacheFileUtils::kAltDataKey, nullptr);

    // Wipe the in-memory alt-data state as well, keeping it consistent with
    // the (now cleared) metadata element.
    mAltDataOffset = -1;
    mAltDataType.Truncate();
    hasAltData = false;
  }

  // Keep the index's has-alt-data flag in sync for live disk-backed entries.
  if (mHandle && !mHandle->IsDoomed()) {
    CacheFileIOManager::UpdateIndexEntry(mHandle, nullptr, &hasAltData, nullptr,
                                         nullptr, nullptr);
  }
  return rv;
}
1325 nsresult CacheFile::GetLastModified(uint32_t* _retval) {
1326 CacheFileAutoLock lock(this);
1327 MOZ_ASSERT(mMetadata);
1328 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1330 *_retval = mMetadata->GetLastModified();
1331 return NS_OK;
1334 nsresult CacheFile::GetLastFetched(uint32_t* _retval) {
1335 CacheFileAutoLock lock(this);
1336 MOZ_ASSERT(mMetadata);
1337 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1339 *_retval = mMetadata->GetLastFetched();
1340 return NS_OK;
1343 nsresult CacheFile::GetFetchCount(uint32_t* _retval) {
1344 CacheFileAutoLock lock(this);
1345 MOZ_ASSERT(mMetadata);
1346 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1347 *_retval = mMetadata->GetFetchCount();
1348 return NS_OK;
1351 nsresult CacheFile::GetDiskStorageSizeInKB(uint32_t* aDiskStorageSize) {
1352 CacheFileAutoLock lock(this);
1353 if (!mHandle) {
1354 return NS_ERROR_NOT_AVAILABLE;
1357 *aDiskStorageSize = mHandle->FileSizeInK();
1358 return NS_OK;
1361 nsresult CacheFile::OnFetched() {
1362 CacheFileAutoLock lock(this);
1364 LOG(("CacheFile::OnFetched() this=%p", this));
1366 MOZ_ASSERT(mMetadata);
1367 NS_ENSURE_TRUE(mMetadata, NS_ERROR_UNEXPECTED);
1369 PostWriteTimer();
1371 mMetadata->OnFetched();
1372 return NS_OK;
// Defers the final release of aObject until after the CacheFile lock is
// dropped, by parking the reference in mObjsToRelease. Releasing refcounted
// objects while holding the lock could reenter CacheFile code — presumably
// the queue is drained on unlock; confirm in CacheFileLock/unlock path.
void CacheFile::ReleaseOutsideLock(RefPtr<nsISupports> aObject) {
  AssertOwnsLock();

  mObjsToRelease.AppendElement(std::move(aObject));
}
// Core chunk lookup/creation routine; caller must hold the CacheFile lock.
// Resolution order:
//   1. active chunk in mChunks (return it, or queue the reader's callback if
//      it's still loading),
//   2. idle chunk in mCachedChunks (reactivate and return it),
//   3. chunk exists on disk (create a CacheFileChunk and start an async
//      read),
//   4. chunk is exactly at EOF (writers get a fresh empty chunk),
//   5. chunk is beyond EOF (writers get the gap zero-filled first; readers
//      wait on mOutput or fail with NS_ERROR_NOT_AVAILABLE).
// READER callers receive the chunk via aCallback; WRITER/PRELOADER callers
// pass no callback and get the chunk (if any) via _retval.
nsresult CacheFile::GetChunkLocked(uint32_t aIndex, ECallerType aCaller,
                                   CacheFileChunkListener* aCallback,
                                   CacheFileChunk** _retval) {
  AssertOwnsLock();

  LOG(("CacheFile::GetChunkLocked() [this=%p, idx=%u, caller=%d, listener=%p]",
       this, aIndex, aCaller, aCallback));

  MOZ_ASSERT(mReady);
  MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
  // Only readers are notified asynchronously through a callback.
  MOZ_ASSERT((aCaller == READER && aCallback) ||
             (aCaller == WRITER && !aCallback) ||
             (aCaller == PRELOADER && !aCallback));

  // Preload chunks from disk when this is disk backed entry and the listener
  // is reader.
  bool preload = !mMemoryOnly && (aCaller == READER);

  nsresult rv;

  RefPtr<CacheFileChunk> chunk;
  if (mChunks.Get(aIndex, getter_AddRefs(chunk))) {
    // Case 1: the chunk is already active (in use or being loaded).
    LOG(("CacheFile::GetChunkLocked() - Found chunk %p in mChunks [this=%p]",
         chunk.get(), this));

    // Preloader calls this method to preload only non-loaded chunks.
    MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");

    // We might get failed chunk between releasing the lock in
    // CacheFileChunk::OnDataWritten/Read and CacheFile::OnChunkWritten/Read
    rv = chunk->GetStatus();
    if (NS_FAILED(rv)) {
      SetError(rv);
      LOG(
          ("CacheFile::GetChunkLocked() - Found failed chunk in mChunks "
           "[this=%p]",
           this));
      return rv;
    }

    // Writers may use a chunk that is still loading; readers must wait for
    // it to become ready.
    if (chunk->IsReady() || aCaller == WRITER) {
      chunk.swap(*_retval);
    } else {
      QueueChunkListener(aIndex, aCallback);
    }

    if (preload) {
      PreloadChunks(aIndex + 1);
    }

    return NS_OK;
  }

  if (mCachedChunks.Get(aIndex, getter_AddRefs(chunk))) {
    // Case 2: the chunk is cached but idle — move it back to the active set.
    LOG(("CacheFile::GetChunkLocked() - Reusing cached chunk %p [this=%p]",
         chunk.get(), this));

    // Preloader calls this method to preload only non-loaded chunks.
    MOZ_ASSERT(aCaller != PRELOADER, "Unexpected!");

    mChunks.InsertOrUpdate(aIndex, RefPtr{chunk});
    mCachedChunks.Remove(aIndex);
    // Reactivation: the chunk gets a back-pointer to this file again.
    chunk->mFile = this;
    chunk->mActiveChunk = true;

    MOZ_ASSERT(chunk->IsReady());

    chunk.swap(*_retval);

    if (preload) {
      PreloadChunks(aIndex + 1);
    }

    return NS_OK;
  }

  // Byte offset where the requested chunk starts.
  int64_t off = aIndex * static_cast<int64_t>(kChunkSize);

  if (off < mDataSize) {
    // Case 3: chunk exists on disk but isn't loaded — read it.
    // We cannot be here if this is memory only entry since the chunk must
    // exist
    MOZ_ASSERT(!mMemoryOnly);
    if (mMemoryOnly) {
      // If this ever really happen it is better to fail rather than crashing
      // on a null handle.
      LOG(
          ("CacheFile::GetChunkLocked() - Unexpected state! Offset < mDataSize "
           "for memory-only entry. [this=%p, off=%" PRId64
           ", mDataSize=%" PRId64 "]",
           this, off, mDataSize));

      return NS_ERROR_UNEXPECTED;
    }

    chunk = new CacheFileChunk(this, aIndex, aCaller == WRITER);
    mChunks.InsertOrUpdate(aIndex, RefPtr{chunk});
    chunk->mActiveChunk = true;

    LOG(
        ("CacheFile::GetChunkLocked() - Reading newly created chunk %p from "
         "the disk [this=%p]",
         chunk.get(), this));

    // Read the chunk from the disk; the last chunk may be shorter than
    // kChunkSize.
    rv = chunk->Read(mHandle,
                     std::min(static_cast<uint32_t>(mDataSize - off),
                              static_cast<uint32_t>(kChunkSize)),
                     mMetadata->GetHash(aIndex), this);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      RemoveChunkInternal(chunk, false);
      return rv;
    }

    if (aCaller == WRITER) {
      chunk.swap(*_retval);
    } else if (aCaller != PRELOADER) {
      // Reader gets notified once the async read completes.
      QueueChunkListener(aIndex, aCallback);
    }

    if (preload) {
      PreloadChunks(aIndex + 1);
    }

    return NS_OK;
  }

  if (off == mDataSize) {
    // Case 4: the chunk begins exactly at EOF.
    if (aCaller == WRITER) {
      // this listener is going to write to the chunk
      chunk = new CacheFileChunk(this, aIndex, true);
      mChunks.InsertOrUpdate(aIndex, RefPtr{chunk});
      chunk->mActiveChunk = true;

      LOG(("CacheFile::GetChunkLocked() - Created new empty chunk %p "
           "[this=%p]",
           chunk.get(), this));

      chunk->InitNew();
      mMetadata->SetHash(aIndex, chunk->Hash());

      // Wake up any readers already waiting for this chunk.
      if (HaveChunkListeners(aIndex)) {
        rv = NotifyChunkListeners(aIndex, NS_OK, chunk);
        NS_ENSURE_SUCCESS(rv, rv);
      }

      chunk.swap(*_retval);
      return NS_OK;
    }
  } else {
    // Case 5: the chunk begins beyond EOF.
    if (aCaller == WRITER) {
      // this chunk was requested by writer, but we need to fill the gap first

      // Fill with zero the last chunk if it is incomplete
      if (mDataSize % kChunkSize) {
        rv = PadChunkWithZeroes(mDataSize / kChunkSize);
        NS_ENSURE_SUCCESS(rv, rv);

        MOZ_ASSERT(!(mDataSize % kChunkSize));
      }

      uint32_t startChunk = mDataSize / kChunkSize;

      if (mMemoryOnly) {
        // We need to create all missing CacheFileChunks if this is
        // memory-only entry
        for (uint32_t i = startChunk; i < aIndex; i++) {
          rv = PadChunkWithZeroes(i);
          NS_ENSURE_SUCCESS(rv, rv);
        }
      } else {
        // We don't need to create CacheFileChunk for other empty chunks
        // unless there is some input stream waiting for this chunk.

        if (startChunk != aIndex) {
          // Make sure the file contains zeroes at the end of the file
          rv = CacheFileIOManager::TruncateSeekSetEOF(
              mHandle, startChunk * kChunkSize, aIndex * kChunkSize, nullptr);
          NS_ENSURE_SUCCESS(rv, rv);
        }

        for (uint32_t i = startChunk; i < aIndex; i++) {
          if (HaveChunkListeners(i)) {
            rv = PadChunkWithZeroes(i);
            NS_ENSURE_SUCCESS(rv, rv);
          } else {
            // No listener: just record an empty-chunk hash and extend the
            // logical size without materializing a chunk.
            mMetadata->SetHash(i, kEmptyChunkHash);
            mDataSize = (i + 1) * kChunkSize;
          }
        }
      }

      // The gap is filled; recurse to hit the off == mDataSize path above.
      MOZ_ASSERT(mDataSize == off);
      rv = GetChunkLocked(aIndex, WRITER, nullptr, getter_AddRefs(chunk));
      NS_ENSURE_SUCCESS(rv, rv);

      chunk.swap(*_retval);
      return NS_OK;
    }
  }

  // We can be here only if the caller is reader since writer always create a
  // new chunk above and preloader calls this method to preload only chunks
  // that are not loaded but that do exist.
  MOZ_ASSERT(aCaller == READER, "Unexpected!");

  if (mOutput) {
    // the chunk doesn't exist but mOutput may create it
    QueueChunkListener(aIndex, aCallback);
  } else {
    return NS_ERROR_NOT_AVAILABLE;
  }

  return NS_OK;
}
// Kicks off asynchronous loads for up to mPreloadChunkCount chunks starting
// at aIndex, stopping early at EOF. Chunks already active or cached are
// skipped. Caller must hold the CacheFile lock.
void CacheFile::PreloadChunks(uint32_t aIndex) {
  AssertOwnsLock();

  uint32_t limit = aIndex + mPreloadChunkCount;

  for (uint32_t i = aIndex; i < limit; ++i) {
    int64_t off = i * static_cast<int64_t>(kChunkSize);

    if (off >= mDataSize) {
      // This chunk is beyond EOF.
      return;
    }

    if (mChunks.GetWeak(i) || mCachedChunks.GetWeak(i)) {
      // This chunk is already in memory or is being read right now.
      continue;
    }

    LOG(("CacheFile::PreloadChunks() - Preloading chunk [this=%p, idx=%u]",
         this, i));

    // PRELOADER callers never get a chunk back synchronously; the read is
    // started in the background.
    RefPtr<CacheFileChunk> chunk;
    GetChunkLocked(i, PRELOADER, nullptr, getter_AddRefs(chunk));
    // We've checked that we don't have this chunk, so no chunk must be
    // returned.
    MOZ_ASSERT(!chunk);
  }
}
// Decides whether a deactivated chunk should be kept in mCachedChunks.
// With CACHE_CHUNKS defined every chunk is kept; otherwise only chunks that
// must stay in memory (see MustKeepCachedChunk) or that serve as a warm-up
// cache before the first input stream is opened. Caller must hold the lock.
bool CacheFile::ShouldCacheChunk(uint32_t aIndex) {
  AssertOwnsLock();

#ifdef CACHE_CHUNKS
  // We cache all chunks.
  return true;
#else

  if (mPreloadChunkCount != 0 && mInputs.Length() == 0 &&
      mPreloadWithoutInputStreams && aIndex < mPreloadChunkCount) {
    // We don't have any input stream yet, but it is likely that some will be
    // opened soon. Keep first mPreloadChunkCount chunks in memory. The
    // condition is here instead of in MustKeepCachedChunk() since these
    // chunks should be preloaded and can be kept in memory as an
    // optimization, but they can be released at any time until they are
    // considered as preloaded chunks for any input stream.
    return true;
  }

  // Cache only chunks that we really need to keep.
  return MustKeepCachedChunk(aIndex);
#endif
}
// Returns true when the chunk at aIndex must not be released: either the
// entry has no backing file (memory-only, or the file is still opening), or
// the chunk lies within the preload window ahead of some existing input
// stream's position. Caller must hold the CacheFile lock.
bool CacheFile::MustKeepCachedChunk(uint32_t aIndex) {
  AssertOwnsLock();

  // We must keep the chunk when this is memory only entry or we don't have
  // a handle yet.
  if (mMemoryOnly || mOpeningFile) {
    return true;
  }

  if (mPreloadChunkCount == 0) {
    // Preloading of chunks is disabled
    return false;
  }

  // Check whether this chunk should be considered as preloaded chunk for any
  // existing input stream.

  // maxPos is the position of the last byte in the given chunk
  int64_t maxPos = static_cast<int64_t>(aIndex + 1) * kChunkSize - 1;

  // minPos is the position of the first byte in a chunk that precedes the
  // given chunk by mPreloadChunkCount chunks
  int64_t minPos;
  if (mPreloadChunkCount >= aIndex) {
    minPos = 0;
  } else {
    minPos = static_cast<int64_t>(aIndex - mPreloadChunkCount) * kChunkSize;
  }

  // Any input stream positioned in [minPos, maxPos] would consider this
  // chunk preloaded, so keep it.
  for (uint32_t i = 0; i < mInputs.Length(); ++i) {
    int64_t inputPos = mInputs[i]->GetPosition();
    if (inputPos >= minPos && inputPos <= maxPos) {
      return true;
    }
  }

  return false;
}
// Called when the last external reference to an active chunk is dropped.
// Depending on state, the chunk is: left alone (somebody re-acquired it),
// dropped from the discarded list, released because the entry failed,
// written to disk (if dirty), or moved to mCachedChunks/released. The
// refcount checks rely on taking a local strong reference first.
nsresult CacheFile::DeactivateChunk(CacheFileChunk* aChunk) {
  nsresult rv;

  // Avoid lock reentrancy by increasing the RefCnt
  RefPtr<CacheFileChunk> chunk = aChunk;

  {
    CacheFileAutoLock lock(this);

    LOG(("CacheFile::DeactivateChunk() [this=%p, chunk=%p, idx=%u]", this,
         aChunk, aChunk->Index()));

    MOZ_ASSERT(mReady);
    // Exactly one of: disk-backed with handle, memory-only, or still opening.
    MOZ_ASSERT((mHandle && !mMemoryOnly && !mOpeningFile) ||
               (!mHandle && mMemoryOnly && !mOpeningFile) ||
               (!mHandle && !mMemoryOnly && mOpeningFile));

    // Expected refcount is 2: our local `chunk` plus the entry in mChunks.
    if (aChunk->mRefCnt != 2) {
      LOG(
          ("CacheFile::DeactivateChunk() - Chunk is still used [this=%p, "
           "chunk=%p, refcnt=%" PRIuPTR "]",
           this, aChunk, aChunk->mRefCnt.get()));

      // somebody got the reference before the lock was acquired
      return NS_OK;
    }

    if (aChunk->mDiscardedChunk) {
      // The chunk was discarded (e.g. by Truncate); just detach it.
      aChunk->mActiveChunk = false;
      ReleaseOutsideLock(
          RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));

      DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
      MOZ_ASSERT(removed);
      return NS_OK;
    }

#ifdef DEBUG
    {
      // We can be here iff the chunk is in the hash table
      RefPtr<CacheFileChunk> chunkCheck;
      mChunks.Get(chunk->Index(), getter_AddRefs(chunkCheck));
      MOZ_ASSERT(chunkCheck == chunk);

      // We also shouldn't have any queued listener for this chunk
      ChunkListeners* listeners;
      mChunkListeners.Get(chunk->Index(), &listeners);
      MOZ_ASSERT(!listeners);
    }
#endif

    // Propagate a chunk failure to the whole entry.
    if (NS_FAILED(chunk->GetStatus())) {
      SetError(chunk->GetStatus());
    }

    if (NS_FAILED(mStatus)) {
      // Don't write any chunk to disk since this entry will be doomed
      LOG(
          ("CacheFile::DeactivateChunk() - Releasing chunk because of status "
           "[this=%p, chunk=%p, mStatus=0x%08" PRIx32 "]",
           this, chunk.get(), static_cast<uint32_t>(mStatus)));

      RemoveChunkInternal(chunk, false);
      return mStatus;
    }

    if (chunk->IsDirty() && !mMemoryOnly && !mOpeningFile) {
      LOG(
          ("CacheFile::DeactivateChunk() - Writing dirty chunk to the disk "
           "[this=%p]",
           this));

      mDataIsDirty = true;

      rv = chunk->Write(mHandle, this);
      if (NS_FAILED(rv)) {
        LOG(
            ("CacheFile::DeactivateChunk() - CacheFileChunk::Write() failed "
             "synchronously. Removing it. [this=%p, chunk=%p, rv=0x%08" PRIx32
             "]",
             this, chunk.get(), static_cast<uint32_t>(rv)));

        RemoveChunkInternal(chunk, false);

        SetError(rv);
        return rv;
      }

      // Chunk will be removed in OnChunkWritten if it is still unused

      // chunk needs to be released under the lock to be able to rely on
      // CacheFileChunk::mRefCnt in CacheFile::OnChunkWritten()
      chunk = nullptr;
      return NS_OK;
    }

    // Clean, unused chunk: either cache it or release it.
    bool keepChunk = ShouldCacheChunk(aChunk->Index());
    LOG(("CacheFile::DeactivateChunk() - %s unused chunk [this=%p, chunk=%p]",
         keepChunk ? "Caching" : "Releasing", this, chunk.get()));

    RemoveChunkInternal(chunk, keepChunk);

    if (!mMemoryOnly) WriteMetadataIfNeededLocked();
  }

  return NS_OK;
}
// Removes aChunk from the active set (mChunks), optionally parking it in
// mCachedChunks for reuse. The chunk's back-pointer to this file is moved
// out and released outside the lock to avoid reentrancy. Caller must hold
// the CacheFile lock.
void CacheFile::RemoveChunkInternal(CacheFileChunk* aChunk, bool aCacheChunk) {
  AssertOwnsLock();

  aChunk->mActiveChunk = false;
  // Transfer the chunk's strong reference to us and defer its release.
  ReleaseOutsideLock(RefPtr<CacheFileChunkListener>(std::move(aChunk->mFile)));

  if (aCacheChunk) {
    mCachedChunks.InsertOrUpdate(aChunk->Index(), RefPtr{aChunk});
  }

  mChunks.Remove(aChunk->Index());
}
1806 bool CacheFile::OutputStreamExists(bool aAlternativeData) {
1807 AssertOwnsLock();
1809 if (!mOutput) {
1810 return false;
1813 return mOutput->IsAlternativeData() == aAlternativeData;
// Returns how many bytes starting at chunk aIndex are guaranteed to be
// readable without blocking, by counting consecutive ready chunks (active or
// cached). For disk-backed entries only chunks within the preload window are
// counted, since others may be released at any time. Caller must hold the
// CacheFile lock.
int64_t CacheFile::BytesFromChunk(uint32_t aIndex, bool aAlternativeData) {
  AssertOwnsLock();

  // The logical end of the requested stream: normal data ends at
  // mAltDataOffset when alt-data exists.
  int64_t dataSize;

  if (mAltDataOffset != -1) {
    if (aAlternativeData) {
      dataSize = mDataSize;
    } else {
      dataSize = mAltDataOffset;
    }
  } else {
    MOZ_ASSERT(!aAlternativeData);
    dataSize = mDataSize;
  }

  if (!dataSize) {
    return 0;
  }

  // Index of the last existing chunk.
  uint32_t lastChunk = (dataSize - 1) / kChunkSize;
  if (aIndex > lastChunk) {
    return 0;
  }

  // We can use only preloaded chunks for the given stream to calculate
  // available bytes if this is an entry stored on disk, since only those
  // chunks are guaranteed not to be released.
  uint32_t maxPreloadedChunk;
  if (mMemoryOnly) {
    maxPreloadedChunk = lastChunk;
  } else {
    maxPreloadedChunk = std::min(aIndex + mPreloadChunkCount, lastChunk);
  }

  // Walk forward while chunks are present and ready; `i` ends at the first
  // chunk that is missing or not ready.
  uint32_t i;
  for (i = aIndex; i <= maxPreloadedChunk; ++i) {
    CacheFileChunk* chunk;

    chunk = mChunks.GetWeak(i);
    if (chunk) {
      MOZ_ASSERT(i == lastChunk || chunk->DataSize() == kChunkSize);
      if (chunk->IsReady()) {
        continue;
      }

      // don't search this chunk in cached
      break;
    }

    chunk = mCachedChunks.GetWeak(i);
    if (chunk) {
      MOZ_ASSERT(i == lastChunk || chunk->DataSize() == kChunkSize);
      continue;
    }

    break;
  }

  // theoretic bytes in advance
  int64_t advance = int64_t(i - aIndex) * kChunkSize;
  // real bytes till the end of the file
  int64_t tail = dataSize - (aIndex * kChunkSize);

  return std::min(advance, tail);
}
1884 nsresult CacheFile::Truncate(int64_t aOffset) {
1885 AssertOwnsLock();
1887 LOG(("CacheFile::Truncate() [this=%p, offset=%" PRId64 "]", this, aOffset));
1889 nsresult rv;
1891 // If we ever need to truncate on non alt-data boundary, we need to handle
1892 // existing input streams.
1893 MOZ_ASSERT(aOffset == mAltDataOffset,
1894 "Truncating normal data not implemented");
1895 MOZ_ASSERT(mReady);
1896 MOZ_ASSERT(!mOutput);
1898 uint32_t lastChunk = 0;
1899 if (mDataSize > 0) {
1900 lastChunk = (mDataSize - 1) / kChunkSize;
1903 uint32_t newLastChunk = 0;
1904 if (aOffset > 0) {
1905 newLastChunk = (aOffset - 1) / kChunkSize;
1908 uint32_t bytesInNewLastChunk = aOffset - newLastChunk * kChunkSize;
1910 LOG(
1911 ("CacheFileTruncate() - lastChunk=%u, newLastChunk=%u, "
1912 "bytesInNewLastChunk=%u",
1913 lastChunk, newLastChunk, bytesInNewLastChunk));
1915 // Remove all truncated chunks from mCachedChunks
1916 for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
1917 uint32_t idx = iter.Key();
1919 if (idx > newLastChunk) {
1920 // This is unused chunk, simply remove it.
1921 LOG(("CacheFile::Truncate() - removing cached chunk [idx=%u]", idx));
1922 iter.Remove();
1926 // We need to make sure no input stream holds a reference to a chunk we're
1927 // going to discard. In theory, if alt-data begins at chunk boundary, input
1928 // stream for normal data can get the chunk containing only alt-data via
1929 // EnsureCorrectChunk() call. The input stream won't read the data from such
1930 // chunk, but it will keep the reference until the stream is closed and we
1931 // cannot simply discard this chunk.
1932 int64_t maxInputChunk = -1;
1933 for (uint32_t i = 0; i < mInputs.Length(); ++i) {
1934 int64_t inputChunk = mInputs[i]->GetChunkIdx();
1936 if (maxInputChunk < inputChunk) {
1937 maxInputChunk = inputChunk;
1940 MOZ_RELEASE_ASSERT(mInputs[i]->GetPosition() <= aOffset);
1943 MOZ_RELEASE_ASSERT(maxInputChunk <= newLastChunk + 1);
1944 if (maxInputChunk == newLastChunk + 1) {
1945 // Truncating must be done at chunk boundary
1946 MOZ_RELEASE_ASSERT(bytesInNewLastChunk == kChunkSize);
1947 newLastChunk++;
1948 bytesInNewLastChunk = 0;
1949 LOG(
1950 ("CacheFile::Truncate() - chunk %p is still in use, using "
1951 "newLastChunk=%u and bytesInNewLastChunk=%u",
1952 mChunks.GetWeak(newLastChunk), newLastChunk, bytesInNewLastChunk));
1955 // Discard all truncated chunks in mChunks
1956 for (auto iter = mChunks.Iter(); !iter.Done(); iter.Next()) {
1957 uint32_t idx = iter.Key();
1959 if (idx > newLastChunk) {
1960 RefPtr<CacheFileChunk>& chunk = iter.Data();
1961 LOG(("CacheFile::Truncate() - discarding chunk [idx=%u, chunk=%p]", idx,
1962 chunk.get()));
1964 if (HaveChunkListeners(idx)) {
1965 NotifyChunkListeners(idx, NS_ERROR_NOT_AVAILABLE, chunk);
1968 chunk->mDiscardedChunk = true;
1969 mDiscardedChunks.AppendElement(chunk);
1970 iter.Remove();
1974 // Remove hashes of all removed chunks from the metadata
1975 for (uint32_t i = lastChunk; i > newLastChunk; --i) {
1976 mMetadata->RemoveHash(i);
1979 // Truncate new last chunk
1980 if (bytesInNewLastChunk == kChunkSize) {
1981 LOG(("CacheFile::Truncate() - not truncating last chunk."));
1982 } else {
1983 RefPtr<CacheFileChunk> chunk;
1984 if (mChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
1985 LOG(("CacheFile::Truncate() - New last chunk %p got from mChunks.",
1986 chunk.get()));
1987 } else if (mCachedChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
1988 LOG(("CacheFile::Truncate() - New last chunk %p got from mCachedChunks.",
1989 chunk.get()));
1990 } else {
1991 // New last chunk isn't loaded but we need to update the hash.
1992 MOZ_ASSERT(!mMemoryOnly);
1993 MOZ_ASSERT(mHandle);
1995 rv = GetChunkLocked(newLastChunk, PRELOADER, nullptr,
1996 getter_AddRefs(chunk));
1997 if (NS_FAILED(rv)) {
1998 return rv;
2000 // We've checked that we don't have this chunk, so no chunk must be
2001 // returned.
2002 MOZ_ASSERT(!chunk);
2004 if (!mChunks.Get(newLastChunk, getter_AddRefs(chunk))) {
2005 return NS_ERROR_UNEXPECTED;
2008 LOG(("CacheFile::Truncate() - New last chunk %p got from preloader.",
2009 chunk.get()));
2012 rv = chunk->GetStatus();
2013 if (NS_FAILED(rv)) {
2014 LOG(
2015 ("CacheFile::Truncate() - New last chunk is failed "
2016 "[status=0x%08" PRIx32 "]",
2017 static_cast<uint32_t>(rv)));
2018 return rv;
2021 chunk->Truncate(bytesInNewLastChunk);
2023 // If the chunk is ready set the new hash now. If it's still being loaded
2024 // CacheChunk::Truncate() made the chunk dirty and the hash will be updated
2025 // in OnChunkWritten().
2026 if (chunk->IsReady()) {
2027 mMetadata->SetHash(newLastChunk, chunk->Hash());
2031 if (mHandle) {
2032 rv = CacheFileIOManager::TruncateSeekSetEOF(mHandle, aOffset, aOffset,
2033 nullptr);
2034 if (NS_FAILED(rv)) {
2035 return rv;
2039 mDataSize = aOffset;
2041 return NS_OK;
2044 static uint32_t StatusToTelemetryEnum(nsresult aStatus) {
2045 if (NS_SUCCEEDED(aStatus)) {
2046 return 0;
2049 switch (aStatus) {
2050 case NS_BASE_STREAM_CLOSED:
2051 return 0; // Log this as a success
2052 case NS_ERROR_OUT_OF_MEMORY:
2053 return 2;
2054 case NS_ERROR_FILE_NO_DEVICE_SPACE:
2055 return 3;
2056 case NS_ERROR_FILE_CORRUPTED:
2057 return 4;
2058 case NS_ERROR_FILE_NOT_FOUND:
2059 return 5;
2060 case NS_BINDING_ABORTED:
2061 return 6;
2062 default:
2063 return 1; // other error
2066 MOZ_ASSERT_UNREACHABLE("We should never get here");
// Detaches a closed input stream from the entry: removes it from mInputs,
// defers its release until the lock is dropped, flushes metadata for
// disk-backed entries, frees now-unneeded cached chunks and records the
// close status in telemetry. Caller must hold the CacheFile lock.
void CacheFile::RemoveInput(CacheFileInputStream* aInput, nsresult aStatus) {
  AssertOwnsLock();

  LOG(("CacheFile::RemoveInput() [this=%p, input=%p, status=0x%08" PRIx32 "]",
       this, aInput, static_cast<uint32_t>(aStatus)));

  DebugOnly<bool> found{};
  found = mInputs.RemoveElement(aInput);
  MOZ_ASSERT(found);

  // The stream's reference must be dropped outside the lock to avoid
  // reentering CacheFile code during destruction.
  ReleaseOutsideLock(
      already_AddRefed<nsIInputStream>(static_cast<nsIInputStream*>(aInput)));

  if (!mMemoryOnly) WriteMetadataIfNeededLocked();

  // If the input didn't read all data, there might be left some preloaded
  // chunks that won't be used anymore.
  CleanUpCachedChunks();

  Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_INPUT_STREAM_STATUS,
                        StatusToTelemetryEnum(aStatus));
}
// Detaches the (single) output stream from the entry. Fails waiting chunk
// listeners that can no longer be satisfied, flushes metadata, and — when
// the stream closed with a fatal error — either dooms the entry or, for an
// alt-data stream with no alt-data reader, just truncates the alt-data away.
// Caller must hold the CacheFile lock.
void CacheFile::RemoveOutput(CacheFileOutputStream* aOutput, nsresult aStatus) {
  AssertOwnsLock();

  nsresult rv;

  LOG(("CacheFile::RemoveOutput() [this=%p, output=%p, status=0x%08" PRIx32 "]",
       this, aOutput, static_cast<uint32_t>(aStatus)));

  if (mOutput != aOutput) {
    LOG(
        ("CacheFile::RemoveOutput() - This output was already removed, ignoring"
         " call [this=%p]",
         this));
    return;
  }

  mOutput = nullptr;

  // Cancel all queued chunk and update listeners that cannot be satisfied
  NotifyListenersAboutOutputRemoval();

  if (!mMemoryOnly) WriteMetadataIfNeededLocked();

  // Make sure the CacheFile status is set to a failure when the output stream
  // is closed with a fatal error. This way we propagate correctly and w/o any
  // windows the failure state of this entry to end consumers.
  if (NS_SUCCEEDED(mStatus) && NS_FAILED(aStatus) &&
      aStatus != NS_BASE_STREAM_CLOSED) {
    if (aOutput->IsAlternativeData()) {
      MOZ_ASSERT(mAltDataOffset != -1);
      // If there is no alt-data input stream truncate only alt-data,
      // otherwise doom the entry.
      bool altDataInputExists = false;
      for (uint32_t i = 0; i < mInputs.Length(); ++i) {
        if (mInputs[i]->IsAlternativeData()) {
          altDataInputExists = true;
          break;
        }
      }
      if (altDataInputExists) {
        SetError(aStatus);
      } else {
        rv = Truncate(mAltDataOffset);
        if (NS_FAILED(rv)) {
          LOG(
              ("CacheFile::RemoveOutput() - Truncating alt-data failed "
               "[rv=0x%08" PRIx32 "]",
               static_cast<uint32_t>(rv)));
          SetError(aStatus);
        } else {
          // Alt-data successfully removed; clear all alt-data state.
          SetAltMetadata(nullptr);
          mAltDataOffset = -1;
          mAltDataType.Truncate();
        }
      }
    } else {
      SetError(aStatus);
    }
  }

  // Notify close listener as the last action
  aOutput->NotifyCloseListener();

  Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_OUTPUT_STREAM_STATUS,
                        StatusToTelemetryEnum(aStatus));
}
2159 nsresult CacheFile::NotifyChunkListener(CacheFileChunkListener* aCallback,
2160 nsIEventTarget* aTarget,
2161 nsresult aResult, uint32_t aChunkIdx,
2162 CacheFileChunk* aChunk) {
2163 LOG(
2164 ("CacheFile::NotifyChunkListener() [this=%p, listener=%p, target=%p, "
2165 "rv=0x%08" PRIx32 ", idx=%u, chunk=%p]",
2166 this, aCallback, aTarget, static_cast<uint32_t>(aResult), aChunkIdx,
2167 aChunk));
2169 RefPtr<NotifyChunkListenerEvent> ev;
2170 ev = new NotifyChunkListenerEvent(aCallback, aResult, aChunkIdx, aChunk);
2171 if (aTarget) {
2172 return aTarget->Dispatch(ev, NS_DISPATCH_NORMAL);
2174 return NS_DispatchToCurrentThread(ev);
// Queues aCallback to be notified when the chunk at aIndex becomes
// available. The raw ChunkListenerItem pointer is owned by the
// mChunkListeners entry and deleted in NotifyChunkListeners()/
// NotifyListenersAboutOutputRemoval(). Caller must hold the lock.
void CacheFile::QueueChunkListener(uint32_t aIndex,
                                   CacheFileChunkListener* aCallback) {
  LOG(("CacheFile::QueueChunkListener() [this=%p, idx=%u, listener=%p]", this,
       aIndex, aCallback));

  AssertOwnsLock();

  MOZ_ASSERT(aCallback);

  ChunkListenerItem* item = new ChunkListenerItem();
  // Notifications normally go to the cache I/O thread; fall back to the main
  // thread when it is unavailable.
  item->mTarget = CacheFileIOManager::IOTarget();
  if (!item->mTarget) {
    LOG(
        ("CacheFile::QueueChunkListener() - Cannot get Cache I/O thread! Using "
         "main thread for callback."));
    item->mTarget = GetMainThreadSerialEventTarget();
  }
  item->mCallback = aCallback;

  mChunkListeners.GetOrInsertNew(aIndex)->mItems.AppendElement(item);
}
// Notifies and then drops every listener queued for chunk aIndex. Returns
// the first dispatch failure (if any) while still attempting to notify the
// remaining listeners. Caller must hold the CacheFile lock.
nsresult CacheFile::NotifyChunkListeners(uint32_t aIndex, nsresult aResult,
                                         CacheFileChunk* aChunk) {
  LOG(("CacheFile::NotifyChunkListeners() [this=%p, idx=%u, rv=0x%08" PRIx32
       ", "
       "chunk=%p]",
       this, aIndex, static_cast<uint32_t>(aResult), aChunk));

  AssertOwnsLock();

  nsresult rv, rv2;

  ChunkListeners* listeners;
  mChunkListeners.Get(aIndex, &listeners);
  MOZ_ASSERT(listeners);

  rv = NS_OK;
  for (uint32_t i = 0; i < listeners->mItems.Length(); i++) {
    ChunkListenerItem* item = listeners->mItems[i];
    rv2 = NotifyChunkListener(item->mCallback, item->mTarget, aResult, aIndex,
                              aChunk);
    // Remember the first failure but keep notifying the rest.
    if (NS_FAILED(rv2) && NS_SUCCEEDED(rv)) rv = rv2;
    // Items are owned by the queue; see QueueChunkListener().
    delete item;
  }

  mChunkListeners.Remove(aIndex);

  return rv;
}
2228 bool CacheFile::HaveChunkListeners(uint32_t aIndex) {
2229 AssertOwnsLock();
2230 ChunkListeners* listeners;
2231 mChunkListeners.Get(aIndex, &listeners);
2232 return !!listeners;
// Called when the output stream goes away: listeners waiting for chunks the
// writer would have created can never succeed, so fail them, and let update
// listeners of ready chunks run. Caller must hold the CacheFile lock.
void CacheFile::NotifyListenersAboutOutputRemoval() {
  LOG(("CacheFile::NotifyListenersAboutOutputRemoval() [this=%p]", this));

  AssertOwnsLock();

  // First fail all chunk listeners that wait for non-existent chunk
  for (auto iter = mChunkListeners.Iter(); !iter.Done(); iter.Next()) {
    uint32_t idx = iter.Key();
    auto* listeners = iter.UserData();

    LOG(
        ("CacheFile::NotifyListenersAboutOutputRemoval() - fail "
         "[this=%p, idx=%u]",
         this, idx));

    RefPtr<CacheFileChunk> chunk;
    mChunks.Get(idx, getter_AddRefs(chunk));
    if (chunk) {
      // Skip these listeners because the chunk is being read. We don't have
      // assertion here to check its state because it might be already in
      // READY state while CacheFile::OnChunkRead() is waiting on Cache I/O
      // thread for a lock so the listeners hasn't been notified yet. In any
      // case, the listeners will be notified from CacheFile::OnChunkRead().
      continue;
    }

    for (uint32_t i = 0; i < listeners->mItems.Length(); i++) {
      ChunkListenerItem* item = listeners->mItems[i];
      NotifyChunkListener(item->mCallback, item->mTarget,
                          NS_ERROR_NOT_AVAILABLE, idx, nullptr);
      // Items are owned by the queue; see QueueChunkListener().
      delete item;
    }

    iter.Remove();
  }

  // Fail all update listeners
  for (const auto& entry : mChunks) {
    const RefPtr<CacheFileChunk>& chunk = entry.GetData();
    LOG(
        ("CacheFile::NotifyListenersAboutOutputRemoval() - fail2 "
         "[this=%p, idx=%u]",
         this, entry.GetKey()));

    if (chunk->IsReady()) {
      chunk->NotifyUpdateListeners();
    }
  }
}
2285 bool CacheFile::DataSize(int64_t* aSize) {
2286 CacheFileAutoLock lock(this);
2288 if (OutputStreamExists(false)) {
2289 return false;
2292 if (mAltDataOffset == -1) {
2293 *aSize = mDataSize;
2294 } else {
2295 *aSize = mAltDataOffset;
2298 return true;
2301 nsresult CacheFile::GetAltDataSize(int64_t* aSize) {
2302 CacheFileAutoLock lock(this);
2303 if (mOutput) {
2304 return NS_ERROR_IN_PROGRESS;
2307 if (mAltDataOffset == -1) {
2308 return NS_ERROR_NOT_AVAILABLE;
2311 *aSize = mDataSize - mAltDataOffset;
2312 return NS_OK;
2315 nsresult CacheFile::GetAltDataType(nsACString& aType) {
2316 CacheFileAutoLock lock(this);
2318 if (mAltDataOffset == -1) {
2319 return NS_ERROR_NOT_AVAILABLE;
2322 aType = mAltDataType;
2323 return NS_OK;
2326 bool CacheFile::IsDoomed() {
2327 CacheFileAutoLock lock(this);
2329 if (!mHandle) return false;
2331 return mHandle->IsDoomed();
2334 bool CacheFile::IsWriteInProgress() {
2335 CacheFileAutoLock lock(this);
2337 bool result = false;
2339 if (!mMemoryOnly) {
2340 result =
2341 mDataIsDirty || (mMetadata && mMetadata->IsDirty()) || mWritingMetadata;
2344 result = result || mOpeningFile || mOutput || mChunks.Count();
2346 return result;
2349 bool CacheFile::EntryWouldExceedLimit(int64_t aOffset, int64_t aSize,
2350 bool aIsAltData) {
2351 CacheFileAutoLock lock(this);
2353 if (mSkipSizeCheck || aSize < 0) {
2354 return false;
2357 int64_t totalSize = aOffset + aSize;
2358 if (aIsAltData) {
2359 totalSize += (mAltDataOffset == -1) ? mDataSize : mAltDataOffset;
2362 return CacheObserver::EntryIsTooBig(totalSize, !mMemoryOnly);
2365 bool CacheFile::IsDirty() { return mDataIsDirty || mMetadata->IsDirty(); }
2367 void CacheFile::WriteMetadataIfNeeded() {
2368 LOG(("CacheFile::WriteMetadataIfNeeded() [this=%p]", this));
2370 CacheFileAutoLock lock(this);
2372 if (!mMemoryOnly) WriteMetadataIfNeededLocked();
// Kicks off an asynchronous metadata write when the entry is dirty and fully
// quiescent (no streams, no chunks, no write already in flight).
void CacheFile::WriteMetadataIfNeededLocked(bool aFireAndForget) {
  // When aFireAndForget is set to true, we are called from dtor.
  // |this| must not be referenced after this method returns!

  LOG(("CacheFile::WriteMetadataIfNeededLocked() [this=%p]", this));

  nsresult rv;

  AssertOwnsLock();
  MOZ_ASSERT(!mMemoryOnly);

  if (!mMetadata) {
    MOZ_CRASH("Must have metadata here");
    return;
  }

  // An entry that already failed must not be written back.
  if (NS_FAILED(mStatus)) return;

  // Bail unless the entry is dirty AND idle: any open stream, active chunk,
  // in-flight metadata write, pending file open, or kill flag defers the
  // write to a later call.
  if (!IsDirty() || mOutput || mInputs.Length() || mChunks.Count() ||
      mWritingMetadata || mOpeningFile || mKill) {
    return;
  }

  if (!aFireAndForget) {
    // if aFireAndForget is set, we are called from dtor. Write
    // scheduler hard-refers CacheFile otherwise, so we cannot be here.
    CacheFileIOManager::UnscheduleMetadataWrite(this);
  }

  LOG(("CacheFile::WriteMetadataIfNeededLocked() - Writing metadata [this=%p]",
       this));

  // From the dtor (aFireAndForget) no listener is passed — nobody may hold
  // |this| after return.
  rv = mMetadata->WriteMetadata(mDataSize, aFireAndForget ? nullptr : this);
  if (NS_SUCCEEDED(rv)) {
    mWritingMetadata = true;
    mDataIsDirty = false;
  } else {
    LOG(
        ("CacheFile::WriteMetadataIfNeededLocked() - Writing synchronously "
         "failed [this=%p]",
         this));
    // TODO: close streams with error
    SetError(rv);
  }
}
2421 void CacheFile::PostWriteTimer() {
2422 if (mMemoryOnly) return;
2423 LOG(("CacheFile::PostWriteTimer() [this=%p]", this));
2425 CacheFileIOManager::ScheduleMetadataWrite(this);
2428 void CacheFile::CleanUpCachedChunks() {
2429 for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
2430 uint32_t idx = iter.Key();
2431 const RefPtr<CacheFileChunk>& chunk = iter.Data();
2433 LOG(("CacheFile::CleanUpCachedChunks() [this=%p, idx=%u, chunk=%p]", this,
2434 idx, chunk.get()));
2436 if (MustKeepCachedChunk(idx)) {
2437 LOG(("CacheFile::CleanUpCachedChunks() - Keeping chunk"));
2438 continue;
2441 LOG(("CacheFile::CleanUpCachedChunks() - Removing chunk"));
2442 iter.Remove();
// Extends chunk aChunkIdx to its full kChunkSize length by filling the tail
// with zeroes, creating the chunk if it doesn't exist yet. Lock must be held.
nsresult CacheFile::PadChunkWithZeroes(uint32_t aChunkIdx) {
  AssertOwnsLock();

  // This method is used to pad last incomplete chunk with zeroes or create
  // a new chunk full of zeroes
  MOZ_ASSERT(mDataSize / kChunkSize == aChunkIdx);

  nsresult rv;
  RefPtr<CacheFileChunk> chunk;
  // WRITER access creates the chunk when absent.
  rv = GetChunkLocked(aChunkIdx, WRITER, nullptr, getter_AddRefs(chunk));
  NS_ENSURE_SUCCESS(rv, rv);

  LOG(
      ("CacheFile::PadChunkWithZeroes() - Zeroing hole in chunk %d, range %d-%d"
       " [this=%p]",
       aChunkIdx, chunk->DataSize(), kChunkSize - 1, this));

  CacheFileChunkWriteHandle hnd = chunk->GetWriteHandle(kChunkSize);
  if (!hnd.Buf()) {
    // Buffer allocation failed: the chunk must be released outside the lock,
    // and the whole entry is marked as failed.
    ReleaseOutsideLock(std::move(chunk));
    SetError(NS_ERROR_OUT_OF_MEMORY);
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // Zero-fill from the current end of valid data up to the chunk boundary,
  // then advance the chunk's data size accordingly.
  uint32_t offset = hnd.DataSize();
  memset(hnd.Buf() + offset, 0, kChunkSize - offset);
  hnd.UpdateDataSize(offset, kChunkSize - offset);

  // Chunk release may trigger work that must not run under our lock.
  ReleaseOutsideLock(std::move(chunk));

  return NS_OK;
}
2479 void CacheFile::SetError(nsresult aStatus) {
2480 AssertOwnsLock();
2482 if (NS_SUCCEEDED(mStatus)) {
2483 mStatus = aStatus;
2484 if (mHandle) {
2485 CacheFileIOManager::DoomFile(mHandle, nullptr);
// Publishes this entry's attributes (origin hash, frecency, alt-data flag,
// response timings, content type) into the cache index. Lock must be held and
// a file handle must exist; doomed handles are skipped silently.
nsresult CacheFile::InitIndexEntry() {
  AssertOwnsLock();
  MOZ_ASSERT(mHandle);

  if (mHandle->IsDoomed()) return NS_OK;

  nsresult rv;

  rv = CacheFileIOManager::InitIndexEntry(
      mHandle, GetOriginAttrsHash(mMetadata->OriginAttributes()),
      mMetadata->IsAnonymous(), mPinned);
  NS_ENSURE_SUCCESS(rv, rv);

  uint32_t frecency = mMetadata->GetFrecency();

  // Presence of the alt-data metadata element is what the index records.
  bool hasAltData =
      mMetadata->GetElement(CacheFileUtils::kAltDataKey) != nullptr;

  // Parses a metadata time element, clamping to kIndexTimeOutOfBound;
  // a missing element maps to kIndexTimeNotAvailable.
  static auto toUint16 = [](const char* s) -> uint16_t {
    if (s) {
      nsresult rv;
      uint64_t n64 = nsDependentCString(s).ToInteger64(&rv);
      MOZ_ASSERT(NS_SUCCEEDED(rv));
      return n64 <= kIndexTimeOutOfBound ? n64 : kIndexTimeOutOfBound;
    }
    return kIndexTimeNotAvailable;
  };

  const char* onStartTimeStr =
      mMetadata->GetElement("net-response-time-onstart");
  uint16_t onStartTime = toUint16(onStartTimeStr);

  const char* onStopTimeStr = mMetadata->GetElement("net-response-time-onstop");
  uint16_t onStopTime = toUint16(onStopTimeStr);

  // Content type id ("ctid") is sanitized: parse failures and out-of-range
  // values fall back to CONTENT_TYPE_UNKNOWN.
  const char* contentTypeStr = mMetadata->GetElement("ctid");
  uint8_t contentType = nsICacheEntry::CONTENT_TYPE_UNKNOWN;
  if (contentTypeStr) {
    int64_t n64 = nsDependentCString(contentTypeStr).ToInteger64(&rv);
    if (NS_FAILED(rv) || n64 < nsICacheEntry::CONTENT_TYPE_UNKNOWN ||
        n64 >= nsICacheEntry::CONTENT_TYPE_LAST) {
      n64 = nsICacheEntry::CONTENT_TYPE_UNKNOWN;
    }
    contentType = n64;
  }

  rv = CacheFileIOManager::UpdateIndexEntry(
      mHandle, &frecency, &hasAltData, &onStartTime, &onStopTime, &contentType);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}
2543 size_t CacheFile::SizeOfExcludingThis(
2544 mozilla::MallocSizeOf mallocSizeOf) const {
2545 CacheFileAutoLock lock(const_cast<CacheFile*>(this));
2547 size_t n = 0;
2548 n += mKey.SizeOfExcludingThisIfUnshared(mallocSizeOf);
2549 n += mChunks.ShallowSizeOfExcludingThis(mallocSizeOf);
2550 for (const auto& chunk : mChunks.Values()) {
2551 n += chunk->SizeOfIncludingThis(mallocSizeOf);
2553 n += mCachedChunks.ShallowSizeOfExcludingThis(mallocSizeOf);
2554 for (const auto& chunk : mCachedChunks.Values()) {
2555 n += chunk->SizeOfIncludingThis(mallocSizeOf);
2557 // Ignore metadata if it's still being read. It's not safe to access buffers
2558 // in CacheFileMetadata because they might be reallocated on another thread
2559 // outside CacheFile's lock.
2560 if (mMetadata && mReady) {
2561 n += mMetadata->SizeOfIncludingThis(mallocSizeOf);
2564 // Input streams are not elsewhere reported.
2565 n += mInputs.ShallowSizeOfExcludingThis(mallocSizeOf);
2566 for (uint32_t i = 0; i < mInputs.Length(); ++i) {
2567 n += mInputs[i]->SizeOfIncludingThis(mallocSizeOf);
2570 // Output streams are not elsewhere reported.
2571 if (mOutput) {
2572 n += mOutput->SizeOfIncludingThis(mallocSizeOf);
2575 // The listeners are usually classes reported just above.
2576 n += mChunkListeners.ShallowSizeOfExcludingThis(mallocSizeOf);
2577 n += mObjsToRelease.ShallowSizeOfExcludingThis(mallocSizeOf);
2579 // mHandle reported directly from CacheFileIOManager.
2581 return n;
2584 size_t CacheFile::SizeOfIncludingThis(
2585 mozilla::MallocSizeOf mallocSizeOf) const {
2586 return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
2589 } // namespace mozilla::net