/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "PLDHashTable.h"
#include "mozilla/HashFunctions.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/OperatorNewExtensions.h"
#include "nsAlgorithm.h"
#include "nsPointerHashKeys.h"
#include "mozilla/Likely.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/ChaosMode.h"

using namespace mozilla;

class AutoReadOp
{
  Checker& mChk;

public:
  explicit AutoReadOp(Checker& aChk) : mChk(aChk) { mChk.StartReadOp(); }
  ~AutoReadOp() { mChk.EndReadOp(); }
};

class AutoWriteOp
{
  Checker& mChk;

public:
  explicit AutoWriteOp(Checker& aChk) : mChk(aChk) { mChk.StartWriteOp(); }
  ~AutoWriteOp() { mChk.EndWriteOp(); }
};

class AutoIteratorRemovalOp
{
  Checker& mChk;

public:
  explicit AutoIteratorRemovalOp(Checker& aChk)
    : mChk(aChk)
  {
    mChk.StartIteratorRemovalOp();
  }
  ~AutoIteratorRemovalOp() { mChk.EndIteratorRemovalOp(); }
};

class AutoDestructorOp
{
  Checker& mChk;

public:
  explicit AutoDestructorOp(Checker& aChk)
    : mChk(aChk)
  {
    mChk.StartDestructorOp();
  }
  ~AutoDestructorOp() { mChk.EndDestructorOp(); }
};

/* static */ PLDHashNumber
PLDHashTable::HashStringKey(const void* aKey)
{
  return HashString(static_cast<const char*>(aKey));
}

/* static */ PLDHashNumber
PLDHashTable::HashVoidPtrKeyStub(const void* aKey)
{
  return nsPtrHashKey<void>::HashKey(aKey);
}

/* static */ bool
PLDHashTable::MatchEntryStub(const PLDHashEntryHdr* aEntry, const void* aKey)
{
  const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry;

  return stub->key == aKey;
}

/* static */ bool
PLDHashTable::MatchStringKey(const PLDHashEntryHdr* aEntry, const void* aKey)
{
  const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry;

  // XXX tolerate null keys on account of sloppy Mozilla callers.
  return stub->key == aKey ||
         (stub->key && aKey &&
          strcmp((const char*)stub->key, (const char*)aKey) == 0);
}

/* static */ void
PLDHashTable::MoveEntryStub(PLDHashTable* aTable,
                            const PLDHashEntryHdr* aFrom,
                            PLDHashEntryHdr* aTo)
{
  memcpy(aTo, aFrom, aTable->mEntrySize);
}

/* static */ void
PLDHashTable::ClearEntryStub(PLDHashTable* aTable, PLDHashEntryHdr* aEntry)
{
  memset(aEntry, 0, aTable->mEntrySize);
}

static const PLDHashTableOps gStubOps = {
  PLDHashTable::HashVoidPtrKeyStub,
  PLDHashTable::MatchEntryStub,
  PLDHashTable::MoveEntryStub,
  PLDHashTable::ClearEntryStub,
};

/* static */ const PLDHashTableOps*
PLDHashTable::StubOps()
{
  return &gStubOps;
}

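// Illustrative usage sketch (not part of the original file): a table keyed on
// raw pointer identity can be built directly on the stub ops. The entry
// layout assumed here is PLDHashEntryStub from PLDHashTable.h; the stub ops
// have no initEntry hook, so the caller stores the (hypothetical) key
// |somePtr| itself.
//
//   PLDHashTable table(PLDHashTable::StubOps(), sizeof(PLDHashEntryStub));
//   auto* e = static_cast<PLDHashEntryStub*>(table.Add(somePtr, mozilla::fallible));
//   if (e) {
//     e->key = somePtr;
//   }
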
static bool
SizeOfEntryStore(uint32_t aCapacity, uint32_t aEntrySize, uint32_t* aNbytes)
{
  uint64_t nbytes64 = uint64_t(aCapacity) * uint64_t(aEntrySize);
  *aNbytes = aCapacity * aEntrySize;
  return uint64_t(*aNbytes) == nbytes64;  // returns false on overflow
}

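// Worked example (illustrative, not from the original source): with
// aCapacity == (1u << 26) and aEntrySize == 64 the true size is 2^32 bytes;
// the 32-bit product wraps to 0 while nbytes64 holds 4294967296, so the
// comparison fails and the caller sees the overflow.
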
// Compute max and min load numbers (entry counts). We have a secondary max
// that allows us to overload a table reasonably if it cannot be grown further
// (i.e. if ChangeTable() fails). The table slows down drastically if the
// secondary max is too close to 1, but 0.96875 gives only a slight slowdown
// while allowing 1.3x more elements.
static inline uint32_t
MaxLoad(uint32_t aCapacity)
{
  return aCapacity - (aCapacity >> 2);  // == aCapacity * 0.75
}

static inline uint32_t
MaxLoadOnGrowthFailure(uint32_t aCapacity)
{
  return aCapacity - (aCapacity >> 5);  // == aCapacity * 0.96875
}

static inline uint32_t
MinLoad(uint32_t aCapacity)
{
  return aCapacity >> 2;  // == aCapacity * 0.25
}

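// Worked example (illustrative): for a capacity of 256, MaxLoad() is
// 256 - 64 = 192 entries (75%), MaxLoadOnGrowthFailure() is 256 - 8 = 248
// entries (96.875%), and MinLoad() is 64 entries (25%).
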
// Compute the minimum capacity (and the Log2 of that capacity) for a table
// containing |aLength| elements while respecting the following constraints:
// - table must be at most 75% full;
// - capacity must be a power of two;
// - capacity cannot be too small.
static inline void
BestCapacity(uint32_t aLength, uint32_t* aCapacityOut,
             uint32_t* aLog2CapacityOut)
{
  // Compute the smallest capacity allowing |aLength| elements to be inserted
  // without rehashing.
  uint32_t capacity = (aLength * 4 + (3 - 1)) / 3;  // == ceil(aLength * 4 / 3)
  if (capacity < PLDHashTable::kMinCapacity) {
    capacity = PLDHashTable::kMinCapacity;
  }

  // Round up capacity to next power-of-two.
  uint32_t log2 = CeilingLog2(capacity);
  capacity = 1u << log2;
  MOZ_ASSERT(capacity <= PLDHashTable::kMaxCapacity);

  *aCapacityOut = capacity;
  *aLog2CapacityOut = log2;
}

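// Worked example (illustrative): for aLength == 100 the initial estimate is
// ceil(100 * 4 / 3) == 134, CeilingLog2(134) == 8, so the chosen capacity is
// 256, holding 100 entries at roughly 39% load, well under the 75% ceiling.
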
/* static */ MOZ_ALWAYS_INLINE uint32_t
PLDHashTable::HashShift(uint32_t aEntrySize, uint32_t aLength)
{
  if (aLength > kMaxInitialLength) {
    MOZ_CRASH("Initial length is too large");
  }

  uint32_t capacity, log2;
  BestCapacity(aLength, &capacity, &log2);

  uint32_t nbytes;
  if (!SizeOfEntryStore(capacity, aEntrySize, &nbytes)) {
    MOZ_CRASH("Initial entry store size is too large");
  }

  // Compute the hashShift value.
  return kPLDHashNumberBits - log2;
}

PLDHashTable::PLDHashTable(const PLDHashTableOps* aOps, uint32_t aEntrySize,
                           uint32_t aLength)
  : mOps(recordreplay::GeneratePLDHashTableCallbacks(aOps))
  , mEntryCount(0)
  , mRemovedCount(0)
  , mHashShift(HashShift(aEntrySize, aLength))
  , mEntrySize(aEntrySize)
{
  // An entry size greater than 0xff is unlikely, but let's check anyway. If
  // you hit this, your hashtable would waste lots of space for unused entries
  // and you should change your hash table's entries to pointers.
  if (aEntrySize != uint32_t(mEntrySize)) {
    MOZ_CRASH("Entry size is too large");
  }
}

PLDHashTable&
PLDHashTable::operator=(PLDHashTable&& aOther)
{
  if (this == &aOther) {
    return *this;
  }

  // |mOps| and |mEntrySize| are required to stay the same, they're
  // conceptually part of the type -- indeed, if PLDHashTable was a templated
  // type like nsTHashtable, they *would* be part of the type -- so it only
  // makes sense to assign in cases where they match. An exception is when we
  // are recording or replaying the execution, in which case custom ops are
  // generated for each table.
  MOZ_RELEASE_ASSERT(mOps == aOther.mOps || !mOps ||
                     recordreplay::IsRecordingOrReplaying());
  MOZ_RELEASE_ASSERT(mEntrySize == aOther.mEntrySize || !mEntrySize);

  // Reconstruct |this|.
  const PLDHashTableOps* ops =
    recordreplay::UnwrapPLDHashTableCallbacks(aOther.mOps);
  this->~PLDHashTable();
  new (KnownNotNull, this) PLDHashTable(ops, aOther.mEntrySize, 0);

  // Move non-const pieces over.
  mHashShift = std::move(aOther.mHashShift);
  mEntryCount = std::move(aOther.mEntryCount);
  mRemovedCount = std::move(aOther.mRemovedCount);
  mEntryStore.Set(aOther.mEntryStore.Get(), &mGeneration);
  mChecker = std::move(aOther.mChecker);

  recordreplay::MovePLDHashTableContents(aOther.mOps, mOps);

  // Clear up |aOther| so its destruction will be a no-op and it reports being
  // empty.
  {
    AutoDestructorOp op(mChecker);
    aOther.mEntryCount = 0;
    aOther.mRemovedCount = 0;
    aOther.mEntryStore.Set(nullptr, &aOther.mGeneration);
  }

  return *this;
}

PLDHashNumber
PLDHashTable::Hash1(PLDHashNumber aHash0) const
{
  return aHash0 >> mHashShift;
}

void
PLDHashTable::Hash2(PLDHashNumber aHash0,
                    uint32_t& aHash2Out, uint32_t& aSizeMaskOut) const
{
  uint32_t sizeLog2 = kPLDHashNumberBits - mHashShift;
  uint32_t sizeMask = (PLDHashNumber(1) << sizeLog2) - 1;
  aSizeMaskOut = sizeMask;

  // The incoming aHash0 always has the low bit unset (since we leave it
  // free for the collision flag), and should have reasonably random
  // data in the other 31 bits. We used the high bits of aHash0 for
  // Hash1, so we use the low bits here. If the table size is large,
  // the bits we use may overlap, but that's still more random than
  // filling with 0s.
  //
  // Double hashing needs the second hash code to be relatively prime to table
  // size, so we simply make hash2 odd.
  //
  // This also conveniently covers up the fact that we have the low bit
  // unset since aHash0 has the low bit unset.
  aHash2Out = (aHash0 & sizeMask) | 1;
}

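// Illustrative note (a sketch of the probing below, not from the original
// source): SearchTable() and FindFreeEntry() step through slots as
//
//   hash1, (hash1 - hash2) & sizeMask, (hash1 - 2*hash2) & sizeMask, ...
//
// Because hash2 is odd and the capacity is a power of two, hash2 is
// relatively prime to the table size, so the sequence visits every slot
// before repeating.
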
// Reserve mKeyHash 0 for free entries and 1 for removed-entry sentinels. Note
// that a removed-entry sentinel need be stored only if the removed entry had
// a colliding entry added after it. Therefore we can use 1 as the collision
// flag in addition to the removed-entry sentinel value. Multiplicative hash
// uses the high order bits of mKeyHash, so this least-significant reservation
// should not hurt the hash function's effectiveness much.

// Match an entry's mKeyHash against an unstored one computed from a key.
/* static */ bool
PLDHashTable::MatchEntryKeyhash(const PLDHashEntryHdr* aEntry,
                                const PLDHashNumber aKeyHash)
{
  return (aEntry->mKeyHash & ~kCollisionFlag) == aKeyHash;
}

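// Illustrative summary (the actual constants and the EntryIsFree()/
// EntryIsRemoved() predicates live in PLDHashTable.h, assumed here):
// mKeyHash == 0 marks a free slot, mKeyHash == 1 marks a removed-entry
// sentinel, and kCollisionFlag is the low bit, so a live entry stores its
// computed hash with the low bit possibly set to record a later collision.
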
// Compute the address of the indexed entry in table.
PLDHashEntryHdr*
PLDHashTable::AddressEntry(uint32_t aIndex) const
{
  return const_cast<PLDHashEntryHdr*>(
    reinterpret_cast<const PLDHashEntryHdr*>(
      mEntryStore.Get() + aIndex * mEntrySize));
}

PLDHashTable::~PLDHashTable()
{
  AutoDestructorOp op(mChecker);

  if (!mEntryStore.Get()) {
    recordreplay::DestroyPLDHashTableCallbacks(mOps);
    return;
  }

  // Clear any remaining live entries.
  char* entryAddr = mEntryStore.Get();
  char* entryLimit = entryAddr + Capacity() * mEntrySize;
  while (entryAddr < entryLimit) {
    PLDHashEntryHdr* entry = (PLDHashEntryHdr*)entryAddr;
    if (EntryIsLive(entry)) {
      mOps->clearEntry(this, entry);
    }
    entryAddr += mEntrySize;
  }

  recordreplay::DestroyPLDHashTableCallbacks(mOps);

  // Entry storage is freed last, by ~EntryStore().
}

void
PLDHashTable::ClearAndPrepareForLength(uint32_t aLength)
{
  // Get these values before the destructor clobbers them.
  const PLDHashTableOps* ops = recordreplay::UnwrapPLDHashTableCallbacks(mOps);
  uint32_t entrySize = mEntrySize;

  this->~PLDHashTable();
  new (KnownNotNull, this) PLDHashTable(ops, entrySize, aLength);
}

void
PLDHashTable::Clear()
{
  ClearAndPrepareForLength(kDefaultInitialLength);
}

// If |Reason| is |ForAdd|, the return value is always non-null and it may be
// a previously-removed entry. If |Reason| is |ForSearchOrRemove|, the return
// value is null on a miss, and will never be a previously-removed entry on a
// hit. This distinction is a bit grotty but this function is hot enough that
// these differences are worthwhile. (It's also hot enough that
// MOZ_ALWAYS_INLINE makes a significant difference.)
template <PLDHashTable::SearchReason Reason>
MOZ_ALWAYS_INLINE PLDHashEntryHdr*
PLDHashTable::SearchTable(const void* aKey, PLDHashNumber aKeyHash) const
{
  MOZ_ASSERT(mEntryStore.Get());
  NS_ASSERTION(!(aKeyHash & kCollisionFlag),
               "!(aKeyHash & kCollisionFlag)");

  // Compute the primary hash address.
  PLDHashNumber hash1 = Hash1(aKeyHash);
  PLDHashEntryHdr* entry = AddressEntry(hash1);

  // Miss: return space for a new entry.
  if (EntryIsFree(entry)) {
    return (Reason == ForAdd) ? entry : nullptr;
  }

  // Hit: return entry.
  PLDHashMatchEntry matchEntry = mOps->matchEntry;
  if (MatchEntryKeyhash(entry, aKeyHash) &&
      matchEntry(entry, aKey)) {
    return entry;
  }

  // Collision: double hash.
  PLDHashNumber hash2;
  uint32_t sizeMask;
  Hash2(aKeyHash, hash2, sizeMask);

  // Save the first removed entry pointer so Add() can recycle it. (Only used
  // if Reason==ForAdd.)
  PLDHashEntryHdr* firstRemoved = nullptr;

  for (;;) {
    if (Reason == ForAdd && !firstRemoved) {
      if (MOZ_UNLIKELY(EntryIsRemoved(entry))) {
        firstRemoved = entry;
      } else {
        entry->mKeyHash |= kCollisionFlag;
      }
    }

    hash1 -= hash2;
    hash1 &= sizeMask;

    entry = AddressEntry(hash1);
    if (EntryIsFree(entry)) {
      return (Reason == ForAdd) ? (firstRemoved ? firstRemoved : entry)
                                : nullptr;
    }

    if (MatchEntryKeyhash(entry, aKeyHash) &&
        matchEntry(entry, aKey)) {
      return entry;
    }
  }

  // NOTREACHED
  return nullptr;
}

// This is a copy of SearchTable(), used by ChangeTable(), hardcoded to
// 1. assume |Reason| is |ForAdd|,
// 2. assume that |aKey| will never match an existing entry, and
// 3. assume that no entries have been removed from the current table
//    structure.
// Avoiding the need for |aKey| means we can avoid needing a way to map entries
// to keys, which means callers can use complex key types more easily.
MOZ_ALWAYS_INLINE PLDHashEntryHdr*
PLDHashTable::FindFreeEntry(PLDHashNumber aKeyHash) const
{
  MOZ_ASSERT(mEntryStore.Get());
  NS_ASSERTION(!(aKeyHash & kCollisionFlag),
               "!(aKeyHash & kCollisionFlag)");

  // Compute the primary hash address.
  PLDHashNumber hash1 = Hash1(aKeyHash);
  PLDHashEntryHdr* entry = AddressEntry(hash1);

  // Miss: return space for a new entry.
  if (EntryIsFree(entry)) {
    return entry;
  }

  // Collision: double hash.
  PLDHashNumber hash2;
  uint32_t sizeMask;
  Hash2(aKeyHash, hash2, sizeMask);

  for (;;) {
    NS_ASSERTION(!EntryIsRemoved(entry),
                 "!EntryIsRemoved(entry)");
    entry->mKeyHash |= kCollisionFlag;

    hash1 -= hash2;
    hash1 &= sizeMask;

    entry = AddressEntry(hash1);
    if (EntryIsFree(entry)) {
      return entry;
    }
  }

  // NOTREACHED
}

bool
PLDHashTable::ChangeTable(int32_t aDeltaLog2)
{
  MOZ_ASSERT(mEntryStore.Get());

  // Look, but don't touch, until we succeed in getting new entry store.
  int32_t oldLog2 = kPLDHashNumberBits - mHashShift;
  int32_t newLog2 = oldLog2 + aDeltaLog2;
  uint32_t newCapacity = 1u << newLog2;
  if (newCapacity > kMaxCapacity) {
    return false;
  }

  uint32_t nbytes;
  if (!SizeOfEntryStore(newCapacity, mEntrySize, &nbytes)) {
    return false;  // overflowed
  }

  char* newEntryStore = (char*)calloc(1, nbytes);
  if (!newEntryStore) {
    return false;
  }

  // We can't fail from here on, so update table parameters.
  mHashShift = kPLDHashNumberBits - newLog2;
  mRemovedCount = 0;

  // Assign the new entry store to table.
  char* oldEntryStore;
  char* oldEntryAddr;
  oldEntryAddr = oldEntryStore = mEntryStore.Get();
  mEntryStore.Set(newEntryStore, &mGeneration);
  PLDHashMoveEntry moveEntry = mOps->moveEntry;

  // Copy only live entries, leaving removed ones behind.
  uint32_t oldCapacity = 1u << oldLog2;
  for (uint32_t i = 0; i < oldCapacity; ++i) {
    PLDHashEntryHdr* oldEntry = (PLDHashEntryHdr*)oldEntryAddr;
    if (EntryIsLive(oldEntry)) {
      const PLDHashNumber key = oldEntry->mKeyHash & ~kCollisionFlag;
      PLDHashEntryHdr* newEntry = FindFreeEntry(key);
      NS_ASSERTION(EntryIsFree(newEntry), "EntryIsFree(newEntry)");
      moveEntry(this, oldEntry, newEntry);
      newEntry->mKeyHash = key;
    }
    oldEntryAddr += mEntrySize;
  }

  free(oldEntryStore);
  return true;
}

MOZ_ALWAYS_INLINE PLDHashNumber
PLDHashTable::ComputeKeyHash(const void* aKey) const
{
  MOZ_ASSERT(mEntryStore.Get());

  PLDHashNumber keyHash = mozilla::ScrambleHashCode(mOps->hashKey(aKey));

  // Avoid 0 and 1 hash codes, they indicate free and removed entries.
  if (keyHash < 2) {
    keyHash -= 2;
  }

  keyHash &= ~kCollisionFlag;

  return keyHash;
}

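// Illustrative note (an assumption-laden reading, not from the original
// source): subtracting 2 maps the reserved codes 0 and 1 to values near
// 2^32, and clearing kCollisionFlag (the low bit) cannot bring a hash back
// into the 0/1 sentinel range, while the high bits consumed by Hash1() stay
// untouched.
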
PLDHashEntryHdr*
PLDHashTable::Search(const void* aKey) const
{
  AutoReadOp op(mChecker);

  PLDHashEntryHdr* entry = mEntryStore.Get()
                         ? SearchTable<ForSearchOrRemove>(aKey,
                                                          ComputeKeyHash(aKey))
                         : nullptr;

  return entry;
}

PLDHashEntryHdr*
PLDHashTable::Add(const void* aKey, const mozilla::fallible_t&)
{
  AutoWriteOp op(mChecker);

  // Allocate the entry storage if it hasn't already been allocated.
  if (!mEntryStore.Get()) {
    uint32_t nbytes;
    // We already checked this in the constructor, so it must still be true.
    MOZ_RELEASE_ASSERT(SizeOfEntryStore(CapacityFromHashShift(), mEntrySize,
                                        &nbytes));
    mEntryStore.Set((char*)calloc(1, nbytes), &mGeneration);
    if (!mEntryStore.Get()) {
      return nullptr;
    }
  }

  // If alpha is >= .75, grow or compress the table. If aKey is already in the
  // table, we may grow once more than necessary, but only if we are on the
  // edge of being overloaded.
  uint32_t capacity = Capacity();
  if (mEntryCount + mRemovedCount >= MaxLoad(capacity)) {
    // Compress if a quarter or more of all entries are removed.
    int deltaLog2;
    if (mRemovedCount >= capacity >> 2) {
      deltaLog2 = 0;
    } else {
      deltaLog2 = 1;
    }

    // Grow or compress the table. If ChangeTable() fails, allow overloading up
    // to the secondary max. Once we hit the secondary max, return null.
    if (!ChangeTable(deltaLog2) &&
        mEntryCount + mRemovedCount >= MaxLoadOnGrowthFailure(capacity)) {
      return nullptr;
    }
  }

  // Look for entry after possibly growing, so we don't have to add it,
  // then skip it while growing the table and re-add it after.
  PLDHashNumber keyHash = ComputeKeyHash(aKey);
  PLDHashEntryHdr* entry = SearchTable<ForAdd>(aKey, keyHash);
  if (!EntryIsLive(entry)) {
    // Initialize the entry, indicating that it's no longer free.
    if (EntryIsRemoved(entry)) {
      mRemovedCount--;
      keyHash |= kCollisionFlag;
    }
    if (mOps->initEntry) {
      mOps->initEntry(entry, aKey);
    }
    entry->mKeyHash = keyHash;
    mEntryCount++;
  }

  return entry;
}

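// Illustrative caller sketch (MyEntry is hypothetical, not part of this
// file): the fallible overload returns null instead of aborting when storage
// cannot be allocated or the table cannot grow, so callers must check.
//
//   auto* e = static_cast<MyEntry*>(table.Add(key, mozilla::fallible));
//   if (!e) {
//     return NS_ERROR_OUT_OF_MEMORY;
//   }
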
PLDHashEntryHdr*
PLDHashTable::Add(const void* aKey)
{
  PLDHashEntryHdr* entry = Add(aKey, fallible);
  if (!entry) {
    if (!mEntryStore.Get()) {
      // We OOM'd while allocating the initial entry storage.
      uint32_t nbytes;
      (void) SizeOfEntryStore(CapacityFromHashShift(), mEntrySize, &nbytes);
      NS_ABORT_OOM(nbytes);
    }

    // We failed to resize the existing entry storage, either due to OOM or
    // because we exceeded the maximum table capacity or size; report it as
    // an OOM. The multiplication by 2 gets us the size we tried to allocate,
    // which is double the current size.
    NS_ABORT_OOM(2 * EntrySize() * EntryCount());
  }

  return entry;
}

void
PLDHashTable::Remove(const void* aKey)
{
  AutoWriteOp op(mChecker);

  PLDHashEntryHdr* entry = mEntryStore.Get()
                         ? SearchTable<ForSearchOrRemove>(aKey,
                                                          ComputeKeyHash(aKey))
                         : nullptr;
  if (entry) {
    RawRemove(entry);
    ShrinkIfAppropriate();
  }
}

void
PLDHashTable::RemoveEntry(PLDHashEntryHdr* aEntry)
{
  AutoWriteOp op(mChecker);

  RawRemove(aEntry);
  ShrinkIfAppropriate();
}

void
PLDHashTable::RawRemove(PLDHashEntryHdr* aEntry)
{
  // Unfortunately, we can only do weak checking here. That's because
  // RawRemove() can be called legitimately while an Enumerate() call is
  // active, which doesn't fit well into how Checker's mState variable works.
  MOZ_ASSERT(mChecker.IsWritable());

  MOZ_ASSERT(mEntryStore.Get());

  MOZ_ASSERT(EntryIsLive(aEntry), "EntryIsLive(aEntry)");

  // Load keyHash first in case clearEntry() goofs it.
  PLDHashNumber keyHash = aEntry->mKeyHash;
  mOps->clearEntry(this, aEntry);
  if (keyHash & kCollisionFlag) {
    MarkEntryRemoved(aEntry);
    mRemovedCount++;
  } else {
    MarkEntryFree(aEntry);
  }
  mEntryCount--;
}

// Shrink or compress if a quarter or more of all entries are removed, or if the
// table is underloaded according to the minimum alpha, and is not minimal-size
// already.
void
PLDHashTable::ShrinkIfAppropriate()
{
  uint32_t capacity = Capacity();
  if (mRemovedCount >= capacity >> 2 ||
      (capacity > kMinCapacity && mEntryCount <= MinLoad(capacity))) {
    uint32_t log2;
    BestCapacity(mEntryCount, &capacity, &log2);

    int32_t deltaLog2 = log2 - (kPLDHashNumberBits - mHashShift);
    MOZ_ASSERT(deltaLog2 <= 0);

    (void) ChangeTable(deltaLog2);
  }
}

size_t
PLDHashTable::ShallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  AutoReadOp op(mChecker);

  return aMallocSizeOf(mEntryStore.Get());
}

size_t
PLDHashTable::ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + ShallowSizeOfExcludingThis(aMallocSizeOf);
}

PLDHashTable::Iterator::Iterator(Iterator&& aOther)
  : mTable(aOther.mTable)
  , mStart(aOther.mStart)
  , mLimit(aOther.mLimit)
  , mCurrent(aOther.mCurrent)
  , mNexts(aOther.mNexts)
  , mNextsLimit(aOther.mNextsLimit)
  , mHaveRemoved(aOther.mHaveRemoved)
{
  // No need to change |mChecker| here.
  aOther.mTable = nullptr;
  aOther.mStart = nullptr;
  aOther.mLimit = nullptr;
  aOther.mCurrent = nullptr;
  aOther.mNexts = 0;
  aOther.mNextsLimit = 0;
  aOther.mHaveRemoved = false;
}

PLDHashTable::Iterator::Iterator(PLDHashTable* aTable)
  : mTable(aTable)
  , mStart(mTable->mEntryStore.Get())
  , mLimit(mTable->mEntryStore.Get() + mTable->Capacity() * mTable->mEntrySize)
  , mCurrent(mTable->mEntryStore.Get())
  , mNexts(0)
  , mNextsLimit(mTable->EntryCount())
  , mHaveRemoved(false)
{
  mTable->mChecker.StartReadOp();

  if (ChaosMode::isActive(ChaosFeature::HashTableIteration) &&
      mTable->Capacity() > 0) {
    // Start iterating at a random entry. It would be even more chaotic to
    // iterate in fully random order, but that's harder.
    mCurrent += ChaosMode::randomUint32LessThan(mTable->Capacity()) *
                mTable->mEntrySize;
  }

  // Advance to the first live entry, if there is one.
  if (!Done()) {
    while (IsOnNonLiveEntry()) {
      MoveToNextEntry();
    }
  }
}

PLDHashTable::Iterator::~Iterator()
{
  if (mTable) {
    if (mHaveRemoved) {
      mTable->ShrinkIfAppropriate();
    }
    mTable->mChecker.EndReadOp();
  }
}

MOZ_ALWAYS_INLINE bool
PLDHashTable::Iterator::IsOnNonLiveEntry() const
{
  MOZ_ASSERT(!Done());
  return !EntryIsLive(reinterpret_cast<PLDHashEntryHdr*>(mCurrent));
}

MOZ_ALWAYS_INLINE void
PLDHashTable::Iterator::MoveToNextEntry()
{
  mCurrent += mTable->mEntrySize;
  if (mCurrent == mLimit) {
    mCurrent = mStart;  // Wrap-around. Possible due to Chaos Mode.
  }
}

void
PLDHashTable::Iterator::Next()
{
  MOZ_ASSERT(!Done());

  mNexts++;

  // Advance to the next live entry, if there is one.
  if (!Done()) {
    do {
      MoveToNextEntry();
    } while (IsOnNonLiveEntry());
  }
}

void
PLDHashTable::Iterator::Remove()
{
  // This cast is needed for the same reason as the one in the destructor.
  mTable->RawRemove(Get());
  mHaveRemoved = true;
}

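// Illustrative usage sketch (MyEntry and ShouldDrop() are hypothetical, not
// part of this file): iteration and in-place removal go through the Iterator
// returned by PLDHashTable::Iter().
//
//   for (auto iter = table.Iter(); !iter.Done(); iter.Next()) {
//     auto* e = static_cast<MyEntry*>(iter.Get());
//     if (ShouldDrop(e)) {
//       iter.Remove();  // the table may shrink when the iterator is destroyed
//     }
//   }
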
void
PLDHashTable::MarkImmutable()
{
  mChecker.SetNonWritable();
}