Bug 1483965 - Fix the checked selection in the RDM device pixel ratio menu. r=caliman
[gecko.git] / xpcom / ds / PLDHashTable.cpp
blob2e77853cbf69317a474125ef3dd459a09f8b4edd
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include <new>
8 #include <stdio.h>
9 #include <stdlib.h>
10 #include <string.h>
11 #include "PLDHashTable.h"
12 #include "mozilla/HashFunctions.h"
13 #include "mozilla/MathAlgorithms.h"
14 #include "mozilla/OperatorNewExtensions.h"
15 #include "nsAlgorithm.h"
16 #include "nsPointerHashKeys.h"
17 #include "mozilla/Likely.h"
18 #include "mozilla/MemoryReporting.h"
19 #include "mozilla/ChaosMode.h"
21 using namespace mozilla;
#ifdef DEBUG

// RAII guards for the debug-only Checker. Each guard brackets one kind of
// table operation so that illegal nesting or concurrent use of a
// PLDHashTable asserts in debug builds.

// Brackets a read-only operation.
class AutoReadOp
{
  Checker& mChk;

public:
  explicit AutoReadOp(Checker& aChk) : mChk(aChk) { mChk.StartReadOp(); }
  ~AutoReadOp() { mChk.EndReadOp(); }
};

// Brackets a mutating operation.
class AutoWriteOp
{
  Checker& mChk;

public:
  explicit AutoWriteOp(Checker& aChk) : mChk(aChk) { mChk.StartWriteOp(); }
  ~AutoWriteOp() { mChk.EndWriteOp(); }
};

// Brackets entry removal performed while an iterator is live.
class AutoIteratorRemovalOp
{
  Checker& mChk;

public:
  explicit AutoIteratorRemovalOp(Checker& aChk) : mChk(aChk)
  {
    mChk.StartIteratorRemovalOp();
  }
  ~AutoIteratorRemovalOp() { mChk.EndIteratorRemovalOp(); }
};

// Brackets table destruction (or destruction-like teardown).
class AutoDestructorOp
{
  Checker& mChk;

public:
  explicit AutoDestructorOp(Checker& aChk) : mChk(aChk)
  {
    mChk.StartDestructorOp();
  }
  ~AutoDestructorOp() { mChk.EndDestructorOp(); }
};

#endif
67 /* static */ PLDHashNumber
68 PLDHashTable::HashStringKey(const void* aKey)
70 return HashString(static_cast<const char*>(aKey));
73 /* static */ PLDHashNumber
74 PLDHashTable::HashVoidPtrKeyStub(const void* aKey)
76 return nsPtrHashKey<void>::HashKey(aKey);
79 /* static */ bool
80 PLDHashTable::MatchEntryStub(const PLDHashEntryHdr* aEntry, const void* aKey)
82 const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry;
84 return stub->key == aKey;
87 /* static */ bool
88 PLDHashTable::MatchStringKey(const PLDHashEntryHdr* aEntry, const void* aKey)
90 const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry;
92 // XXX tolerate null keys on account of sloppy Mozilla callers.
93 return stub->key == aKey ||
94 (stub->key && aKey &&
95 strcmp((const char*)stub->key, (const char*)aKey) == 0);
98 /* static */ void
99 PLDHashTable::MoveEntryStub(PLDHashTable* aTable,
100 const PLDHashEntryHdr* aFrom,
101 PLDHashEntryHdr* aTo)
103 memcpy(aTo, aFrom, aTable->mEntrySize);
106 /* static */ void
107 PLDHashTable::ClearEntryStub(PLDHashTable* aTable, PLDHashEntryHdr* aEntry)
109 memset(aEntry, 0, aTable->mEntrySize);
112 static const PLDHashTableOps gStubOps = {
113 PLDHashTable::HashVoidPtrKeyStub,
114 PLDHashTable::MatchEntryStub,
115 PLDHashTable::MoveEntryStub,
116 PLDHashTable::ClearEntryStub,
117 nullptr
120 /* static */ const PLDHashTableOps*
121 PLDHashTable::StubOps()
123 return &gStubOps;
// Compute the byte size of an entry store holding |aCapacity| entries of
// |aEntrySize| bytes each, writing it to |*aNbytes|. Returns false when the
// product does not fit in 32 bits (in which case |*aNbytes| is truncated).
static bool
SizeOfEntryStore(uint32_t aCapacity, uint32_t aEntrySize, uint32_t* aNbytes)
{
  // Perform the multiplication in 64 bits so 32-bit overflow is detectable.
  const uint64_t wide = uint64_t(aCapacity) * uint64_t(aEntrySize);
  const uint32_t narrow = aCapacity * aEntrySize;
  *aNbytes = narrow;
  return uint64_t(narrow) == wide; // false on overflow
}
// Load thresholds, expressed as entry counts rather than ratios. The
// secondary maximum lets a table that cannot grow (ChangeTable() failure)
// be overloaded up to 96.875% before Add() starts failing; performance
// degrades only mildly at that load while fitting ~1.3x more elements.

static inline uint32_t
MaxLoad(uint32_t aCapacity)
{
  return aCapacity - (aCapacity >> 2); // == aCapacity * 0.75
}

static inline uint32_t
MaxLoadOnGrowthFailure(uint32_t aCapacity)
{
  return aCapacity - (aCapacity >> 5); // == aCapacity * 0.96875
}

static inline uint32_t
MinLoad(uint32_t aCapacity)
{
  return aCapacity >> 2; // == aCapacity * 0.25
}
155 // Compute the minimum capacity (and the Log2 of that capacity) for a table
156 // containing |aLength| elements while respecting the following contraints:
157 // - table must be at most 75% full;
158 // - capacity must be a power of two;
159 // - capacity cannot be too small.
160 static inline void
161 BestCapacity(uint32_t aLength, uint32_t* aCapacityOut,
162 uint32_t* aLog2CapacityOut)
164 // Compute the smallest capacity allowing |aLength| elements to be inserted
165 // without rehashing.
166 uint32_t capacity = (aLength * 4 + (3 - 1)) / 3; // == ceil(aLength * 4 / 3)
167 if (capacity < PLDHashTable::kMinCapacity) {
168 capacity = PLDHashTable::kMinCapacity;
171 // Round up capacity to next power-of-two.
172 uint32_t log2 = CeilingLog2(capacity);
173 capacity = 1u << log2;
174 MOZ_ASSERT(capacity <= PLDHashTable::kMaxCapacity);
176 *aCapacityOut = capacity;
177 *aLog2CapacityOut = log2;
180 /* static */ MOZ_ALWAYS_INLINE uint32_t
181 PLDHashTable::HashShift(uint32_t aEntrySize, uint32_t aLength)
183 if (aLength > kMaxInitialLength) {
184 MOZ_CRASH("Initial length is too large");
187 uint32_t capacity, log2;
188 BestCapacity(aLength, &capacity, &log2);
190 uint32_t nbytes;
191 if (!SizeOfEntryStore(capacity, aEntrySize, &nbytes)) {
192 MOZ_CRASH("Initial entry store size is too large");
195 // Compute the hashShift value.
196 return kPLDHashNumberBits - log2;
199 PLDHashTable::PLDHashTable(const PLDHashTableOps* aOps, uint32_t aEntrySize,
200 uint32_t aLength)
201 : mOps(recordreplay::GeneratePLDHashTableCallbacks(aOps))
202 , mEntryStore()
203 , mGeneration(0)
204 , mHashShift(HashShift(aEntrySize, aLength))
205 , mEntrySize(aEntrySize)
206 , mEntryCount(0)
207 , mRemovedCount(0)
208 #ifdef DEBUG
209 , mChecker()
210 #endif
212 // An entry size greater than 0xff is unlikely, but let's check anyway. If
213 // you hit this, your hashtable would waste lots of space for unused entries
214 // and you should change your hash table's entries to pointers.
215 if (aEntrySize != uint32_t(mEntrySize)) {
216 MOZ_CRASH("Entry size is too large");
220 PLDHashTable&
221 PLDHashTable::operator=(PLDHashTable&& aOther)
223 if (this == &aOther) {
224 return *this;
227 // |mOps| and |mEntrySize| are required to stay the same, they're
228 // conceptually part of the type -- indeed, if PLDHashTable was a templated
229 // type like nsTHashtable, they *would* be part of the type -- so it only
230 // makes sense to assign in cases where they match. An exception is when we
231 // are recording or replaying the execution, in which case custom ops are
232 // generated for each table.
233 MOZ_RELEASE_ASSERT(mOps == aOther.mOps || !mOps || recordreplay::IsRecordingOrReplaying());
234 MOZ_RELEASE_ASSERT(mEntrySize == aOther.mEntrySize || !mEntrySize);
236 // Reconstruct |this|.
237 const PLDHashTableOps* ops = recordreplay::UnwrapPLDHashTableCallbacks(aOther.mOps);
238 this->~PLDHashTable();
239 new (KnownNotNull, this) PLDHashTable(ops, aOther.mEntrySize, 0);
241 // Move non-const pieces over.
242 mHashShift = std::move(aOther.mHashShift);
243 mEntryCount = std::move(aOther.mEntryCount);
244 mRemovedCount = std::move(aOther.mRemovedCount);
245 mEntryStore.Set(aOther.mEntryStore.Get(), &mGeneration);
246 #ifdef DEBUG
247 mChecker = std::move(aOther.mChecker);
248 #endif
250 recordreplay::MovePLDHashTableContents(aOther.mOps, mOps);
252 // Clear up |aOther| so its destruction will be a no-op and it reports being
253 // empty.
255 #ifdef DEBUG
256 AutoDestructorOp op(mChecker);
257 #endif
258 aOther.mEntryCount = 0;
259 aOther.mEntryStore.Set(nullptr, &aOther.mGeneration);
262 return *this;
265 PLDHashNumber
266 PLDHashTable::Hash1(PLDHashNumber aHash0) const
268 return aHash0 >> mHashShift;
271 void
272 PLDHashTable::Hash2(PLDHashNumber aHash0,
273 uint32_t& aHash2Out, uint32_t& aSizeMaskOut) const
275 uint32_t sizeLog2 = kPLDHashNumberBits - mHashShift;
276 uint32_t sizeMask = (PLDHashNumber(1) << sizeLog2) - 1;
277 aSizeMaskOut = sizeMask;
279 // The incoming aHash0 always has the low bit unset (since we leave it
280 // free for the collision flag), and should have reasonably random
281 // data in the other 31 bits. We used the high bits of aHash0 for
282 // Hash1, so we use the low bits here. If the table size is large,
283 // the bits we use may overlap, but that's still more random than
284 // filling with 0s.
286 // Double hashing needs the second hash code to be relatively prime to table
287 // size, so we simply make hash2 odd.
289 // This also conveniently covers up the fact that we have the low bit
290 // unset since aHash0 has the low bit unset.
291 aHash2Out = (aHash0 & sizeMask) | 1;
294 // Reserve mKeyHash 0 for free entries and 1 for removed-entry sentinels. Note
295 // that a removed-entry sentinel need be stored only if the removed entry had
296 // a colliding entry added after it. Therefore we can use 1 as the collision
297 // flag in addition to the removed-entry sentinel value. Multiplicative hash
298 // uses the high order bits of mKeyHash, so this least-significant reservation
299 // should not hurt the hash function's effectiveness much.
301 // Match an entry's mKeyHash against an unstored one computed from a key.
302 /* static */ bool
303 PLDHashTable::MatchEntryKeyhash(const PLDHashEntryHdr* aEntry,
304 const PLDHashNumber aKeyHash)
306 return (aEntry->mKeyHash & ~kCollisionFlag) == aKeyHash;
309 // Compute the address of the indexed entry in table.
310 PLDHashEntryHdr*
311 PLDHashTable::AddressEntry(uint32_t aIndex) const
313 return const_cast<PLDHashEntryHdr*>(
314 reinterpret_cast<const PLDHashEntryHdr*>(
315 mEntryStore.Get() + aIndex * mEntrySize));
318 PLDHashTable::~PLDHashTable()
320 #ifdef DEBUG
321 AutoDestructorOp op(mChecker);
322 #endif
324 if (!mEntryStore.Get()) {
325 recordreplay::DestroyPLDHashTableCallbacks(mOps);
326 return;
329 // Clear any remaining live entries.
330 char* entryAddr = mEntryStore.Get();
331 char* entryLimit = entryAddr + Capacity() * mEntrySize;
332 while (entryAddr < entryLimit) {
333 PLDHashEntryHdr* entry = (PLDHashEntryHdr*)entryAddr;
334 if (EntryIsLive(entry)) {
335 mOps->clearEntry(this, entry);
337 entryAddr += mEntrySize;
340 recordreplay::DestroyPLDHashTableCallbacks(mOps);
342 // Entry storage is freed last, by ~EntryStore().
345 void
346 PLDHashTable::ClearAndPrepareForLength(uint32_t aLength)
348 // Get these values before the destructor clobbers them.
349 const PLDHashTableOps* ops = recordreplay::UnwrapPLDHashTableCallbacks(mOps);
350 uint32_t entrySize = mEntrySize;
352 this->~PLDHashTable();
353 new (KnownNotNull, this) PLDHashTable(ops, entrySize, aLength);
356 void
357 PLDHashTable::Clear()
359 ClearAndPrepareForLength(kDefaultInitialLength);
362 // If |Reason| is |ForAdd|, the return value is always non-null and it may be
363 // a previously-removed entry. If |Reason| is |ForSearchOrRemove|, the return
364 // value is null on a miss, and will never be a previously-removed entry on a
365 // hit. This distinction is a bit grotty but this function is hot enough that
366 // these differences are worthwhile. (It's also hot enough that
367 // MOZ_ALWAYS_INLINE makes a significant difference.)
368 template <PLDHashTable::SearchReason Reason>
369 MOZ_ALWAYS_INLINE PLDHashEntryHdr*
370 PLDHashTable::SearchTable(const void* aKey, PLDHashNumber aKeyHash) const
372 MOZ_ASSERT(mEntryStore.Get());
373 NS_ASSERTION(!(aKeyHash & kCollisionFlag),
374 "!(aKeyHash & kCollisionFlag)");
376 // Compute the primary hash address.
377 PLDHashNumber hash1 = Hash1(aKeyHash);
378 PLDHashEntryHdr* entry = AddressEntry(hash1);
380 // Miss: return space for a new entry.
381 if (EntryIsFree(entry)) {
382 return (Reason == ForAdd) ? entry : nullptr;
385 // Hit: return entry.
386 PLDHashMatchEntry matchEntry = mOps->matchEntry;
387 if (MatchEntryKeyhash(entry, aKeyHash) &&
388 matchEntry(entry, aKey)) {
389 return entry;
392 // Collision: double hash.
393 PLDHashNumber hash2;
394 uint32_t sizeMask;
395 Hash2(aKeyHash, hash2, sizeMask);
397 // Save the first removed entry pointer so Add() can recycle it. (Only used
398 // if Reason==ForAdd.)
399 PLDHashEntryHdr* firstRemoved = nullptr;
401 for (;;) {
402 if (Reason == ForAdd && !firstRemoved) {
403 if (MOZ_UNLIKELY(EntryIsRemoved(entry))) {
404 firstRemoved = entry;
405 } else {
406 entry->mKeyHash |= kCollisionFlag;
410 hash1 -= hash2;
411 hash1 &= sizeMask;
413 entry = AddressEntry(hash1);
414 if (EntryIsFree(entry)) {
415 return (Reason == ForAdd) ? (firstRemoved ? firstRemoved : entry)
416 : nullptr;
419 if (MatchEntryKeyhash(entry, aKeyHash) &&
420 matchEntry(entry, aKey)) {
421 return entry;
425 // NOTREACHED
426 return nullptr;
429 // This is a copy of SearchTable(), used by ChangeTable(), hardcoded to
430 // 1. assume |Reason| is |ForAdd|,
431 // 2. assume that |aKey| will never match an existing entry, and
432 // 3. assume that no entries have been removed from the current table
433 // structure.
434 // Avoiding the need for |aKey| means we can avoid needing a way to map entries
435 // to keys, which means callers can use complex key types more easily.
436 MOZ_ALWAYS_INLINE PLDHashEntryHdr*
437 PLDHashTable::FindFreeEntry(PLDHashNumber aKeyHash) const
439 MOZ_ASSERT(mEntryStore.Get());
440 NS_ASSERTION(!(aKeyHash & kCollisionFlag),
441 "!(aKeyHash & kCollisionFlag)");
443 // Compute the primary hash address.
444 PLDHashNumber hash1 = Hash1(aKeyHash);
445 PLDHashEntryHdr* entry = AddressEntry(hash1);
447 // Miss: return space for a new entry.
448 if (EntryIsFree(entry)) {
449 return entry;
452 // Collision: double hash.
453 PLDHashNumber hash2;
454 uint32_t sizeMask;
455 Hash2(aKeyHash, hash2, sizeMask);
457 for (;;) {
458 NS_ASSERTION(!EntryIsRemoved(entry),
459 "!EntryIsRemoved(entry)");
460 entry->mKeyHash |= kCollisionFlag;
462 hash1 -= hash2;
463 hash1 &= sizeMask;
465 entry = AddressEntry(hash1);
466 if (EntryIsFree(entry)) {
467 return entry;
471 // NOTREACHED
474 bool
475 PLDHashTable::ChangeTable(int32_t aDeltaLog2)
477 MOZ_ASSERT(mEntryStore.Get());
479 // Look, but don't touch, until we succeed in getting new entry store.
480 int32_t oldLog2 = kPLDHashNumberBits - mHashShift;
481 int32_t newLog2 = oldLog2 + aDeltaLog2;
482 uint32_t newCapacity = 1u << newLog2;
483 if (newCapacity > kMaxCapacity) {
484 return false;
487 uint32_t nbytes;
488 if (!SizeOfEntryStore(newCapacity, mEntrySize, &nbytes)) {
489 return false; // overflowed
492 char* newEntryStore = (char*)calloc(1, nbytes);
493 if (!newEntryStore) {
494 return false;
497 // We can't fail from here on, so update table parameters.
498 mHashShift = kPLDHashNumberBits - newLog2;
499 mRemovedCount = 0;
501 // Assign the new entry store to table.
502 char* oldEntryStore;
503 char* oldEntryAddr;
504 oldEntryAddr = oldEntryStore = mEntryStore.Get();
505 mEntryStore.Set(newEntryStore, &mGeneration);
506 PLDHashMoveEntry moveEntry = mOps->moveEntry;
508 // Copy only live entries, leaving removed ones behind.
509 uint32_t oldCapacity = 1u << oldLog2;
510 for (uint32_t i = 0; i < oldCapacity; ++i) {
511 PLDHashEntryHdr* oldEntry = (PLDHashEntryHdr*)oldEntryAddr;
512 if (EntryIsLive(oldEntry)) {
513 const PLDHashNumber key = oldEntry->mKeyHash & ~kCollisionFlag;
514 PLDHashEntryHdr* newEntry = FindFreeEntry(key);
515 NS_ASSERTION(EntryIsFree(newEntry), "EntryIsFree(newEntry)");
516 moveEntry(this, oldEntry, newEntry);
517 newEntry->mKeyHash = key;
519 oldEntryAddr += mEntrySize;
522 free(oldEntryStore);
523 return true;
526 MOZ_ALWAYS_INLINE PLDHashNumber
527 PLDHashTable::ComputeKeyHash(const void* aKey) const
529 MOZ_ASSERT(mEntryStore.Get());
531 PLDHashNumber keyHash = mozilla::ScrambleHashCode(mOps->hashKey(aKey));
533 // Avoid 0 and 1 hash codes, they indicate free and removed entries.
534 if (keyHash < 2) {
535 keyHash -= 2;
537 keyHash &= ~kCollisionFlag;
539 return keyHash;
542 PLDHashEntryHdr*
543 PLDHashTable::Search(const void* aKey) const
545 #ifdef DEBUG
546 AutoReadOp op(mChecker);
547 #endif
549 PLDHashEntryHdr* entry = mEntryStore.Get()
550 ? SearchTable<ForSearchOrRemove>(aKey,
551 ComputeKeyHash(aKey))
552 : nullptr;
553 return entry;
556 PLDHashEntryHdr*
557 PLDHashTable::Add(const void* aKey, const mozilla::fallible_t&)
559 #ifdef DEBUG
560 AutoWriteOp op(mChecker);
561 #endif
563 // Allocate the entry storage if it hasn't already been allocated.
564 if (!mEntryStore.Get()) {
565 uint32_t nbytes;
566 // We already checked this in the constructor, so it must still be true.
567 MOZ_RELEASE_ASSERT(SizeOfEntryStore(CapacityFromHashShift(), mEntrySize,
568 &nbytes));
569 mEntryStore.Set((char*)calloc(1, nbytes), &mGeneration);
570 if (!mEntryStore.Get()) {
571 return nullptr;
575 // If alpha is >= .75, grow or compress the table. If aKey is already in the
576 // table, we may grow once more than necessary, but only if we are on the
577 // edge of being overloaded.
578 uint32_t capacity = Capacity();
579 if (mEntryCount + mRemovedCount >= MaxLoad(capacity)) {
580 // Compress if a quarter or more of all entries are removed.
581 int deltaLog2;
582 if (mRemovedCount >= capacity >> 2) {
583 deltaLog2 = 0;
584 } else {
585 deltaLog2 = 1;
588 // Grow or compress the table. If ChangeTable() fails, allow overloading up
589 // to the secondary max. Once we hit the secondary max, return null.
590 if (!ChangeTable(deltaLog2) &&
591 mEntryCount + mRemovedCount >= MaxLoadOnGrowthFailure(capacity)) {
592 return nullptr;
596 // Look for entry after possibly growing, so we don't have to add it,
597 // then skip it while growing the table and re-add it after.
598 PLDHashNumber keyHash = ComputeKeyHash(aKey);
599 PLDHashEntryHdr* entry = SearchTable<ForAdd>(aKey, keyHash);
600 if (!EntryIsLive(entry)) {
601 // Initialize the entry, indicating that it's no longer free.
602 if (EntryIsRemoved(entry)) {
603 mRemovedCount--;
604 keyHash |= kCollisionFlag;
606 if (mOps->initEntry) {
607 mOps->initEntry(entry, aKey);
609 entry->mKeyHash = keyHash;
610 mEntryCount++;
613 return entry;
616 PLDHashEntryHdr*
617 PLDHashTable::Add(const void* aKey)
619 PLDHashEntryHdr* entry = Add(aKey, fallible);
620 if (!entry) {
621 if (!mEntryStore.Get()) {
622 // We OOM'd while allocating the initial entry storage.
623 uint32_t nbytes;
624 (void) SizeOfEntryStore(CapacityFromHashShift(), mEntrySize, &nbytes);
625 NS_ABORT_OOM(nbytes);
626 } else {
627 // We failed to resize the existing entry storage, either due to OOM or
628 // because we exceeded the maximum table capacity or size; report it as
629 // an OOM. The multiplication by 2 gets us the size we tried to allocate,
630 // which is double the current size.
631 NS_ABORT_OOM(2 * EntrySize() * EntryCount());
634 return entry;
637 void
638 PLDHashTable::Remove(const void* aKey)
640 #ifdef DEBUG
641 AutoWriteOp op(mChecker);
642 #endif
644 PLDHashEntryHdr* entry = mEntryStore.Get()
645 ? SearchTable<ForSearchOrRemove>(aKey,
646 ComputeKeyHash(aKey))
647 : nullptr;
648 if (entry) {
649 RawRemove(entry);
650 ShrinkIfAppropriate();
654 void
655 PLDHashTable::RemoveEntry(PLDHashEntryHdr* aEntry)
657 #ifdef DEBUG
658 AutoWriteOp op(mChecker);
659 #endif
661 RawRemove(aEntry);
662 ShrinkIfAppropriate();
665 void
666 PLDHashTable::RawRemove(PLDHashEntryHdr* aEntry)
668 // Unfortunately, we can only do weak checking here. That's because
669 // RawRemove() can be called legitimately while an Enumerate() call is
670 // active, which doesn't fit well into how Checker's mState variable works.
671 MOZ_ASSERT(mChecker.IsWritable());
673 MOZ_ASSERT(mEntryStore.Get());
675 MOZ_ASSERT(EntryIsLive(aEntry), "EntryIsLive(aEntry)");
677 // Load keyHash first in case clearEntry() goofs it.
678 PLDHashNumber keyHash = aEntry->mKeyHash;
679 mOps->clearEntry(this, aEntry);
680 if (keyHash & kCollisionFlag) {
681 MarkEntryRemoved(aEntry);
682 mRemovedCount++;
683 } else {
684 MarkEntryFree(aEntry);
686 mEntryCount--;
689 // Shrink or compress if a quarter or more of all entries are removed, or if the
690 // table is underloaded according to the minimum alpha, and is not minimal-size
691 // already.
692 void
693 PLDHashTable::ShrinkIfAppropriate()
695 uint32_t capacity = Capacity();
696 if (mRemovedCount >= capacity >> 2 ||
697 (capacity > kMinCapacity && mEntryCount <= MinLoad(capacity))) {
698 uint32_t log2;
699 BestCapacity(mEntryCount, &capacity, &log2);
701 int32_t deltaLog2 = log2 - (kPLDHashNumberBits - mHashShift);
702 MOZ_ASSERT(deltaLog2 <= 0);
704 (void) ChangeTable(deltaLog2);
708 size_t
709 PLDHashTable::ShallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
711 #ifdef DEBUG
712 AutoReadOp op(mChecker);
713 #endif
715 return aMallocSizeOf(mEntryStore.Get());
718 size_t
719 PLDHashTable::ShallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
721 return aMallocSizeOf(this) + ShallowSizeOfExcludingThis(aMallocSizeOf);
724 PLDHashTable::Iterator::Iterator(Iterator&& aOther)
725 : mTable(aOther.mTable)
726 , mStart(aOther.mStart)
727 , mLimit(aOther.mLimit)
728 , mCurrent(aOther.mCurrent)
729 , mNexts(aOther.mNexts)
730 , mNextsLimit(aOther.mNextsLimit)
731 , mHaveRemoved(aOther.mHaveRemoved)
733 // No need to change |mChecker| here.
734 aOther.mTable = nullptr;
735 aOther.mStart = nullptr;
736 aOther.mLimit = nullptr;
737 aOther.mCurrent = nullptr;
738 aOther.mNexts = 0;
739 aOther.mNextsLimit = 0;
740 aOther.mHaveRemoved = false;
743 PLDHashTable::Iterator::Iterator(PLDHashTable* aTable)
744 : mTable(aTable)
745 , mStart(mTable->mEntryStore.Get())
746 , mLimit(mTable->mEntryStore.Get() + mTable->Capacity() * mTable->mEntrySize)
747 , mCurrent(mTable->mEntryStore.Get())
748 , mNexts(0)
749 , mNextsLimit(mTable->EntryCount())
750 , mHaveRemoved(false)
752 #ifdef DEBUG
753 mTable->mChecker.StartReadOp();
754 #endif
756 if (ChaosMode::isActive(ChaosFeature::HashTableIteration) &&
757 mTable->Capacity() > 0) {
758 // Start iterating at a random entry. It would be even more chaotic to
759 // iterate in fully random order, but that's harder.
760 mCurrent += ChaosMode::randomUint32LessThan(mTable->Capacity()) *
761 mTable->mEntrySize;
764 // Advance to the first live entry, if there is one.
765 if (!Done()) {
766 while (IsOnNonLiveEntry()) {
767 MoveToNextEntry();
772 PLDHashTable::Iterator::~Iterator()
774 if (mTable) {
775 if (mHaveRemoved) {
776 mTable->ShrinkIfAppropriate();
778 #ifdef DEBUG
779 mTable->mChecker.EndReadOp();
780 #endif
784 MOZ_ALWAYS_INLINE bool
785 PLDHashTable::Iterator::IsOnNonLiveEntry() const
787 MOZ_ASSERT(!Done());
788 return !EntryIsLive(reinterpret_cast<PLDHashEntryHdr*>(mCurrent));
791 MOZ_ALWAYS_INLINE void
792 PLDHashTable::Iterator::MoveToNextEntry()
794 mCurrent += mTable->mEntrySize;
795 if (mCurrent == mLimit) {
796 mCurrent = mStart; // Wrap-around. Possible due to Chaos Mode.
800 void
801 PLDHashTable::Iterator::Next()
803 MOZ_ASSERT(!Done());
805 mNexts++;
807 // Advance to the next live entry, if there is one.
808 if (!Done()) {
809 do {
810 MoveToNextEntry();
811 } while (IsOnNonLiveEntry());
815 void
816 PLDHashTable::Iterator::Remove()
818 // This cast is needed for the same reason as the one in the destructor.
819 mTable->RawRemove(Get());
820 mHaveRemoved = true;
#ifdef DEBUG
// Debug-only: flag the table so any later write operation asserts.
void
PLDHashTable::MarkImmutable()
{
  mChecker.SetNonWritable();
}
#endif