/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <new>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "PLDHashTable.h"
#include "nsDebug.h"
#include "mozilla/HashFunctions.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/OperatorNewExtensions.h"
#include "mozilla/ScopeExit.h"
#include "nsAlgorithm.h"
#include "nsPointerHashKeys.h"
#include "mozilla/Likely.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Maybe.h"
#include "mozilla/ChaosMode.h"

using namespace mozilla;

#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED

class AutoReadOp {
  Checker& mChk;

 public:
  explicit AutoReadOp(Checker& aChk) : mChk(aChk) { mChk.StartReadOp(); }
  ~AutoReadOp() { mChk.EndReadOp(); }
};

class AutoWriteOp {
  Checker& mChk;

 public:
  explicit AutoWriteOp(Checker& aChk) : mChk(aChk) { mChk.StartWriteOp(); }
  ~AutoWriteOp() { mChk.EndWriteOp(); }
};

class AutoIteratorRemovalOp {
  Checker& mChk;

 public:
  explicit AutoIteratorRemovalOp(Checker& aChk) : mChk(aChk) {
    mChk.StartIteratorRemovalOp();
  }
  ~AutoIteratorRemovalOp() { mChk.EndIteratorRemovalOp(); }
};

class AutoDestructorOp {
  Checker& mChk;

 public:
  explicit AutoDestructorOp(Checker& aChk) : mChk(aChk) {
    mChk.StartDestructorOp();
  }
  ~AutoDestructorOp() { mChk.EndDestructorOp(); }
};

#endif

/* static */
PLDHashNumber PLDHashTable::HashStringKey(const void* aKey) {
  return HashString(static_cast<const char*>(aKey));
}

/* static */
PLDHashNumber PLDHashTable::HashVoidPtrKeyStub(const void* aKey) {
  return nsPtrHashKey<void>::HashKey(aKey);
}

/* static */
bool PLDHashTable::MatchEntryStub(const PLDHashEntryHdr* aEntry,
                                  const void* aKey) {
  const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry;

  return stub->key == aKey;
}

/* static */
bool PLDHashTable::MatchStringKey(const PLDHashEntryHdr* aEntry,
                                  const void* aKey) {
  const PLDHashEntryStub* stub = (const PLDHashEntryStub*)aEntry;

  // XXX tolerate null keys on account of sloppy Mozilla callers.
  return stub->key == aKey ||
         (stub->key && aKey &&
          strcmp((const char*)stub->key, (const char*)aKey) == 0);
}

/* static */
void PLDHashTable::MoveEntryStub(PLDHashTable* aTable,
                                 const PLDHashEntryHdr* aFrom,
                                 PLDHashEntryHdr* aTo) {
  memcpy(aTo, aFrom, aTable->mEntrySize);
}

/* static */
void PLDHashTable::ClearEntryStub(PLDHashTable* aTable,
                                  PLDHashEntryHdr* aEntry) {
  memset(aEntry, 0, aTable->mEntrySize);
}

static const PLDHashTableOps gStubOps = {
    PLDHashTable::HashVoidPtrKeyStub, PLDHashTable::MatchEntryStub,
    PLDHashTable::MoveEntryStub, PLDHashTable::ClearEntryStub, nullptr};

/* static */ const PLDHashTableOps* PLDHashTable::StubOps() {
  return &gStubOps;
}

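// Illustrative usage sketch (added commentary, not part of the original file).
// With the stub ops above, the entry type is PLDHashEntryStub and the key is
// the pointer itself; the hypothetical |key| below stands in for any pointer a
// caller might use:
//
//   PLDHashTable table(PLDHashTable::StubOps(), sizeof(PLDHashEntryStub));
//   auto* entry = static_cast<PLDHashEntryStub*>(table.Add(key, fallible));
//   if (entry) {
//     entry->key = key;  // gStubOps has no initEntry, so the caller fills it.
//   }
//   bool present = !!table.Search(key);
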
static bool SizeOfEntryStore(uint32_t aCapacity, uint32_t aEntrySize,
                             uint32_t* aNbytes) {
  uint32_t slotSize = aEntrySize + sizeof(PLDHashNumber);
  uint64_t nbytes64 = uint64_t(aCapacity) * uint64_t(slotSize);
  *aNbytes = aCapacity * slotSize;
  return uint64_t(*aNbytes) == nbytes64;  // returns false on overflow
}

// Compute max and min load numbers (entry counts). We have a secondary max
// that allows us to overload a table reasonably if it cannot be grown further
// (i.e. if ChangeTable() fails). The table slows down drastically if the
// secondary max is too close to 1, but 0.96875 gives only a slight slowdown
// while allowing 1.3x more elements.
static inline uint32_t MaxLoad(uint32_t aCapacity) {
  return aCapacity - (aCapacity >> 2);  // == aCapacity * 0.75
}
static inline uint32_t MaxLoadOnGrowthFailure(uint32_t aCapacity) {
  return aCapacity - (aCapacity >> 5);  // == aCapacity * 0.96875
}
static inline uint32_t MinLoad(uint32_t aCapacity) {
  return aCapacity >> 2;  // == aCapacity * 0.25
}

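// Worked example (added commentary): for a capacity of 128 these thresholds
// come out as MaxLoad(128) == 96, MaxLoadOnGrowthFailure(128) == 124, and
// MinLoad(128) == 32, i.e. the table normally resizes once it reaches 96
// entries, tolerates up to 124 when growth fails, and shrinks below 32.
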
// Compute the minimum capacity (and the Log2 of that capacity) for a table
// containing |aLength| elements while respecting the following constraints:
// - table must be at most 75% full;
// - capacity must be a power of two;
// - capacity cannot be too small.
static inline void BestCapacity(uint32_t aLength, uint32_t* aCapacityOut,
                                uint32_t* aLog2CapacityOut) {
  // Callers should ensure this is true.
  MOZ_ASSERT(aLength <= PLDHashTable::kMaxInitialLength);

  // Compute the smallest capacity allowing |aLength| elements to be inserted
  // without rehashing.
  uint32_t capacity = (aLength * 4 + (3 - 1)) / 3;  // == ceil(aLength * 4 / 3)
  if (capacity < PLDHashTable::kMinCapacity) {
    capacity = PLDHashTable::kMinCapacity;
  }

  // Round up capacity to next power-of-two.
  uint32_t log2 = CeilingLog2(capacity);
  capacity = 1u << log2;
  MOZ_ASSERT(capacity <= PLDHashTable::kMaxCapacity);

  *aCapacityOut = capacity;
  *aLog2CapacityOut = log2;
}

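// Worked example (added commentary): BestCapacity(100, ...) first computes
// ceil(100 * 4 / 3) == 134, which is above kMinCapacity, and then rounds up to
// the next power of two, giving a capacity of 256 with log2 == 8.
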
/* static */ MOZ_ALWAYS_INLINE uint32_t
PLDHashTable::HashShift(uint32_t aEntrySize, uint32_t aLength) {
  if (aLength > kMaxInitialLength) {
    MOZ_CRASH("Initial length is too large");
  }

  uint32_t capacity, log2;
  BestCapacity(aLength, &capacity, &log2);

  uint32_t nbytes;
  if (!SizeOfEntryStore(capacity, aEntrySize, &nbytes)) {
    MOZ_CRASH("Initial entry store size is too large");
  }

  // Compute the hashShift value.
  return kPLDHashNumberBits - log2;
}

PLDHashTable::PLDHashTable(const PLDHashTableOps* aOps, uint32_t aEntrySize,
                           uint32_t aLength)
    : mOps(aOps),
      mGeneration(0),
      mHashShift(HashShift(aEntrySize, aLength)),
      mEntrySize(aEntrySize),
      mEntryCount(0),
      mRemovedCount(0) {
  // An entry size greater than 0xff is unlikely, but let's check anyway. If
  // you hit this, your hashtable would waste lots of space for unused entries
  // and you should change your hash table's entries to pointers.
  if (aEntrySize != uint32_t(mEntrySize)) {
    MOZ_CRASH("Entry size is too large");
  }
}

PLDHashTable& PLDHashTable::operator=(PLDHashTable&& aOther) {
  if (this == &aOther) {
    return *this;
  }

  // |mOps| and |mEntrySize| are required to stay the same, they're
  // conceptually part of the type -- indeed, if PLDHashTable was a templated
  // type like nsTHashtable, they *would* be part of the type -- so it only
  // makes sense to assign in cases where they match.
  MOZ_RELEASE_ASSERT(mOps == aOther.mOps || !mOps);
  MOZ_RELEASE_ASSERT(mEntrySize == aOther.mEntrySize || !mEntrySize);

  // Reconstruct |this|.
  const PLDHashTableOps* ops = aOther.mOps;
  this->~PLDHashTable();
  new (KnownNotNull, this) PLDHashTable(ops, aOther.mEntrySize, 0);

  // Move non-const pieces over.
  mHashShift = std::move(aOther.mHashShift);
  mEntryCount = std::move(aOther.mEntryCount);
  mRemovedCount = std::move(aOther.mRemovedCount);
  mEntryStore.Set(aOther.mEntryStore.Get(), &mGeneration);
#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
  mChecker = std::move(aOther.mChecker);
#endif

  // Clear up |aOther| so its destruction will be a no-op and it reports being
  // empty.
  {
#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
    AutoDestructorOp op(mChecker);
#endif
    aOther.mEntryCount = 0;
    aOther.mEntryStore.Set(nullptr, &aOther.mGeneration);
  }

  return *this;
}

PLDHashNumber PLDHashTable::Hash1(PLDHashNumber aHash0) const {
  return aHash0 >> mHashShift;
}

void PLDHashTable::Hash2(PLDHashNumber aHash0, uint32_t& aHash2Out,
                         uint32_t& aSizeMaskOut) const {
  uint32_t sizeLog2 = kPLDHashNumberBits - mHashShift;
  uint32_t sizeMask = (PLDHashNumber(1) << sizeLog2) - 1;
  aSizeMaskOut = sizeMask;

  // The incoming aHash0 always has the low bit unset (since we leave it
  // free for the collision flag), and should have reasonably random
  // data in the other 31 bits. We used the high bits of aHash0 for
  // Hash1, so we use the low bits here. If the table size is large,
  // the bits we use may overlap, but that's still more random than
  // filling with 0s.

  // Double hashing needs the second hash code to be relatively prime to table
  // size, so we simply make hash2 odd.

  // This also conveniently covers up the fact that we have the low bit
  // unset since aHash0 has the low bit unset.
  aHash2Out = (aHash0 & sizeMask) | 1;
}

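// Worked example (added commentary, using small made-up numbers): for a table
// with capacity 16 (sizeMask == 15) and aHash0 == 0x2A (low bit unset), Hash2
// yields hash2 == (0x2A & 15) | 1 == 11, which is odd and therefore relatively
// prime to the power-of-two table size, so the probe step
// |hash1 -= hash2; hash1 &= sizeMask| used in SearchTable() below visits every
// slot before repeating.
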
// Reserve mKeyHash 0 for free entries and 1 for removed-entry sentinels. Note
// that a removed-entry sentinel need be stored only if the removed entry had
// a colliding entry added after it. Therefore we can use 1 as the collision
// flag in addition to the removed-entry sentinel value. Multiplicative hash
// uses the high order bits of mKeyHash, so this least-significant reservation
// should not hurt the hash function's effectiveness much.

// Match an entry's mKeyHash against an unstored one computed from a key.
/* static */
bool PLDHashTable::MatchSlotKeyhash(Slot& aSlot, const PLDHashNumber aKeyHash) {
  return (aSlot.KeyHash() & ~kCollisionFlag) == aKeyHash;
}

// Compute the address of the indexed entry in table.
auto PLDHashTable::SlotForIndex(uint32_t aIndex) const -> Slot {
  return mEntryStore.SlotForIndex(aIndex, mEntrySize, CapacityFromHashShift());
}

PLDHashTable::~PLDHashTable() {
#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
  AutoDestructorOp op(mChecker);
#endif

  if (!mEntryStore.IsAllocated()) {
    return;
  }

  // Clear any remaining live entries (if not trivially destructible).
  if (mOps->clearEntry) {
    mEntryStore.ForEachSlot(Capacity(), mEntrySize, [&](const Slot& aSlot) {
      if (aSlot.IsLive()) {
        mOps->clearEntry(this, aSlot.ToEntry());
      }
    });
  }

  // Entry storage is freed last, by ~EntryStore().
}

void PLDHashTable::ClearAndPrepareForLength(uint32_t aLength) {
  // Get these values before the destructor clobbers them.
  const PLDHashTableOps* ops = mOps;
  uint32_t entrySize = mEntrySize;

  this->~PLDHashTable();
  new (KnownNotNull, this) PLDHashTable(ops, entrySize, aLength);
}

void PLDHashTable::Clear() { ClearAndPrepareForLength(kDefaultInitialLength); }

// If |Reason| is |ForAdd|, the return value is always non-null and it may be
// a previously-removed entry. If |Reason| is |ForSearchOrRemove|, the return
// value is null on a miss, and will never be a previously-removed entry on a
// hit. This distinction is a bit grotty but this function is hot enough that
// these differences are worthwhile. (It's also hot enough that
// MOZ_ALWAYS_INLINE makes a significant difference.)
template <PLDHashTable::SearchReason Reason, typename Success, typename Failure>
MOZ_ALWAYS_INLINE auto PLDHashTable::SearchTable(const void* aKey,
                                                 PLDHashNumber aKeyHash,
                                                 Success&& aSuccess,
                                                 Failure&& aFailure) const {
  MOZ_ASSERT(mEntryStore.IsAllocated());
  NS_ASSERTION(!(aKeyHash & kCollisionFlag), "!(aKeyHash & kCollisionFlag)");

  // Compute the primary hash address.
  PLDHashNumber hash1 = Hash1(aKeyHash);
  Slot slot = SlotForIndex(hash1);

  // Miss: return space for a new entry.
  if (slot.IsFree()) {
    return (Reason == ForAdd) ? aSuccess(slot) : aFailure();
  }

  // Hit: return entry.
  PLDHashMatchEntry matchEntry = mOps->matchEntry;
  if (MatchSlotKeyhash(slot, aKeyHash)) {
    PLDHashEntryHdr* e = slot.ToEntry();
    if (matchEntry(e, aKey)) {
      return aSuccess(slot);
    }
  }

  // Collision: double hash.
  PLDHashNumber hash2;
  uint32_t sizeMask;
  Hash2(aKeyHash, hash2, sizeMask);

  // Save the first removed entry slot so Add() can recycle it. (Only used
  // if Reason==ForAdd.)
  Maybe<Slot> firstRemoved;

  for (;;) {
    if (Reason == ForAdd && !firstRemoved) {
      if (MOZ_UNLIKELY(slot.IsRemoved())) {
        firstRemoved.emplace(slot);
      } else {
        slot.MarkColliding();
      }
    }

    hash1 -= hash2;
    hash1 &= sizeMask;

    slot = SlotForIndex(hash1);
    if (slot.IsFree()) {
      if (Reason != ForAdd) {
        return aFailure();
      }
      return aSuccess(firstRemoved.refOr(slot));
    }

    if (MatchSlotKeyhash(slot, aKeyHash)) {
      PLDHashEntryHdr* e = slot.ToEntry();
      if (matchEntry(e, aKey)) {
        return aSuccess(slot);
      }
    }
  }

  // NOTREACHED
  return aFailure();
}

// This is a copy of SearchTable(), used by ChangeTable(), hardcoded to
//   1. assume |Reason| is |ForAdd|,
//   2. assume that |aKey| will never match an existing entry, and
//   3. assume that no entries have been removed from the current table
//      structure.
// Avoiding the need for |aKey| means we can avoid needing a way to map entries
// to keys, which means callers can use complex key types more easily.
MOZ_ALWAYS_INLINE auto PLDHashTable::FindFreeSlot(PLDHashNumber aKeyHash) const
    -> Slot {
  MOZ_ASSERT(mEntryStore.IsAllocated());
  NS_ASSERTION(!(aKeyHash & kCollisionFlag), "!(aKeyHash & kCollisionFlag)");

  // Compute the primary hash address.
  PLDHashNumber hash1 = Hash1(aKeyHash);
  Slot slot = SlotForIndex(hash1);

  // Miss: return space for a new entry.
  if (slot.IsFree()) {
    return slot;
  }

  // Collision: double hash.
  PLDHashNumber hash2;
  uint32_t sizeMask;
  Hash2(aKeyHash, hash2, sizeMask);

  for (;;) {
    MOZ_ASSERT(!slot.IsRemoved());
    slot.MarkColliding();

    hash1 -= hash2;
    hash1 &= sizeMask;

    slot = SlotForIndex(hash1);
    if (slot.IsFree()) {
      return slot;
    }
  }

  // NOTREACHED
}

bool PLDHashTable::ChangeTable(int32_t aDeltaLog2) {
  MOZ_ASSERT(mEntryStore.IsAllocated());

  // Look, but don't touch, until we succeed in getting new entry store.
  int32_t oldLog2 = kPLDHashNumberBits - mHashShift;
  int32_t newLog2 = oldLog2 + aDeltaLog2;
  uint32_t newCapacity = 1u << newLog2;
  if (newCapacity > kMaxCapacity) {
    return false;
  }

  uint32_t nbytes;
  if (!SizeOfEntryStore(newCapacity, mEntrySize, &nbytes)) {
    return false;  // overflowed
  }

  char* newEntryStore = (char*)calloc(1, nbytes);
  if (!newEntryStore) {
    return false;
  }

  // We can't fail from here on, so update table parameters.
  mHashShift = kPLDHashNumberBits - newLog2;
  mRemovedCount = 0;

  // Assign the new entry store to table.
  char* oldEntryStore = mEntryStore.Get();
  mEntryStore.Set(newEntryStore, &mGeneration);
  PLDHashMoveEntry moveEntry = mOps->moveEntry;

  // Copy only live entries, leaving removed ones behind.
  uint32_t oldCapacity = 1u << oldLog2;
  EntryStore::ForEachSlot(
      oldEntryStore, oldCapacity, mEntrySize, [&](const Slot& slot) {
        if (slot.IsLive()) {
          const PLDHashNumber key = slot.KeyHash() & ~kCollisionFlag;
          Slot newSlot = FindFreeSlot(key);
          MOZ_ASSERT(newSlot.IsFree());
          moveEntry(this, slot.ToEntry(), newSlot.ToEntry());
          newSlot.SetKeyHash(key);
        }
      });

  free(oldEntryStore);
  return true;
}

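// Worked example (added commentary, with made-up sizes): growing a table whose
// capacity is 128 with aDeltaLog2 == 1 allocates a zeroed store for 256 slots,
// i.e. 256 * (mEntrySize + sizeof(PLDHashNumber)) bytes, and re-inserts only
// the live entries, which is why mRemovedCount can be reset to 0 here.
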
MOZ_ALWAYS_INLINE PLDHashNumber
PLDHashTable::ComputeKeyHash(const void* aKey) const {
  MOZ_ASSERT(mEntryStore.IsAllocated());

  PLDHashNumber keyHash = mozilla::ScrambleHashCode(mOps->hashKey(aKey));

  // Avoid 0 and 1 hash codes, they indicate free and removed entries.
  if (keyHash < 2) {
    keyHash -= 2;
  }
  keyHash &= ~kCollisionFlag;

  return keyHash;
}

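// Worked example (added commentary): because keyHash is unsigned, the
// |keyHash -= 2| adjustment maps the reserved values 0 and 1 to 0xFFFFFFFE and
// 0xFFFFFFFF respectively, and the final |keyHash &= ~kCollisionFlag| clears
// the low bit, so no computed hash can collide with the free (0) or
// removed-entry (1) sentinels stored in a slot's mKeyHash.
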
PLDHashEntryHdr* PLDHashTable::Search(const void* aKey) const {
#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
  AutoReadOp op(mChecker);
#endif

  if (!mEntryStore.IsAllocated()) {
    return nullptr;
  }

  return SearchTable<ForSearchOrRemove>(
      aKey, ComputeKeyHash(aKey),
      [&](Slot& slot) -> PLDHashEntryHdr* { return slot.ToEntry(); },
      [&]() -> PLDHashEntryHdr* { return nullptr; });
}

PLDHashEntryHdr* PLDHashTable::Add(const void* aKey,
                                   const mozilla::fallible_t& aFallible) {
  auto maybeEntryHandle = MakeEntryHandle(aKey, aFallible);
  if (!maybeEntryHandle) {
    return nullptr;
  }
  return maybeEntryHandle->OrInsert([&aKey, this](PLDHashEntryHdr* entry) {
    if (mOps->initEntry) {
      mOps->initEntry(entry, aKey);
    }
  });
}

PLDHashEntryHdr* PLDHashTable::Add(const void* aKey) {
  return MakeEntryHandle(aKey).OrInsert([&aKey, this](PLDHashEntryHdr* entry) {
    if (mOps->initEntry) {
      mOps->initEntry(entry, aKey);
    }
  });
}

void PLDHashTable::Remove(const void* aKey) {
#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
  AutoWriteOp op(mChecker);
#endif

  if (!mEntryStore.IsAllocated()) {
    return;
  }

  PLDHashNumber keyHash = ComputeKeyHash(aKey);
  SearchTable<ForSearchOrRemove>(
      aKey, keyHash,
      [&](Slot& slot) {
        RawRemove(slot);
        ShrinkIfAppropriate();
      },
      [&]() {
        // Do nothing.
      });
}

void PLDHashTable::RemoveEntry(PLDHashEntryHdr* aEntry) {
#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
  AutoWriteOp op(mChecker);
#endif

  RawRemove(aEntry);
  ShrinkIfAppropriate();
}

void PLDHashTable::RawRemove(PLDHashEntryHdr* aEntry) {
  Slot slot(mEntryStore.SlotForPLDHashEntry(aEntry, Capacity(), mEntrySize));
  RawRemove(slot);
}

void PLDHashTable::RawRemove(Slot& aSlot) {
  // Unfortunately, we can only do weak checking here. That's because
  // RawRemove() can be called legitimately while an Enumerate() call is
  // active, which doesn't fit well into how Checker's mState variable works.
  MOZ_ASSERT(mChecker.IsWritable());

  MOZ_ASSERT(mEntryStore.IsAllocated());

  MOZ_ASSERT(aSlot.IsLive());

  // Load keyHash first in case clearEntry() goofs it.
  PLDHashNumber keyHash = aSlot.KeyHash();
  if (mOps->clearEntry) {
    PLDHashEntryHdr* entry = aSlot.ToEntry();
    mOps->clearEntry(this, entry);
  }
  if (keyHash & kCollisionFlag) {
    aSlot.MarkRemoved();
    mRemovedCount++;
  } else {
    aSlot.MarkFree();
  }
  mEntryCount--;
}

// Shrink or compress if a quarter or more of all entries are removed, or if the
// table is underloaded according to the minimum alpha, and is not minimal-size
// already.
void PLDHashTable::ShrinkIfAppropriate() {
  uint32_t capacity = Capacity();
  if (mRemovedCount >= capacity >> 2 ||
      (capacity > kMinCapacity && mEntryCount <= MinLoad(capacity))) {
    uint32_t log2;
    BestCapacity(mEntryCount, &capacity, &log2);

    int32_t deltaLog2 = log2 - (kPLDHashNumberBits - mHashShift);
    MOZ_ASSERT(deltaLog2 <= 0);

    (void)ChangeTable(deltaLog2);
  }
}

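// Worked example (added commentary, with made-up counts): a table with
// capacity 256 shrinks either when mRemovedCount reaches 64 (a quarter of the
// capacity) or when mEntryCount drops to MinLoad(256) == 64, at which point
// BestCapacity() picks the smallest power-of-two capacity that keeps the load
// at or below 75%.
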
size_t PLDHashTable::ShallowSizeOfExcludingThis(
    MallocSizeOf aMallocSizeOf) const {
#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
  AutoReadOp op(mChecker);
#endif

  return aMallocSizeOf(mEntryStore.Get());
}

size_t PLDHashTable::ShallowSizeOfIncludingThis(
    MallocSizeOf aMallocSizeOf) const {
  return aMallocSizeOf(this) + ShallowSizeOfExcludingThis(aMallocSizeOf);
}

mozilla::Maybe<PLDHashTable::EntryHandle> PLDHashTable::MakeEntryHandle(
    const void* aKey, const mozilla::fallible_t&) {
#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
  mChecker.StartWriteOp();
  auto endWriteOp = MakeScopeExit([&] { mChecker.EndWriteOp(); });
#endif

  // Allocate the entry storage if it hasn't already been allocated.
  if (!mEntryStore.IsAllocated()) {
    uint32_t nbytes;
    // We already checked this in the constructor, so it must still be true.
    MOZ_RELEASE_ASSERT(
        SizeOfEntryStore(CapacityFromHashShift(), mEntrySize, &nbytes));
    mEntryStore.Set((char*)calloc(1, nbytes), &mGeneration);
    if (!mEntryStore.IsAllocated()) {
      return Nothing();
    }
  }

  // If alpha is >= .75, grow or compress the table. If aKey is already in the
  // table, we may grow once more than necessary, but only if we are on the
  // edge of being overloaded.
  uint32_t capacity = Capacity();
  if (mEntryCount + mRemovedCount >= MaxLoad(capacity)) {
    // Compress if a quarter or more of all entries are removed.
    int deltaLog2 = 1;
    if (mRemovedCount >= capacity >> 2) {
      deltaLog2 = 0;
    }

    // Grow or compress the table. If ChangeTable() fails, allow overloading up
    // to the secondary max. Once we hit the secondary max, return null.
    if (!ChangeTable(deltaLog2) &&
        mEntryCount + mRemovedCount >= MaxLoadOnGrowthFailure(capacity)) {
      return Nothing();
    }
  }

  // Look for entry after possibly growing, so we don't have to add it,
  // then skip it while growing the table and re-add it after.
  PLDHashNumber keyHash = ComputeKeyHash(aKey);
  Slot slot = SearchTable<ForAdd>(
      aKey, keyHash, [](Slot& found) -> Slot { return found; },
      []() -> Slot {
        MOZ_CRASH("Nope");
        return Slot(nullptr, nullptr);
      });

  // The `EntryHandle` will handle ending the write op when it is destroyed.
#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
  endWriteOp.release();
#endif

  return Some(EntryHandle{this, keyHash, slot});
}

PLDHashTable::EntryHandle PLDHashTable::MakeEntryHandle(const void* aKey) {
  auto res = MakeEntryHandle(aKey, fallible);
  if (!res) {
    if (!mEntryStore.IsAllocated()) {
      // We OOM'd while allocating the initial entry storage.
      uint32_t nbytes;
      (void)SizeOfEntryStore(CapacityFromHashShift(), mEntrySize, &nbytes);
      NS_ABORT_OOM(nbytes);
    } else {
      // We failed to resize the existing entry storage, either due to OOM or
      // because we exceeded the maximum table capacity or size; report it as
      // an OOM. The multiplication by 2 gets us the size we tried to allocate,
      // which is double the current size.
      NS_ABORT_OOM(2 * EntrySize() * EntryCount());
    }
  }
  return res.extract();
}

PLDHashTable::EntryHandle::EntryHandle(PLDHashTable* aTable,
                                       PLDHashNumber aKeyHash, Slot aSlot)
    : mTable(aTable), mKeyHash(aKeyHash), mSlot(aSlot) {}

PLDHashTable::EntryHandle::EntryHandle(EntryHandle&& aOther) noexcept
    : mTable(std::exchange(aOther.mTable, nullptr)),
      mKeyHash(aOther.mKeyHash),
      mSlot(aOther.mSlot) {}

#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
PLDHashTable::EntryHandle::~EntryHandle() {
  if (!mTable) {
    return;
  }

  mTable->mChecker.EndWriteOp();
}
#endif

void PLDHashTable::EntryHandle::Remove() {
  MOZ_ASSERT(HasEntry());

  mTable->RawRemove(mSlot);
}

void PLDHashTable::EntryHandle::OrRemove() {
  if (HasEntry()) {
    Remove();
  }
}

void PLDHashTable::EntryHandle::OccupySlot() {
  MOZ_ASSERT(!HasEntry());

  PLDHashNumber keyHash = mKeyHash;
  if (mSlot.IsRemoved()) {
    mTable->mRemovedCount--;
    keyHash |= kCollisionFlag;
  }
  mSlot.SetKeyHash(keyHash);
  mTable->mEntryCount++;
}

PLDHashTable::Iterator::Iterator(Iterator&& aOther)
    : mTable(aOther.mTable),
      mCurrent(aOther.mCurrent),
      mNexts(aOther.mNexts),
      mNextsLimit(aOther.mNextsLimit),
      mHaveRemoved(aOther.mHaveRemoved),
      mEntrySize(aOther.mEntrySize) {
  // No need to change |mChecker| here.
  aOther.mTable = nullptr;
  // We don't really have the concept of a null slot, so leave mCurrent.
  aOther.mNexts = 0;
  aOther.mNextsLimit = 0;
  aOther.mHaveRemoved = false;
  aOther.mEntrySize = 0;
}

PLDHashTable::Iterator::Iterator(PLDHashTable* aTable)
    : mTable(aTable),
      mCurrent(mTable->mEntryStore.SlotForIndex(0, mTable->mEntrySize,
                                                mTable->Capacity())),
      mNexts(0),
      mNextsLimit(mTable->EntryCount()),
      mHaveRemoved(false),
      mEntrySize(aTable->mEntrySize) {
#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
  mTable->mChecker.StartReadOp();
#endif

  if (ChaosMode::isActive(ChaosFeature::HashTableIteration) &&
      mTable->Capacity() > 0) {
    // Start iterating at a random entry. It would be even more chaotic to
    // iterate in fully random order, but that's harder.
    uint32_t capacity = mTable->CapacityFromHashShift();
    uint32_t i = ChaosMode::randomUint32LessThan(capacity);
    mCurrent =
        mTable->mEntryStore.SlotForIndex(i, mTable->mEntrySize, capacity);
  }

  // Advance to the first live entry, if there is one.
  if (!Done() && IsOnNonLiveEntry()) {
    MoveToNextLiveEntry();
  }
}

PLDHashTable::Iterator::Iterator(PLDHashTable* aTable, EndIteratorTag aTag)
    : mTable(aTable),
      mCurrent(mTable->mEntryStore.SlotForIndex(0, mTable->mEntrySize,
                                                mTable->Capacity())),
      mNexts(mTable->EntryCount()),
      mNextsLimit(mTable->EntryCount()),
      mHaveRemoved(false),
      mEntrySize(aTable->mEntrySize) {
#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
  mTable->mChecker.StartReadOp();
#endif

  MOZ_ASSERT(Done());
}

PLDHashTable::Iterator::Iterator(const Iterator& aOther)
    : mTable(aOther.mTable),
      mCurrent(aOther.mCurrent),
      mNexts(aOther.mNexts),
      mNextsLimit(aOther.mNextsLimit),
      mHaveRemoved(aOther.mHaveRemoved),
      mEntrySize(aOther.mEntrySize) {
  // TODO: Is this necessary?
  MOZ_ASSERT(!mHaveRemoved);

#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
  mTable->mChecker.StartReadOp();
#endif
}

PLDHashTable::Iterator::~Iterator() {
  if (mTable) {
    if (mHaveRemoved) {
      mTable->ShrinkIfAppropriate();
    }
#ifdef MOZ_HASH_TABLE_CHECKS_ENABLED
    mTable->mChecker.EndReadOp();
#endif
  }
}

MOZ_ALWAYS_INLINE bool PLDHashTable::Iterator::IsOnNonLiveEntry() const {
  MOZ_ASSERT(!Done());
  return !mCurrent.IsLive();
}

void PLDHashTable::Iterator::Next() {
  MOZ_ASSERT(!Done());

  mNexts++;

  // Advance to the next live entry, if there is one.
  if (!Done()) {
    MoveToNextLiveEntry();
  }
}

MOZ_ALWAYS_INLINE void PLDHashTable::Iterator::MoveToNextLiveEntry() {
  // Chaos mode requires wraparound to cover all possible entries, so we can't
  // simply move to the next live entry and stop when we hit the end of the
  // entry store. But we don't want to introduce extra branches into our inner
  // loop. So we are going to exploit the structure of the entry store in this
  // method to implement an efficient inner loop.

  // The idea is that since we are really only iterating through the stored
  // hashes and because we know that there are a power-of-two number of
  // hashes, we can use masking to implement the wraparound for us. This
  // method does have the downside of needing to recalculate where the
  // associated entry is once we've found it, but that seems OK.

  // Our current slot and its associated hash.
  Slot slot = mCurrent;
  PLDHashNumber* p = slot.HashPtr();
  const uint32_t capacity = mTable->CapacityFromHashShift();
  const uint32_t mask = capacity - 1;
  auto hashes = reinterpret_cast<PLDHashNumber*>(mTable->mEntryStore.Get());
  uint32_t slotIndex = p - hashes;

  do {
    slotIndex = (slotIndex + 1) & mask;
  } while (!Slot::IsLiveHash(hashes[slotIndex]));

  // slotIndex now indicates where a live slot is. Rematerialize the entry
  // and the slot.
  mCurrent = mTable->mEntryStore.SlotForIndex(slotIndex, mEntrySize, capacity);
}

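// Worked example (added commentary): with a capacity of 8 the mask is 7, so a
// slotIndex of 7 advances to (7 + 1) & 7 == 0, giving the wraparound that
// chaos-mode iteration relies on without an extra branch in the loop.
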
void PLDHashTable::Iterator::Remove() {
  mTable->RawRemove(mCurrent);
  mHaveRemoved = true;
}
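
// Illustrative iteration sketch (added commentary, not part of the original
// file), assuming an entry type |MyEntry| that embeds PLDHashEntryHdr and a
// hypothetical ShouldDrop() predicate:
//
//   for (auto iter = table.Iter(); !iter.Done(); iter.Next()) {
//     auto* entry = static_cast<MyEntry*>(iter.Get());
//     if (ShouldDrop(entry)) {
//       iter.Remove();  // Safe: the table only shrinks when the iterator is
//     }                 // destroyed (see ~Iterator() above).
//   }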