1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 //---------------------------------------------------------------------------
8 // Overview
9 //---------------------------------------------------------------------------
11 // This file defines HashMap<Key, Value> and HashSet<T>, hash tables that are
12 // fast and have a nice API.
14 // Both hash tables have two optional template parameters.
16 // - HashPolicy. This defines the operations for hashing and matching keys. The
17 // default HashPolicy is appropriate when both of the following two
18 // conditions are true.
20 // - The key type stored in the table (|Key| for |HashMap<Key, Value>|, |T|
21 // for |HashSet<T>|) is an integer, pointer, UniquePtr, float, or double.
23 // - The type used for lookups (|Lookup|) is the same as the key type. This
24 // is usually the case, but not always.
26 // There is also a |CStringHasher| policy for |char*| keys. If your keys
27 // don't match any of the above cases, you must provide your own hash policy;
28 // see the "Hash Policy" section below.
30 // - AllocPolicy. This defines how allocations are done by the table.
32 // - |MallocAllocPolicy| is the default and is usually appropriate; note that
33 // operations (such as insertions) that might cause allocations are
34 // fallible and must be checked for OOM. These checks are enforced by the
35 // use of [[nodiscard]].
37 // - |InfallibleAllocPolicy| is another possibility; it allows the
38 // abovementioned OOM checks to be done with MOZ_ALWAYS_TRUE().
40 // Note that entry storage allocation is lazy, and not done until the first
41 // lookupForAdd(), put(), or putNew() is performed.
43 // See AllocPolicy.h for more details.
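//
// For illustration (a sketch; |key| and |value| are placeholders), a fallible
// insertion with the default MallocAllocPolicy looks like:
//
//   mozilla::HashMap<uint32_t, uint32_t> map;
//   if (!map.put(key, value)) {
//     return false;  // OOM; [[nodiscard]] on put() forces this check.
//   }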
45 // Documentation on how to use HashMap and HashSet, including examples, is
46 // present within those classes. Search for "class HashMap" and "class
47 // HashSet".
49 // Both HashMap and HashSet are implemented on top of a third class, HashTable.
50 // You only need to look at HashTable if you want to understand the
51 // implementation.
53 // How does mozilla::HashTable (this file) compare with PLDHashTable (and its
54 // subclasses, such as nsTHashtable)?
56 // - mozilla::HashTable is a lot faster, largely because it uses templates
57 // throughout *and* inlines everything. PLDHashTable inlines operations much
58 // less aggressively, and also uses "virtual ops" for operations like hashing
59 // and matching entries that require function calls.
61 // - Correspondingly, mozilla::HashTable use is likely to increase executable
62 // size much more than PLDHashTable.
64 // - mozilla::HashTable has a nicer API, with a proper HashSet vs. HashMap
65 // distinction.
67 // - mozilla::HashTable requires more explicit OOM checking. As mentioned
68 // above, the use of |InfallibleAllocPolicy| can simplify things.
70 // - mozilla::HashTable has a default capacity on creation of 32 and a minimum
71 // capacity of 4. PLDHashTable has a default capacity on creation of 8 and a
72 // minimum capacity of 8.
74 #ifndef mozilla_HashTable_h
75 #define mozilla_HashTable_h
77 #include <utility>
78 #include <type_traits>
80 #include "mozilla/AllocPolicy.h"
81 #include "mozilla/Assertions.h"
82 #include "mozilla/Attributes.h"
83 #include "mozilla/Casting.h"
84 #include "mozilla/HashFunctions.h"
85 #include "mozilla/MathAlgorithms.h"
86 #include "mozilla/Maybe.h"
87 #include "mozilla/MemoryChecking.h"
88 #include "mozilla/MemoryReporting.h"
89 #include "mozilla/Opaque.h"
90 #include "mozilla/OperatorNewExtensions.h"
91 #include "mozilla/ReentrancyGuard.h"
92 #include "mozilla/UniquePtr.h"
93 #include "mozilla/WrappingOperations.h"
95 namespace mozilla {
97 template <class, class = void>
98 struct DefaultHasher;
100 template <class, class>
101 class HashMapEntry;
103 namespace detail {
105 template <typename T>
106 class HashTableEntry;
108 template <class T, class HashPolicy, class AllocPolicy>
109 class HashTable;
111 } // namespace detail
113 // The "generation" of a hash table is an opaque value indicating the state of
114 // modification of the hash table through its lifetime. If the generation of
115 // a hash table compares equal at times T1 and T2, then lookups in the hash
116 // table, pointers to (or into) hash table entries, etc. at time T1 are valid
117 // at time T2. If the generation compares unequal, these computations are all
118 // invalid and must be performed again to be used.
120 // Generations are meaningfully comparable only with respect to a single hash
121 // table. It's always nonsensical to compare the generation of distinct hash
122 // tables H1 and H2.
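//
// For example (a sketch; |map| and |key| are placeholders), a cached Ptr can
// be revalidated by comparing generations:
//
//   Generation gen = map.generation();
//   auto ptr = map.lookup(key);
//   // ... other code runs ...
//   if (map.generation() != gen) {
//     ptr = map.lookup(key);  // the table may have changed; look up again
//   }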
123 using Generation = Opaque<uint64_t>;
125 //---------------------------------------------------------------------------
126 // HashMap
127 //---------------------------------------------------------------------------
129 // HashMap is a fast hash-based map from keys to values.
131 // Template parameter requirements:
132 // - Key/Value: movable, destructible, assignable.
133 // - HashPolicy: see the "Hash Policy" section below.
134 // - AllocPolicy: see AllocPolicy.h.
136 // Note:
137 // - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
138 // called by HashMap must not call back into the same HashMap object.
140 template <class Key, class Value, class HashPolicy = DefaultHasher<Key>,
141 class AllocPolicy = MallocAllocPolicy>
142 class HashMap {
143 // -- Implementation details -----------------------------------------------
145 // HashMap is not copyable or assignable.
146 HashMap(const HashMap& hm) = delete;
147 HashMap& operator=(const HashMap& hm) = delete;
149 using TableEntry = HashMapEntry<Key, Value>;
151 struct MapHashPolicy : HashPolicy {
152 using Base = HashPolicy;
153 using KeyType = Key;
155 static const Key& getKey(TableEntry& aEntry) { return aEntry.key(); }
157 static void setKey(TableEntry& aEntry, Key& aKey) {
158 HashPolicy::rekey(aEntry.mutableKey(), aKey);
162 using Impl = detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy>;
163 Impl mImpl;
165 friend class Impl::Enum;
167 public:
168 using Lookup = typename HashPolicy::Lookup;
169 using Entry = TableEntry;
171 // -- Initialization -------------------------------------------------------
173 explicit HashMap(AllocPolicy aAllocPolicy = AllocPolicy(),
174 uint32_t aLen = Impl::sDefaultLen)
175 : mImpl(std::move(aAllocPolicy), aLen) {}
177 explicit HashMap(uint32_t aLen) : mImpl(AllocPolicy(), aLen) {}
179 // HashMap is movable.
180 HashMap(HashMap&& aRhs) = default;
181 HashMap& operator=(HashMap&& aRhs) = default;
183 // -- Status and sizing ----------------------------------------------------
185 // The map's current generation.
186 Generation generation() const { return mImpl.generation(); }
188 // Is the map empty?
189 bool empty() const { return mImpl.empty(); }
191 // Number of keys/values in the map.
192 uint32_t count() const { return mImpl.count(); }
194 // Number of key/value slots in the map. Note: resize will happen well before
195 // count() == capacity().
196 uint32_t capacity() const { return mImpl.capacity(); }
198 // The size of the map's entry storage, in bytes. If the keys/values contain
199 // pointers to other heap blocks, you must iterate over the map and measure
200 // them separately; hence the "shallow" prefix.
201 size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
202 return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
204 size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
205 return aMallocSizeOf(this) +
206 mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
209 // Attempt to minimize the capacity(). If the table is empty, this will free
210 // the empty storage and upon regrowth it will be given the minimum capacity.
211 void compact() { mImpl.compact(); }
213 // Attempt to reserve enough space to fit at least |aLen| elements. Does
214 // nothing if the map already has sufficient capacity.
215 [[nodiscard]] bool reserve(uint32_t aLen) { return mImpl.reserve(aLen); }
217 // -- Lookups --------------------------------------------------------------
219 // Does the map contain a key/value matching |aLookup|?
220 bool has(const Lookup& aLookup) const {
221 return mImpl.lookup(aLookup).found();
224 // Return a Ptr indicating whether a key/value matching |aLookup| is
225 // present in the map. E.g.:
227 // using HM = HashMap<int,char>;
228 // HM h;
229 // if (HM::Ptr p = h.lookup(3)) {
230 // assert(p->key() == 3);
231 // char val = p->value();
232 // }
234 using Ptr = typename Impl::Ptr;
235 MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const {
236 return mImpl.lookup(aLookup);
239 // Like lookup(), but does not assert if two threads call it at the same
240 // time. Only use this method when none of the threads will modify the map.
241 MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const {
242 return mImpl.readonlyThreadsafeLookup(aLookup);
245 // -- Insertions -----------------------------------------------------------
247 // Overwrite existing value with |aValue|, or add it if not present. Returns
248 // false on OOM.
249 template <typename KeyInput, typename ValueInput>
250 [[nodiscard]] bool put(KeyInput&& aKey, ValueInput&& aValue) {
251 AddPtr p = lookupForAdd(aKey);
252 if (p) {
253 p->value() = std::forward<ValueInput>(aValue);
254 return true;
256 return add(p, std::forward<KeyInput>(aKey),
257 std::forward<ValueInput>(aValue));
260 // Like put(), but slightly faster. Must only be used when the given key is
261 // not already present. (In debug builds, assertions check this.)
262 template <typename KeyInput, typename ValueInput>
263 [[nodiscard]] bool putNew(KeyInput&& aKey, ValueInput&& aValue) {
264 return mImpl.putNew(aKey, std::forward<KeyInput>(aKey),
265 std::forward<ValueInput>(aValue));
268 template <typename KeyInput, typename ValueInput>
269 [[nodiscard]] bool putNew(const Lookup& aLookup, KeyInput&& aKey,
270 ValueInput&& aValue) {
271 return mImpl.putNew(aLookup, std::forward<KeyInput>(aKey),
272 std::forward<ValueInput>(aValue));
275 // Like putNew(), but should only be used when the table is known to be big
276 // enough for the insertion, and hashing cannot fail. Typically this is used
277 // to populate an empty map with known-unique keys after reserving space with
278 // reserve(), e.g.
280 // using HM = HashMap<int,char>;
281 // HM h;
282 // if (!h.reserve(3)) {
283 // MOZ_CRASH("OOM");
284 // }
285 // h.putNewInfallible(1, 'a'); // unique key
286 // h.putNewInfallible(2, 'b'); // unique key
287 // h.putNewInfallible(3, 'c'); // unique key
289 template <typename KeyInput, typename ValueInput>
290 void putNewInfallible(KeyInput&& aKey, ValueInput&& aValue) {
291 mImpl.putNewInfallible(aKey, std::forward<KeyInput>(aKey),
292 std::forward<ValueInput>(aValue));
295 // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
296 // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
297 // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new key/value. E.g.:
299 // using HM = HashMap<int,char>;
300 // HM h;
301 // HM::AddPtr p = h.lookupForAdd(3);
302 // if (!p) {
303 // if (!h.add(p, 3, 'a')) {
304 // return false;
305 // }
306 // }
307 // assert(p->key() == 3);
308 // char val = p->value();
310 // N.B. The caller must ensure that no mutating hash table operations occur
311 // between a pair of lookupForAdd() and add() calls. To avoid looking up the
312 // key a second time, the caller may use the more efficient relookupOrAdd()
313 // method. This method reuses part of the hashing computation to more
314 // efficiently insert the key if it has not been added. For example, a
315 // mutation-handling version of the previous example:
317 // HM::AddPtr p = h.lookupForAdd(3);
318 // if (!p) {
319 // call_that_may_mutate_h();
320 // if (!h.relookupOrAdd(p, 3, 'a')) {
321 // return false;
322 // }
323 // }
324 // assert(p->key() == 3);
325 // char val = p->value();
327 using AddPtr = typename Impl::AddPtr;
328 MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) {
329 return mImpl.lookupForAdd(aLookup);
332 // Add a key/value. Returns false on OOM.
333 template <typename KeyInput, typename ValueInput>
334 [[nodiscard]] bool add(AddPtr& aPtr, KeyInput&& aKey, ValueInput&& aValue) {
335 return mImpl.add(aPtr, std::forward<KeyInput>(aKey),
336 std::forward<ValueInput>(aValue));
339 // See the comment above lookupForAdd() for details.
340 template <typename KeyInput, typename ValueInput>
341 [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, KeyInput&& aKey,
342 ValueInput&& aValue) {
343 return mImpl.relookupOrAdd(aPtr, aKey, std::forward<KeyInput>(aKey),
344 std::forward<ValueInput>(aValue));
347 // -- Removal --------------------------------------------------------------
349 // Lookup and remove the key/value matching |aLookup|, if present.
350 void remove(const Lookup& aLookup) {
351 if (Ptr p = lookup(aLookup)) {
352 remove(p);
356 // Remove a previously found key/value (assuming aPtr.found()). The map must
357 // not have been mutated in the interim.
358 void remove(Ptr aPtr) { mImpl.remove(aPtr); }
360 // Remove all keys/values without changing the capacity.
361 void clear() { mImpl.clear(); }
363 // Like clear() followed by compact().
364 void clearAndCompact() { mImpl.clearAndCompact(); }
366 // -- Rekeying -------------------------------------------------------------
368 // Infallibly rekey one entry, if necessary. Requires that template
369 // parameters Key and HashPolicy::Lookup are the same type.
370 void rekeyIfMoved(const Key& aOldKey, const Key& aNewKey) {
371 if (aOldKey != aNewKey) {
372 rekeyAs(aOldKey, aNewKey, aNewKey);
376 // Infallibly rekey one entry if present, and return whether that happened.
377 bool rekeyAs(const Lookup& aOldLookup, const Lookup& aNewLookup,
378 const Key& aNewKey) {
379 if (Ptr p = lookup(aOldLookup)) {
380 mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewKey);
381 return true;
383 return false;
386 // -- Iteration ------------------------------------------------------------
388 // |iter()| returns an Iterator:
390 // HashMap<int, char> h;
391 // for (auto iter = h.iter(); !iter.done(); iter.next()) {
392 // char c = iter.get().value();
393 // }
395 using Iterator = typename Impl::Iterator;
396 Iterator iter() const { return mImpl.iter(); }
398 // |modIter()| returns a ModIterator:
400 // HashMap<int, char> h;
401 // for (auto iter = h.modIter(); !iter.done(); iter.next()) {
402 // if (iter.get().value() == 'l') {
403 // iter.remove();
404 // }
405 // }
407 // Table resize may occur in ModIterator's destructor.
408 using ModIterator = typename Impl::ModIterator;
409 ModIterator modIter() { return mImpl.modIter(); }
411 // These are similar to Iterator/ModIterator/iter(), but use different
412 // terminology.
413 using Range = typename Impl::Range;
414 using Enum = typename Impl::Enum;
415 Range all() const { return mImpl.all(); }
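//
// E.g., a sketch using Range terminology:
//
//   for (auto r = h.all(); !r.empty(); r.popFront()) {
//     char c = r.front().value();
//   }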
418 //---------------------------------------------------------------------------
419 // HashSet
420 //---------------------------------------------------------------------------
422 // HashSet is a fast hash-based set of values.
424 // Template parameter requirements:
425 // - T: movable, destructible, assignable.
426 // - HashPolicy: see the "Hash Policy" section below.
427 // - AllocPolicy: see AllocPolicy.h
429 // Note:
430 // - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
431 // HashSet must not call back into the same HashSet object.
433 template <class T, class HashPolicy = DefaultHasher<T>,
434 class AllocPolicy = MallocAllocPolicy>
435 class HashSet {
436 // -- Implementation details -----------------------------------------------
438 // HashSet is not copyable or assignable.
439 HashSet(const HashSet& hs) = delete;
440 HashSet& operator=(const HashSet& hs) = delete;
442 struct SetHashPolicy : HashPolicy {
443 using Base = HashPolicy;
444 using KeyType = T;
446 static const KeyType& getKey(const T& aT) { return aT; }
448 static void setKey(T& aT, KeyType& aKey) { HashPolicy::rekey(aT, aKey); }
451 using Impl = detail::HashTable<const T, SetHashPolicy, AllocPolicy>;
452 Impl mImpl;
454 friend class Impl::Enum;
456 public:
457 using Lookup = typename HashPolicy::Lookup;
458 using Entry = T;
460 // -- Initialization -------------------------------------------------------
462 explicit HashSet(AllocPolicy aAllocPolicy = AllocPolicy(),
463 uint32_t aLen = Impl::sDefaultLen)
464 : mImpl(std::move(aAllocPolicy), aLen) {}
466 explicit HashSet(uint32_t aLen) : mImpl(AllocPolicy(), aLen) {}
468 // HashSet is movable.
469 HashSet(HashSet&& aRhs) = default;
470 HashSet& operator=(HashSet&& aRhs) = default;
472 // -- Status and sizing ----------------------------------------------------
474 // The set's current generation.
475 Generation generation() const { return mImpl.generation(); }
477 // Is the set empty?
478 bool empty() const { return mImpl.empty(); }
480 // Number of elements in the set.
481 uint32_t count() const { return mImpl.count(); }
483 // Number of element slots in the set. Note: resize will happen well before
484 // count() == capacity().
485 uint32_t capacity() const { return mImpl.capacity(); }
487 // The size of the set's entry storage, in bytes. If the elements contain
488 // pointers to other heap blocks, you must iterate over the set and measure
489 // them separately; hence the "shallow" prefix.
490 size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
491 return mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
493 size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
494 return aMallocSizeOf(this) +
495 mImpl.shallowSizeOfExcludingThis(aMallocSizeOf);
498 // Attempt to minimize the capacity(). If the table is empty, this will free
499 // the empty storage and upon regrowth it will be given the minimum capacity.
500 void compact() { mImpl.compact(); }
502 // Attempt to reserve enough space to fit at least |aLen| elements. Does
503 // nothing if the set already has sufficient capacity.
504 [[nodiscard]] bool reserve(uint32_t aLen) { return mImpl.reserve(aLen); }
506 // -- Lookups --------------------------------------------------------------
508 // Does the set contain an element matching |aLookup|?
509 bool has(const Lookup& aLookup) const {
510 return mImpl.lookup(aLookup).found();
513 // Return a Ptr indicating whether an element matching |aLookup| is present
514 // in the set. E.g.:
516 // using HS = HashSet<int>;
517 // HS h;
518 // if (HS::Ptr p = h.lookup(3)) {
519 // assert(*p == 3); // p acts like a pointer to int
520 // }
522 using Ptr = typename Impl::Ptr;
523 MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const {
524 return mImpl.lookup(aLookup);
527 // Like lookup(), but does not assert if two threads call it at the same
528 // time. Only use this method when none of the threads will modify the set.
529 MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const {
530 return mImpl.readonlyThreadsafeLookup(aLookup);
533 // -- Insertions -----------------------------------------------------------
535 // Add |aU| if it is not present already. Returns false on OOM.
536 template <typename U>
537 [[nodiscard]] bool put(U&& aU) {
538 AddPtr p = lookupForAdd(aU);
539 return p ? true : add(p, std::forward<U>(aU));
542 // Like put(), but slightly faster. Must only be used when the given element
543 // is not already present. (In debug builds, assertions check this.)
544 template <typename U>
545 [[nodiscard]] bool putNew(U&& aU) {
546 return mImpl.putNew(aU, std::forward<U>(aU));
549 // Like the other putNew(), but for when |Lookup| is different to |T|.
550 template <typename U>
551 [[nodiscard]] bool putNew(const Lookup& aLookup, U&& aU) {
552 return mImpl.putNew(aLookup, std::forward<U>(aU));
555 // Like putNew(), but should only be used when the table is known to be big
556 // enough for the insertion, and hashing cannot fail. Typically this is used
557 // to populate an empty set with known-unique elements after reserving space
558 // with reserve(), e.g.
560 // using HS = HashSet<int>;
561 // HS h;
562 // if (!h.reserve(3)) {
563 // MOZ_CRASH("OOM");
564 // }
565 // h.putNewInfallible(1); // unique element
566 // h.putNewInfallible(2); // unique element
567 // h.putNewInfallible(3); // unique element
569 template <typename U>
570 void putNewInfallible(const Lookup& aLookup, U&& aU) {
571 mImpl.putNewInfallible(aLookup, std::forward<U>(aU));
574 // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
575 // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
576 // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
578 // using HS = HashSet<int>;
579 // HS h;
580 // HS::AddPtr p = h.lookupForAdd(3);
581 // if (!p) {
582 // if (!h.add(p, 3)) {
583 // return false;
584 // }
585 // }
586 // assert(*p == 3); // p acts like a pointer to int
588 // N.B. The caller must ensure that no mutating hash table operations occur
589 // between a pair of lookupForAdd() and add() calls. To avoid looking up the
590 // key a second time, the caller may use the more efficient relookupOrAdd()
591 // method. This method reuses part of the hashing computation to more
592 // efficiently insert the key if it has not been added. For example, a
593 // mutation-handling version of the previous example:
595 // HS::AddPtr p = h.lookupForAdd(3);
596 // if (!p) {
597 // call_that_may_mutate_h();
598 // if (!h.relookupOrAdd(p, 3, 3)) {
599 // return false;
600 // }
601 // }
602 // assert(*p == 3);
604 // Note that relookupOrAdd(p,l,t) performs the lookup using |l| and adds the
605 // entry |t|, where the caller ensures match(l,t).
606 using AddPtr = typename Impl::AddPtr;
607 MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) {
608 return mImpl.lookupForAdd(aLookup);
611 // Add an element. Returns false on OOM.
612 template <typename U>
613 [[nodiscard]] bool add(AddPtr& aPtr, U&& aU) {
614 return mImpl.add(aPtr, std::forward<U>(aU));
617 // See the comment above lookupForAdd() for details.
618 template <typename U>
619 [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup,
620 U&& aU) {
621 return mImpl.relookupOrAdd(aPtr, aLookup, std::forward<U>(aU));
624 // -- Removal --------------------------------------------------------------
626 // Lookup and remove the element matching |aLookup|, if present.
627 void remove(const Lookup& aLookup) {
628 if (Ptr p = lookup(aLookup)) {
629 remove(p);
633 // Remove a previously found element (assuming aPtr.found()). The set must
634 // not have been mutated in the interim.
635 void remove(Ptr aPtr) { mImpl.remove(aPtr); }
637 // Remove all elements without changing the capacity.
638 void clear() { mImpl.clear(); }
640 // Like clear() followed by compact().
641 void clearAndCompact() { mImpl.clearAndCompact(); }
643 // -- Rekeying -------------------------------------------------------------
645 // Infallibly rekey one entry, if present. Requires that template parameters
646 // T and HashPolicy::Lookup are the same type.
647 void rekeyIfMoved(const Lookup& aOldValue, const T& aNewValue) {
648 if (aOldValue != aNewValue) {
649 rekeyAs(aOldValue, aNewValue, aNewValue);
653 // Infallibly rekey one entry if present, and return whether that happened.
654 bool rekeyAs(const Lookup& aOldLookup, const Lookup& aNewLookup,
655 const T& aNewValue) {
656 if (Ptr p = lookup(aOldLookup)) {
657 mImpl.rekeyAndMaybeRehash(p, aNewLookup, aNewValue);
658 return true;
660 return false;
663 // Infallibly replace the current key at |aPtr| with an equivalent key.
664 // Specifically, both HashPolicy::hash and HashPolicy::match must return
665 // identical results for the new and old key when applied against all
666 // possible matching values.
667 void replaceKey(Ptr aPtr, const Lookup& aLookup, const T& aNewValue) {
668 MOZ_ASSERT(aPtr.found());
669 MOZ_ASSERT(*aPtr != aNewValue);
670 MOZ_ASSERT(HashPolicy::match(*aPtr, aLookup));
671 MOZ_ASSERT(HashPolicy::match(aNewValue, aLookup));
672 const_cast<T&>(*aPtr) = aNewValue;
673 MOZ_ASSERT(*lookup(aLookup) == aNewValue);
675 void replaceKey(Ptr aPtr, const T& aNewValue) {
676 replaceKey(aPtr, aNewValue, aNewValue);
679 // -- Iteration ------------------------------------------------------------
681 // |iter()| returns an Iterator:
683 // HashSet<int> h;
684 // for (auto iter = h.iter(); !iter.done(); iter.next()) {
685 // int i = iter.get();
686 // }
688 using Iterator = typename Impl::Iterator;
689 Iterator iter() const { return mImpl.iter(); }
691 // |modIter()| returns a ModIterator:
693 // HashSet<int> h;
694 // for (auto iter = h.modIter(); !iter.done(); iter.next()) {
695 // if (iter.get() == 42) {
696 // iter.remove();
697 // }
698 // }
700 // Table resize may occur in ModIterator's destructor.
701 using ModIterator = typename Impl::ModIterator;
702 ModIterator modIter() { return mImpl.modIter(); }
704 // These are similar to Iterator/ModIterator/iter(), but use different
705 // terminology.
706 using Range = typename Impl::Range;
707 using Enum = typename Impl::Enum;
708 Range all() const { return mImpl.all(); }
711 //---------------------------------------------------------------------------
712 // Hash Policy
713 //---------------------------------------------------------------------------
715 // A hash policy |HP| for a hash table with key-type |Key| must provide:
717 // - a type |HP::Lookup| to use to lookup table entries;
719 // - a static member function |HP::hash| that hashes lookup values:
721 // static mozilla::HashNumber hash(const Lookup&);
723 // - a static member function |HP::match| that tests equality of key and
724 // lookup values:
726 // static bool match(const Key&, const Lookup&);
728 // Normally, Lookup = Key. In general, though, different values and types of
729 // values can be used to lookup and store. If a Lookup value |l| is not equal
730 // to the added Key value |k|, the user must ensure that |HP::match(k,l)| is
731 // true. E.g.:
733 // mozilla::HashSet<Key, HP>::AddPtr p = h.lookup(l);
734 // if (!p) {
735 // assert(HP::match(k, l)); // must hold
736 // h.add(p, k);
737 // }
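//
// As an illustrative sketch (the |TwoInts| type and its fields are
// hypothetical), a custom policy for a two-field key might look like:
//
//   struct TwoInts {
//     int32_t mA;
//     int32_t mB;
//   };
//
//   struct TwoIntsHasher {
//     using Lookup = TwoInts;
//     static mozilla::HashNumber hash(const Lookup& aLookup) {
//       return mozilla::HashGeneric(aLookup.mA, aLookup.mB);
//     }
//     static bool match(const TwoInts& aKey, const Lookup& aLookup) {
//       return aKey.mA == aLookup.mA && aKey.mB == aLookup.mB;
//     }
//   };
//
//   mozilla::HashSet<TwoInts, TwoIntsHasher> set;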
739 // A pointer hashing policy that uses HashGeneric() to create good hashes for
740 // pointers. Note that we don't shift out the lowest k bits because we don't
741 // want to assume anything about the alignment of the pointers.
742 template <typename Key>
743 struct PointerHasher {
744 using Lookup = Key;
746 static HashNumber hash(const Lookup& aLookup) {
747 size_t word = reinterpret_cast<size_t>(aLookup);
748 return HashGeneric(word);
751 static bool match(const Key& aKey, const Lookup& aLookup) {
752 return aKey == aLookup;
755 static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
758 // The default hash policy, which only works with integers.
759 template <class Key, typename>
760 struct DefaultHasher {
761 using Lookup = Key;
763 static HashNumber hash(const Lookup& aLookup) {
764 // Just convert the integer to a HashNumber and use that as is. (This
765 // discards the high 32-bits of 64-bit integers!) ScrambleHashCode() is
766 // subsequently called on the value to improve the distribution.
767 return aLookup;
770 static bool match(const Key& aKey, const Lookup& aLookup) {
771 // Use builtin or overloaded operator==.
772 return aKey == aLookup;
775 static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
778 // A DefaultHasher specialization for enums.
779 template <class T>
780 struct DefaultHasher<T, std::enable_if_t<std::is_enum_v<T>>> {
781 using Key = T;
782 using Lookup = Key;
784 static HashNumber hash(const Lookup& aLookup) { return HashGeneric(aLookup); }
786 static bool match(const Key& aKey, const Lookup& aLookup) {
787 // Use builtin or overloaded operator==.
788 return aKey == static_cast<Key>(aLookup);
791 static void rekey(Key& aKey, const Key& aNewKey) { aKey = aNewKey; }
794 // A DefaultHasher specialization for pointers.
795 template <class T>
796 struct DefaultHasher<T*> : PointerHasher<T*> {};
798 // A DefaultHasher specialization for mozilla::UniquePtr.
799 template <class T, class D>
800 struct DefaultHasher<UniquePtr<T, D>> {
801 using Key = UniquePtr<T, D>;
802 using Lookup = Key;
803 using PtrHasher = PointerHasher<T*>;
805 static HashNumber hash(const Lookup& aLookup) {
806 return PtrHasher::hash(aLookup.get());
809 static bool match(const Key& aKey, const Lookup& aLookup) {
810 return PtrHasher::match(aKey.get(), aLookup.get());
813 static void rekey(UniquePtr<T, D>& aKey, UniquePtr<T, D>&& aNewKey) {
814 aKey = std::move(aNewKey);
818 // A DefaultHasher specialization for doubles.
819 template <>
820 struct DefaultHasher<double> {
821 using Key = double;
822 using Lookup = Key;
824 static HashNumber hash(const Lookup& aLookup) {
825 // Just xor the high bits with the low bits, and then treat the bits of the
826 // result as a uint32_t.
827 static_assert(sizeof(HashNumber) == 4,
828 "subsequent code assumes a four-byte hash");
829 uint64_t u = BitwiseCast<uint64_t>(aLookup);
830 return HashNumber(u ^ (u >> 32));
833 static bool match(const Key& aKey, const Lookup& aLookup) {
834 return BitwiseCast<uint64_t>(aKey) == BitwiseCast<uint64_t>(aLookup);
838 // A DefaultHasher specialization for floats.
839 template <>
840 struct DefaultHasher<float> {
841 using Key = float;
842 using Lookup = Key;
844 static HashNumber hash(const Lookup& aLookup) {
845 // Just use the value as if its bits form an integer. ScrambleHashCode() is
846 // subsequently called on the value to improve the distribution.
847 static_assert(sizeof(HashNumber) == 4,
848 "subsequent code assumes a four-byte hash");
849 return HashNumber(BitwiseCast<uint32_t>(aLookup));
852 static bool match(const Key& aKey, const Lookup& aLookup) {
853 return BitwiseCast<uint32_t>(aKey) == BitwiseCast<uint32_t>(aLookup);
857 // A hash policy for C strings.
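// E.g. (sketch): mozilla::HashSet<const char*, CStringHasher> names;
// The table stores only the pointers, so the caller must keep the strings
// alive for as long as they are in the table.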
858 struct CStringHasher {
859 using Key = const char*;
860 using Lookup = const char*;
862 static HashNumber hash(const Lookup& aLookup) { return HashString(aLookup); }
864 static bool match(const Key& aKey, const Lookup& aLookup) {
865 return strcmp(aKey, aLookup) == 0;
869 //---------------------------------------------------------------------------
870 // Fallible Hashing Interface
871 //---------------------------------------------------------------------------
873 // Most of the time generating a hash code is infallible, so this class provides
874 // default methods that always succeed. Specialize this class for your own hash
875 // policy to provide fallible hashing.
877 // This is used by MovableCellHasher to handle the fact that generating a unique
878 // ID for a cell pointer may fail due to OOM.
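//
// A specialization sketch (|MyPolicy|, |hasUniqueId()| and |EnsureUniqueId()|
// are hypothetical):
//
//   template <>
//   struct FallibleHashMethods<MyPolicy> {
//     template <typename Lookup>
//     static bool hasHash(Lookup&& aLookup) {
//       return aLookup->hasUniqueId();
//     }
//     template <typename Lookup>
//     static bool ensureHash(Lookup&& aLookup) {
//       return EnsureUniqueId(std::forward<Lookup>(aLookup));  // false on OOM
//     }
//   };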
879 template <typename HashPolicy>
880 struct FallibleHashMethods {
881 // Return true if a hashcode is already available for its argument. Once
882 // this returns true for a specific argument it must continue to do so.
883 template <typename Lookup>
884 static bool hasHash(Lookup&& aLookup) {
885 return true;
888 // Fallible method to ensure a hashcode exists for its argument and create
889 // one if not. Returns false on error, e.g. out of memory.
890 template <typename Lookup>
891 static bool ensureHash(Lookup&& aLookup) {
892 return true;
896 template <typename HashPolicy, typename Lookup>
897 static bool HasHash(Lookup&& aLookup) {
898 return FallibleHashMethods<typename HashPolicy::Base>::hasHash(
899 std::forward<Lookup>(aLookup));
902 template <typename HashPolicy, typename Lookup>
903 static bool EnsureHash(Lookup&& aLookup) {
904 return FallibleHashMethods<typename HashPolicy::Base>::ensureHash(
905 std::forward<Lookup>(aLookup));
908 //---------------------------------------------------------------------------
909 // Implementation Details (HashMapEntry, HashTableEntry, HashTable)
910 //---------------------------------------------------------------------------
912 // Both HashMap and HashSet are implemented by a single HashTable that is even
913 // more heavily parameterized than the other two. This leaves HashTable gnarly
914 // and extremely coupled to HashMap and HashSet; thus code should not use
915 // HashTable directly.
917 template <class Key, class Value>
918 class HashMapEntry {
919 Key key_;
920 Value value_;
922 template <class, class, class>
923 friend class detail::HashTable;
924 template <class>
925 friend class detail::HashTableEntry;
926 template <class, class, class, class>
927 friend class HashMap;
929 public:
930 template <typename KeyInput, typename ValueInput>
931 HashMapEntry(KeyInput&& aKey, ValueInput&& aValue)
932 : key_(std::forward<KeyInput>(aKey)),
933 value_(std::forward<ValueInput>(aValue)) {}
935 HashMapEntry(HashMapEntry&& aRhs) = default;
936 HashMapEntry& operator=(HashMapEntry&& aRhs) = default;
938 using KeyType = Key;
939 using ValueType = Value;
941 const Key& key() const { return key_; }
943 // Use this method with caution! If the key is changed such that its hash
944 // value also changes, the map will be left in an invalid state.
945 Key& mutableKey() { return key_; }
947 const Value& value() const { return value_; }
948 Value& value() { return value_; }
950 private:
951 HashMapEntry(const HashMapEntry&) = delete;
952 void operator=(const HashMapEntry&) = delete;
955 namespace detail {
957 template <class T, class HashPolicy, class AllocPolicy>
958 class HashTable;
960 template <typename T>
961 class EntrySlot;
963 template <typename T>
964 class HashTableEntry {
965 private:
966 using NonConstT = std::remove_const_t<T>;
968 // Instead of having a hash table entry store that looks like this:
970 // +--------+--------+--------+--------+
971 // | entry0 | entry1 | .... | entryN |
972 // +--------+--------+--------+--------+
974 // where the entries contained their cached hash code, we're going to lay out
975 // the entry store thusly:
977 // +-------+-------+-------+-------+--------+--------+--------+--------+
978 // | hash0 | hash1 | ... | hashN | entry0 | entry1 | .... | entryN |
979 // +-------+-------+-------+-------+--------+--------+--------+--------+
981 // with all the cached hashes prior to the actual entries themselves.
983 // We do this because implementing the first strategy requires us to make
984 // HashTableEntry look roughly like:
986 // template <typename T>
987 // class HashTableEntry {
988 // HashNumber mKeyHash;
989 // T mValue;
990 // };
992 // The problem with this setup is that, depending on the layout of `T`, there
993 // may be platform ABI-mandated padding between `mKeyHash` and the first
994 // member of `T`. This ABI-mandated padding is wasted space, and can be
995 // surprisingly common, e.g. when `T` is a single pointer on 64-bit platforms.
996 // In such cases, we're throwing away a quarter of our entry store on padding,
997 // which is undesirable.
999 // The second layout above, namely:
1001 // +-------+-------+-------+-------+--------+--------+--------+--------+
1002 // | hash0 | hash1 | ... | hashN | entry0 | entry1 | .... | entryN |
1003 // +-------+-------+-------+-------+--------+--------+--------+--------+
1005 // means there is no wasted space between the hashes themselves, and no wasted
1006 // space between the entries themselves. However, we would also like there to
1007 // be no gap between the last hash and the first entry. The memory allocator
1008 // guarantees the alignment of the start of the hashes. The use of a
1009 // power-of-two capacity of at least 4 guarantees that the alignment of the
1010 // *end* of the hash array is no less than the alignment of the start.
1011 // Finally, the static_asserts here guarantee that the entries themselves
1012 // don't need to be any more aligned than the alignment of the entry store
1013 // itself.
1015 // This assertion is safe for 32-bit builds because on both Windows and Linux
1016 // (including Android), the minimum alignment for allocations larger than 8
1017 // bytes is 8 bytes, and the actual data for entries in our entry store is
1018 // guaranteed to have that alignment as well, thanks to the power-of-two
1019 // number of cached hash values stored prior to the entry data.
1021 // The allocation policy must allocate a table with at least this much
1022 // alignment.
1023 static constexpr size_t kMinimumAlignment = 8;
1025 static_assert(alignof(HashNumber) <= kMinimumAlignment,
1026 "[N*2 hashes, N*2 T values] allocation's alignment must be "
1027 "enough to align each hash");
1028 static_assert(alignof(NonConstT) <= 2 * sizeof(HashNumber),
1029 "subsequent N*2 T values must not require more than an even "
1030 "number of HashNumbers provides");
1032 static const HashNumber sFreeKey = 0;
1033 static const HashNumber sRemovedKey = 1;
1034 static const HashNumber sCollisionBit = 1;
1036 alignas(NonConstT) unsigned char mValueData[sizeof(NonConstT)];
1038 private:
1039 template <class, class, class>
1040 friend class HashTable;
1041 template <typename>
1042 friend class EntrySlot;
1044 // Some versions of GCC treat it as a -Wstrict-aliasing violation (ergo a
1045 // -Werror compile error) to reinterpret_cast<> |mValueData| to |T*|, even
1046 // through |void*|. Placing the latter cast in these separate functions
1047 // breaks the chain such that affected GCC versions no longer warn/error.
1048 void* rawValuePtr() { return mValueData; }
1050 static bool isLiveHash(HashNumber hash) { return hash > sRemovedKey; }
1052 HashTableEntry(const HashTableEntry&) = delete;
1053 void operator=(const HashTableEntry&) = delete;
1055 NonConstT* valuePtr() { return reinterpret_cast<NonConstT*>(rawValuePtr()); }
1057 void destroyStoredT() {
1058 NonConstT* ptr = valuePtr();
1059 ptr->~T();
1060 MOZ_MAKE_MEM_UNDEFINED(ptr, sizeof(*ptr));
1063 public:
1064 HashTableEntry() = default;
1066 ~HashTableEntry() { MOZ_MAKE_MEM_UNDEFINED(this, sizeof(*this)); }
1068 void destroy() { destroyStoredT(); }
1070 void swap(HashTableEntry* aOther, bool aIsLive) {
1071 // This allows types to use Argument-Dependent-Lookup, and thus use a custom
1072 // std::swap, which is needed by types like JS::Heap and such.
1073 using std::swap;
1075 if (this == aOther) {
1076 return;
1078 if (aIsLive) {
1079 swap(*valuePtr(), *aOther->valuePtr());
1080 } else {
1081 *aOther->valuePtr() = std::move(*valuePtr());
1082 destroy();
1086 T& get() { return *valuePtr(); }
1088 NonConstT& getMutable() { return *valuePtr(); }
1091 // A slot represents a cached hash value and its associated entry stored
1092 // in the hash table. These two things are not stored in contiguous memory.
1093 template <class T>
1094 class EntrySlot {
1095 using NonConstT = std::remove_const_t<T>;
1097 using Entry = HashTableEntry<T>;
1099 Entry* mEntry;
1100 HashNumber* mKeyHash;
1102 template <class, class, class>
1103 friend class HashTable;
1105 EntrySlot(Entry* aEntry, HashNumber* aKeyHash)
1106 : mEntry(aEntry), mKeyHash(aKeyHash) {}
1108 public:
1109 static bool isLiveHash(HashNumber hash) { return hash > Entry::sRemovedKey; }
1111 EntrySlot(const EntrySlot&) = default;
1112 EntrySlot(EntrySlot&& aOther) = default;
1114 EntrySlot& operator=(const EntrySlot&) = default;
1115 EntrySlot& operator=(EntrySlot&&) = default;
1117 bool operator==(const EntrySlot& aRhs) const { return mEntry == aRhs.mEntry; }
1119 bool operator<(const EntrySlot& aRhs) const { return mEntry < aRhs.mEntry; }
1121 EntrySlot& operator++() {
1122 ++mEntry;
1123 ++mKeyHash;
1124 return *this;
1127 void destroy() { mEntry->destroy(); }
1129 void swap(EntrySlot& aOther) {
1130 mEntry->swap(aOther.mEntry, aOther.isLive());
1131 std::swap(*mKeyHash, *aOther.mKeyHash);
1134 T& get() const { return mEntry->get(); }
1136 NonConstT& getMutable() { return mEntry->getMutable(); }
1138 bool isFree() const { return *mKeyHash == Entry::sFreeKey; }
1140 void clearLive() {
1141 MOZ_ASSERT(isLive());
1142 *mKeyHash = Entry::sFreeKey;
1143 mEntry->destroyStoredT();
1146 void clear() {
1147 if (isLive()) {
1148 mEntry->destroyStoredT();
1150 MOZ_MAKE_MEM_UNDEFINED(mEntry, sizeof(*mEntry));
1151 *mKeyHash = Entry::sFreeKey;
1154 bool isRemoved() const { return *mKeyHash == Entry::sRemovedKey; }
1156 void removeLive() {
1157 MOZ_ASSERT(isLive());
1158 *mKeyHash = Entry::sRemovedKey;
1159 mEntry->destroyStoredT();
1162 bool isLive() const { return isLiveHash(*mKeyHash); }
1164 void setCollision() {
1165 MOZ_ASSERT(isLive());
1166 *mKeyHash |= Entry::sCollisionBit;
1168 void unsetCollision() { *mKeyHash &= ~Entry::sCollisionBit; }
1169 bool hasCollision() const { return *mKeyHash & Entry::sCollisionBit; }
1170 bool matchHash(HashNumber hn) {
1171 return (*mKeyHash & ~Entry::sCollisionBit) == hn;
1173 HashNumber getKeyHash() const { return *mKeyHash & ~Entry::sCollisionBit; }
1175 template <typename... Args>
1176 void setLive(HashNumber aHashNumber, Args&&... aArgs) {
1177 MOZ_ASSERT(!isLive());
1178 *mKeyHash = aHashNumber;
1179 new (KnownNotNull, mEntry->valuePtr()) T(std::forward<Args>(aArgs)...);
1180 MOZ_ASSERT(isLive());
1183 Entry* toEntry() const { return mEntry; }
1186 template <class T, class HashPolicy, class AllocPolicy>
1187 class HashTable : private AllocPolicy {
1188 friend class mozilla::ReentrancyGuard;
1190 using NonConstT = std::remove_const_t<T>;
1191 using Key = typename HashPolicy::KeyType;
1192 using Lookup = typename HashPolicy::Lookup;
1194 public:
1195 using Entry = HashTableEntry<T>;
1196 using Slot = EntrySlot<T>;
1198 template <typename F>
1199 static void forEachSlot(char* aTable, uint32_t aCapacity, F&& f) {
1200 auto hashes = reinterpret_cast<HashNumber*>(aTable);
1201 auto entries = reinterpret_cast<Entry*>(&hashes[aCapacity]);
1202 Slot slot(entries, hashes);
1203 for (size_t i = 0; i < size_t(aCapacity); ++i) {
1204 f(slot);
1205 ++slot;
1209 // A nullable pointer to a hash table element. A Ptr |p| can be tested
1210 // either explicitly |if (p.found()) p->...| or using boolean conversion
1211 // |if (p) p->...|. Ptr objects must not be used after any mutating hash
1212 // table operations unless |generation()| is tested.
1213 class Ptr {
1214 friend class HashTable;
1216 Slot mSlot;
1217 #ifdef DEBUG
1218 const HashTable* mTable;
1219 Generation mGeneration;
1220 #endif
1222 protected:
1223 Ptr(Slot aSlot, const HashTable& aTable)
1224 : mSlot(aSlot)
1225 #ifdef DEBUG
1227 mTable(&aTable),
1228 mGeneration(aTable.generation())
1229 #endif
1233 // This constructor is used only by AddPtr() within lookupForAdd().
1234 explicit Ptr(const HashTable& aTable)
1235 : mSlot(nullptr, nullptr)
1236 #ifdef DEBUG
1238 mTable(&aTable),
1239 mGeneration(aTable.generation())
1240 #endif
1244 bool isValid() const { return !!mSlot.toEntry(); }
1246 public:
1247 Ptr()
1248 : mSlot(nullptr, nullptr)
1249 #ifdef DEBUG
1251 mTable(nullptr),
1252 mGeneration(0)
1253 #endif
1257 bool found() const {
1258 if (!isValid()) {
1259 return false;
1261 #ifdef DEBUG
1262 MOZ_ASSERT(mGeneration == mTable->generation());
1263 #endif
1264 return mSlot.isLive();
1267 explicit operator bool() const { return found(); }
1269 bool operator==(const Ptr& aRhs) const {
1270 MOZ_ASSERT(found() && aRhs.found());
1271 return mSlot == aRhs.mSlot;
1274 bool operator!=(const Ptr& aRhs) const {
1275 #ifdef DEBUG
1276 MOZ_ASSERT(mGeneration == mTable->generation());
1277 #endif
1278 return !(*this == aRhs);
1281 T& operator*() const {
1282 #ifdef DEBUG
1283 MOZ_ASSERT(found());
1284 MOZ_ASSERT(mGeneration == mTable->generation());
1285 #endif
1286 return mSlot.get();
1289 T* operator->() const {
1290 #ifdef DEBUG
1291 MOZ_ASSERT(found());
1292 MOZ_ASSERT(mGeneration == mTable->generation());
1293 #endif
1294 return &mSlot.get();
1298 // A Ptr that can be used to add a key after a failed lookup.
1299 class AddPtr : public Ptr {
1300 friend class HashTable;
1302 HashNumber mKeyHash;
1303 #ifdef DEBUG
1304 uint64_t mMutationCount;
1305 #endif
1307 AddPtr(Slot aSlot, const HashTable& aTable, HashNumber aHashNumber)
1308 : Ptr(aSlot, aTable),
1309 mKeyHash(aHashNumber)
1310 #ifdef DEBUG
1312 mMutationCount(aTable.mMutationCount)
1313 #endif
1317 // This constructor is used when lookupForAdd() is performed on a table
1318 // lacking entry storage; it leaves mSlot null but initializes everything
1319 // else.
1320 AddPtr(const HashTable& aTable, HashNumber aHashNumber)
1321 : Ptr(aTable),
1322 mKeyHash(aHashNumber)
1323 #ifdef DEBUG
1325 mMutationCount(aTable.mMutationCount)
1326 #endif
1328 MOZ_ASSERT(isLive());
1331 bool isLive() const { return isLiveHash(mKeyHash); }
1333 public:
1334 AddPtr() : mKeyHash(0) {}
1337 // A hash table iterator that (mostly) doesn't allow table modifications.
1338 // As with Ptr/AddPtr, Iterator objects must not be used after any mutating
1339 // hash table operation unless the |generation()| is tested.
1340 class Iterator {
1341 void moveToNextLiveEntry() {
1342 while (++mCur < mEnd && !mCur.isLive()) {
1343 continue;
1347 protected:
1348 friend class HashTable;
1350 explicit Iterator(const HashTable& aTable)
1351 : mCur(aTable.slotForIndex(0)),
1352 mEnd(aTable.slotForIndex(aTable.capacity()))
1353 #ifdef DEBUG
1355 mTable(aTable),
1356 mMutationCount(aTable.mMutationCount),
1357 mGeneration(aTable.generation()),
1358 mValidEntry(true)
1359 #endif
1361 if (!done() && !mCur.isLive()) {
1362 moveToNextLiveEntry();
1366 Slot mCur;
1367 Slot mEnd;
1368 #ifdef DEBUG
1369 const HashTable& mTable;
1370 uint64_t mMutationCount;
1371 Generation mGeneration;
1372 bool mValidEntry;
1373 #endif
1375 public:
1376 bool done() const {
1377 MOZ_ASSERT(mGeneration == mTable.generation());
1378 MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
1379 return mCur == mEnd;
1382 T& get() const {
1383 MOZ_ASSERT(!done());
1384 MOZ_ASSERT(mValidEntry);
1385 MOZ_ASSERT(mGeneration == mTable.generation());
1386 MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
1387 return mCur.get();
1390 void next() {
1391 MOZ_ASSERT(!done());
1392 MOZ_ASSERT(mGeneration == mTable.generation());
1393 MOZ_ASSERT(mMutationCount == mTable.mMutationCount);
1394 moveToNextLiveEntry();
1395 #ifdef DEBUG
1396 mValidEntry = true;
1397 #endif
1401 // A hash table iterator that permits modification, removal and rekeying.
1402 // Since rehashing when elements were removed during enumeration would be
1403 // bad, it is postponed until the ModIterator is destructed. Since the
1404 // ModIterator's destructor touches the hash table, the user must ensure
1405 // that the hash table is still alive when the destructor runs.
1406 class ModIterator : public Iterator {
1407 friend class HashTable;
1409 HashTable& mTable;
1410 bool mRekeyed;
1411 bool mRemoved;
1413 // ModIterator is movable but not copyable.
1414 ModIterator(const ModIterator&) = delete;
1415 void operator=(const ModIterator&) = delete;
1417 protected:
1418 explicit ModIterator(HashTable& aTable)
1419 : Iterator(aTable), mTable(aTable), mRekeyed(false), mRemoved(false) {}
1421 public:
1422 MOZ_IMPLICIT ModIterator(ModIterator&& aOther)
1423 : Iterator(aOther),
1424 mTable(aOther.mTable),
1425 mRekeyed(aOther.mRekeyed),
1426 mRemoved(aOther.mRemoved) {
1427 aOther.mRekeyed = false;
1428 aOther.mRemoved = false;
1431 // Removes the current element from the table, leaving |get()|
1432 // invalid until the next call to |next()|.
1433 void remove() {
1434 mTable.remove(this->mCur);
1435 mRemoved = true;
1436 #ifdef DEBUG
1437 this->mValidEntry = false;
1438 this->mMutationCount = mTable.mMutationCount;
1439 #endif
1442 NonConstT& getMutable() {
1443 MOZ_ASSERT(!this->done());
1444 MOZ_ASSERT(this->mValidEntry);
1445 MOZ_ASSERT(this->mGeneration == this->Iterator::mTable.generation());
1446 MOZ_ASSERT(this->mMutationCount == this->Iterator::mTable.mMutationCount);
1447 return this->mCur.getMutable();
1450 // Removes the current element and re-inserts it into the table with
1451 // a new key at the new Lookup position. |get()| is invalid after
1452 // this operation until the next call to |next()|.
1453 void rekey(const Lookup& l, const Key& k) {
1454 MOZ_ASSERT(&k != &HashPolicy::getKey(this->mCur.get()));
1455 Ptr p(this->mCur, mTable);
1456 mTable.rekeyWithoutRehash(p, l, k);
1457 mRekeyed = true;
1458 #ifdef DEBUG
1459 this->mValidEntry = false;
1460 this->mMutationCount = mTable.mMutationCount;
1461 #endif
1464 void rekey(const Key& k) { rekey(k, k); }
1466 // Potentially rehashes the table.
1467 ~ModIterator() {
1468 if (mRekeyed) {
1469 mTable.mGen++;
1470 mTable.infallibleRehashIfOverloaded();
1473 if (mRemoved) {
1474 mTable.compact();
1479 // Range is similar to Iterator, but uses different terminology.
1480 class Range {
1481 friend class HashTable;
1483 Iterator mIter;
1485 protected:
1486 explicit Range(const HashTable& table) : mIter(table) {}
1488 public:
1489 bool empty() const { return mIter.done(); }
1491 T& front() const { return mIter.get(); }
1493 void popFront() { return mIter.next(); }
1496 // Enum is similar to ModIterator, but uses different terminology.
1497 class Enum {
1498 ModIterator mIter;
1500 // Enum is movable but not copyable.
1501 Enum(const Enum&) = delete;
1502 void operator=(const Enum&) = delete;
1504 public:
1505 template <class Map>
1506 explicit Enum(Map& map) : mIter(map.mImpl) {}
1508 MOZ_IMPLICIT Enum(Enum&& other) : mIter(std::move(other.mIter)) {}
1510 bool empty() const { return mIter.done(); }
1512 T& front() const { return mIter.get(); }
1514 void popFront() { return mIter.next(); }
1516 void removeFront() { mIter.remove(); }
1518 NonConstT& mutableFront() { return mIter.getMutable(); }
1520 void rekeyFront(const Lookup& aLookup, const Key& aKey) {
1521 mIter.rekey(aLookup, aKey);
1524 void rekeyFront(const Key& aKey) { mIter.rekey(aKey); }
1527 // HashTable is movable
1528 HashTable(HashTable&& aRhs) : AllocPolicy(std::move(aRhs)) { moveFrom(aRhs); }
1529 HashTable& operator=(HashTable&& aRhs) {
1530 MOZ_ASSERT(this != &aRhs, "self-move assignment is prohibited");
1531 if (mTable) {
1532 destroyTable(*this, mTable, capacity());
1534 AllocPolicy::operator=(std::move(aRhs));
1535 moveFrom(aRhs);
1536 return *this;
1539 private:
1540 void moveFrom(HashTable& aRhs) {
1541 mGen = aRhs.mGen;
1542 mHashShift = aRhs.mHashShift;
1543 mTable = aRhs.mTable;
1544 mEntryCount = aRhs.mEntryCount;
1545 mRemovedCount = aRhs.mRemovedCount;
1546 #ifdef DEBUG
1547 mMutationCount = aRhs.mMutationCount;
1548 mEntered = aRhs.mEntered;
1549 #endif
1550 aRhs.mTable = nullptr;
1551 aRhs.clearAndCompact();
1554 // HashTable is not copyable or assignable
1555 HashTable(const HashTable&) = delete;
1556 void operator=(const HashTable&) = delete;
1558 static const uint32_t CAP_BITS = 30;
1560 public:
1561 uint64_t mGen : 56; // entry storage generation number
1562 uint64_t mHashShift : 8; // multiplicative hash shift
1563 char* mTable; // entry storage
1564 uint32_t mEntryCount; // number of entries in mTable
1565 uint32_t mRemovedCount; // removed entry sentinels in mTable
1567 #ifdef DEBUG
1568 uint64_t mMutationCount;
1569 mutable bool mEntered;
1570 #endif
1572 // The default initial capacity is 32 (enough to hold 16 elements), but it
1573 // can be as low as 4.
1574 static const uint32_t sDefaultLen = 16;
1575 static const uint32_t sMinCapacity = 4;
1576 // See the comments in HashTableEntry about this value.
1577 static_assert(sMinCapacity >= 4, "too-small sMinCapacity breaks assumptions");
1578 static const uint32_t sMaxInit = 1u << (CAP_BITS - 1);
1579 static const uint32_t sMaxCapacity = 1u << CAP_BITS;
1581 // Hash-table alpha is conceptually a fraction, but to avoid floating-point
1582 // math we implement it as a ratio of integers.
1583 static const uint8_t sAlphaDenominator = 4;
1584 static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
1585 static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4
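// For example, with a capacity of 32 the table is considered overloaded at
// roughly 24 occupied slots (3/4 of 32) and underloaded below roughly 8 (1/4).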
1587 static const HashNumber sFreeKey = Entry::sFreeKey;
1588 static const HashNumber sRemovedKey = Entry::sRemovedKey;
1589 static const HashNumber sCollisionBit = Entry::sCollisionBit;
1591 static uint32_t bestCapacity(uint32_t aLen) {
1592 static_assert(
1593 (sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit,
1594 "multiplication in numerator below could overflow");
1595 static_assert(
1596 sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator,
1597 "numerator calculation below could potentially overflow");
1599 // Callers should ensure this is true.
1600 MOZ_ASSERT(aLen <= sMaxInit);
1602 // Compute the smallest capacity allowing |aLen| elements to be
1603 // inserted without rehashing: ceil(aLen / max-alpha). (Ceiling
1604 // integral division: <http://stackoverflow.com/a/2745086>.)
1605 uint32_t capacity = (aLen * sAlphaDenominator + sMaxAlphaNumerator - 1) /
1606 sMaxAlphaNumerator;
1607 capacity = (capacity < sMinCapacity) ? sMinCapacity : RoundUpPow2(capacity);
1609 MOZ_ASSERT(capacity >= aLen);
1610 MOZ_ASSERT(capacity <= sMaxCapacity);
1612 return capacity;
1615 static uint32_t hashShift(uint32_t aLen) {
1616 // Reject all lengths whose initial computed capacity would exceed
1617 // sMaxCapacity. Round that maximum aLen down to the nearest power of two
1618 // for speedier code.
1619 if (MOZ_UNLIKELY(aLen > sMaxInit)) {
1620 MOZ_CRASH("initial length is too large");
1623 return kHashNumberBits - mozilla::CeilingLog2(bestCapacity(aLen));
1626 static bool isLiveHash(HashNumber aHash) { return Entry::isLiveHash(aHash); }
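// Scramble the policy's hash and steer it away from the reserved values:
// hash codes 0 (sFreeKey) and 1 (sRemovedKey) mark free and removed slots,
// and the low bit (sCollisionBit) is kept clear so it can later be used as
// the collision flag.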
1628 static HashNumber prepareHash(const Lookup& aLookup) {
1629 HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(aLookup));
1631 // Avoid reserved hash codes.
1632 if (!isLiveHash(keyHash)) {
1633 keyHash -= (sRemovedKey + 1);
1635 return keyHash & ~sCollisionBit;
1638 enum FailureBehavior { DontReportFailure = false, ReportFailure = true };
1640 // Fake a struct that we're going to alloc. See the comments in
1641 // HashTableEntry about how the table is laid out, and why it's safe.
1642 struct FakeSlot {
1643 unsigned char c[sizeof(HashNumber) + sizeof(typename Entry::NonConstT)];
1646 static char* createTable(AllocPolicy& aAllocPolicy, uint32_t aCapacity,
1647 FailureBehavior aReportFailure = ReportFailure) {
1648 FakeSlot* fake =
1649 aReportFailure
1650 ? aAllocPolicy.template pod_malloc<FakeSlot>(aCapacity)
1651 : aAllocPolicy.template maybe_pod_malloc<FakeSlot>(aCapacity);
1653 MOZ_ASSERT((reinterpret_cast<uintptr_t>(fake) % Entry::kMinimumAlignment) ==
1656 char* table = reinterpret_cast<char*>(fake);
1657 if (table) {
1658 forEachSlot(table, aCapacity, [&](Slot& slot) {
1659 *slot.mKeyHash = sFreeKey;
1660 new (KnownNotNull, slot.toEntry()) Entry();
1663 return table;
1666 static void destroyTable(AllocPolicy& aAllocPolicy, char* aOldTable,
1667 uint32_t aCapacity) {
1668 forEachSlot(aOldTable, aCapacity, [&](const Slot& slot) {
1669 if (slot.isLive()) {
1670 slot.toEntry()->destroyStoredT();
1673 freeTable(aAllocPolicy, aOldTable, aCapacity);
1676 static void freeTable(AllocPolicy& aAllocPolicy, char* aOldTable,
1677 uint32_t aCapacity) {
1678 FakeSlot* fake = reinterpret_cast<FakeSlot*>(aOldTable);
1679 aAllocPolicy.free_(fake, aCapacity);
1682 public:
1683 HashTable(AllocPolicy aAllocPolicy, uint32_t aLen)
1684 : AllocPolicy(std::move(aAllocPolicy)),
1685 mGen(0),
1686 mHashShift(hashShift(aLen)),
1687 mTable(nullptr),
1688 mEntryCount(0),
1689 mRemovedCount(0)
1690 #ifdef DEBUG
1692 mMutationCount(0),
1693 mEntered(false)
1694 #endif
1698 explicit HashTable(AllocPolicy aAllocPolicy)
1699 : HashTable(aAllocPolicy, sDefaultLen) {}
1701 ~HashTable() {
1702 if (mTable) {
1703 destroyTable(*this, mTable, capacity());
1707 private:
1708 HashNumber hash1(HashNumber aHash0) const { return aHash0 >> mHashShift; }
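// Probing uses double hashing: hash1() picks the initial slot from the top
// bits of the scrambled hash, and hash2() derives a stride that is forced to
// be odd, so it is coprime with the power-of-two capacity and the probe
// sequence eventually visits every slot.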
1710 struct DoubleHash {
1711 HashNumber mHash2;
1712 HashNumber mSizeMask;
1715 DoubleHash hash2(HashNumber aCurKeyHash) const {
1716 uint32_t sizeLog2 = kHashNumberBits - mHashShift;
1717 DoubleHash dh = {((aCurKeyHash << sizeLog2) >> mHashShift) | 1,
1718 (HashNumber(1) << sizeLog2) - 1};
1719 return dh;
1722 static HashNumber applyDoubleHash(HashNumber aHash1,
1723 const DoubleHash& aDoubleHash) {
1724 return WrappingSubtract(aHash1, aDoubleHash.mHash2) & aDoubleHash.mSizeMask;
1727 static MOZ_ALWAYS_INLINE bool match(T& aEntry, const Lookup& aLookup) {
1728 return HashPolicy::match(HashPolicy::getKey(aEntry), aLookup);
1731 enum LookupReason { ForNonAdd, ForAdd };
1733 Slot slotForIndex(HashNumber aIndex) const {
1734 auto hashes = reinterpret_cast<HashNumber*>(mTable);
1735 auto entries = reinterpret_cast<Entry*>(&hashes[capacity()]);
1736 return Slot(&entries[aIndex], &hashes[aIndex]);

  // Warning: in order for readonlyThreadsafeLookup() to be safe, this
  // function must not modify the table in any way when Reason==ForNonAdd.
  template <LookupReason Reason>
  MOZ_ALWAYS_INLINE Slot lookup(const Lookup& aLookup,
                                HashNumber aKeyHash) const {
    MOZ_ASSERT(isLiveHash(aKeyHash));
    MOZ_ASSERT(!(aKeyHash & sCollisionBit));
    MOZ_ASSERT(mTable);

    // Compute the primary hash address.
    HashNumber h1 = hash1(aKeyHash);
    Slot slot = slotForIndex(h1);

    // Miss: return space for a new entry.
    if (slot.isFree()) {
      return slot;
    }

    // Hit: return entry.
    if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) {
      return slot;
    }

    // Collision: double hash.
    DoubleHash dh = hash2(aKeyHash);

    // Save the first removed entry pointer so we can recycle it later.
    Maybe<Slot> firstRemoved;

    while (true) {
      if (Reason == ForAdd && !firstRemoved) {
        if (MOZ_UNLIKELY(slot.isRemoved())) {
          firstRemoved.emplace(slot);
        } else {
          slot.setCollision();
        }
      }

      h1 = applyDoubleHash(h1, dh);

      slot = slotForIndex(h1);
      if (slot.isFree()) {
        return firstRemoved.refOr(slot);
      }

      if (slot.matchHash(aKeyHash) && match(slot.get(), aLookup)) {
        return slot;
      }
    }
  }

  // This is a copy of lookup() hardcoded to the assumptions:
  // 1. the lookup is for an add;
  // 2. the key, whose |keyHash| has been passed, is not in the table.
  Slot findNonLiveSlot(HashNumber aKeyHash) {
    MOZ_ASSERT(!(aKeyHash & sCollisionBit));
    MOZ_ASSERT(mTable);

    // We assume 'aKeyHash' has already been distributed.

    // Compute the primary hash address.
    HashNumber h1 = hash1(aKeyHash);
    Slot slot = slotForIndex(h1);

    // Miss: return space for a new entry.
    if (!slot.isLive()) {
      return slot;
    }

    // Collision: double hash.
    DoubleHash dh = hash2(aKeyHash);

    while (true) {
      slot.setCollision();

      h1 = applyDoubleHash(h1, dh);

      slot = slotForIndex(h1);
      if (!slot.isLive()) {
        return slot;
      }
    }
  }

  enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };

  RebuildStatus changeTableSize(
      uint32_t newCapacity, FailureBehavior aReportFailure = ReportFailure) {
    MOZ_ASSERT(IsPowerOfTwo(newCapacity));
    MOZ_ASSERT(!!mTable == !!capacity());

    // Look, but don't touch, until we succeed in getting new entry store.
    char* oldTable = mTable;
    uint32_t oldCapacity = capacity();
    uint32_t newLog2 = mozilla::CeilingLog2(newCapacity);

    if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
      if (aReportFailure) {
        this->reportAllocOverflow();
      }
      return RehashFailed;
    }

    char* newTable = createTable(*this, newCapacity, aReportFailure);
    if (!newTable) {
      return RehashFailed;
    }

    // We can't fail from here on, so update table parameters.
    mHashShift = kHashNumberBits - newLog2;
    mRemovedCount = 0;
    mGen++;
    mTable = newTable;

    // Copy only live entries, leaving removed ones behind.
    forEachSlot(oldTable, oldCapacity, [&](Slot& slot) {
      if (slot.isLive()) {
        HashNumber hn = slot.getKeyHash();
        findNonLiveSlot(hn).setLive(
            hn, std::move(const_cast<typename Entry::NonConstT&>(slot.get())));
      }

      slot.clear();
    });

    // All entries have been destroyed, no need to destroyTable.
    freeTable(*this, oldTable, oldCapacity);
    return Rehashed;
  }

  RebuildStatus rehashIfOverloaded(
      FailureBehavior aReportFailure = ReportFailure) {
    static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
                  "multiplication below could overflow");

    // Note: if capacity() is zero, this will always succeed, which is
    // what we want.
    bool overloaded = mEntryCount + mRemovedCount >=
                      capacity() * sMaxAlphaNumerator / sAlphaDenominator;

    if (!overloaded) {
      return NotOverloaded;
    }

    // |manyRemoved| is true if a quarter or more of all entries are removed.
    // Note that this is always true if capacity() == 0 (i.e. entry storage
    // has not been allocated), which is what we want, because it means
    // changeTableSize() will allocate the requested capacity rather than
    // doubling it.
    bool manyRemoved = mRemovedCount >= (capacity() >> 2);
    uint32_t newCapacity = manyRemoved ? rawCapacity() : rawCapacity() * 2;
    return changeTableSize(newCapacity, aReportFailure);
  }
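
  // Load-factor arithmetic sketch (assuming the usual 3/4 maximum alpha): a
  // capacity-32 table is considered overloaded once live + removed entries
  // reach 32 * 3 / 4 == 24, at which point it either doubles to 64 or, when
  // tombstones make up at least a quarter of the slots, is rehashed at the
  // same size just to drop them.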

  void infallibleRehashIfOverloaded() {
    if (rehashIfOverloaded(DontReportFailure) == RehashFailed) {
      rehashTableInPlace();
    }
  }

  void remove(Slot& aSlot) {
    MOZ_ASSERT(mTable);

    if (aSlot.hasCollision()) {
      aSlot.removeLive();
      mRemovedCount++;
    } else {
      aSlot.clearLive();
    }
    mEntryCount--;
#ifdef DEBUG
    mMutationCount++;
#endif
  }

  void shrinkIfUnderloaded() {
    static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
                  "multiplication below could overflow");
    bool underloaded =
        capacity() > sMinCapacity &&
        mEntryCount <= capacity() * sMinAlphaNumerator / sAlphaDenominator;

    if (underloaded) {
      (void)changeTableSize(capacity() / 2, DontReportFailure);
    }
  }
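
  // Similarly (assuming the usual 1/4 minimum alpha), a capacity-32 table is
  // halved once removals leave it with 32 * 1 / 4 == 8 live entries or fewer;
  // the capacity() > sMinCapacity guard keeps the table from ever shrinking
  // below its minimum capacity.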

  // This is identical to changeTableSize(currentSize), but without requiring
  // a second table. We do this by recycling the collision bits to tell us if
  // the element is already inserted or still waiting to be inserted. Since
  // already-inserted elements win any conflicts, we get the same table as we
  // would have gotten through random insertion order.
  void rehashTableInPlace() {
    mRemovedCount = 0;
    mGen++;
    forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.unsetCollision(); });
    for (uint32_t i = 0; i < capacity();) {
      Slot src = slotForIndex(i);

      if (!src.isLive() || src.hasCollision()) {
        ++i;
        continue;
      }

      HashNumber keyHash = src.getKeyHash();
      HashNumber h1 = hash1(keyHash);
      DoubleHash dh = hash2(keyHash);
      Slot tgt = slotForIndex(h1);
      while (true) {
        if (!tgt.hasCollision()) {
          src.swap(tgt);
          tgt.setCollision();
          break;
        }

        h1 = applyDoubleHash(h1, dh);
        tgt = slotForIndex(h1);
      }
    }

    // TODO: this algorithm leaves collision bits on *all* elements, even if
    // they are on no collision path. We have the option of setting the
    // collision bits correctly on a subsequent pass or skipping the rehash
    // unless we are totally filled with tombstones: benchmark to find out
    // which approach is best.
  }

  // Note: |aLookup| may be a reference to a piece of |aArgs|, so this
  // function must take care not to use |aLookup| after moving |aArgs|.
  //
  // Prefer to use putNewInfallible; this function does not check
  // invariants.
  template <typename... Args>
  void putNewInfallibleInternal(const Lookup& aLookup, Args&&... aArgs) {
    MOZ_ASSERT(mTable);

    HashNumber keyHash = prepareHash(aLookup);
    Slot slot = findNonLiveSlot(keyHash);

    if (slot.isRemoved()) {
      mRemovedCount--;
      keyHash |= sCollisionBit;
    }

    slot.setLive(keyHash, std::forward<Args>(aArgs)...);
    mEntryCount++;
#ifdef DEBUG
    mMutationCount++;
#endif
  }

 public:
  void clear() {
    forEachSlot(mTable, capacity(), [&](Slot& slot) { slot.clear(); });
    mRemovedCount = 0;
    mEntryCount = 0;
#ifdef DEBUG
    mMutationCount++;
#endif
  }

  // Resize the table down to the smallest capacity that doesn't overload the
  // table. Since we call shrinkIfUnderloaded() on every remove, you only need
  // to call this after a bulk removal of items done without calling remove().
  void compact() {
    if (empty()) {
      // Free the entry storage.
      freeTable(*this, mTable, capacity());
      mGen++;
      mHashShift = hashShift(0); // gives minimum capacity on regrowth
      mTable = nullptr;
      mRemovedCount = 0;
      return;
    }

    uint32_t bestCapacity = this->bestCapacity(mEntryCount);
    MOZ_ASSERT(bestCapacity <= capacity());

    if (bestCapacity < capacity()) {
      (void)changeTableSize(bestCapacity, DontReportFailure);
    }
  }

  void clearAndCompact() {
    clear();
    compact();
  }

  [[nodiscard]] bool reserve(uint32_t aLen) {
    if (aLen == 0) {
      return true;
    }

    if (MOZ_UNLIKELY(aLen > sMaxInit)) {
      return false;
    }

    uint32_t bestCapacity = this->bestCapacity(aLen);
    if (bestCapacity <= capacity()) {
      return true; // Capacity is already sufficient.
    }

    RebuildStatus status = changeTableSize(bestCapacity, ReportFailure);
    MOZ_ASSERT(status != NotOverloaded);
    return status != RehashFailed;
  }
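
  // Usage sketch (|t|, |count| and |keys| are hypothetical): reserving up
  // front lets a burst of putNewInfallible() calls proceed without any
  // further allocation:
  //
  //   if (!t.reserve(count)) {
  //     return false;  // OOM
  //   }
  //   for (uint32_t i = 0; i < count; i++) {
  //     t.putNewInfallible(keys[i], keys[i]);
  //   }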

  Iterator iter() const { return Iterator(*this); }

  ModIterator modIter() { return ModIterator(*this); }

  Range all() const { return Range(*this); }

  bool empty() const { return mEntryCount == 0; }

  uint32_t count() const { return mEntryCount; }

  uint32_t rawCapacity() const { return 1u << (kHashNumberBits - mHashShift); }

  uint32_t capacity() const { return mTable ? rawCapacity() : 0; }

  Generation generation() const { return Generation(mGen); }

  size_t shallowSizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
    return aMallocSizeOf(mTable);
  }

  size_t shallowSizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
    return aMallocSizeOf(this) + shallowSizeOfExcludingThis(aMallocSizeOf);
  }
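
  // Measurement sketch: a memory reporter would typically pass its
  // malloc-size-of callback here and fold the result into a running total.
  // "Shallow" means the entry storage only; memory owned by the entries
  // themselves must be measured separately:
  //
  //   bytes += t.shallowSizeOfExcludingThis(mallocSizeOf);
  //
  // where |t| and |mallocSizeOf| are hypothetical.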

  MOZ_ALWAYS_INLINE Ptr readonlyThreadsafeLookup(const Lookup& aLookup) const {
    if (empty() || !HasHash<HashPolicy>(aLookup)) {
      return Ptr();
    }
    HashNumber keyHash = prepareHash(aLookup);
    return Ptr(lookup<ForNonAdd>(aLookup, keyHash), *this);
  }

  MOZ_ALWAYS_INLINE Ptr lookup(const Lookup& aLookup) const {
    ReentrancyGuard g(*this);
    return readonlyThreadsafeLookup(aLookup);
  }

  MOZ_ALWAYS_INLINE AddPtr lookupForAdd(const Lookup& aLookup) {
    ReentrancyGuard g(*this);
    if (!EnsureHash<HashPolicy>(aLookup)) {
      return AddPtr();
    }

    HashNumber keyHash = prepareHash(aLookup);

    if (!mTable) {
      return AddPtr(*this, keyHash);
    }

    // Directly call the constructor in the return statement to avoid
    // excess copying when building with Visual Studio 2017.
    // See bug 1385181.
    return AddPtr(lookup<ForAdd>(aLookup, keyHash), *this, keyHash);
  }

  template <typename... Args>
  [[nodiscard]] bool add(AddPtr& aPtr, Args&&... aArgs) {
    ReentrancyGuard g(*this);
    MOZ_ASSERT_IF(aPtr.isValid(), mTable);
    MOZ_ASSERT_IF(aPtr.isValid(), aPtr.mTable == this);
    MOZ_ASSERT(!aPtr.found());
    MOZ_ASSERT(!(aPtr.mKeyHash & sCollisionBit));

    // Check for error from ensureHash() here.
    if (!aPtr.isLive()) {
      return false;
    }

    MOZ_ASSERT(aPtr.mGeneration == generation());
#ifdef DEBUG
    MOZ_ASSERT(aPtr.mMutationCount == mMutationCount);
#endif

    if (!aPtr.isValid()) {
      MOZ_ASSERT(!mTable && mEntryCount == 0);
      uint32_t newCapacity = rawCapacity();
      RebuildStatus status = changeTableSize(newCapacity, ReportFailure);
      MOZ_ASSERT(status != NotOverloaded);
      if (status == RehashFailed) {
        return false;
      }
      aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash);

    } else if (aPtr.mSlot.isRemoved()) {
      // Changing an entry from removed to live does not affect whether we are
      // overloaded and can be handled separately.
      if (!this->checkSimulatedOOM()) {
        return false;
      }
      mRemovedCount--;
      aPtr.mKeyHash |= sCollisionBit;

    } else {
      // Preserve the validity of |aPtr.mSlot|.
      RebuildStatus status = rehashIfOverloaded();
      if (status == RehashFailed) {
        return false;
      }
      if (status == NotOverloaded && !this->checkSimulatedOOM()) {
        return false;
      }
      if (status == Rehashed) {
        aPtr.mSlot = findNonLiveSlot(aPtr.mKeyHash);
      }
    }

    aPtr.mSlot.setLive(aPtr.mKeyHash, std::forward<Args>(aArgs)...);
    mEntryCount++;
#ifdef DEBUG
    mMutationCount++;
    aPtr.mGeneration = generation();
    aPtr.mMutationCount = mMutationCount;
#endif
    return true;
  }
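
  // The intended lookupForAdd()/add() idiom, roughly (|t| and |key| are
  // hypothetical):
  //
  //   auto p = t.lookupForAdd(key);
  //   if (!p) {
  //     if (!t.add(p, key)) {
  //       return false;  // OOM
  //     }
  //   }
  //   // |p| now points at the entry for |key|, whether old or newly added.
  //
  // The table must not be mutated between the two calls; if it might be,
  // use relookupOrAdd() below.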

  // Note: |aLookup| may be a reference to a piece of |aArgs|, so this
  // function must take care not to use |aLookup| after moving |aArgs|.
  template <typename... Args>
  void putNewInfallible(const Lookup& aLookup, Args&&... aArgs) {
    MOZ_ASSERT(!lookup(aLookup).found());
    ReentrancyGuard g(*this);
    putNewInfallibleInternal(aLookup, std::forward<Args>(aArgs)...);
  }

  // Note: |aLookup| may alias arguments in |aArgs|, so this function must
  // take care not to use |aLookup| after moving |aArgs|.
  template <typename... Args>
  [[nodiscard]] bool putNew(const Lookup& aLookup, Args&&... aArgs) {
    if (!this->checkSimulatedOOM()) {
      return false;
    }
    if (!EnsureHash<HashPolicy>(aLookup)) {
      return false;
    }
    if (rehashIfOverloaded() == RehashFailed) {
      return false;
    }
    putNewInfallible(aLookup, std::forward<Args>(aArgs)...);
    return true;
  }
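
  // putNew() is the fallible fast path for keys known to be absent, e.g. when
  // populating a freshly created table (|t| and |key| hypothetical):
  //
  //   if (!t.putNew(key, key)) {
  //     return false;  // OOM
  //   }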

  // Note: |aLookup| may be a reference to a piece of |aArgs|, so this
  // function must take care not to use |aLookup| after moving |aArgs|.
  template <typename... Args>
  [[nodiscard]] bool relookupOrAdd(AddPtr& aPtr, const Lookup& aLookup,
                                   Args&&... aArgs) {
    // Check for error from ensureHash() here.
    if (!aPtr.isLive()) {
      return false;
    }
#ifdef DEBUG
    aPtr.mGeneration = generation();
    aPtr.mMutationCount = mMutationCount;
#endif
    if (mTable) {
      ReentrancyGuard g(*this);
      // Check that aLookup has not been destroyed.
      MOZ_ASSERT(prepareHash(aLookup) == aPtr.mKeyHash);
      aPtr.mSlot = lookup<ForAdd>(aLookup, aPtr.mKeyHash);
      if (aPtr.found()) {
        return true;
      }
    } else {
      // Clear aPtr so it's invalid; add() will allocate storage and redo the
      // lookup.
      aPtr.mSlot = Slot(nullptr, nullptr);
    }
    return add(aPtr, std::forward<Args>(aArgs)...);
  }
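
  // relookupOrAdd() is for callers whose AddPtr may have been invalidated
  // between lookupForAdd() and add(), e.g. because computing the value to
  // insert can itself mutate the table. A rough sketch (names hypothetical):
  //
  //   auto p = t.lookupForAdd(key);
  //   if (!p) {
  //     doSomethingThatMightMutate(t);  // |p| can no longer be trusted
  //     if (!t.relookupOrAdd(p, key, key)) {
  //       return false;  // OOM
  //     }
  //   }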

  void remove(Ptr aPtr) {
    MOZ_ASSERT(mTable);
    ReentrancyGuard g(*this);
    MOZ_ASSERT(aPtr.found());
    MOZ_ASSERT(aPtr.mGeneration == generation());
    remove(aPtr.mSlot);
    shrinkIfUnderloaded();
  }

  void rekeyWithoutRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) {
    MOZ_ASSERT(mTable);
    ReentrancyGuard g(*this);
    MOZ_ASSERT(aPtr.found());
    MOZ_ASSERT(aPtr.mGeneration == generation());
    typename HashTableEntry<T>::NonConstT t(std::move(*aPtr));
    HashPolicy::setKey(t, const_cast<Key&>(aKey));
    remove(aPtr.mSlot);
    putNewInfallibleInternal(aLookup, std::move(t));
  }

  void rekeyAndMaybeRehash(Ptr aPtr, const Lookup& aLookup, const Key& aKey) {
    rekeyWithoutRehash(aPtr, aLookup, aKey);
    infallibleRehashIfOverloaded();
  }
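
  // Rekeying sketch (|t|, |oldKey|, |newKey| hypothetical): when an entry's
  // key changes in a way that also changes its hash, move it in place rather
  // than removing and re-adding it by hand:
  //
  //   if (auto p = t.lookup(oldKey)) {
  //     t.rekeyAndMaybeRehash(p, newKey, newKey);
  //   }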
};

} // namespace detail
} // namespace mozilla

#endif /* mozilla_HashTable_h */