Merge mozilla-central and tracemonkey. (a=blockers)
[mozilla-central.git] / js / src / jshashtable.h
blobc0969d0af9bf9e4f94118aa636f8004b13be849f
1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 * vim: set ts=8 sw=4 et tw=99 ft=cpp:
4 * ***** BEGIN LICENSE BLOCK *****
5 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
7 * The contents of this file are subject to the Mozilla Public License Version
8 * 1.1 (the "License"); you may not use this file except in compliance with
9 * the License. You may obtain a copy of the License at
10 * http://www.mozilla.org/MPL/
12 * Software distributed under the License is distributed on an "AS IS" basis,
13 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
14 * for the specific language governing rights and limitations under the
15 * License.
17 * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
18 * November 13, 2009.
20 * The Initial Developer of the Original Code is
21 * the Mozilla Corporation.
23 * Contributor(s):
24 * Brendan Eich <brendan@mozilla.org> (Original Author)
25 * Chris Waterson <waterson@netscape.com>
26 * L. David Baron <dbaron@dbaron.org>, Mozilla Corporation
27 * Luke Wagner <lw@mozilla.com>
29 * Alternatively, the contents of this file may be used under the terms of
30 * either of the GNU General Public License Version 2 or later (the "GPL"),
31 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
32 * in which case the provisions of the GPL or the LGPL are applicable instead
33 * of those above. If you wish to allow use of your version of this file only
34 * under the terms of either the GPL or the LGPL, and not to allow others to
35 * use your version of this file under the terms of the MPL, indicate your
36 * decision by deleting the provisions above and replace them with the notice
37 * and other provisions required by the GPL or the LGPL. If you do not delete
38 * the provisions above, a recipient may use your version of this file under
39 * the terms of any one of the MPL, the GPL or the LGPL.
41 * ***** END LICENSE BLOCK ***** */
43 #ifndef jshashtable_h_
44 #define jshashtable_h_
46 #include "jstl.h"
48 namespace js {
50 /* Integral types for all hash functions. */
51 typedef uint32 HashNumber;
53 namespace detail {
55 /* Reusable implementation of HashMap and HashSet. */
/*
 * NOTE(review): this listing is a lossy extraction -- the original file's
 * line numbers are fused onto each line, and blank/closing-brace lines
 * appear to have been dropped (several bodies below end without a visible
 * '}').  Added comments describe only what the visible text establishes;
 * confirm details against the pristine jshashtable.h.
 *
 * HashTable is an open-addressed table using double hashing, with two
 * reserved hash codes (sFreeKey, sRemovedKey) as free/tombstone sentinels
 * and the low bit of each stored hash (sCollisionBit) marking slots that
 * sit on some other key's probe chain.
 */
56 template <class T, class HashPolicy, class AllocPolicy>
57 class HashTable : AllocPolicy
59 typedef typename tl::StripConst<T>::result NonConstT;
60 typedef typename HashPolicy::KeyType Key;
61 typedef typename HashPolicy::Lookup Lookup;
64 * T::operator= is a private operation for HashMap::Entry. HashMap::Entry
65 * makes HashTable a friend, but MSVC does not allow HashMap::Entry to make
66 * HashTable::Entry a friend. So do assignment here:
68 static void assignT(NonConstT &dst, const T &src) { dst = src; }
/*
 * One table slot: the entry's (scrambled) hash code, possibly OR'ed with
 * sCollisionBit, plus the stored value |t|.
 */
70 public:
71 class Entry {
72 HashNumber keyHash;
74 public:
75 Entry() : keyHash(0), t() {}
76 void operator=(const Entry &rhs) { keyHash = rhs.keyHash; assignT(t, rhs.t); }
78 NonConstT t;
80 bool isFree() const { return keyHash == sFreeKey; }
81 void setFree() { keyHash = sFreeKey; assignT(t, T()); }
82 bool isRemoved() const { return keyHash == sRemovedKey; }
83 void setRemoved() { keyHash = sRemovedKey; assignT(t, T()); }
84 bool isLive() const { return isLiveHash(keyHash); }
85 void setLive(HashNumber hn) { JS_ASSERT(isLiveHash(hn)); keyHash = hn; }
87 void setCollision() { JS_ASSERT(isLive()); keyHash |= sCollisionBit; }
88 void setCollision(HashNumber collisionBit) {
89 JS_ASSERT(isLive()); keyHash |= collisionBit;
91 void unsetCollision() { JS_ASSERT(isLive()); keyHash &= ~sCollisionBit; }
92 bool hasCollision() const { JS_ASSERT(isLive()); return keyHash & sCollisionBit; }
93 bool matchHash(HashNumber hn) { return (keyHash & ~sCollisionBit) == hn; }
94 HashNumber getKeyHash() const { JS_ASSERT(!hasCollision()); return keyHash; }
98 * A nullable pointer to a hash table element. A Ptr |p| can be tested
99 * either explicitly |if (p.found()) p->...| or using boolean conversion
100 * |if (p) p->...|. Ptr objects must not be used after any mutating hash
101 * table operations unless |generation()| is tested.
103 class Ptr
105 friend class HashTable;
/* Safe-bool idiom: convert to a member-function pointer, never to int. */
106 typedef void (Ptr::* ConvertibleToBool)();
107 void nonNull() {}
109 Entry *entry;
111 protected:
112 Ptr(Entry &entry) : entry(&entry) {}
114 public:
115 bool found() const { return entry->isLive(); }
116 operator ConvertibleToBool() const { return found() ? &Ptr::nonNull : 0; }
117 bool operator==(const Ptr &rhs) const { JS_ASSERT(found() && rhs.found()); return entry == rhs.entry; }
118 bool operator!=(const Ptr &rhs) const { return !(*this == rhs); }
120 T &operator*() const { return entry->t; }
121 T *operator->() const { return &entry->t; }
124 /* A Ptr that can be used to add a key after a failed lookup. */
125 class AddPtr : public Ptr
127 friend class HashTable;
128 HashNumber keyHash;
129 #ifdef DEBUG
/* Snapshot of the table's mutation counter; add() asserts it is unchanged. */
130 uint64 mutationCount;
132 AddPtr(Entry &entry, HashNumber hn, uint64 mutationCount)
133 : Ptr(entry), keyHash(hn), mutationCount(mutationCount) {}
134 #else
135 AddPtr(Entry &entry, HashNumber hn) : Ptr(entry), keyHash(hn) {}
136 #endif
140 * A collection of hash table entries. The collection is enumerated by
141 * calling |front()| followed by |popFront()| as long as |!empty()|. As
142 * with Ptr/AddPtr, Range objects must not be used after any mutating hash
143 * table operation unless the |generation()| is tested.
145 class Range
147 protected:
148 friend class HashTable;
/* Advance past free/removed slots so |cur| is live or == |end|. */
150 Range(Entry *c, Entry *e) : cur(c), end(e) {
151 while (cur != end && !cur->isLive())
152 ++cur;
155 Entry *cur, *end;
157 public:
158 bool empty() const {
159 return cur == end;
162 T &front() const {
163 JS_ASSERT(!empty());
164 return cur->t;
167 void popFront() {
168 JS_ASSERT(!empty());
169 while (++cur != end && !cur->isLive());
174 * A Range whose lifetime delimits a mutating enumeration of a hash table.
175 * Since rehashing when elements were removed during enumeration would be
176 * bad, it is postponed until |endEnumeration()| is called. If
177 * |endEnumeration()| is not called before an Enum's constructor, it will
178 * be called automatically. Since |endEnumeration()| touches the hash
179 * table, the user must ensure that the hash table is still alive when this
180 * happens.
182 class Enum : public Range
184 friend class HashTable;
186 HashTable &table;
187 bool removed;
189 /* Not copyable. */
190 Enum(const Enum &);
191 void operator=(const Enum &);
193 public:
194 template<class Map> explicit
195 Enum(Map &map) : Range(map.all()), table(map.impl), removed(false) {}
198 * Removes the |front()| element from the table, leaving |front()|
199 * invalid until the next call to |popFront()|. For example:
201 * HashSet<int> s;
202 * for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
203 * if (e.front() == 42)
204 * e.removeFront();
206 void removeFront() {
207 table.remove(*this->cur);
208 removed = true;
211 /* Potentially rehashes the table. */
212 ~Enum() {
213 if (removed)
214 table.checkUnderloaded();
217 /* Can be used to end the enumeration before the destructor. */
218 void endEnumeration() {
219 if (removed) {
220 table.checkUnderloaded();
221 removed = false;
226 private:
227 uint32 hashShift; /* multiplicative hash shift */
228 uint32 tableCapacity; /* = JS_BIT(sHashBits - hashShift) */
229 uint32 entryCount; /* number of entries in table */
230 uint32 gen; /* entry storage generation number */
231 uint32 removedCount; /* removed entry sentinels in table */
232 Entry *table; /* entry storage */
234 void setTableSizeLog2(unsigned sizeLog2) {
235 hashShift = sHashBits - sizeLog2;
236 tableCapacity = JS_BIT(sizeLog2);
239 #ifdef DEBUG
240 mutable struct Stats {
241 uint32 searches; /* total number of table searches */
242 uint32 steps; /* hash chain links traversed */
243 uint32 hits; /* searches that found key */
244 uint32 misses; /* searches that didn't find key */
245 uint32 addOverRemoved; /* adds that recycled a removed entry */
246 uint32 removes; /* calls to remove */
247 uint32 removeFrees; /* calls to remove that freed the entry */
248 uint32 grows; /* table expansions */
249 uint32 shrinks; /* table contractions */
250 uint32 compresses; /* table compressions */
251 } stats;
252 # define METER(x) x
253 #else
254 # define METER(x)
255 #endif
257 #ifdef DEBUG
258 friend class js::ReentrancyGuard;
259 mutable bool entered;
260 uint64 mutationCount;
261 #endif
263 static const unsigned sMinSizeLog2 = 4;
264 static const unsigned sMinSize = 1 << sMinSizeLog2;
265 static const unsigned sSizeLimit = JS_BIT(24);
266 static const unsigned sHashBits = tl::BitSize<HashNumber>::result;
267 static const uint8 sMinAlphaFrac = 64; /* (0x100 * .25) taken from jsdhash.h */
268 static const uint8 sMaxAlphaFrac = 192; /* (0x100 * .75) taken from jsdhash.h */
269 static const uint8 sInvMaxAlpha = 171; /* (ceil(0x100 / .75) >> 1) */
270 static const HashNumber sGoldenRatio = 0x9E3779B9U; /* taken from jsdhash.h */
271 static const HashNumber sCollisionBit = 1;
272 static const HashNumber sFreeKey = 0;
273 static const HashNumber sRemovedKey = 1;
/* Live iff the code is neither the free (0) nor the removed (1) sentinel. */
275 static bool isLiveHash(HashNumber hash)
277 return hash > sRemovedKey;
/*
 * Scramble the policy's hash with the golden ratio, steer it away from the
 * two sentinel codes, and clear the collision bit for storage.
 */
280 static HashNumber prepareHash(const Lookup& l)
282 HashNumber keyHash = HashPolicy::hash(l);
284 /* Improve keyHash distribution. */
285 keyHash *= sGoldenRatio;
287 /* Avoid reserved hash codes. */
288 if (!isLiveHash(keyHash))
289 keyHash -= (sRemovedKey + 1);
290 return keyHash & ~sCollisionBit;
293 static Entry *createTable(AllocPolicy &alloc, uint32 capacity)
295 Entry *newTable = (Entry *)alloc.malloc(capacity * sizeof(Entry));
296 if (!newTable)
297 return NULL;
/* Placement-new each slot: raw malloc'ed storage, explicit construction. */
298 for (Entry *e = newTable, *end = e + capacity; e != end; ++e)
299 new(e) Entry();
300 return newTable;
303 static void destroyTable(AllocPolicy &alloc, Entry *oldTable, uint32 capacity)
305 for (Entry *e = oldTable, *end = e + capacity; e != end; ++e)
306 e->~Entry();
307 alloc.free(oldTable);
310 public:
311 HashTable(AllocPolicy ap)
312 : AllocPolicy(ap),
313 entryCount(0),
314 gen(0),
315 removedCount(0),
316 table(NULL)
317 #ifdef DEBUG
318 , entered(false),
319 mutationCount(0)
320 #endif
/*
 * Allocate entry storage sized so that adding |length| entries will not
 * trigger a resize.  Returns false on OOM or capacity overflow; must be
 * called exactly once before any other operation.
 */
323 bool init(uint32 length)
325 /* Make sure that init isn't called twice. */
326 JS_ASSERT(table == NULL);
329 * Correct for sMaxAlphaFrac such that the table will not resize
330 * when adding 'length' entries.
332 JS_ASSERT(length < (uint32(1) << 23));
333 uint32 capacity = (length * sInvMaxAlpha) >> 7;
335 if (capacity < sMinSize)
336 capacity = sMinSize;
338 /* FIXME: use JS_CEILING_LOG2 when PGO stops crashing (bug 543034). */
339 uint32 roundUp = sMinSize, roundUpLog2 = sMinSizeLog2;
340 while (roundUp < capacity) {
341 roundUp <<= 1;
342 ++roundUpLog2;
345 capacity = roundUp;
346 if (capacity >= sSizeLimit) {
347 this->reportAllocOverflow();
348 return false;
351 table = createTable(*this, capacity);
352 if (!table)
353 return false;
355 setTableSizeLog2(roundUpLog2);
356 METER(memset(&stats, 0, sizeof(stats)));
357 return true;
360 bool initialized() const
362 return !!table;
365 ~HashTable()
367 if (table)
368 destroyTable(*this, table, tableCapacity);
371 private:
/* Double hashing: hash1 picks the start slot, hash2 the probe stride. */
372 static HashNumber hash1(HashNumber hash0, uint32 shift) {
373 return hash0 >> shift;
/* The |1 makes the stride odd, hence coprime with the power-of-two size. */
376 static HashNumber hash2(HashNumber hash0, uint32 log2, uint32 shift) {
377 return ((hash0 << log2) >> shift) | 1;
380 bool overloaded() {
381 return entryCount + removedCount >= ((sMaxAlphaFrac * tableCapacity) >> 8);
384 bool underloaded() {
385 return tableCapacity > sMinSize &&
386 entryCount <= ((sMinAlphaFrac * tableCapacity) >> 8);
389 static bool match(Entry &e, const Lookup &l) {
390 return HashPolicy::match(HashPolicy::getKey(e.t), l);
/*
 * Core probe loop.  Returns the live entry matching |l|, or (on miss) a
 * free slot -- recycling the first tombstone seen, if any.  When
 * |collisionBit| is sCollisionBit (lookupForAdd), probed-over entries are
 * marked so a later remove() knows they sit on a chain.
 */
393 Entry &lookup(const Lookup &l, HashNumber keyHash, unsigned collisionBit) const
395 JS_ASSERT(isLiveHash(keyHash));
396 JS_ASSERT(!(keyHash & sCollisionBit));
397 JS_ASSERT(collisionBit == 0 || collisionBit == sCollisionBit);
398 JS_ASSERT(table);
399 METER(stats.searches++);
401 /* Compute the primary hash address. */
402 HashNumber h1 = hash1(keyHash, hashShift);
403 Entry *entry = &table[h1];
405 /* Miss: return space for a new entry. */
406 if (entry->isFree()) {
407 METER(stats.misses++);
408 return *entry;
411 /* Hit: return entry. */
412 if (entry->matchHash(keyHash) && match(*entry, l)) {
413 METER(stats.hits++);
414 return *entry;
417 /* Collision: double hash. */
418 unsigned sizeLog2 = sHashBits - hashShift;
419 HashNumber h2 = hash2(keyHash, sizeLog2, hashShift);
420 HashNumber sizeMask = (HashNumber(1) << sizeLog2) - 1;
422 /* Save the first removed entry pointer so we can recycle later. */
423 Entry *firstRemoved = NULL;
425 while(true) {
426 if (JS_UNLIKELY(entry->isRemoved())) {
427 if (!firstRemoved)
428 firstRemoved = entry;
429 } else {
430 entry->setCollision(collisionBit);
433 METER(stats.steps++);
434 h1 -= h2;
435 h1 &= sizeMask;
437 entry = &table[h1];
438 if (entry->isFree()) {
439 METER(stats.misses++);
440 return firstRemoved ? *firstRemoved : *entry;
443 if (entry->matchHash(keyHash) && match(*entry, l)) {
444 METER(stats.hits++);
445 return *entry;
451 * This is a copy of lookup hardcoded to the assumptions:
452 * 1. the lookup is a lookupForAdd
453 * 2. the key, whose |keyHash| has been passed is not in the table,
454 * 3. no entries have been removed from the table.
455 * This specialized search avoids the need for recovering lookup values
456 * from entries, which allows more flexible Lookup/Key types.
458 Entry &findFreeEntry(HashNumber keyHash)
460 METER(stats.searches++);
461 JS_ASSERT(!(keyHash & sCollisionBit));
463 /* N.B. the |keyHash| has already been distributed. */
465 /* Compute the primary hash address. */
466 HashNumber h1 = hash1(keyHash, hashShift);
467 Entry *entry = &table[h1];
469 /* Miss: return space for a new entry. */
470 if (entry->isFree()) {
471 METER(stats.misses++);
472 return *entry;
475 /* Collision: double hash. */
476 unsigned sizeLog2 = sHashBits - hashShift;
477 HashNumber h2 = hash2(keyHash, sizeLog2, hashShift);
478 HashNumber sizeMask = (HashNumber(1) << sizeLog2) - 1;
480 while(true) {
481 JS_ASSERT(!entry->isRemoved());
482 entry->setCollision();
484 METER(stats.steps++);
485 h1 -= h2;
486 h1 &= sizeMask;
488 entry = &table[h1];
489 if (entry->isFree()) {
490 METER(stats.misses++);
491 return *entry;
/*
 * Rehash into a table 2^deltaLog2 times the current size.  deltaLog2 == 0
 * rehashes in place at the same capacity, which squeezes out tombstones
 * (see the "compress" path in add()); -1 shrinks, +1 grows.
 */
496 bool changeTableSize(int deltaLog2)
498 /* Look, but don't touch, until we succeed in getting new entry store. */
499 Entry *oldTable = table;
500 uint32 oldCap = tableCapacity;
501 uint32 newLog2 = sHashBits - hashShift + deltaLog2;
502 uint32 newCapacity = JS_BIT(newLog2);
503 if (newCapacity >= sSizeLimit) {
504 this->reportAllocOverflow();
505 return false;
508 Entry *newTable = createTable(*this, newCapacity);
509 if (!newTable)
510 return false;
512 /* We can't fail from here on, so update table parameters. */
513 setTableSizeLog2(newLog2);
514 removedCount = 0;
515 gen++;
516 table = newTable;
518 /* Copy only live entries, leaving removed ones behind. */
519 for (Entry *src = oldTable, *end = src + oldCap; src != end; ++src) {
520 if (src->isLive()) {
521 src->unsetCollision();
522 findFreeEntry(src->getKeyHash()) = *src;
526 destroyTable(*this, oldTable, oldCap);
527 return true;
/*
 * Entries with the collision bit set lie on some probe chain, so they must
 * become tombstones rather than free slots to keep chains unbroken.
 */
530 void remove(Entry &e)
532 METER(stats.removes++);
533 if (e.hasCollision()) {
534 e.setRemoved();
535 removedCount++;
536 } else {
537 METER(stats.removeFrees++);
538 e.setFree();
540 entryCount--;
541 #ifdef DEBUG
542 mutationCount++;
543 #endif
546 void checkUnderloaded()
548 if (underloaded()) {
549 METER(stats.shrinks++);
/* Shrinking is best-effort; on OOM the larger table still works. */
550 (void) changeTableSize(-1);
554 public:
555 void clear()
557 for (Entry *e = table, *end = table + tableCapacity; e != end; ++e)
558 *e = Entry();
559 removedCount = 0;
560 entryCount = 0;
561 #ifdef DEBUG
562 mutationCount++;
563 #endif
566 Range all() const {
567 return Range(table, table + tableCapacity);
570 bool empty() const {
571 return !entryCount;
574 uint32 count() const{
575 return entryCount;
578 uint32 generation() const {
579 return gen;
582 Ptr lookup(const Lookup &l) const {
583 ReentrancyGuard g(*this);
584 HashNumber keyHash = prepareHash(l);
585 return Ptr(lookup(l, keyHash, 0));
588 AddPtr lookupForAdd(const Lookup &l) const {
589 ReentrancyGuard g(*this);
590 HashNumber keyHash = prepareHash(l);
591 Entry &entry = lookup(l, keyHash, sCollisionBit);
592 #ifdef DEBUG
593 return AddPtr(entry, keyHash, mutationCount);
594 #else
595 return AddPtr(entry, keyHash);
596 #endif
/*
 * Mark |p|'s slot live with p.keyHash.  May grow or compress the table
 * first (updating p.entry); returns false on OOM, leaving the table
 * unchanged.  The caller must then store the T value (see overloads).
 */
599 bool add(AddPtr &p)
601 ReentrancyGuard g(*this);
602 JS_ASSERT(mutationCount == p.mutationCount);
603 JS_ASSERT(table);
604 JS_ASSERT(!p.found());
605 JS_ASSERT(!(p.keyHash & sCollisionBit));
608 * Changing an entry from removed to live does not affect whether we
609 * are overloaded and can be handled separately.
611 if (p.entry->isRemoved()) {
612 METER(stats.addOverRemoved++);
613 removedCount--;
614 p.keyHash |= sCollisionBit;
615 } else {
616 /* If alpha is >= .75, grow or compress the table. */
617 if (overloaded()) {
618 /* Compress if a quarter or more of all entries are removed. */
619 int deltaLog2;
620 if (removedCount >= (tableCapacity >> 2)) {
621 METER(stats.compresses++);
622 deltaLog2 = 0;
623 } else {
624 METER(stats.grows++);
625 deltaLog2 = 1;
628 if (!changeTableSize(deltaLog2))
629 return false;
631 /* Preserve the validity of |p.entry|. */
632 p.entry = &findFreeEntry(p.keyHash);
636 p.entry->setLive(p.keyHash);
637 entryCount++;
638 #ifdef DEBUG
639 mutationCount++;
640 #endif
641 return true;
645 * There is an important contract between the caller and callee for this
646 * function: if add() returns true, the caller must assign the T value
647 * which produced p before using the hashtable again.
649 bool add(AddPtr &p, T** pentry)
651 if (!add(p))
652 return false;
653 *pentry = &p.entry->t;
654 return true;
657 bool add(AddPtr &p, const T &t)
659 if (!add(p))
660 return false;
661 p.entry->t = t;
662 return true;
/* Redo the lookup (the table may have mutated since lookupForAdd), then add. */
665 bool relookupOrAdd(AddPtr& p, const Lookup &l, const T& t)
667 #ifdef DEBUG
668 p.mutationCount = mutationCount;
669 #endif
671 ReentrancyGuard g(*this);
672 p.entry = &lookup(l, p.keyHash, sCollisionBit);
674 return p.found() || add(p, t);
677 void remove(Ptr p)
679 ReentrancyGuard g(*this);
680 JS_ASSERT(p.found());
681 remove(*p.entry);
682 checkUnderloaded();
684 #undef METER
690 * Hash policy
692 * A hash policy P for a hash table with key-type Key must provide:
693 * - a type |P::Lookup| to use to lookup table entries;
694 * - a static member function |P::hash| with signature
696 * static js::HashNumber hash(Lookup)
698 * to use to hash the lookup type; and
699 * - a static member function |P::match| with signature
701 * static bool match(Key, Lookup)
703 * to use to test equality of key and lookup values.
705 * Normally, Lookup = Key. In general, though, different values and types of
706 * values can be used to lookup and store. If a Lookup value |l| is != to the
707 * added Key value |k|, the user must ensure that |P::match(k,l)|. E.g.:
709 * js::HashSet<Key, P>::AddPtr p = h.lookup(l);
710 * if (!p) {
711 * assert(P::match(k, l)); // must hold
712 * h.add(p, k);
716 /* Default hashing policies. */
717 template <class Key>
718 struct DefaultHasher
720 typedef Key Lookup;
721 static HashNumber hash(const Lookup &l) {
722 /* Hash if can implicitly cast to hash number type. */
723 return l;
725 static bool match(const Key &k, const Lookup &l) {
726 /* Use builtin or overloaded operator==. */
727 return k == l;
731 /* Specialized hashing policy for pointer types. */
732 template <class T>
733 struct DefaultHasher<T *>
735 typedef T *Lookup;
736 static HashNumber hash(T *l) {
738 * Strip often-0 lower bits for better distribution after multiplying
739 * by the sGoldenRatio.
741 return HashNumber(reinterpret_cast<size_t>(l) >>
742 tl::FloorLog2<sizeof(void *)>::result);
744 static bool match(T *k, T *l) {
745 return k == l;
750 * JS-friendly, STL-like container providing a hash-based map from keys to
751 * values. In particular, HashMap calls constructors and destructors of all
752 * objects added so non-PODs may be used safely.
754 * Key/Value requirements:
755 * - default constructible, copyable, destructible, assignable
756 * HashPolicy requirements:
757 * - see "Hash policy" above (default js::DefaultHasher<Key>)
758 * AllocPolicy:
759 * - see "Allocation policies" in jstl.h (default js::ContextAllocPolicy)
761 * N.B: HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
762 * called by HashMap must not call back into the same HashMap object.
763 * N.B: Due to the lack of exception handling, the user must call |init()|.
/*
 * NOTE(review): lossy extraction -- original line numbers are fused onto
 * each line and blank/closing-brace lines appear dropped.  HashMap is a
 * thin facade over detail::HashTable<Entry, ...>; all comments below
 * describe only the visible forwarding code.
 */
765 template <class Key, class Value, class HashPolicy, class AllocPolicy>
766 class HashMap
768 public:
769 typedef typename HashPolicy::Lookup Lookup;
771 class Entry
/*
 * |key| is const so users cannot corrupt the table by mutating it in
 * place; only HashTable (a friend) may assign via this operator=.
 */
773 template <class, class, class> friend class detail::HashTable;
774 void operator=(const Entry &rhs) {
775 const_cast<Key &>(key) = rhs.key;
776 value = rhs.value;
779 public:
780 Entry() : key(), value() {}
781 Entry(const Key &k, const Value &v) : key(k), value(v) {}
783 const Key key;
784 Value value;
787 private:
788 /* Implement HashMap using HashTable. Lift |Key| operations to |Entry|. */
789 struct MapHashPolicy : HashPolicy
791 typedef Key KeyType;
792 static const Key &getKey(Entry &e) { return e.key; }
794 typedef detail::HashTable<Entry, MapHashPolicy, AllocPolicy> Impl;
796 friend class Impl::Enum;
798 /* Not implicitly copyable (expensive). May add explicit |clone| later. */
799 HashMap(const HashMap &);
800 HashMap &operator=(const HashMap &);
802 Impl impl;
804 public:
806 * HashMap construction is fallible (due to OOM); thus the user must call
807 * init after constructing a HashMap and check the return value.
809 HashMap(AllocPolicy a = AllocPolicy()) : impl(a) {}
810 bool init(uint32 len = 0) { return impl.init(len); }
811 bool initialized() const { return impl.initialized(); }
814 * Return whether the given lookup value is present in the map. E.g.:
816 * typedef HashMap<int,char> HM;
817 * HM h;
818 * if (HM::Ptr p = h.lookup(3)) {
819 * const HM::Entry &e = *p; // p acts like a pointer to Entry
820 * assert(p->key == 3); // Entry contains the key
821 * char val = p->value; // and value
824 * Also see the definition of Ptr in HashTable above (with T = Entry).
826 typedef typename Impl::Ptr Ptr;
827 Ptr lookup(const Lookup &l) const { return impl.lookup(l); }
829 /* Assuming |p.found()|, remove |*p|. */
830 void remove(Ptr p) { impl.remove(p); }
833 * Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
834 * insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
835 * |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.:
837 * typedef HashMap<int,char> HM;
838 * HM h;
839 * HM::AddPtr p = h.lookupForAdd(3);
840 * if (!p) {
841 * if (!h.add(p, 3, 'a'))
842 * return false;
844 * const HM::Entry &e = *p; // p acts like a pointer to Entry
845 * assert(p->key == 3); // Entry contains the key
846 * char val = p->value; // and value
848 * Also see the definition of AddPtr in HashTable above (with T = Entry).
850 * N.B. The caller must ensure that no mutating hash table operations
851 * occur between a pair of |lookupForAdd| and |add| calls. To avoid
852 * looking up the key a second time, the caller may use the more efficient
853 * relookupOrAdd method. This method reuses part of the hashing computation
854 * to more efficiently insert the key if it has not been added. For
855 * example, a mutation-handling version of the previous example:
857 * HM::AddPtr p = h.lookupForAdd(3);
858 * if (!p) {
859 * call_that_may_mutate_h();
860 * if (!h.relookupOrAdd(p, 3, 'a'))
861 * return false;
863 * const HM::Entry &e = *p;
864 * assert(p->key == 3);
865 * char val = p->value;
867 typedef typename Impl::AddPtr AddPtr;
868 AddPtr lookupForAdd(const Lookup &l) const {
869 return impl.lookupForAdd(l);
/* Insert key and value; returns false on OOM, leaving the map unchanged. */
872 bool add(AddPtr &p, const Key &k, const Value &v) {
873 Entry *pentry;
874 if (!impl.add(p, &pentry))
875 return false;
876 const_cast<Key &>(pentry->key) = k;
877 pentry->value = v;
878 return true;
/* Insert key only; the value is the default-constructed Value from add(). */
881 bool add(AddPtr &p, const Key &k) {
882 Entry *pentry;
883 if (!impl.add(p, &pentry))
884 return false;
885 const_cast<Key &>(pentry->key) = k;
886 return true;
889 bool relookupOrAdd(AddPtr &p, const Key &k, const Value &v) {
890 return impl.relookupOrAdd(p, k, Entry(k, v));
894 * |all()| returns a Range containing |count()| elements. E.g.:
896 * typedef HashMap<int,char> HM;
897 * HM h;
898 * for (HM::Range r = h.all(); !r.empty(); r.popFront())
899 * char c = r.front().value;
901 * Also see the definition of Range in HashTable above (with T = Entry).
903 typedef typename Impl::Range Range;
904 Range all() const { return impl.all(); }
905 size_t count() const { return impl.count(); }
908 * Typedef for the enumeration class. An Enum may be used to examine and
909 * remove table entries:
911 * typedef HashMap<int,char> HM;
912 * HM s;
913 * for (HM::Enum e(s); !e.empty(); e.popFront())
914 * if (e.front().value == 'l')
915 * e.removeFront();
917 * Table resize may occur in Enum's destructor. Also see the definition of
918 * Enum in HashTable above (with T = Entry).
920 typedef typename Impl::Enum Enum;
922 /* Remove all entries. */
923 void clear() { impl.clear(); }
925 /* Does the table contain any entries? */
926 bool empty() const { return impl.empty(); }
929 * If |generation()| is the same before and after a HashMap operation,
930 * pointers into the table remain valid.
932 unsigned generation() const { return impl.generation(); }
934 /* Shorthand operations: */
936 bool has(const Lookup &l) const {
937 return impl.lookup(l) != NULL;
/*
 * Upsert: overwrite the value if |k| is already present, else insert
 * (k, v).  Returns the entry, or NULL on OOM.
 */
940 Entry *put(const Key &k, const Value &v) {
941 AddPtr p = lookupForAdd(k);
942 if (p) {
943 p->value = v;
944 return &*p;
946 return add(p, k, v) ? &*p : NULL;
949 void remove(const Lookup &l) {
950 if (Ptr p = lookup(l))
951 remove(p);
956 * JS-friendly, STL-like container providing a hash-based set of values. In
957 * particular, HashSet calls constructors and destructors of all objects added
958 * so non-PODs may be used safely.
960 * T requirements:
961 * - default constructible, copyable, destructible, assignable
962 * HashPolicy requirements:
963 * - see "Hash policy" above (default js::DefaultHasher<Key>)
964 * AllocPolicy:
965 * - see "Allocation policies" in jstl.h (default js::ContextAllocPolicy)
967 * N.B: HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
968 * HashSet must not call back into the same HashSet object.
969 * N.B: Due to the lack of exception handling, the user must call |init()|.
/*
 * NOTE(review): lossy extraction -- original line numbers are fused onto
 * each line and blank/closing-brace lines appear dropped.  HashSet is a
 * thin facade over detail::HashTable<const T, ...>; the element doubles as
 * its own key (see SetOps::getKey).
 */
971 template <class T, class HashPolicy, class AllocPolicy>
972 class HashSet
974 typedef typename HashPolicy::Lookup Lookup;
976 /* Implement HashSet in terms of HashTable. */
977 struct SetOps : HashPolicy {
978 typedef T KeyType;
979 static const KeyType &getKey(const T &t) { return t; }
981 typedef detail::HashTable<const T, SetOps, AllocPolicy> Impl;
983 friend class Impl::Enum;
985 /* Not implicitly copyable (expensive). May add explicit |clone| later. */
986 HashSet(const HashSet &);
987 HashSet &operator=(const HashSet &);
989 Impl impl;
991 public:
993 * HashSet construction is fallible (due to OOM); thus the user must call
994 * init after constructing a HashSet and check the return value.
996 HashSet(AllocPolicy a = AllocPolicy()) : impl(a) {}
997 bool init(uint32 len = 0) { return impl.init(len); }
998 bool initialized() const { return impl.initialized(); }
1001 * Return whether the given lookup value is present in the map. E.g.:
1003 * typedef HashSet<int> HS;
1004 * HS h;
1005 * if (HS::Ptr p = h.lookup(3)) {
1006 * assert(*p == 3); // p acts like a pointer to int
1009 * Also see the definition of Ptr in HashTable above.
1011 typedef typename Impl::Ptr Ptr;
1012 Ptr lookup(const Lookup &l) const { return impl.lookup(l); }
1014 /* Assuming |p.found()|, remove |*p|. */
1015 void remove(Ptr p) { impl.remove(p); }
1018 * Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
1019 * insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
1020 * |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
1022 * typedef HashSet<int> HS;
1023 * HS h;
1024 * HS::AddPtr p = h.lookupForAdd(3);
1025 * if (!p) {
1026 * if (!h.add(p, 3))
1027 * return false;
1029 * assert(*p == 3); // p acts like a pointer to int
1031 * Also see the definition of AddPtr in HashTable above.
1033 * N.B. The caller must ensure that no mutating hash table operations
1034 * occur between a pair of |lookupForAdd| and |add| calls. To avoid
1035 * looking up the key a second time, the caller may use the more efficient
1036 * relookupOrAdd method. This method reuses part of the hashing computation
1037 * to more efficiently insert the key if it has not been added. For
1038 * example, a mutation-handling version of the previous example:
1040 * HS::AddPtr p = h.lookupForAdd(3);
1041 * if (!p) {
1042 * call_that_may_mutate_h();
1043 * if (!h.relookupOrAdd(p, 3, 3))
1044 * return false;
1046 * assert(*p == 3);
1048 * Note that relookupOrAdd(p,l,t) performs Lookup using l and adds the
1049 * entry t, where the caller ensures match(l,t).
1051 typedef typename Impl::AddPtr AddPtr;
1052 AddPtr lookupForAdd(const Lookup &l) const {
1053 return impl.lookupForAdd(l);
/* Insert |t|; returns false on OOM, leaving the set unchanged. */
1056 bool add(AddPtr &p, const T &t) {
1057 return impl.add(p, t);
1060 bool relookupOrAdd(AddPtr &p, const Lookup &l, const T &t) {
1061 return impl.relookupOrAdd(p, l, t);
1065 * |all()| returns a Range containing |count()| elements:
1067 * typedef HashSet<int> HS;
1068 * HS h;
1069 * for (HS::Range r = h.all(); !r.empty(); r.popFront())
1070 * int i = r.front();
1072 * Also see the definition of Range in HashTable above.
1074 typedef typename Impl::Range Range;
1075 Range all() const { return impl.all(); }
1076 size_t count() const { return impl.count(); }
1079 * Typedef for the enumeration class. An Enum may be used to examine and
1080 * remove table entries:
1082 * typedef HashSet<int> HS;
1083 * HS s;
1084 * for (HS::Enum e(s); !e.empty(); e.popFront())
1085 * if (e.front() == 42)
1086 * e.removeFront();
1088 * Table resize may occur in Enum's destructor. Also see the definition of
1089 * Enum in HashTable above.
1091 typedef typename Impl::Enum Enum;
1093 /* Remove all entries. */
1094 void clear() { impl.clear(); }
1096 /* Does the table contain any entries? */
1097 bool empty() const { return impl.empty(); }
1100 * If |generation()| is the same before and after a HashSet operation,
1101 * pointers into the table remain valid.
1103 unsigned generation() const { return impl.generation(); }
1105 /* Shorthand operations: */
1107 bool has(const Lookup &l) const {
1108 return impl.lookup(l) != NULL;
/* Insert |t| if absent (existing element is kept); returns NULL on OOM. */
1111 const T *put(const T &t) {
1112 AddPtr p = lookupForAdd(t);
1113 return p ? &*p : (add(p, t) ? &*p : NULL);
1116 void remove(const Lookup &l) {
1117 if (Ptr p = lookup(l))
1118 remove(p);
1122 } /* namespace js */
1124 #endif