Bug 1837557 - Productionize webgpu pushErrorScope/popErrorScope. r=webgpu-reviewers...
[gecko.git] / mfbt / HashFunctions.h
blob4b740a3db16bca04ff3e73d1973c8f82335de4fb
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* Utilities for hashing. */
/*
 * This file exports functions for hashing data down to a uint32_t (a.k.a.
 * mozilla::HashNumber), including:
 *
 *  - HashString    Hash a char* or char16_t/wchar_t* of known or unknown
 *                  length.
 *
 *  - HashBytes     Hash a byte array of known length.
 *
 *  - HashGeneric   Hash one or more values.  Currently, we support uint32_t,
 *                  types which can be implicitly cast to uint32_t, data
 *                  pointers, and function pointers.
 *
 *  - AddToHash     Add one or more values to the given hash.  This supports the
 *                  same list of types as HashGeneric.
 *
 * You can chain these functions together to hash complex objects.  For example:
 *
 *  class ComplexObject
 *  {
 *    char* mStr;
 *    uint32_t mUint1, mUint2;
 *    void (*mCallbackFn)();
 *
 *   public:
 *    HashNumber hash()
 *    {
 *      HashNumber hash = HashString(mStr);
 *      hash = AddToHash(hash, mUint1, mUint2);
 *      return AddToHash(hash, mCallbackFn);
 *    }
 *  };
 *
 * If you want to hash an nsAString or nsACString, use the HashString functions
 * in nsHashKeys.h.
 */
#ifndef mozilla_HashFunctions_h
#define mozilla_HashFunctions_h

#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"
#include "mozilla/Char16.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/Types.h"
#include "mozilla/WrappingOperations.h"

#include <stdint.h>
#include <type_traits>
60 namespace mozilla {
// The 32-bit hash-code type used throughout Gecko, and its width in bits.
using HashNumber = uint32_t;
static constexpr uint32_t kHashNumberBits = 32;

/**
 * The golden ratio as a 32-bit fixed-point value.
 */
static constexpr HashNumber kGoldenRatioU32 = 0x9E3779B9U;
71 * Given a raw hash code, h, return a number that can be used to select a hash
72 * bucket.
74 * This function aims to produce as uniform an output distribution as possible,
75 * especially in the most significant (leftmost) bits, even though the input
76 * distribution may be highly nonrandom, given the constraints that this must
77 * be deterministic and quick to compute.
79 * Since the leftmost bits of the result are best, the hash bucket index is
80 * computed by doing ScrambleHashCode(h) / (2^32/N) or the equivalent
81 * right-shift, not ScrambleHashCode(h) % N or the equivalent bit-mask.
83 constexpr HashNumber ScrambleHashCode(HashNumber h) {
85 * Simply returning h would not cause any hash tables to produce wrong
86 * answers. But it can produce pathologically bad performance: The caller
87 * right-shifts the result, keeping only the highest bits. The high bits of
88 * hash codes are very often completely entropy-free. (So are the lowest
89 * bits.)
91 * So we use Fibonacci hashing, as described in Knuth, The Art of Computer
92 * Programming, 6.4. This mixes all the bits of the input hash code h.
94 * The value of goldenRatio is taken from the hex expansion of the golden
95 * ratio, which starts 1.9E3779B9.... This value is especially good if
96 * values with consecutive hash codes are stored in a hash table; see Knuth
97 * for details.
99 return mozilla::WrappingMultiply(h, kGoldenRatioU32);
102 namespace detail {
104 MOZ_NO_SANITIZE_UNSIGNED_OVERFLOW
105 constexpr HashNumber RotateLeft5(HashNumber aValue) {
106 return (aValue << 5) | (aValue >> 27);
109 constexpr HashNumber AddU32ToHash(HashNumber aHash, uint32_t aValue) {
111 * This is the meat of all our hash routines. This hash function is not
112 * particularly sophisticated, but it seems to work well for our mostly
113 * plain-text inputs. Implementation notes follow.
115 * Our use of the golden ratio here is arbitrary; we could pick almost any
116 * number which:
118 * * is odd (because otherwise, all our hash values will be even)
120 * * has a reasonably-even mix of 1's and 0's (consider the extreme case
121 * where we multiply by 0x3 or 0xeffffff -- this will not produce good
122 * mixing across all bits of the hash).
124 * The rotation length of 5 is also arbitrary, although an odd number is again
125 * preferable so our hash explores the whole universe of possible rotations.
127 * Finally, we multiply by the golden ratio *after* xor'ing, not before.
128 * Otherwise, if |aHash| is 0 (as it often is for the beginning of a
129 * message), the expression
131 * mozilla::WrappingMultiply(kGoldenRatioU32, RotateLeft5(aHash))
132 * |xor|
133 * aValue
135 * evaluates to |aValue|.
137 * (Number-theoretic aside: Because any odd number |m| is relatively prime to
138 * our modulus (2**32), the list
140 * [x * m (mod 2**32) for 0 <= x < 2**32]
142 * has no duplicate elements. This means that multiplying by |m| does not
143 * cause us to skip any possible hash values.
145 * It's also nice if |m| has large-ish order mod 2**32 -- that is, if the
146 * smallest k such that m**k == 1 (mod 2**32) is large -- so we can safely
147 * multiply our hash value by |m| a few times without negating the
148 * multiplicative effect. Our golden ratio constant has order 2**29, which is
149 * more than enough for our purposes.)
151 return mozilla::WrappingMultiply(kGoldenRatioU32,
152 RotateLeft5(aHash) ^ aValue);
156 * AddUintptrToHash takes sizeof(uintptr_t) as a template parameter.
158 template <size_t PtrSize>
159 constexpr HashNumber AddUintptrToHash(HashNumber aHash, uintptr_t aValue) {
160 return AddU32ToHash(aHash, static_cast<uint32_t>(aValue));
163 template <>
164 inline HashNumber AddUintptrToHash<8>(HashNumber aHash, uintptr_t aValue) {
165 uint32_t v1 = static_cast<uint32_t>(aValue);
166 uint32_t v2 = static_cast<uint32_t>(static_cast<uint64_t>(aValue) >> 32);
167 return AddU32ToHash(AddU32ToHash(aHash, v1), v2);
170 } /* namespace detail */
173 * AddToHash takes a hash and some values and returns a new hash based on the
174 * inputs.
176 * Currently, we support hashing uint32_t's, values which we can implicitly
177 * convert to uint32_t, data pointers, and function pointers.
179 template <typename T, bool TypeIsNotIntegral = !std::is_integral_v<T>,
180 bool TypeIsNotEnum = !std::is_enum_v<T>,
181 std::enable_if_t<TypeIsNotIntegral && TypeIsNotEnum, int> = 0>
182 [[nodiscard]] inline HashNumber AddToHash(HashNumber aHash, T aA) {
184 * Try to convert |A| to uint32_t implicitly. If this works, great. If not,
185 * we'll error out.
187 return detail::AddU32ToHash(aHash, aA);
190 template <typename A>
191 [[nodiscard]] inline HashNumber AddToHash(HashNumber aHash, A* aA) {
193 * You might think this function should just take a void*. But then we'd only
194 * catch data pointers and couldn't handle function pointers.
197 static_assert(sizeof(aA) == sizeof(uintptr_t), "Strange pointer!");
199 return detail::AddUintptrToHash<sizeof(uintptr_t)>(aHash, uintptr_t(aA));
202 // We use AddUintptrToHash() for hashing all integral types. 8-byte integral
203 // types are treated the same as 64-bit pointers, and smaller integral types are
204 // first implicitly converted to 32 bits and then passed to AddUintptrToHash()
205 // to be hashed.
206 template <typename T, std::enable_if_t<std::is_integral_v<T>, int> = 0>
207 [[nodiscard]] constexpr HashNumber AddToHash(HashNumber aHash, T aA) {
208 return detail::AddUintptrToHash<sizeof(T)>(aHash, aA);
211 template <typename T, std::enable_if_t<std::is_enum_v<T>, int> = 0>
212 [[nodiscard]] constexpr HashNumber AddToHash(HashNumber aHash, T aA) {
213 // Hash using AddUintptrToHash with the underlying type of the enum type
214 using UnderlyingType = typename std::underlying_type<T>::type;
215 return detail::AddUintptrToHash<sizeof(UnderlyingType)>(
216 aHash, static_cast<UnderlyingType>(aA));
219 template <typename A, typename... Args>
220 [[nodiscard]] HashNumber AddToHash(HashNumber aHash, A aArg, Args... aArgs) {
221 return AddToHash(AddToHash(aHash, aArg), aArgs...);
225 * The HashGeneric class of functions let you hash one or more values.
227 * If you want to hash together two values x and y, calling HashGeneric(x, y) is
228 * much better than calling AddToHash(x, y), because AddToHash(x, y) assumes
229 * that x has already been hashed.
231 template <typename... Args>
232 [[nodiscard]] inline HashNumber HashGeneric(Args... aArgs) {
233 return AddToHash(0, aArgs...);
237 * Hash successive |*aIter| until |!*aIter|, i.e. til null-termination.
239 * This function is *not* named HashString like the non-template overloads
240 * below. Some users define HashString overloads and pass inexactly-matching
241 * values to them -- but an inexactly-matching value would match this overload
242 * instead! We follow the general rule and don't mix and match template and
243 * regular overloads to avoid this.
245 * If you have the string's length, call HashStringKnownLength: it may be
246 * marginally faster.
248 template <typename Iterator>
249 [[nodiscard]] constexpr HashNumber HashStringUntilZero(Iterator aIter) {
250 HashNumber hash = 0;
251 for (; auto c = *aIter; ++aIter) {
252 hash = AddToHash(hash, c);
254 return hash;
258 * Hash successive |aIter[i]| up to |i == aLength|.
260 template <typename Iterator>
261 [[nodiscard]] constexpr HashNumber HashStringKnownLength(Iterator aIter,
262 size_t aLength) {
263 HashNumber hash = 0;
264 for (size_t i = 0; i < aLength; i++) {
265 hash = AddToHash(hash, aIter[i]);
267 return hash;
271 * The HashString overloads below do just what you'd expect.
273 * These functions are non-template functions so that users can 1) overload them
274 * with their own types 2) in a way that allows implicit conversions to happen.
276 [[nodiscard]] inline HashNumber HashString(const char* aStr) {
277 // Use the |const unsigned char*| version of the above so that all ordinary
278 // character data hashes identically.
279 return HashStringUntilZero(reinterpret_cast<const unsigned char*>(aStr));
282 [[nodiscard]] inline HashNumber HashString(const char* aStr, size_t aLength) {
283 // Delegate to the |const unsigned char*| version of the above to share
284 // template instantiations.
285 return HashStringKnownLength(reinterpret_cast<const unsigned char*>(aStr),
286 aLength);
289 [[nodiscard]] inline HashNumber HashString(const unsigned char* aStr,
290 size_t aLength) {
291 return HashStringKnownLength(aStr, aLength);
294 [[nodiscard]] constexpr HashNumber HashString(const char16_t* aStr) {
295 return HashStringUntilZero(aStr);
298 [[nodiscard]] inline HashNumber HashString(const char16_t* aStr,
299 size_t aLength) {
300 return HashStringKnownLength(aStr, aLength);
304 * HashString overloads for |wchar_t| on platforms where it isn't |char16_t|.
306 template <typename WCharT, typename = typename std::enable_if<
307 std::is_same<WCharT, wchar_t>::value &&
308 !std::is_same<wchar_t, char16_t>::value>::type>
309 [[nodiscard]] inline HashNumber HashString(const WCharT* aStr) {
310 return HashStringUntilZero(aStr);
313 template <typename WCharT, typename = typename std::enable_if<
314 std::is_same<WCharT, wchar_t>::value &&
315 !std::is_same<wchar_t, char16_t>::value>::type>
316 [[nodiscard]] inline HashNumber HashString(const WCharT* aStr, size_t aLength) {
317 return HashStringKnownLength(aStr, aLength);
321 * Hash some number of bytes.
323 * This hash walks word-by-word, rather than byte-by-byte, so you won't get the
324 * same result out of HashBytes as you would out of HashString.
326 [[nodiscard]] extern MFBT_API HashNumber HashBytes(const void* bytes,
327 size_t aLength);
330 * A pseudorandom function mapping 32-bit integers to 32-bit integers.
332 * This is for when you're feeding private data (like pointer values or credit
333 * card numbers) to a non-crypto hash function (like HashBytes) and then using
334 * the hash code for something that untrusted parties could observe (like a JS
335 * Map). Plug in a HashCodeScrambler before that last step to avoid leaking the
336 * private data.
338 * By itself, this does not prevent hash-flooding DoS attacks, because an
339 * attacker can still generate many values with exactly equal hash codes by
340 * attacking the non-crypto hash function alone. Equal hash codes will, of
341 * course, still be equal however much you scramble them.
343 * The algorithm is SipHash-1-3. See <https://131002.net/siphash/>.
345 class HashCodeScrambler {
346 struct SipHasher;
348 uint64_t mK0, mK1;
350 public:
351 /** Creates a new scrambler with the given 128-bit key. */
352 constexpr HashCodeScrambler(uint64_t aK0, uint64_t aK1)
353 : mK0(aK0), mK1(aK1) {}
356 * Scramble a hash code. Always produces the same result for the same
357 * combination of key and hash code.
359 HashNumber scramble(HashNumber aHashCode) const {
360 SipHasher hasher(mK0, mK1);
361 return HashNumber(hasher.sipHash(aHashCode));
364 static constexpr size_t offsetOfMK0() {
365 return offsetof(HashCodeScrambler, mK0);
368 static constexpr size_t offsetOfMK1() {
369 return offsetof(HashCodeScrambler, mK1);
372 private:
373 struct SipHasher {
374 SipHasher(uint64_t aK0, uint64_t aK1) {
375 // 1. Initialization.
376 mV0 = aK0 ^ UINT64_C(0x736f6d6570736575);
377 mV1 = aK1 ^ UINT64_C(0x646f72616e646f6d);
378 mV2 = aK0 ^ UINT64_C(0x6c7967656e657261);
379 mV3 = aK1 ^ UINT64_C(0x7465646279746573);
382 uint64_t sipHash(uint64_t aM) {
383 // 2. Compression.
384 mV3 ^= aM;
385 sipRound();
386 mV0 ^= aM;
388 // 3. Finalization.
389 mV2 ^= 0xff;
390 for (int i = 0; i < 3; i++) sipRound();
391 return mV0 ^ mV1 ^ mV2 ^ mV3;
394 void sipRound() {
395 mV0 = WrappingAdd(mV0, mV1);
396 mV1 = RotateLeft(mV1, 13);
397 mV1 ^= mV0;
398 mV0 = RotateLeft(mV0, 32);
399 mV2 = WrappingAdd(mV2, mV3);
400 mV3 = RotateLeft(mV3, 16);
401 mV3 ^= mV2;
402 mV0 = WrappingAdd(mV0, mV3);
403 mV3 = RotateLeft(mV3, 21);
404 mV3 ^= mV0;
405 mV2 = WrappingAdd(mV2, mV1);
406 mV1 = RotateLeft(mV1, 17);
407 mV1 ^= mV2;
408 mV2 = RotateLeft(mV2, 32);
411 uint64_t mV0, mV1, mV2, mV3;
415 } /* namespace mozilla */
417 #endif /* mozilla_HashFunctions_h */