/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
10 #include "mozilla/Array.h"
11 #include "mozilla/MathAlgorithms.h"
12 #include "mozilla/Maybe.h"
13 #include "mozilla/MruCache.h"
14 #include "mozilla/TemplateLib.h"
15 #include "mozilla/UniquePtr.h"
17 #include "frontend/ScopeBindingCache.h"
18 #include "gc/Tracer.h"
19 #include "js/RootingAPI.h"
20 #include "js/TypeDecls.h"
21 #include "vm/JSScript.h"
23 #include "vm/StencilCache.h" // js::DelazificationCache
24 #include "vm/StringType.h"
28 struct EvalCacheEntry
{
31 JSScript
* callerScript
;
34 // We sweep this cache after a nursery collection to update entries with
35 // string keys that have been tenured.
37 // The entire cache is purged on a major GC, so we don't need to sweep it
39 bool traceWeak(JSTracer
* trc
) {
40 MOZ_ASSERT(trc
->kind() == JS::TracerKind::MinorSweeping
);
41 return TraceManuallyBarrieredWeakEdge(trc
, &str
, "EvalCacheEntry::str");
45 struct EvalCacheLookup
{
46 explicit EvalCacheLookup(JSContext
* cx
) : str(cx
), callerScript(cx
) {}
47 Rooted
<JSLinearString
*> str
;
48 RootedScript callerScript
;
49 MOZ_INIT_OUTSIDE_CTOR jsbytecode
* pc
;
52 struct EvalCacheHashPolicy
{
53 using Lookup
= EvalCacheLookup
;
55 static HashNumber
hash(const Lookup
& l
);
56 static bool match(const EvalCacheEntry
& entry
, const EvalCacheLookup
& l
);
60 GCHashSet
<EvalCacheEntry
, EvalCacheHashPolicy
, SystemAllocPolicy
>;
62 class MegamorphicCacheEntry
{
63 // Receiver object's shape.
64 Shape
* shape_
= nullptr;
66 // The atom or symbol property being accessed.
69 // Slot offset and isFixedSlot flag of the data property.
70 TaggedSlotOffset slotOffset_
;
72 // This entry is valid iff the generation matches the cache's generation.
73 uint16_t generation_
= 0;
75 // Number of hops on the proto chain to get to the holder object. If this is
76 // zero, the property exists on the receiver object. It can also be one of
77 // the sentinel values indicating a missing property lookup.
80 friend class MegamorphicCache
;
83 static constexpr uint8_t MaxHopsForDataProperty
= UINT8_MAX
- 2;
84 static constexpr uint8_t NumHopsForMissingProperty
= UINT8_MAX
- 1;
85 static constexpr uint8_t NumHopsForMissingOwnProperty
= UINT8_MAX
;
87 void init(Shape
* shape
, PropertyKey key
, uint16_t generation
, uint8_t numHops
,
88 TaggedSlotOffset slotOffset
) {
91 slotOffset_
= slotOffset
;
92 generation_
= generation
;
94 MOZ_ASSERT(numHops_
== numHops
, "numHops must fit in numHops_");
96 bool isMissingProperty() const {
97 return numHops_
== NumHopsForMissingProperty
;
99 bool isMissingOwnProperty() const {
100 return numHops_
== NumHopsForMissingOwnProperty
;
102 bool isDataProperty() const { return numHops_
<= MaxHopsForDataProperty
; }
103 uint16_t numHops() const {
104 MOZ_ASSERT(isDataProperty());
107 TaggedSlotOffset
slotOffset() const {
108 MOZ_ASSERT(isDataProperty());
112 static constexpr size_t offsetOfShape() {
113 return offsetof(MegamorphicCacheEntry
, shape_
);
116 static constexpr size_t offsetOfKey() {
117 return offsetof(MegamorphicCacheEntry
, key_
);
120 static constexpr size_t offsetOfGeneration() {
121 return offsetof(MegamorphicCacheEntry
, generation_
);
124 static constexpr size_t offsetOfSlotOffset() {
125 return offsetof(MegamorphicCacheEntry
, slotOffset_
);
128 static constexpr size_t offsetOfNumHops() {
129 return offsetof(MegamorphicCacheEntry
, numHops_
);
133 // [SMDOC] Megamorphic Property Lookup Cache (MegamorphicCache)
135 // MegamorphicCache is a data structure used to speed up megamorphic property
136 // lookups from JIT code. The same cache is currently used for both GetProp and
137 // HasProp (in, hasOwnProperty) operations.
139 // This is implemented as a fixed-size array of entries. Lookups are performed
140 // based on the receiver object's Shape + PropertyKey. If found in the cache,
141 // the result of a lookup represents either:
143 // * A data property on the receiver or on its proto chain (stored as number of
144 // 'hops' up the proto chain + the slot of the data property).
146 // * A missing property on the receiver or its proto chain.
148 // * A missing property on the receiver, but it might exist on the proto chain.
149 // This lets us optimize hasOwnProperty better.
151 // Collisions are handled by simply overwriting the previous entry stored in the
152 // slot. This is sufficient to achieve a high hit rate on typical web workloads
153 // while ensuring cache lookups are always fast and simple.
155 // Lookups always check the receiver object's shape (ensuring the properties and
156 // prototype are unchanged). Because the cache also caches lookups on the proto
157 // chain, Watchtower is used to invalidate the cache when prototype objects are
158 // mutated. This is done by incrementing the cache's generation counter to
159 // invalidate all entries.
161 // The cache is also invalidated on each major GC.
162 class MegamorphicCache
{
164 using Entry
= MegamorphicCacheEntry
;
166 static constexpr size_t NumEntries
= 1024;
167 static constexpr uint8_t ShapeHashShift1
=
168 mozilla::tl::FloorLog2
<alignof(Shape
)>::value
;
169 static constexpr uint8_t ShapeHashShift2
=
170 ShapeHashShift1
+ mozilla::tl::FloorLog2
<NumEntries
>::value
;
172 static_assert(mozilla::IsPowerOfTwo(alignof(Shape
)) &&
173 mozilla::IsPowerOfTwo(NumEntries
),
174 "FloorLog2 is exact because alignof(Shape) and NumEntries are "
175 "both powers of two");
178 mozilla::Array
<Entry
, NumEntries
> entries_
;
180 // Generation counter used to invalidate all entries.
181 uint16_t generation_
= 0;
183 // NOTE: this logic is mirrored in MacroAssembler::emitMegamorphicCacheLookup
184 Entry
& getEntry(Shape
* shape
, PropertyKey key
) {
185 static_assert(mozilla::IsPowerOfTwo(NumEntries
),
186 "NumEntries must be a power-of-two for fast modulo");
187 uintptr_t hash
= uintptr_t(shape
) >> ShapeHashShift1
;
188 hash
^= uintptr_t(shape
) >> ShapeHashShift2
;
189 hash
+= HashAtomOrSymbolPropertyKey(key
);
190 return entries_
[hash
% NumEntries
];
194 void bumpGeneration() {
196 if (generation_
== 0) {
197 // Generation overflowed. Invalidate the whole cache.
198 for (size_t i
= 0; i
< NumEntries
; i
++) {
199 entries_
[i
].shape_
= nullptr;
203 bool lookup(Shape
* shape
, PropertyKey key
, Entry
** entryp
) {
204 Entry
& entry
= getEntry(shape
, key
);
206 return (entry
.shape_
== shape
&& entry
.key_
== key
&&
207 entry
.generation_
== generation_
);
209 void initEntryForMissingProperty(Entry
* entry
, Shape
* shape
,
211 entry
->init(shape
, key
, generation_
, Entry::NumHopsForMissingProperty
,
214 void initEntryForMissingOwnProperty(Entry
* entry
, Shape
* shape
,
216 entry
->init(shape
, key
, generation_
, Entry::NumHopsForMissingOwnProperty
,
219 void initEntryForDataProperty(Entry
* entry
, Shape
* shape
, PropertyKey key
,
220 size_t numHops
, TaggedSlotOffset slotOffset
) {
221 if (numHops
> Entry::MaxHopsForDataProperty
) {
224 entry
->init(shape
, key
, generation_
, numHops
, slotOffset
);
227 static constexpr size_t offsetOfEntries() {
228 return offsetof(MegamorphicCache
, entries_
);
231 static constexpr size_t offsetOfGeneration() {
232 return offsetof(MegamorphicCache
, generation_
);
236 class MegamorphicSetPropCacheEntry
{
237 Shape
* beforeShape_
= nullptr;
238 Shape
* afterShape_
= nullptr;
240 // The atom or symbol property being accessed.
243 // Slot offset and isFixedSlot flag of the data property.
244 TaggedSlotOffset slotOffset_
;
246 // If slots need to be grown, this is the new capacity we need.
247 uint16_t newCapacity_
= 0;
249 // This entry is valid iff the generation matches the cache's generation.
250 uint16_t generation_
= 0;
252 friend class MegamorphicSetPropCache
;
255 void init(Shape
* beforeShape
, Shape
* afterShape
, PropertyKey key
,
256 uint16_t generation
, TaggedSlotOffset slotOffset
,
257 uint16_t newCapacity
) {
258 beforeShape_
= beforeShape
;
259 afterShape_
= afterShape
;
261 slotOffset_
= slotOffset
;
262 newCapacity_
= newCapacity
;
263 generation_
= generation
;
265 TaggedSlotOffset
slotOffset() const { return slotOffset_
; }
266 Shape
* afterShape() const { return afterShape_
; }
268 static constexpr size_t offsetOfShape() {
269 return offsetof(MegamorphicSetPropCacheEntry
, beforeShape_
);
271 static constexpr size_t offsetOfAfterShape() {
272 return offsetof(MegamorphicSetPropCacheEntry
, afterShape_
);
275 static constexpr size_t offsetOfKey() {
276 return offsetof(MegamorphicSetPropCacheEntry
, key_
);
279 static constexpr size_t offsetOfNewCapacity() {
280 return offsetof(MegamorphicSetPropCacheEntry
, newCapacity_
);
283 static constexpr size_t offsetOfGeneration() {
284 return offsetof(MegamorphicSetPropCacheEntry
, generation_
);
287 static constexpr size_t offsetOfSlotOffset() {
288 return offsetof(MegamorphicSetPropCacheEntry
, slotOffset_
);
292 class MegamorphicSetPropCache
{
294 using Entry
= MegamorphicSetPropCacheEntry
;
295 // We can get more hits if we increase this, but this seems to be around
296 // the sweet spot where we are getting most of the hits we would get with
297 // an infinitely sized cache
298 static constexpr size_t NumEntries
= 1024;
299 static constexpr uint8_t ShapeHashShift1
=
300 mozilla::tl::FloorLog2
<alignof(Shape
)>::value
;
301 static constexpr uint8_t ShapeHashShift2
=
302 ShapeHashShift1
+ mozilla::tl::FloorLog2
<NumEntries
>::value
;
304 static_assert(mozilla::IsPowerOfTwo(alignof(Shape
)) &&
305 mozilla::IsPowerOfTwo(NumEntries
),
306 "FloorLog2 is exact because alignof(Shape) and NumEntries are "
307 "both powers of two");
310 mozilla::Array
<Entry
, NumEntries
> entries_
;
312 // Generation counter used to invalidate all entries.
313 uint16_t generation_
= 0;
315 Entry
& getEntry(Shape
* beforeShape
, PropertyKey key
) {
316 static_assert(mozilla::IsPowerOfTwo(NumEntries
),
317 "NumEntries must be a power-of-two for fast modulo");
318 uintptr_t hash
= uintptr_t(beforeShape
) >> ShapeHashShift1
;
319 hash
^= uintptr_t(beforeShape
) >> ShapeHashShift2
;
320 hash
+= HashAtomOrSymbolPropertyKey(key
);
321 return entries_
[hash
% NumEntries
];
325 void bumpGeneration() {
327 if (generation_
== 0) {
328 // Generation overflowed. Invalidate the whole cache.
329 for (size_t i
= 0; i
< NumEntries
; i
++) {
330 entries_
[i
].beforeShape_
= nullptr;
334 void set(Shape
* beforeShape
, Shape
* afterShape
, PropertyKey key
,
335 TaggedSlotOffset slotOffset
, uint32_t newCapacity
) {
336 uint16_t newSlots
= (uint16_t)newCapacity
;
337 if (newSlots
!= newCapacity
) {
340 Entry
& entry
= getEntry(beforeShape
, key
);
341 entry
.init(beforeShape
, afterShape
, key
, generation_
, slotOffset
, newSlots
);
345 bool lookup(Shape
* beforeShape
, PropertyKey key
, Entry
** entryp
) {
346 Entry
& entry
= getEntry(beforeShape
, key
);
348 return (entry
.beforeShape_
== beforeShape
&& entry
.key_
== key
&&
349 entry
.generation_
== generation_
);
353 static constexpr size_t offsetOfEntries() {
354 return offsetof(MegamorphicSetPropCache
, entries_
);
357 static constexpr size_t offsetOfGeneration() {
358 return offsetof(MegamorphicSetPropCache
, generation_
);
362 // Cache for AtomizeString, mapping JSString* or JS::Latin1Char* to the
363 // corresponding JSAtom*. The cache has three different optimizations:
365 // * The two most recent lookups are cached. This has a hit rate of 30-65% on
366 // typical web workloads.
368 // * MruCache is used for short JS::Latin1Char strings.
370 // * For longer strings, there's also a JSLinearString* => JSAtom* HashMap,
371 // because hashing the string characters repeatedly can be slow.
372 // This map is also used by nursery GC to de-duplicate strings to atoms.
374 // This cache is purged on minor and major GC.
375 class StringToAtomCache
{
378 JSString
* string
= nullptr;
379 JSAtom
* atom
= nullptr;
381 static constexpr size_t offsetOfString() {
382 return offsetof(LastLookup
, string
);
385 static constexpr size_t offsetOfAtom() {
386 return offsetof(LastLookup
, atom
);
389 static constexpr size_t NumLastLookups
= 2;
391 struct AtomTableKey
{
392 explicit AtomTableKey(const JS::Latin1Char
* str
, size_t len
)
393 : string_(str
), length_(len
) {
394 hash_
= mozilla::HashString(string_
, length_
);
397 const JS::Latin1Char
* string_
;
404 : public mozilla::MruCache
<AtomTableKey
, JSAtom
*, RopeAtomCache
> {
405 static HashNumber
Hash(const AtomTableKey
& key
) { return key
.hash_
; }
406 static bool Match(const AtomTableKey
& key
, const JSAtom
* val
) {
407 JS::AutoCheckCannotGC nogc
;
408 return val
->length() == key
.length_
&&
409 EqualChars(key
.string_
, val
->latin1Chars(nogc
), key
.length_
);
413 HashMap
<JSString
*, JSAtom
*, PointerHasher
<JSString
*>, SystemAllocPolicy
>;
415 mozilla::Array
<LastLookup
, NumLastLookups
> lastLookups_
;
416 RopeAtomCache ropeCharCache_
;
419 // Don't use the HashMap for short strings. Hashing them is less expensive.
420 // But the length needs to long enough to cover common identifiers in React.
421 static constexpr size_t MinStringLength
= 39;
423 JSAtom
* lookupInMap(JSString
* s
) const {
424 MOZ_ASSERT(s
->inStringToAtomCache());
425 MOZ_ASSERT(s
->length() >= MinStringLength
);
427 auto p
= map_
.lookup(s
);
428 JSAtom
* atom
= p
? p
->value() : nullptr;
432 MOZ_ALWAYS_INLINE JSAtom
* lookup(JSString
* s
) const {
433 MOZ_ASSERT(!s
->isAtom());
434 for (const LastLookup
& entry
: lastLookups_
) {
435 if (entry
.string
== s
) {
440 if (!s
->inStringToAtomCache()) {
441 MOZ_ASSERT(!map_
.lookup(s
));
445 return lookupInMap(s
);
448 MOZ_ALWAYS_INLINE JSAtom
* lookupWithRopeChars(
449 const JS::Latin1Char
* str
, size_t len
,
450 mozilla::Maybe
<AtomTableKey
>& key
) {
451 MOZ_ASSERT(len
< MinStringLength
);
452 key
.emplace(str
, len
);
453 if (auto p
= ropeCharCache_
.Lookup(key
.value())) {
459 static constexpr size_t offsetOfLastLookups() {
460 return offsetof(StringToAtomCache
, lastLookups_
);
463 void maybePut(JSString
* s
, JSAtom
* atom
, mozilla::Maybe
<AtomTableKey
>& key
) {
465 ropeCharCache_
.Put(key
.value(), atom
);
468 for (size_t i
= NumLastLookups
- 1; i
> 0; i
--) {
469 lastLookups_
[i
] = lastLookups_
[i
- 1];
471 lastLookups_
[0].string
= s
;
472 lastLookups_
[0].atom
= atom
;
474 if (s
->length() < MinStringLength
) {
477 if (!map_
.putNew(s
, atom
)) {
480 s
->setInStringToAtomCache();
484 map_
.clearAndCompact();
485 for (LastLookup
& entry
: lastLookups_
) {
486 entry
.string
= nullptr;
487 entry
.atom
= nullptr;
490 ropeCharCache_
.Clear();
494 class RuntimeCaches
{
496 MegamorphicCache megamorphicCache
;
497 UniquePtr
<MegamorphicSetPropCache
> megamorphicSetPropCache
;
498 UncompressedSourceCache uncompressedSourceCache
;
500 StringToAtomCache stringToAtomCache
;
502 // Delazification: Cache binding for runtime objects which are used during
503 // delazification to quickly resolve NameLocation of bindings without linearly
504 // iterating over the list of bindings.
505 frontend::RuntimeScopeBindingCache scopeCache
;
507 void sweepAfterMinorGC(JSTracer
* trc
) { evalCache
.traceWeak(trc
); }
508 #ifdef JSGC_HASH_TABLE_CHECKS
509 void checkEvalCacheAfterMinorGC();
512 void purgeForCompaction() {
514 stringToAtomCache
.purge();
515 megamorphicCache
.bumpGeneration();
516 if (megamorphicSetPropCache
) {
517 // MegamorphicSetPropCache can be null if we failed out of
518 // JSRuntime::init. We will then try to destroy the runtime which will
519 // do a GC and land us here.
520 megamorphicSetPropCache
->bumpGeneration();
525 void purgeStencils() {
526 DelazificationCache
& cache
= DelazificationCache::getSingleton();
527 cache
.clearAndDisable();
531 purgeForCompaction();
532 uncompressedSourceCache
.purge();
539 #endif /* vm_Caches_h */