/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/JitcodeMap.h"

#include "mozilla/ArrayUtils.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/Maybe.h"
#include "mozilla/ScopeExit.h"

#include "gc/Marking.h"
#include "jit/BaselineJIT.h"
#include "jit/InlineScriptTree.h"
#include "jit/JitRuntime.h"
#include "jit/JitSpewer.h"
#include "js/Vector.h"
#include "vm/BytecodeLocation.h"  // for BytecodeLocation
#include "vm/GeckoProfiler.h"

#include "vm/GeckoProfiler-inl.h"
#include "vm/JSScript-inl.h"

using mozilla::Maybe;

namespace js {
namespace jit {
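// Map a native code address inside |entry| to the region entry covering it:
// convert the pointer into an offset from the entry's start address, then
// look that offset up in the entry's compact region table.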
static inline JitcodeRegionEntry RegionAtAddr(const IonEntry& entry, void* ptr,
                                              uint32_t* ptrOffset) {
  MOZ_ASSERT(entry.containsPointer(ptr));
  *ptrOffset = reinterpret_cast<uint8_t*>(ptr) -
               reinterpret_cast<uint8_t*>(entry.nativeStartAddr());

  uint32_t regionIdx = entry.regionTable()->findRegionEntry(*ptrOffset);
  MOZ_ASSERT(regionIdx < entry.regionTable()->numRegions());

  return entry.regionTable()->regionEntry(regionIdx);
}
void* IonEntry::canonicalNativeAddrFor(void* ptr) const {
  uint32_t ptrOffset;
  JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
  return (void*)(((uint8_t*)nativeStartAddr()) + region.nativeOffset());
}
bool IonEntry::callStackAtAddr(void* ptr, BytecodeLocationVector& results,
                               uint32_t* depth) const {
  uint32_t ptrOffset;
  JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
  *depth = region.scriptDepth();

  JitcodeRegionEntry::ScriptPcIterator locationIter =
      region.scriptPcIterator();
  MOZ_ASSERT(locationIter.hasMore());
  bool firstFrame = true;
  while (locationIter.hasMore()) {
    uint32_t scriptIdx, pcOffset;
    locationIter.readNext(&scriptIdx, &pcOffset);
    // For the first entry pushed (innermost frame), the pcOffset is obtained
    // from the delta-run encodings.
    if (firstFrame) {
      pcOffset = region.findPcOffset(ptrOffset, pcOffset);
      firstFrame = false;
    }

    JSScript* script = getScript(scriptIdx);
    jsbytecode* pc = script->offsetToPC(pcOffset);
    if (!results.append(BytecodeLocation(script, pc))) {
      return false;
    }
  }

  return true;
}
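// Variant of the above that hands out profiler label strings instead of
// bytecode locations. Returns the number of frames written, at most
// |maxResults|.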
uint32_t IonEntry::callStackAtAddr(void* ptr, const char** results,
                                   uint32_t maxResults) const {
  MOZ_ASSERT(maxResults >= 1);

  uint32_t ptrOffset;
  JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);

  JitcodeRegionEntry::ScriptPcIterator locationIter =
      region.scriptPcIterator();
  MOZ_ASSERT(locationIter.hasMore());
  uint32_t count = 0;
  while (locationIter.hasMore()) {
    uint32_t scriptIdx, pcOffset;

    locationIter.readNext(&scriptIdx, &pcOffset);
    MOZ_ASSERT(getStr(scriptIdx));

    results[count++] = getStr(scriptIdx);
    if (count >= maxResults) {
      break;
    }
  }

  return count;
}
uint64_t IonEntry::lookupRealmID(void* ptr) const {
  uint32_t ptrOffset;
  JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
  JitcodeRegionEntry::ScriptPcIterator locationIter =
      region.scriptPcIterator();
  MOZ_ASSERT(locationIter.hasMore());
  uint32_t scriptIdx, pcOffset;
  locationIter.readNext(&scriptIdx, &pcOffset);

  JSScript* script = getScript(scriptIdx);
  return script->realm()->creationOptions().profilerRealmID();
}
IonEntry::~IonEntry() {
  // The region table is stored at the tail of the compacted data,
  // which means the start of the region table is a pointer to
  // the _middle_ of the memory space allocated for it.
  //
  // When freeing it, obtain the payload start pointer first.
  MOZ_ASSERT(regionTable_);
  js_free((void*)(regionTable_->payloadStart()));
  regionTable_ = nullptr;
}
static IonEntry& IonEntryForIonIC(JSRuntime* rt, const IonICEntry* icEntry) {
  // The table must have an IonEntry for the IC's rejoin address.
  auto* table = rt->jitRuntime()->getJitcodeGlobalTable();
  auto* entry = table->lookup(icEntry->rejoinAddr());
  MOZ_ASSERT(entry);
  MOZ_RELEASE_ASSERT(entry->isIon());
  return entry->asIon();
}
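// The IonICEntry methods below delegate to the IonEntry covering the IC's
// rejoin address; the IC stub itself carries no script information.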
void* IonICEntry::canonicalNativeAddrFor(void* ptr) const { return ptr; }

bool IonICEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
                                 BytecodeLocationVector& results,
                                 uint32_t* depth) const {
  const IonEntry& entry = IonEntryForIonIC(rt, this);
  return entry.callStackAtAddr(rejoinAddr(), results, depth);
}

uint32_t IonICEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
                                     const char** results,
                                     uint32_t maxResults) const {
  const IonEntry& entry = IonEntryForIonIC(rt, this);
  return entry.callStackAtAddr(rejoinAddr(), results, maxResults);
}

uint64_t IonICEntry::lookupRealmID(JSRuntime* rt, void* ptr) const {
  const IonEntry& entry = IonEntryForIonIC(rt, this);
  return entry.lookupRealmID(rejoinAddr());
}
void* BaselineEntry::canonicalNativeAddrFor(void* ptr) const {
  // TODO: We can't yet normalize Baseline addresses until we unify
  // BaselineScript's PCMappingEntries with JitcodeGlobalTable.
  return ptr;
}

bool BaselineEntry::callStackAtAddr(void* ptr, BytecodeLocationVector& results,
                                    uint32_t* depth) const {
  MOZ_ASSERT(containsPointer(ptr));
  MOZ_ASSERT(script_->hasBaselineScript());

  uint8_t* addr = reinterpret_cast<uint8_t*>(ptr);
  jsbytecode* pc =
      script_->baselineScript()->approximatePcForNativeAddress(script_, addr);
  if (!results.append(BytecodeLocation(script_, pc))) {
    return false;
  }

  *depth = 1;

  return true;
}

uint32_t BaselineEntry::callStackAtAddr(void* ptr, const char** results,
                                        uint32_t maxResults) const {
  MOZ_ASSERT(containsPointer(ptr));
  MOZ_ASSERT(maxResults >= 1);

  results[0] = str();
  return 1;
}

uint64_t BaselineEntry::lookupRealmID() const {
  return script_->realm()->creationOptions().profilerRealmID();
}
void* BaselineInterpreterEntry::canonicalNativeAddrFor(void* ptr) const {
  return ptr;
}

bool BaselineInterpreterEntry::callStackAtAddr(void* ptr,
                                               BytecodeLocationVector& results,
                                               uint32_t* depth) const {
  MOZ_CRASH("shouldn't be called for BaselineInterpreter entries");
}

uint32_t BaselineInterpreterEntry::callStackAtAddr(void* ptr,
                                                   const char** results,
                                                   uint32_t maxResults) const {
  MOZ_CRASH("shouldn't be called for BaselineInterpreter entries");
}

uint64_t BaselineInterpreterEntry::lookupRealmID() const {
  MOZ_CRASH("shouldn't be called for BaselineInterpreter entries");
}
const JitcodeGlobalEntry* JitcodeGlobalTable::lookupForSampler(
    void* ptr, JSRuntime* rt, uint64_t samplePosInBuffer) {
  JitcodeGlobalEntry* entry = lookupInternal(ptr);
  if (!entry) {
    return nullptr;
  }

  entry->setSamplePositionInBuffer(samplePosInBuffer);

  // IonIC entries must keep their corresponding Ion entries alive.
  if (entry->isIonIC()) {
    IonEntry& ionEntry = IonEntryForIonIC(rt, &entry->asIonIC());
    ionEntry.setSamplePositionInBuffer(samplePosInBuffer);
  }

  // JitcodeGlobalEntries are marked at the end of the mark phase. A read
  // barrier is not needed. Any JS frames sampled during the sweep phase of
  // the GC must be on stack, and on-stack frames must already be marked at
  // the beginning of the sweep phase. It's not possible to assert this here
  // as we may be off main thread when called from the gecko profiler.

  return entry;
}

JitcodeGlobalEntry* JitcodeGlobalTable::lookupInternal(void* ptr) {
  // Search for an entry containing the one-byte range starting at |ptr|.
  JitCodeRange range(ptr, static_cast<uint8_t*>(ptr) + 1);

  if (JitCodeRange** entry = tree_.maybeLookup(&range)) {
    MOZ_ASSERT((*entry)->containsPointer(ptr));
    return static_cast<JitcodeGlobalEntry*>(*entry);
  }

  return nullptr;
}
bool JitcodeGlobalTable::addEntry(UniqueJitcodeGlobalEntry entry) {
  MOZ_ASSERT(entry->isIon() || entry->isIonIC() || entry->isBaseline() ||
             entry->isBaselineInterpreter() || entry->isDummy());

  // Assert the new entry does not have a code range that's equal to (or
  // contained in) one of the existing entries, because that would confuse the
  // lookup tree.
  MOZ_ASSERT(!tree_.maybeLookup(entry.get()));

  // Suppress profiler sampling while data structures are being mutated.
  AutoSuppressProfilerSampling suppressSampling(TlsContext.get());

  if (!entries_.append(std::move(entry))) {
    return false;
  }
  if (!tree_.insert(entries_.back().get())) {
    entries_.popBack();
    return false;
  }

  return true;
}
void JitcodeGlobalTable::setAllEntriesAsExpired() {
  AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
  for (EntryVector::Range r(entries_.all()); !r.empty(); r.popFront()) {
    auto& entry = r.front();
    entry->setAsExpired();
  }
}
bool JitcodeGlobalTable::markIteratively(GCMarker* marker) {
  // JitcodeGlobalTable must keep entries that are in the sampler buffer
  // alive. This conditionality is akin to holding the entries weakly.
  //
  // If this table were marked at the beginning of the mark phase, then
  // sampling would require a read barrier for sampling in between
  // incremental GC slices. However, invoking read barriers from the sampler
  // is wildly unsafe. The sampler may run at any time, including during GC
  // itself.
  //
  // Instead, JitcodeGlobalTable is marked at the beginning of the sweep
  // phase, along with weak references. The key assumption is the
  // following. At the beginning of the sweep phase, any JS frames that the
  // sampler may put in its buffer that are not already there at the
  // beginning of the mark phase must have already been marked, as either 1)
  // the frame was on-stack at the beginning of the sweep phase, or 2) the
  // frame was pushed between incremental sweep slices. Frames of case 1)
  // are already marked. Frames of case 2) must have been reachable to have
  // been newly pushed, and thus are already marked.
  //
  // The approach above obviates the need for read barriers. The assumption
  // above is checked in JitcodeGlobalTable::lookupForSampler.

  MOZ_ASSERT(!JS::RuntimeHeapIsMinorCollecting());

  AutoSuppressProfilerSampling suppressSampling(TlsContext.get());

  // If the profiler is off, rangeStart will be Nothing() and all entries are
  // considered to be expired.
  Maybe<uint64_t> rangeStart =
      marker->runtime()->profilerSampleBufferRangeStart();

  bool markedAny = false;
  for (EntryVector::Range r(entries_.all()); !r.empty(); r.popFront()) {
    auto& entry = r.front();

    // If an entry is not sampled, reset its buffer position to the invalid
    // position, and conditionally mark the rest of the entry if its
    // JitCode is not already marked. This conditional marking ensures
    // that so long as the JitCode *may* be sampled, we keep any
    // information that may be handed out to the sampler, like tracked
    // types used by optimizations and scripts used for pc to line number
    // mapping, alive as well.
    if (!rangeStart || !entry->isSampled(*rangeStart)) {
      entry->setAsExpired();
      if (!entry->isJitcodeMarkedFromAnyThread(marker->runtime())) {
        continue;
      }
    }

    // The table is runtime-wide. Not all zones may be participating in
    // the GC.
    if (!entry->zone()->isCollecting() || entry->zone()->isGCFinished()) {
      continue;
    }

    markedAny |= entry->trace(marker->tracer());
  }

  return markedAny;
}
void JitcodeGlobalTable::traceWeak(JSRuntime* rt, JSTracer* trc) {
  AutoSuppressProfilerSampling suppressSampling(
      rt->mainContextFromOwnThread());

  entries_.eraseIf([&](auto& entry) {
    if (!entry->zone()->isCollecting() || entry->zone()->isGCFinished()) {
      return false;
    }

    if (TraceManuallyBarrieredWeakEdge(
            trc, entry->jitcodePtr(),
            "JitcodeGlobalTable::JitcodeGlobalEntry::jitcode_")) {
      entry->traceWeak(trc);
      return false;
    }

    // We have to remove the entry.
    Maybe<uint64_t> rangeStart = rt->profilerSampleBufferRangeStart();
    MOZ_ASSERT_IF(rangeStart, !entry->isSampled(*rangeStart));

    tree_.remove(entry.get());
    return true;
  });

  MOZ_ASSERT(tree_.empty() == entries_.empty());
}
bool JitcodeGlobalEntry::traceJitcode(JSTracer* trc) {
  if (!IsMarkedUnbarriered(trc->runtime(), jitcode_)) {
    TraceManuallyBarrieredEdge(trc, &jitcode_,
                               "jitcodglobaltable-baseentry-jitcode");
    return true;
  }
  return false;
}

bool JitcodeGlobalEntry::isJitcodeMarkedFromAnyThread(JSRuntime* rt) {
  return IsMarkedUnbarriered(rt, jitcode_);
}
bool BaselineEntry::trace(JSTracer* trc) {
  if (!IsMarkedUnbarriered(trc->runtime(), script_)) {
    TraceManuallyBarrieredEdge(trc, &script_,
                               "jitcodeglobaltable-baselineentry-script");
    return true;
  }
  return false;
}

void BaselineEntry::traceWeak(JSTracer* trc) {
  MOZ_ALWAYS_TRUE(
      TraceManuallyBarrieredWeakEdge(trc, &script_, "BaselineEntry::script_"));
}
bool IonEntry::trace(JSTracer* trc) {
  bool tracedAny = false;

  JSRuntime* rt = trc->runtime();
  for (auto& pair : scriptList_) {
    if (!IsMarkedUnbarriered(rt, pair.script)) {
      TraceManuallyBarrieredEdge(trc, &pair.script,
                                 "jitcodeglobaltable-ionentry-script");
      tracedAny = true;
    }
  }

  return tracedAny;
}

void IonEntry::traceWeak(JSTracer* trc) {
  for (auto& pair : scriptList_) {
    JSScript** scriptp = &pair.script;
    MOZ_ALWAYS_TRUE(
        TraceManuallyBarrieredWeakEdge(trc, scriptp, "IonEntry script"));
  }
}
bool IonICEntry::trace(JSTracer* trc) {
  IonEntry& entry = IonEntryForIonIC(trc->runtime(), this);
  return entry.trace(trc);
}

void IonICEntry::traceWeak(JSTracer* trc) {
  IonEntry& entry = IonEntryForIonIC(trc->runtime(), this);
  entry.traceWeak(trc);
}
[[nodiscard]] bool JitcodeGlobalEntry::callStackAtAddr(
    JSRuntime* rt, void* ptr, BytecodeLocationVector& results,
    uint32_t* depth) const {
  switch (kind()) {
    case Kind::Ion:
      return asIon().callStackAtAddr(ptr, results, depth);
    case Kind::IonIC:
      return asIonIC().callStackAtAddr(rt, ptr, results, depth);
    case Kind::Baseline:
      return asBaseline().callStackAtAddr(ptr, results, depth);
    case Kind::BaselineInterpreter:
      return asBaselineInterpreter().callStackAtAddr(ptr, results, depth);
    case Kind::Dummy:
      return asDummy().callStackAtAddr(rt, ptr, results, depth);
  }
  MOZ_CRASH("Invalid kind");
}
uint32_t JitcodeGlobalEntry::callStackAtAddr(JSRuntime* rt, void* ptr,
                                             const char** results,
                                             uint32_t maxResults) const {
  switch (kind()) {
    case Kind::Ion:
      return asIon().callStackAtAddr(ptr, results, maxResults);
    case Kind::IonIC:
      return asIonIC().callStackAtAddr(rt, ptr, results, maxResults);
    case Kind::Baseline:
      return asBaseline().callStackAtAddr(ptr, results, maxResults);
    case Kind::BaselineInterpreter:
      return asBaselineInterpreter().callStackAtAddr(ptr, results, maxResults);
    case Kind::Dummy:
      return asDummy().callStackAtAddr(rt, ptr, results, maxResults);
  }
  MOZ_CRASH("Invalid kind");
}
uint64_t JitcodeGlobalEntry::lookupRealmID(JSRuntime* rt, void* ptr) const {
  switch (kind()) {
    case Kind::Ion:
      return asIon().lookupRealmID(ptr);
    case Kind::IonIC:
      return asIonIC().lookupRealmID(rt, ptr);
    case Kind::Baseline:
      return asBaseline().lookupRealmID();
    case Kind::Dummy:
      return asDummy().lookupRealmID();
    case Kind::BaselineInterpreter:
      break;
  }
  MOZ_CRASH("Invalid kind");
}
bool JitcodeGlobalEntry::trace(JSTracer* trc) {
  bool tracedAny = traceJitcode(trc);
  switch (kind()) {
    case Kind::Ion:
      tracedAny |= asIon().trace(trc);
      break;
    case Kind::IonIC:
      tracedAny |= asIonIC().trace(trc);
      break;
    case Kind::Baseline:
      tracedAny |= asBaseline().trace(trc);
      break;
    case Kind::BaselineInterpreter:
    case Kind::Dummy:
      break;
  }
  return tracedAny;
}
void JitcodeGlobalEntry::traceWeak(JSTracer* trc) {
  switch (kind()) {
    case Kind::Ion:
      asIon().traceWeak(trc);
      break;
    case Kind::IonIC:
      asIonIC().traceWeak(trc);
      break;
    case Kind::Baseline:
      asBaseline().traceWeak(trc);
      break;
    case Kind::BaselineInterpreter:
    case Kind::Dummy:
      break;
  }
}
void* JitcodeGlobalEntry::canonicalNativeAddrFor(JSRuntime* rt,
                                                 void* ptr) const {
  switch (kind()) {
    case Kind::Ion:
      return asIon().canonicalNativeAddrFor(ptr);
    case Kind::IonIC:
      return asIonIC().canonicalNativeAddrFor(ptr);
    case Kind::Baseline:
      return asBaseline().canonicalNativeAddrFor(ptr);
    case Kind::Dummy:
      return asDummy().canonicalNativeAddrFor(rt, ptr);
    case Kind::BaselineInterpreter:
      break;
  }
  MOZ_CRASH("Invalid kind");
}
void JitcodeGlobalEntry::DestroyPolicy::operator()(JitcodeGlobalEntry* entry) {
  switch (entry->kind()) {
    case JitcodeGlobalEntry::Kind::Ion:
      js_delete(&entry->asIon());
      break;
    case JitcodeGlobalEntry::Kind::IonIC:
      js_delete(&entry->asIonIC());
      break;
    case JitcodeGlobalEntry::Kind::Baseline:
      js_delete(&entry->asBaseline());
      break;
    case JitcodeGlobalEntry::Kind::BaselineInterpreter:
      js_delete(&entry->asBaselineInterpreter());
      break;
    case JitcodeGlobalEntry::Kind::Dummy:
      js_delete(&entry->asDummy());
      break;
  }
}
void JitcodeRegionEntry::WriteHead(CompactBufferWriter& writer,
                                   uint32_t nativeOffset, uint8_t scriptDepth) {
  writer.writeUnsigned(nativeOffset);
  writer.writeByte(scriptDepth);
}

void JitcodeRegionEntry::ReadHead(CompactBufferReader& reader,
                                  uint32_t* nativeOffset,
                                  uint8_t* scriptDepth) {
  *nativeOffset = reader.readUnsigned();
  *scriptDepth = reader.readByte();
}

void JitcodeRegionEntry::WriteScriptPc(CompactBufferWriter& writer,
                                       uint32_t scriptIdx, uint32_t pcOffset) {
  writer.writeUnsigned(scriptIdx);
  writer.writeUnsigned(pcOffset);
}

void JitcodeRegionEntry::ReadScriptPc(CompactBufferReader& reader,
                                      uint32_t* scriptIdx, uint32_t* pcOffset) {
  *scriptIdx = reader.readUnsigned();
  *pcOffset = reader.readUnsigned();
}
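// The delta encodings below pack a (nativeDelta, pcDelta) pair into one to
// four bytes. The low bits of the first byte select the format; in the bit
// diagrams at each branch, N marks nativeDelta bits and B marks pcDelta bits.
//
// Worked example for the 1-byte NNNN-BBB0 format (a sketch; the exact ENC1_*
// shift/mask constants live in the header and are assumed to match the
// diagram): nativeDelta=5 (0101) and pcDelta=3 (011) pack as the byte
// 0101'011'0 == 0x56, and ReadDelta simply shifts and masks the two fields
// back out.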
void JitcodeRegionEntry::WriteDelta(CompactBufferWriter& writer,
                                    uint32_t nativeDelta, int32_t pcDelta) {
  if (pcDelta >= 0) {
    // 1 and 2-byte formats possible.

    //  NNNN-BBB0
    if (pcDelta <= ENC1_PC_DELTA_MAX && nativeDelta <= ENC1_NATIVE_DELTA_MAX) {
      uint8_t encVal = ENC1_MASK_VAL | (pcDelta << ENC1_PC_DELTA_SHIFT) |
                       (nativeDelta << ENC1_NATIVE_DELTA_SHIFT);
      writer.writeByte(encVal);
      return;
    }

    //  NNNN-NNNN BBBB-BB01
    if (pcDelta <= ENC2_PC_DELTA_MAX && nativeDelta <= ENC2_NATIVE_DELTA_MAX) {
      uint16_t encVal = ENC2_MASK_VAL | (pcDelta << ENC2_PC_DELTA_SHIFT) |
                        (nativeDelta << ENC2_NATIVE_DELTA_SHIFT);
      writer.writeByte(encVal & 0xff);
      writer.writeByte((encVal >> 8) & 0xff);
      return;
    }
  }

  //  NNNN-NNNN NNNB-BBBB BBBB-B011
  if (pcDelta >= ENC3_PC_DELTA_MIN && pcDelta <= ENC3_PC_DELTA_MAX &&
      nativeDelta <= ENC3_NATIVE_DELTA_MAX) {
    uint32_t encVal =
        ENC3_MASK_VAL |
        ((uint32_t(pcDelta) << ENC3_PC_DELTA_SHIFT) & ENC3_PC_DELTA_MASK) |
        (nativeDelta << ENC3_NATIVE_DELTA_SHIFT);
    writer.writeByte(encVal & 0xff);
    writer.writeByte((encVal >> 8) & 0xff);
    writer.writeByte((encVal >> 16) & 0xff);
    return;
  }

  //  NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
  if (pcDelta >= ENC4_PC_DELTA_MIN && pcDelta <= ENC4_PC_DELTA_MAX &&
      nativeDelta <= ENC4_NATIVE_DELTA_MAX) {
    uint32_t encVal =
        ENC4_MASK_VAL |
        ((uint32_t(pcDelta) << ENC4_PC_DELTA_SHIFT) & ENC4_PC_DELTA_MASK) |
        (nativeDelta << ENC4_NATIVE_DELTA_SHIFT);
    writer.writeByte(encVal & 0xff);
    writer.writeByte((encVal >> 8) & 0xff);
    writer.writeByte((encVal >> 16) & 0xff);
    writer.writeByte((encVal >> 24) & 0xff);
    return;
  }

  // Should never get here.
  MOZ_CRASH("pcDelta/nativeDelta values are too large to encode.");
}
void JitcodeRegionEntry::ReadDelta(CompactBufferReader& reader,
                                   uint32_t* nativeDelta, int32_t* pcDelta) {
  // NB:
  // It's possible to get nativeDeltas with value 0 in two cases:
  //
  // 1. The last region's run. This is because the region table's start
  // must be 4-byte aligned, and we must insert padding bytes to align the
  // payload section before emitting the table.
  //
  // 2. A zero-offset nativeDelta with a negative pcDelta.
  //
  // So if nativeDelta is zero, then pcDelta must be <= 0.

  //  NNNN-BBB0
  const uint32_t firstByte = reader.readByte();
  if ((firstByte & ENC1_MASK) == ENC1_MASK_VAL) {
    uint32_t encVal = firstByte;
    *nativeDelta = encVal >> ENC1_NATIVE_DELTA_SHIFT;
    *pcDelta = (encVal & ENC1_PC_DELTA_MASK) >> ENC1_PC_DELTA_SHIFT;
    MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
    return;
  }

  //  NNNN-NNNN BBBB-BB01
  const uint32_t secondByte = reader.readByte();
  if ((firstByte & ENC2_MASK) == ENC2_MASK_VAL) {
    uint32_t encVal = firstByte | secondByte << 8;
    *nativeDelta = encVal >> ENC2_NATIVE_DELTA_SHIFT;
    *pcDelta = (encVal & ENC2_PC_DELTA_MASK) >> ENC2_PC_DELTA_SHIFT;
    MOZ_ASSERT(*pcDelta != 0);
    MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
    return;
  }

  //  NNNN-NNNN NNNB-BBBB BBBB-B011
  const uint32_t thirdByte = reader.readByte();
  if ((firstByte & ENC3_MASK) == ENC3_MASK_VAL) {
    uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16;
    *nativeDelta = encVal >> ENC3_NATIVE_DELTA_SHIFT;

    uint32_t pcDeltaU = (encVal & ENC3_PC_DELTA_MASK) >> ENC3_PC_DELTA_SHIFT;
    // Fix sign if necessary.
    if (pcDeltaU > static_cast<uint32_t>(ENC3_PC_DELTA_MAX)) {
      pcDeltaU |= ~ENC3_PC_DELTA_MAX;
    }
    *pcDelta = pcDeltaU;

    MOZ_ASSERT(*pcDelta != 0);
    MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
    return;
  }

  //  NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
  MOZ_ASSERT((firstByte & ENC4_MASK) == ENC4_MASK_VAL);
  const uint32_t fourthByte = reader.readByte();
  uint32_t encVal =
      firstByte | secondByte << 8 | thirdByte << 16 | fourthByte << 24;
  *nativeDelta = encVal >> ENC4_NATIVE_DELTA_SHIFT;

  uint32_t pcDeltaU = (encVal & ENC4_PC_DELTA_MASK) >> ENC4_PC_DELTA_SHIFT;
  // Fix sign if necessary.
  if (pcDeltaU > static_cast<uint32_t>(ENC4_PC_DELTA_MAX)) {
    pcDeltaU |= ~ENC4_PC_DELTA_MAX;
  }
  *pcDelta = pcDeltaU;

  MOZ_ASSERT(*pcDelta != 0);
  MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
}
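// A "run" is a maximal block of consecutive NativeToBytecode entries that
// share the same inline site (the same InlineScriptTree node) and whose
// successive (nativeDelta, pcDelta) pairs stay encodable, capped at
// MAX_RUN_LENGTH entries.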
uint32_t JitcodeRegionEntry::ExpectedRunLength(const NativeToBytecode* entry,
                                               const NativeToBytecode* end) {
  MOZ_ASSERT(entry < end);

  // We always use the first entry, so runLength starts at 1.
  uint32_t runLength = 1;

  uint32_t curNativeOffset = entry->nativeOffset.offset();
  uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);

  for (auto nextEntry = entry + 1; nextEntry != end; nextEntry += 1) {
    // If the next run moves to a different inline site, stop the run.
    if (nextEntry->tree != entry->tree) {
      break;
    }

    uint32_t nextNativeOffset = nextEntry->nativeOffset.offset();
    uint32_t nextBytecodeOffset =
        nextEntry->tree->script()->pcToOffset(nextEntry->pc);
    MOZ_ASSERT(nextNativeOffset >= curNativeOffset);

    uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
    int32_t bytecodeDelta =
        int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);

    // If deltas are too large (very unlikely), stop the run.
    if (!IsDeltaEncodeable(nativeDelta, bytecodeDelta)) {
      break;
    }

    runLength++;

    // If the run has grown to its maximum length, stop the run.
    if (runLength == MAX_RUN_LENGTH) {
      break;
    }

    curNativeOffset = nextNativeOffset;
    curBytecodeOffset = nextBytecodeOffset;
  }

  return runLength;
}
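// Debug-only helper that hex-dumps the bytes appended to the writer since
// the last spew point, so the compact encoding can be inspected in the
// JitSpew_Profiling channel. A no-op stand-in is used when spew support is
// compiled out.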
#ifdef JS_JITSPEW
struct JitcodeMapBufferWriteSpewer {
  CompactBufferWriter* writer;
  uint32_t startPos;

  static const uint32_t DumpMaxBytes = 50;

  explicit JitcodeMapBufferWriteSpewer(CompactBufferWriter& w)
      : writer(&w), startPos(writer->length()) {}

  void spewAndAdvance(const char* name) {
    if (writer->oom()) {
      return;
    }

    uint32_t curPos = writer->length();
    const uint8_t* start = writer->buffer() + startPos;
    const uint8_t* end = writer->buffer() + curPos;
    const char* MAP = "0123456789ABCDEF";
    uint32_t bytes = end - start;

    char buffer[DumpMaxBytes * 3];
    for (uint32_t i = 0; i < bytes; i++) {
      buffer[i * 3] = MAP[(start[i] >> 4) & 0xf];
      buffer[i * 3 + 1] = MAP[(start[i] >> 0) & 0xf];
      buffer[i * 3 + 2] = ' ';
    }
    if (bytes >= DumpMaxBytes) {
      buffer[DumpMaxBytes * 3 - 1] = '\0';
    } else {
      buffer[bytes * 3 - 1] = '\0';
    }

    JitSpew(JitSpew_Profiling, "%s@%d[%d bytes] - %s", name, int(startPos),
            int(bytes), buffer);

    // Move to the end of the current buffer.
    startPos = writer->length();
  }
};
#else   // !JS_JITSPEW
struct JitcodeMapBufferWriteSpewer {
  explicit JitcodeMapBufferWriteSpewer(CompactBufferWriter& w) {}
  void spewAndAdvance(const char* name) {}
};
#endif  // JS_JITSPEW
// Write a run, starting at the given NativeToBytecode entry, into the given
// buffer writer.
bool JitcodeRegionEntry::WriteRun(CompactBufferWriter& writer,
                                  const IonEntry::ScriptList& scriptList,
                                  uint32_t runLength,
                                  const NativeToBytecode* entry) {
  MOZ_ASSERT(runLength > 0);
  MOZ_ASSERT(runLength <= MAX_RUN_LENGTH);

  // Calculate script depth.
  MOZ_ASSERT(entry->tree->depth() <= 0xff);
  uint8_t scriptDepth = entry->tree->depth();
  uint32_t regionNativeOffset = entry->nativeOffset.offset();

  JitcodeMapBufferWriteSpewer spewer(writer);

  // Write the head info.
  JitSpew(JitSpew_Profiling, " Head Info: nativeOffset=%d scriptDepth=%d",
          int(regionNativeOffset), int(scriptDepth));
  WriteHead(writer, regionNativeOffset, scriptDepth);
  spewer.spewAndAdvance(" ");

  // Write each script/pc pair.
  {
    InlineScriptTree* curTree = entry->tree;
    jsbytecode* curPc = entry->pc;
    for (uint8_t i = 0; i < scriptDepth; i++) {
      // Find the index of the script within the list.
      // NB: scriptList is guaranteed to contain curTree->script()
      uint32_t scriptIdx = 0;
      for (; scriptIdx < scriptList.length(); scriptIdx++) {
        if (scriptList[scriptIdx].script == curTree->script()) {
          break;
        }
      }
      MOZ_ASSERT(scriptIdx < scriptList.length());

      uint32_t pcOffset = curTree->script()->pcToOffset(curPc);

      JitSpew(JitSpew_Profiling, " Script/PC %d: scriptIdx=%d pcOffset=%d",
              int(i), int(scriptIdx), int(pcOffset));
      WriteScriptPc(writer, scriptIdx, pcOffset);
      spewer.spewAndAdvance(" ");

      MOZ_ASSERT_IF(i < scriptDepth - 1, curTree->hasCaller());
      curPc = curTree->callerPc();
      curTree = curTree->caller();
    }
  }

  // Start writing runs.
  uint32_t curNativeOffset = entry->nativeOffset.offset();
  uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);

  JitSpew(JitSpew_Profiling,
          " Writing Delta Run from nativeOffset=%d bytecodeOffset=%d",
          int(curNativeOffset), int(curBytecodeOffset));

  // Skip first entry because it is implicit in the header. Start at subsequent
  // entries.
  for (uint32_t i = 1; i < runLength; i++) {
    MOZ_ASSERT(entry[i].tree == entry->tree);

    uint32_t nextNativeOffset = entry[i].nativeOffset.offset();
    uint32_t nextBytecodeOffset =
        entry[i].tree->script()->pcToOffset(entry[i].pc);
    MOZ_ASSERT(nextNativeOffset >= curNativeOffset);

    uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
    int32_t bytecodeDelta =
        int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);
    MOZ_ASSERT(IsDeltaEncodeable(nativeDelta, bytecodeDelta));

    JitSpew(JitSpew_Profiling,
            " RunEntry native: %d-%d [%d] bytecode: %d-%d [%d]",
            int(curNativeOffset), int(nextNativeOffset), int(nativeDelta),
            int(curBytecodeOffset), int(nextBytecodeOffset),
            int(bytecodeDelta));
    WriteDelta(writer, nativeDelta, bytecodeDelta);

    // Spew the bytecode in these ranges.
    if (curBytecodeOffset < nextBytecodeOffset) {
      JitSpewStart(JitSpew_Profiling, " OPS: ");
      uint32_t curBc = curBytecodeOffset;
      while (curBc < nextBytecodeOffset) {
        jsbytecode* pc = entry[i].tree->script()->offsetToPC(curBc);
#ifdef JS_JITSPEW
        JSOp op = JSOp(*pc);
        JitSpewCont(JitSpew_Profiling, "%s ", CodeName(op));
#endif
        curBc += GetBytecodeLength(pc);
      }
      JitSpewFin(JitSpew_Profiling);
    }
    spewer.spewAndAdvance(" ");

    curNativeOffset = nextNativeOffset;
    curBytecodeOffset = nextBytecodeOffset;
  }

  if (writer.oom()) {
    return false;
  }

  return true;
}
void JitcodeRegionEntry::unpack() {
  CompactBufferReader reader(data_, end_);
  ReadHead(reader, &nativeOffset_, &scriptDepth_);
  MOZ_ASSERT(scriptDepth_ > 0);

  scriptPcStack_ = reader.currentPosition();
  // Skip past script/pc stack.
  for (unsigned i = 0; i < scriptDepth_; i++) {
    uint32_t scriptIdx, pcOffset;
    ReadScriptPc(reader, &scriptIdx, &pcOffset);
  }

  deltaRun_ = reader.currentPosition();
}
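// Walk this region's delta run to map |queryNativeOffset| back to a bytecode
// offset, starting from the innermost frame's initial pc offset.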
uint32_t JitcodeRegionEntry::findPcOffset(uint32_t queryNativeOffset,
                                          uint32_t startPcOffset) const {
  DeltaIterator iter = deltaIterator();
  uint32_t curNativeOffset = nativeOffset();
  uint32_t curPcOffset = startPcOffset;
  while (iter.hasMore()) {
    uint32_t nativeDelta;
    int32_t pcDelta;
    iter.readNext(&nativeDelta, &pcDelta);

    // The start address of the next delta-run entry is counted towards
    // the current delta-run entry, because return addresses should
    // associate with the bytecode op prior (the call) not the op after.
    if (queryNativeOffset <= curNativeOffset + nativeDelta) {
      break;
    }
    curNativeOffset += nativeDelta;
    curPcOffset += pcDelta;
  }
  return curPcOffset;
}
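// Example of the closed-at-the-end convention used below: with regions
// starting at native offsets {0, 16, 40}, a query at offset 16 resolves to
// the region starting at 0, because a return address sitting exactly on a
// region boundary belongs to the preceding (calling) region.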
uint32_t JitcodeIonTable::findRegionEntry(uint32_t nativeOffset) const {
  static const uint32_t LINEAR_SEARCH_THRESHOLD = 8;
  uint32_t regions = numRegions();
  MOZ_ASSERT(regions > 0);

  // For small region lists, just search linearly.
  if (regions <= LINEAR_SEARCH_THRESHOLD) {
    JitcodeRegionEntry previousEntry = regionEntry(0);
    for (uint32_t i = 1; i < regions; i++) {
      JitcodeRegionEntry nextEntry = regionEntry(i);
      MOZ_ASSERT(nextEntry.nativeOffset() >= previousEntry.nativeOffset());

      // See note in binary-search code below about why we use '<=' here
      // instead of '<'. Short explanation: regions are closed at their
      // ending addresses, and open at their starting addresses.
      if (nativeOffset <= nextEntry.nativeOffset()) {
        return i - 1;
      }

      previousEntry = nextEntry;
    }
    // If nothing found, assume it falls within last region.
    return regions - 1;
  }

  // For larger ones, binary search the region table.
  uint32_t idx = 0;
  uint32_t count = regions;
  while (count > 1) {
    uint32_t step = count / 2;
    uint32_t mid = idx + step;
    JitcodeRegionEntry midEntry = regionEntry(mid);

    // A region memory range is closed at its ending address, not starting
    // address. This is because the return address for calls must associate
    // with the call's bytecode PC, not the PC of the bytecode operator after
    // the call.
    //
    // So a query is < an entry if the query nativeOffset is <= the start
    // address of the entry, and a query is >= an entry if the query
    // nativeOffset is > the start address of an entry.
    if (nativeOffset <= midEntry.nativeOffset()) {
      // Target entry is below midEntry.
      count = step;
    } else {  // if (nativeOffset > midEntry.nativeOffset())
      // Target entry is at midEntry or above.
      idx = mid;
      count -= step;
    }
  }

  return idx;
}
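// Layout produced by WriteIonTable (offsets growing forward):
//
//   [run 0 payload][run 1 payload]...[padding to 4-byte alignment]
//   [numRegions: uint32_t][backOffset 0: uint32_t]...[backOffset n-1]
//
// Each backOffset is the distance from the start of the table back to the
// start of the corresponding run's payload, which is why the region table
// pointer lands in the middle of the allocation (see ~IonEntry above).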
bool JitcodeIonTable::WriteIonTable(CompactBufferWriter& writer,
                                    const IonEntry::ScriptList& scriptList,
                                    const NativeToBytecode* start,
                                    const NativeToBytecode* end,
                                    uint32_t* tableOffsetOut,
                                    uint32_t* numRegionsOut) {
  MOZ_ASSERT(tableOffsetOut != nullptr);
  MOZ_ASSERT(numRegionsOut != nullptr);
  MOZ_ASSERT(writer.length() == 0);
  MOZ_ASSERT(scriptList.length() > 0);

  JitSpew(JitSpew_Profiling,
          "Writing native to bytecode map for %s:%u:%u (%zu entries)",
          scriptList[0].script->filename(), scriptList[0].script->lineno(),
          scriptList[0].script->column().oneOriginValue(),
          mozilla::PointerRangeSize(start, end));

  JitSpew(JitSpew_Profiling, " ScriptList of size %u",
          unsigned(scriptList.length()));
  for (uint32_t i = 0; i < scriptList.length(); i++) {
    JitSpew(JitSpew_Profiling, " Script %u - %s:%u:%u", i,
            scriptList[i].script->filename(), scriptList[i].script->lineno(),
            scriptList[i].script->column().oneOriginValue());
  }

  // Write out runs first. Keep a vector tracking the positive offsets from
  // payload start to the run.
  const NativeToBytecode* curEntry = start;
  js::Vector<uint32_t, 32, SystemAllocPolicy> runOffsets;

  while (curEntry != end) {
    // Calculate the length of the next run.
    uint32_t runLength = JitcodeRegionEntry::ExpectedRunLength(curEntry, end);
    MOZ_ASSERT(runLength > 0);
    MOZ_ASSERT(runLength <= uintptr_t(end - curEntry));
    JitSpew(JitSpew_Profiling, " Run at entry %d, length %d, buffer offset %d",
            int(curEntry - start), int(runLength), int(writer.length()));

    // Store the offset of the run.
    if (!runOffsets.append(writer.length())) {
      return false;
    }

    // Write the run.
    if (!JitcodeRegionEntry::WriteRun(writer, scriptList, runLength,
                                      curEntry)) {
      return false;
    }

    curEntry += runLength;
  }

  // Done encoding regions. About to start table. Ensure we are aligned to 4
  // bytes since table is composed of uint32_t values.
  uint32_t padding = sizeof(uint32_t) - (writer.length() % sizeof(uint32_t));
  if (padding == sizeof(uint32_t)) {
    padding = 0;
  }
  JitSpew(JitSpew_Profiling, " Padding %d bytes after run @%d", int(padding),
          int(writer.length()));
  for (uint32_t i = 0; i < padding; i++) {
    writer.writeByte(0);
  }

  // Now at start of table.
  uint32_t tableOffset = writer.length();

  // The table being written at this point will be accessed directly via
  // uint32_t pointers, so all writes below use native endianness.

  // Write out numRegions.
  JitSpew(JitSpew_Profiling, " Writing numRuns=%d", int(runOffsets.length()));
  writer.writeNativeEndianUint32_t(runOffsets.length());

  // Write out region offset table. The offsets in |runOffsets| are currently
  // forward offsets from the beginning of the buffer. We convert them to
  // backwards offsets from the start of the table before writing them into
  // their table entries.
  for (uint32_t i = 0; i < runOffsets.length(); i++) {
    JitSpew(JitSpew_Profiling, " Run %d offset=%d backOffset=%d @%d", int(i),
            int(runOffsets[i]), int(tableOffset - runOffsets[i]),
            int(writer.length()));
    writer.writeNativeEndianUint32_t(tableOffset - runOffsets[i]);
  }

  if (writer.oom()) {
    return false;
  }

  *tableOffsetOut = tableOffset;
  *numRegionsOut = runOffsets.length();
  return true;
}

}  // namespace jit
}  // namespace js
JS::ProfiledFrameHandle::ProfiledFrameHandle(JSRuntime* rt,
                                             js::jit::JitcodeGlobalEntry& entry,
                                             void* addr, const char* label,
                                             uint32_t depth)
    : rt_(rt),
      entry_(entry),
      addr_(addr),
      canonicalAddr_(nullptr),
      label_(label),
      depth_(depth) {
  if (!canonicalAddr_) {
    canonicalAddr_ = entry_.canonicalNativeAddrFor(rt_, addr_);
  }
}
JS_PUBLIC_API JS::ProfilingFrameIterator::FrameKind
JS::ProfiledFrameHandle::frameKind() const {
  if (entry_.isBaselineInterpreter()) {
    return JS::ProfilingFrameIterator::Frame_BaselineInterpreter;
  }
  if (entry_.isBaseline()) {
    return JS::ProfilingFrameIterator::Frame_Baseline;
  }
  return JS::ProfilingFrameIterator::Frame_Ion;
}
JS_PUBLIC_API uint64_t JS::ProfiledFrameHandle::realmID() const {
  return entry_.lookupRealmID(rt_, addr_);
}
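// Typical use of the iteration API below (a sketch; |addr| is a sampled
// native address that may or may not fall inside JIT code, and the handle
// accessors named here are assumed from the public profiler API):
//
//   for (JS::ProfiledFrameHandle frame : JS::GetProfiledFrames(cx, addr)) {
//     // frame.label() / frame.realmID() / frame.frameKind() describe one
//     // logical (possibly inlined) frame at |addr|.
//   }
//
// The range is empty when no registered entry contains |addr|.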
JS_PUBLIC_API JS::ProfiledFrameRange JS::GetProfiledFrames(JSContext* cx,
                                                           void* addr) {
  JSRuntime* rt = cx->runtime();
  js::jit::JitcodeGlobalTable* table =
      rt->jitRuntime()->getJitcodeGlobalTable();
  js::jit::JitcodeGlobalEntry* entry = table->lookup(addr);

  ProfiledFrameRange result(rt, addr, entry);

  if (entry) {
    result.depth_ = entry->callStackAtAddr(rt, addr, result.labels_,
                                           MOZ_ARRAY_LENGTH(result.labels_));
  }

  return result;
}
JS::ProfiledFrameHandle JS::ProfiledFrameRange::Iter::operator*() const {
  // The iterator iterates in high depth to low depth order. index_ goes up,
  // and the depth we need to pass to ProfiledFrameHandle goes down.
  uint32_t depth = range_.depth_ - 1 - index_;
  return ProfiledFrameHandle(range_.rt_, *range_.entry_, range_.addr_,
                             range_.labels_[depth], depth);
}