/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/JitcodeMap.h"

#include "mozilla/ArrayUtils.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/Maybe.h"
#include "mozilla/ScopeExit.h"
#include "mozilla/Sprintf.h"

#include "gc/Marking.h"
#include "gc/Statistics.h"
#include "jit/BaselineJIT.h"
#include "jit/JitRealm.h"
#include "jit/JitSpewer.h"
#include "js/Vector.h"
#include "vm/GeckoProfiler.h"

#include "vm/GeckoProfiler-inl.h"
#include "vm/JSScript-inl.h"
#include "vm/TypeInference-inl.h"

using mozilla::Maybe;

namespace js {
namespace jit {

static inline JitcodeRegionEntry RegionAtAddr(
    const JitcodeGlobalEntry::IonEntry& entry, void* ptr,
    uint32_t* ptrOffset) {
  MOZ_ASSERT(entry.containsPointer(ptr));
  *ptrOffset = reinterpret_cast<uint8_t*>(ptr) -
               reinterpret_cast<uint8_t*>(entry.nativeStartAddr());

  uint32_t regionIdx = entry.regionTable()->findRegionEntry(*ptrOffset);
  MOZ_ASSERT(regionIdx < entry.regionTable()->numRegions());

  return entry.regionTable()->regionEntry(regionIdx);
}

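// Worked example (illustrative, not from the original source): if an Ion
// entry spans native addresses [0x1000, 0x1400) and |ptr| is 0x1234, then
// *ptrOffset becomes 0x234 and findRegionEntry(0x234) selects the region
// whose native-offset range covers 0x234.
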
void* JitcodeGlobalEntry::IonEntry::canonicalNativeAddrFor(void* ptr) const {
  uint32_t ptrOffset;
  JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
  return (void*)(((uint8_t*)nativeStartAddr()) + region.nativeOffset());
}

bool JitcodeGlobalEntry::IonEntry::callStackAtAddr(
    void* ptr, BytecodeLocationVector& results, uint32_t* depth) const {
  uint32_t ptrOffset;
  JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
  *depth = region.scriptDepth();

  JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
  MOZ_ASSERT(locationIter.hasMore());
  bool first = true;
  while (locationIter.hasMore()) {
    uint32_t scriptIdx, pcOffset;

    locationIter.readNext(&scriptIdx, &pcOffset);
    // For the first entry pushed (innermost frame), the pcOffset is obtained
    // from the delta-run encodings.
    if (first) {
      pcOffset = region.findPcOffset(ptrOffset, pcOffset);
      first = false;
    }

    JSScript* script = getScript(scriptIdx);
    jsbytecode* pc = script->offsetToPC(pcOffset);
    if (!results.append(BytecodeLocation(script, pc))) {
      return false;
    }
  }

  return true;
}

uint32_t JitcodeGlobalEntry::IonEntry::callStackAtAddr(
    void* ptr, const char** results, uint32_t maxResults) const {
  MOZ_ASSERT(maxResults >= 1);

  uint32_t ptrOffset;
  JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);

  JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
  MOZ_ASSERT(locationIter.hasMore());
  uint32_t count = 0;
  while (locationIter.hasMore()) {
    uint32_t scriptIdx, pcOffset;

    locationIter.readNext(&scriptIdx, &pcOffset);
    MOZ_ASSERT(getStr(scriptIdx));

    results[count++] = getStr(scriptIdx);
    if (count >= maxResults) {
      break;
    }
  }

  return count;
}

uint64_t JitcodeGlobalEntry::IonEntry::lookupRealmID(void* ptr) const {
  uint32_t ptrOffset;
  JitcodeRegionEntry region = RegionAtAddr(*this, ptr, &ptrOffset);
  JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
  MOZ_ASSERT(locationIter.hasMore());
  uint32_t scriptIdx, pcOffset;
  locationIter.readNext(&scriptIdx, &pcOffset);

  JSScript* script = getScript(scriptIdx);
  return script->realm()->creationOptions().profilerRealmID();
}

void JitcodeGlobalEntry::IonEntry::destroy() {
  // The region table is stored at the tail of the compacted data,
  // which means the start of the region table is a pointer to
  // the _middle_ of the memory space allocated for it.
  //
  // When freeing it, obtain the payload start pointer first.
  if (regionTable_) {
    js_free((void*)(regionTable_->payloadStart()));
  }
  regionTable_ = nullptr;

  // Free the scriptList strs.
  for (uint32_t i = 0; i < scriptList_->size; i++) {
    js_free(scriptList_->pairs[i].str);
    scriptList_->pairs[i].str = nullptr;
  }

  // Free the script list
  js_free(scriptList_);
  scriptList_ = nullptr;
}

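// Memory layout sketch (illustrative): the compacted data is one allocation
// with the delta-run payload first and the region table last,
//
//   payloadStart()                       regionTable_
//   |                                    |
//   v                                    v
//   [run 0][run 1]...[run N-1][padding][numRegions][backOffset 0..N-1]
//
// so regionTable_ points into the middle of the buffer, and js_free() must
// be handed payloadStart(), never regionTable_ itself.
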
void* JitcodeGlobalEntry::BaselineEntry::canonicalNativeAddrFor(
    void* ptr) const {
  // TODO: We can't yet normalize Baseline addresses until we unify
  // BaselineScript's PCMappingEntries with JitcodeGlobalTable.
  return ptr;
}

bool JitcodeGlobalEntry::BaselineEntry::callStackAtAddr(
    void* ptr, BytecodeLocationVector& results, uint32_t* depth) const {
  MOZ_ASSERT(containsPointer(ptr));
  MOZ_ASSERT(script_->hasBaselineScript());

  uint8_t* addr = reinterpret_cast<uint8_t*>(ptr);
  jsbytecode* pc =
      script_->baselineScript()->approximatePcForNativeAddress(script_, addr);
  if (!results.append(BytecodeLocation(script_, pc))) {
    return false;
  }

  *depth = 1;

  return true;
}

uint32_t JitcodeGlobalEntry::BaselineEntry::callStackAtAddr(
    void* ptr, const char** results, uint32_t maxResults) const {
  MOZ_ASSERT(containsPointer(ptr));
  MOZ_ASSERT(maxResults >= 1);

  results[0] = str();
  return 1;
}

uint64_t JitcodeGlobalEntry::BaselineEntry::lookupRealmID() const {
  return script_->realm()->creationOptions().profilerRealmID();
}

void JitcodeGlobalEntry::BaselineEntry::destroy() {
  if (!str_) {
    return;
  }
  js_free((void*)str_);
  str_ = nullptr;
}

void* JitcodeGlobalEntry::BaselineInterpreterEntry::canonicalNativeAddrFor(
    void* ptr) const {
  return ptr;
}

bool JitcodeGlobalEntry::BaselineInterpreterEntry::callStackAtAddr(
    void* ptr, BytecodeLocationVector& results, uint32_t* depth) const {
  MOZ_CRASH("shouldn't be called for BaselineInterpreter entries");
}

uint32_t JitcodeGlobalEntry::BaselineInterpreterEntry::callStackAtAddr(
    void* ptr, const char** results, uint32_t maxResults) const {
  MOZ_CRASH("shouldn't be called for BaselineInterpreter entries");
}

uint64_t JitcodeGlobalEntry::BaselineInterpreterEntry::lookupRealmID() const {
  MOZ_CRASH("shouldn't be called for BaselineInterpreter entries");
}

static int ComparePointers(const void* a, const void* b) {
  const uint8_t* a_ptr = reinterpret_cast<const uint8_t*>(a);
  const uint8_t* b_ptr = reinterpret_cast<const uint8_t*>(b);
  if (a_ptr < b_ptr) {
    return -1;
  }
  if (a_ptr > b_ptr) {
    return 1;
  }
  return 0;
}

int JitcodeGlobalEntry::compare(const JitcodeGlobalEntry& ent1,
                                const JitcodeGlobalEntry& ent2) {
  // Both parts of compare cannot be a query.
  MOZ_ASSERT(!(ent1.isQuery() && ent2.isQuery()));

  // Ensure no overlaps for non-query lookups.
  MOZ_ASSERT_IF(!ent1.isQuery() && !ent2.isQuery(), !ent1.overlapsWith(ent2));

  // For two non-query entries, just compare the start addresses.
  if (!ent1.isQuery() && !ent2.isQuery()) {
    return ComparePointers(ent1.nativeStartAddr(), ent2.nativeStartAddr());
  }

  void* ptr = ent1.isQuery() ? ent1.nativeStartAddr() : ent2.nativeStartAddr();
  const JitcodeGlobalEntry& ent = ent1.isQuery() ? ent2 : ent1;
  int flip = ent1.isQuery() ? 1 : -1;

  if (ent.startsBelowPointer(ptr)) {
    if (ent.endsAbovePointer(ptr)) {
      return 0;
    }

    // query ptr > entry
    return flip * 1;
  }

  // query ptr < entry
  return flip * -1;
}

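// Worked example (illustrative): suppose ent1 is a query for ptr=0x1050 and
// ent2 spans [0x1000, 0x1100). Then flip is 1, the entry both starts below
// and ends above the pointer, and compare() returns 0 (a match). If ent2
// instead spanned [0x2000, 0x2100), compare() would return flip * -1 = -1:
// the query sorts below the entry. Swapping the argument order negates the
// result via flip = -1.
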
JitcodeGlobalTable::Enum::Enum(JitcodeGlobalTable& table, JSRuntime* rt)
    : Range(table), rt_(rt), next_(cur_ ? cur_->tower_->next(0) : nullptr) {
  for (int level = JitcodeSkiplistTower::MAX_HEIGHT - 1; level >= 0; level--) {
    prevTower_[level] = nullptr;
  }
}

void JitcodeGlobalTable::Enum::popFront() {
  MOZ_ASSERT(!empty());

  // Did not remove current entry; advance prevTower_.
  if (cur_ != table_.freeEntries_) {
    for (int level = cur_->tower_->height() - 1; level >= 0; level--) {
      JitcodeGlobalEntry* prevTowerEntry = prevTower_[level];

      if (prevTowerEntry) {
        if (prevTowerEntry->tower_->next(level) == cur_) {
          prevTower_[level] = cur_;
        }
      } else {
        prevTower_[level] = table_.startTower_[level];
      }
    }
  }

  cur_ = next_;
  if (!empty()) {
    next_ = cur_->tower_->next(0);
  }
}

void JitcodeGlobalTable::Enum::removeFront() {
  MOZ_ASSERT(!empty());
  table_.releaseEntry(*cur_, prevTower_, rt_);
}

const JitcodeGlobalEntry* JitcodeGlobalTable::lookupForSampler(
    void* ptr, JSRuntime* rt, uint64_t samplePosInBuffer) {
  JitcodeGlobalEntry* entry = lookupInternal(ptr);
  if (!entry) {
    return nullptr;
  }

  entry->setSamplePositionInBuffer(samplePosInBuffer);

  // JitcodeGlobalEntries are marked at the end of the mark phase. A read
  // barrier is not needed. Any JS frames sampled during the sweep phase of
  // the GC must be on stack, and on-stack frames must already be marked at
  // the beginning of the sweep phase. It's not possible to assert this here
  // as we may be off main thread when called from the gecko profiler.

  return entry;
}

JitcodeGlobalEntry* JitcodeGlobalTable::lookupInternal(void* ptr) {
  JitcodeGlobalEntry query = JitcodeGlobalEntry::MakeQuery(ptr);
  JitcodeGlobalEntry* searchTower[JitcodeSkiplistTower::MAX_HEIGHT];
  searchInternal(query, searchTower);

  if (searchTower[0] == nullptr) {
    // Check startTower
    if (startTower_[0] == nullptr) {
      return nullptr;
    }

    int cmp = startTower_[0]->compareTo(query);
    MOZ_ASSERT(cmp >= 0);
    return (cmp == 0) ? startTower_[0] : nullptr;
  }

  JitcodeGlobalEntry* bottom = searchTower[0];
  MOZ_ASSERT(bottom->compareTo(query) < 0);

  JitcodeGlobalEntry* bottomNext = bottom->tower_->next(0);
  if (bottomNext == nullptr) {
    return nullptr;
  }

  int cmp = bottomNext->compareTo(query);
  MOZ_ASSERT(cmp >= 0);
  return (cmp == 0) ? bottomNext : nullptr;
}

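// Search sketch (illustrative): searchInternal() fills searchTower[0] with
// the last entry sorting strictly below the query, so an entry containing
// the query address is always searchTower[0]'s level-0 successor. E.g. for
// entries spanning [0x1000,0x1400), [0x2000,0x2400), [0x3000,0x3400) and a
// query for 0x2080: searchTower[0] is the 0x1000 entry, bottomNext is the
// 0x2000 entry, its compareTo() against the query yields 0, and the 0x2000
// entry is returned.
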
bool JitcodeGlobalTable::addEntry(const JitcodeGlobalEntry& entry) {
  MOZ_ASSERT(entry.isIon() || entry.isBaseline() ||
             entry.isBaselineInterpreter() || entry.isDummy());

  JitcodeGlobalEntry* searchTower[JitcodeSkiplistTower::MAX_HEIGHT];
  searchInternal(entry, searchTower);

  // Allocate a new entry and tower.
  JitcodeSkiplistTower* newTower = allocateTower(generateTowerHeight());
  if (!newTower) {
    return false;
  }

  JitcodeGlobalEntry* newEntry = allocateEntry();
  if (!newEntry) {
    return false;
  }

  *newEntry = entry;
  newEntry->tower_ = newTower;

  // Suppress profiler sampling while skiplist is being mutated.
  AutoSuppressProfilerSampling suppressSampling(TlsContext.get());

  // Link up entry with forward entries taken from tower.
  for (int level = newTower->height() - 1; level >= 0; level--) {
    JitcodeGlobalEntry* searchTowerEntry = searchTower[level];
    if (searchTowerEntry) {
      MOZ_ASSERT(searchTowerEntry->compareTo(*newEntry) < 0);
      JitcodeGlobalEntry* searchTowerNextEntry =
          searchTowerEntry->tower_->next(level);

      MOZ_ASSERT_IF(searchTowerNextEntry,
                    searchTowerNextEntry->compareTo(*newEntry) > 0);

      newTower->setNext(level, searchTowerNextEntry);
      searchTowerEntry->tower_->setNext(level, newEntry);
    } else {
      newTower->setNext(level, startTower_[level]);
      startTower_[level] = newEntry;
    }
  }
  skiplistSize_++;
  // verifySkiplist(); - disabled for release.
  return true;
}

void JitcodeGlobalTable::removeEntry(JitcodeGlobalEntry& entry,
                                     JitcodeGlobalEntry** prevTower) {
  MOZ_ASSERT(!TlsContext.get()->isProfilerSamplingEnabled());

  // Unlink query entry.
  for (int level = entry.tower_->height() - 1; level >= 0; level--) {
    JitcodeGlobalEntry* prevTowerEntry = prevTower[level];
    if (prevTowerEntry) {
      MOZ_ASSERT(prevTowerEntry->tower_->next(level) == &entry);
      prevTowerEntry->tower_->setNext(level, entry.tower_->next(level));
    } else {
      startTower_[level] = entry.tower_->next(level);
    }
  }
  skiplistSize_--;
  // verifySkiplist(); - disabled for release.

  // Entry has been unlinked.
  entry.destroy();
  entry.tower_->addToFreeList(&(freeTowers_[entry.tower_->height() - 1]));
  entry.tower_ = nullptr;
  entry = JitcodeGlobalEntry();
  entry.addToFreeList(&freeEntries_);
}

void JitcodeGlobalTable::releaseEntry(JitcodeGlobalEntry& entry,
                                      JitcodeGlobalEntry** prevTower,
                                      JSRuntime* rt) {
#ifdef DEBUG
  Maybe<uint64_t> rangeStart = rt->profilerSampleBufferRangeStart();
  MOZ_ASSERT_IF(rangeStart, !entry.isSampled(*rangeStart));
#endif
  removeEntry(entry, prevTower);
}

void JitcodeGlobalTable::searchInternal(const JitcodeGlobalEntry& query,
                                        JitcodeGlobalEntry** towerOut) {
  JitcodeGlobalEntry* cur = nullptr;
  for (int level = JitcodeSkiplistTower::MAX_HEIGHT - 1; level >= 0; level--) {
    JitcodeGlobalEntry* entry = searchAtHeight(level, cur, query);
    MOZ_ASSERT_IF(entry == nullptr, cur == nullptr);
    towerOut[level] = entry;
    cur = entry;
  }

  // Validate the resulting tower.
#ifdef DEBUG
  for (int level = JitcodeSkiplistTower::MAX_HEIGHT - 1; level >= 0; level--) {
    if (towerOut[level] == nullptr) {
      // If we got NULL for a given level, then we should have gotten NULL
      // for the level above as well.
      MOZ_ASSERT_IF(unsigned(level) < (JitcodeSkiplistTower::MAX_HEIGHT - 1),
                    towerOut[level + 1] == nullptr);
      continue;
    }

    JitcodeGlobalEntry* cur = towerOut[level];

    // Non-null result at a given level must sort < query.
    MOZ_ASSERT(cur->compareTo(query) < 0);

    // The entry must have a tower height that accommodates level.
    if (!cur->tower_->next(level)) {
      continue;
    }

    JitcodeGlobalEntry* next = cur->tower_->next(level);

    // Next entry must have tower height that accommodates level.
    MOZ_ASSERT(unsigned(level) < next->tower_->height());

    // Next entry must sort >= query.
    MOZ_ASSERT(next->compareTo(query) >= 0);
  }
#endif  // DEBUG
}

JitcodeGlobalEntry* JitcodeGlobalTable::searchAtHeight(
    unsigned level, JitcodeGlobalEntry* start,
    const JitcodeGlobalEntry& query) {
  JitcodeGlobalEntry* cur = start;

  // If starting with nullptr, use the start tower.
  if (start == nullptr) {
    cur = startTower_[level];
    if (cur == nullptr || cur->compareTo(query) >= 0) {
      return nullptr;
    }
  }

  // Keep skipping at |level| until we reach an entry < query whose
  // successor is an entry >= query.
  for (;;) {
    JitcodeGlobalEntry* next = cur->tower_->next(level);
    if (next == nullptr || next->compareTo(query) >= 0) {
      return cur;
    }

    cur = next;
  }
}

unsigned JitcodeGlobalTable::generateTowerHeight() {
  // Implementation taken from Hars L. and Pteruska G.,
  // "Pseudorandom Recursions: Small and fast Pseudorandom number generators
  // for embedded applications."
  rand_ ^= mozilla::RotateLeft(rand_, 5) ^ mozilla::RotateLeft(rand_, 24);
  rand_ += 0x37798849;

  // Return 1 + number of lowbit zeros in new randval, capped at MAX_HEIGHT.
  unsigned result = 0;
  for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT - 1; i++) {
    if ((rand_ >> i) & 0x1) {
      break;
    }
    result++;
  }
  return result + 1;
}

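// Distribution sketch (illustrative): the height is 1 + the number of
// trailing zero bits of rand_, so heights follow a geometric distribution:
// height 1 with probability 1/2, height 2 with 1/4, height h with 2^-h,
// capped at MAX_HEIGHT. This is the standard skiplist height rule that
// yields expected O(log n) search.
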
JitcodeSkiplistTower* JitcodeGlobalTable::allocateTower(unsigned height) {
  MOZ_ASSERT(height >= 1);
  JitcodeSkiplistTower* tower =
      JitcodeSkiplistTower::PopFromFreeList(&freeTowers_[height - 1]);
  if (tower) {
    return tower;
  }

  size_t size = JitcodeSkiplistTower::CalculateSize(height);
  tower = (JitcodeSkiplistTower*)alloc_.alloc(size);
  if (!tower) {
    return nullptr;
  }

  return new (tower) JitcodeSkiplistTower(height);
}

JitcodeGlobalEntry* JitcodeGlobalTable::allocateEntry() {
  JitcodeGlobalEntry* entry =
      JitcodeGlobalEntry::PopFromFreeList(&freeEntries_);
  if (entry) {
    return entry;
  }

  return alloc_.new_<JitcodeGlobalEntry>();
}

#ifdef DEBUG
void JitcodeGlobalTable::verifySkiplist() {
  JitcodeGlobalEntry* curTower[JitcodeSkiplistTower::MAX_HEIGHT];
  for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT; i++) {
    curTower[i] = startTower_[i];
  }

  uint32_t count = 0;
  JitcodeGlobalEntry* curEntry = startTower_[0];
  while (curEntry) {
    count++;
    unsigned curHeight = curEntry->tower_->height();
    MOZ_ASSERT(curHeight >= 1);

    for (unsigned i = 0; i < JitcodeSkiplistTower::MAX_HEIGHT; i++) {
      if (i < curHeight) {
        MOZ_ASSERT(curTower[i] == curEntry);
        JitcodeGlobalEntry* nextEntry = curEntry->tower_->next(i);
        MOZ_ASSERT_IF(nextEntry, curEntry->compareTo(*nextEntry) < 0);
        curTower[i] = nextEntry;
      } else {
        MOZ_ASSERT_IF(curTower[i], curTower[i]->compareTo(*curEntry) > 0);
      }
    }
    curEntry = curEntry->tower_->next(0);
  }

  MOZ_ASSERT(count == skiplistSize_);
}
#endif  // DEBUG

void JitcodeGlobalTable::setAllEntriesAsExpired() {
  AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
  for (Range r(*this); !r.empty(); r.popFront()) {
    auto entry = r.front();
    entry->setAsExpired();
  }
}

struct Unconditionally {
  template <typename T>
  static bool ShouldTrace(JSRuntime* rt, T* thingp) {
    return true;
  }
};

struct IfUnmarked {
  template <typename T>
  static bool ShouldTrace(JSRuntime* rt, T* thingp) {
    return !IsMarkedUnbarriered(rt, thingp);
  }
};

template <>
bool IfUnmarked::ShouldTrace<TypeSet::Type>(JSRuntime* rt,
                                            TypeSet::Type* type) {
  return !TypeSet::IsTypeMarked(rt, type);
}

bool JitcodeGlobalTable::markIteratively(GCMarker* marker) {
  // JitcodeGlobalTable must keep entries that are in the sampler buffer
  // alive. This conditionality is akin to holding the entries weakly.
  //
  // If this table were marked at the beginning of the mark phase, then
  // sampling would require a read barrier for sampling in between
  // incremental GC slices. However, invoking read barriers from the sampler
  // is wildly unsafe. The sampler may run at any time, including during GC
  // itself.
  //
  // Instead, JitcodeGlobalTable is marked at the beginning of the sweep
  // phase, along with weak references. The key assumption is the
  // following. At the beginning of the sweep phase, any JS frames that the
  // sampler may put in its buffer that are not already there at the
  // beginning of the mark phase must have already been marked, as either 1)
  // the frame was on-stack at the beginning of the sweep phase, or 2) the
  // frame was pushed between incremental sweep slices. Frames of case 1)
  // are already marked. Frames of case 2) must have been reachable to have
  // been newly pushed, and thus are already marked.
  //
  // The approach above obviates the need for read barriers. The assumption
  // above is checked in JitcodeGlobalTable::lookupForSampler.

  MOZ_ASSERT(!JS::RuntimeHeapIsMinorCollecting());

  AutoSuppressProfilerSampling suppressSampling(TlsContext.get());

  // If the profiler is off, rangeStart will be Nothing() and all entries are
  // considered to be expired.
  Maybe<uint64_t> rangeStart =
      marker->runtime()->profilerSampleBufferRangeStart();

  bool markedAny = false;
  for (Range r(*this); !r.empty(); r.popFront()) {
    JitcodeGlobalEntry* entry = r.front();

    // If an entry is not sampled, reset its buffer position to the invalid
    // position, and conditionally mark the rest of the entry if its
    // JitCode is not already marked. This conditional marking ensures
    // that so long as the JitCode *may* be sampled, we keep any
    // information that may be handed out to the sampler, like tracked
    // types used by optimizations and scripts used for pc to line number
    // mapping, alive as well.
    if (!rangeStart || !entry->isSampled(*rangeStart)) {
      entry->setAsExpired();
      if (!entry->baseEntry().isJitcodeMarkedFromAnyThread(marker->runtime())) {
        continue;
      }
    }

    // The table is runtime-wide. Not all zones may be participating in
    // the GC.
    if (!entry->zone()->isCollecting() || entry->zone()->isGCFinished()) {
      continue;
    }

    markedAny |= entry->trace<IfUnmarked>(marker);
  }

  return markedAny;
}

void JitcodeGlobalTable::traceWeak(JSRuntime* rt, JSTracer* trc) {
  AutoSuppressProfilerSampling suppressSampling(rt->mainContextFromOwnThread());
  for (Enum e(*this, rt); !e.empty(); e.popFront()) {
    JitcodeGlobalEntry* entry = e.front();

    if (!entry->zone()->isCollecting() || entry->zone()->isGCFinished()) {
      continue;
    }

    if (!TraceManuallyBarrieredWeakEdge(
            trc, &entry->baseEntry().jitcode_,
            "JitcodeGlobalTable::JitcodeGlobalEntry::jitcode_")) {
      e.removeFront();
    } else {
      entry->sweepChildren(rt);
    }
  }
}

template <class ShouldTraceProvider>
bool JitcodeGlobalEntry::BaseEntry::traceJitcode(JSTracer* trc) {
  if (ShouldTraceProvider::ShouldTrace(trc->runtime(), &jitcode_)) {
    TraceManuallyBarrieredEdge(trc, &jitcode_,
                               "jitcodeglobaltable-baseentry-jitcode");
    return true;
  }
  return false;
}

bool JitcodeGlobalEntry::BaseEntry::isJitcodeMarkedFromAnyThread(
    JSRuntime* rt) {
  return IsMarkedUnbarriered(rt, &jitcode_);
}

template <class ShouldTraceProvider>
bool JitcodeGlobalEntry::BaselineEntry::trace(JSTracer* trc) {
  if (ShouldTraceProvider::ShouldTrace(trc->runtime(), &script_)) {
    TraceManuallyBarrieredEdge(trc, &script_,
                               "jitcodeglobaltable-baselineentry-script");
    return true;
  }
  return false;
}

void JitcodeGlobalEntry::BaselineEntry::sweepChildren() {
  MOZ_ALWAYS_FALSE(IsAboutToBeFinalizedUnbarriered(&script_));
}

bool JitcodeGlobalEntry::BaselineEntry::isMarkedFromAnyThread(JSRuntime* rt) {
  return IsMarkedUnbarriered(rt, &script_);
}

template <class ShouldTraceProvider>
bool JitcodeGlobalEntry::IonEntry::trace(JSTracer* trc) {
  bool tracedAny = false;

  JSRuntime* rt = trc->runtime();
  for (unsigned i = 0; i < numScripts(); i++) {
    if (ShouldTraceProvider::ShouldTrace(rt,
                                         &sizedScriptList()->pairs[i].script)) {
      TraceManuallyBarrieredEdge(trc, &sizedScriptList()->pairs[i].script,
                                 "jitcodeglobaltable-ionentry-script");
      tracedAny = true;
    }
  }

  return tracedAny;
}

void JitcodeGlobalEntry::IonEntry::sweepChildren() {
  for (unsigned i = 0; i < numScripts(); i++) {
    MOZ_ALWAYS_FALSE(
        IsAboutToBeFinalizedUnbarriered(&sizedScriptList()->pairs[i].script));
  }
}

bool JitcodeGlobalEntry::IonEntry::isMarkedFromAnyThread(JSRuntime* rt) {
  for (unsigned i = 0; i < numScripts(); i++) {
    if (!IsMarkedUnbarriered(rt, &sizedScriptList()->pairs[i].script)) {
      return false;
    }
  }

  return true;
}

void JitcodeRegionEntry::WriteHead(CompactBufferWriter& writer,
                                   uint32_t nativeOffset, uint8_t scriptDepth) {
  writer.writeUnsigned(nativeOffset);
  writer.writeByte(scriptDepth);
}

void JitcodeRegionEntry::ReadHead(CompactBufferReader& reader,
                                  uint32_t* nativeOffset,
                                  uint8_t* scriptDepth) {
  *nativeOffset = reader.readUnsigned();
  *scriptDepth = reader.readByte();
}

void JitcodeRegionEntry::WriteScriptPc(CompactBufferWriter& writer,
                                       uint32_t scriptIdx, uint32_t pcOffset) {
  writer.writeUnsigned(scriptIdx);
  writer.writeUnsigned(pcOffset);
}

void JitcodeRegionEntry::ReadScriptPc(CompactBufferReader& reader,
                                      uint32_t* scriptIdx, uint32_t* pcOffset) {
  *scriptIdx = reader.readUnsigned();
  *pcOffset = reader.readUnsigned();
}

void JitcodeRegionEntry::WriteDelta(CompactBufferWriter& writer,
                                    uint32_t nativeDelta, int32_t pcDelta) {
  if (pcDelta >= 0) {
    // 1 and 2-byte formats possible.

    //  NNNN-BBB0
    if (pcDelta <= ENC1_PC_DELTA_MAX && nativeDelta <= ENC1_NATIVE_DELTA_MAX) {
      uint8_t encVal = ENC1_MASK_VAL | (pcDelta << ENC1_PC_DELTA_SHIFT) |
                       (nativeDelta << ENC1_NATIVE_DELTA_SHIFT);
      writer.writeByte(encVal);
      return;
    }

    // NNNN-NNNN BBBB-BB01
    if (pcDelta <= ENC2_PC_DELTA_MAX && nativeDelta <= ENC2_NATIVE_DELTA_MAX) {
      uint16_t encVal = ENC2_MASK_VAL | (pcDelta << ENC2_PC_DELTA_SHIFT) |
                        (nativeDelta << ENC2_NATIVE_DELTA_SHIFT);
      writer.writeByte(encVal & 0xff);
      writer.writeByte((encVal >> 8) & 0xff);
      return;
    }
  }

  // NNNN-NNNN NNNB-BBBB BBBB-B011
  if (pcDelta >= ENC3_PC_DELTA_MIN && pcDelta <= ENC3_PC_DELTA_MAX &&
      nativeDelta <= ENC3_NATIVE_DELTA_MAX) {
    uint32_t encVal =
        ENC3_MASK_VAL |
        ((uint32_t(pcDelta) << ENC3_PC_DELTA_SHIFT) & ENC3_PC_DELTA_MASK) |
        (nativeDelta << ENC3_NATIVE_DELTA_SHIFT);
    writer.writeByte(encVal & 0xff);
    writer.writeByte((encVal >> 8) & 0xff);
    writer.writeByte((encVal >> 16) & 0xff);
    return;
  }

  // NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
  if (pcDelta >= ENC4_PC_DELTA_MIN && pcDelta <= ENC4_PC_DELTA_MAX &&
      nativeDelta <= ENC4_NATIVE_DELTA_MAX) {
    uint32_t encVal =
        ENC4_MASK_VAL |
        ((uint32_t(pcDelta) << ENC4_PC_DELTA_SHIFT) & ENC4_PC_DELTA_MASK) |
        (nativeDelta << ENC4_NATIVE_DELTA_SHIFT);
    writer.writeByte(encVal & 0xff);
    writer.writeByte((encVal >> 8) & 0xff);
    writer.writeByte((encVal >> 16) & 0xff);
    writer.writeByte((encVal >> 24) & 0xff);
    return;
  }

  // Should never get here.
  MOZ_CRASH("pcDelta/nativeDelta values are too large to encode.");
}

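// Worked encoding example (illustrative): with the ENC1 layout NNNN-BBB0,
// nativeDelta=5 and pcDelta=3 fit the 1-byte form: encVal = (3 << 1) |
// (5 << 4) = 0b0101'0110 = 0x56, with the low 0 bit tagging the format.
// A reader seeing 0x56 recovers pcDelta = (0x56 & 0b1110) >> 1 = 3 and
// nativeDelta = 0x56 >> 4 = 5. (The exact shift/mask constants live in the
// header; the values above assume the bit layout shown in the comments.)
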
void JitcodeRegionEntry::ReadDelta(CompactBufferReader& reader,
                                   uint32_t* nativeDelta, int32_t* pcDelta) {
  // NB:
  // It's possible to get nativeDeltas with value 0 in two cases:
  //
  // 1. The last region's run. This is because the region table's start
  // must be 4-byte aligned, and we must insert padding bytes to align the
  // payload section before emitting the table.
  //
  // 2. A zero-offset nativeDelta with a negative pcDelta.
  //
  // So if nativeDelta is zero, then pcDelta must be <= 0.

  //  NNNN-BBB0
  const uint32_t firstByte = reader.readByte();
  if ((firstByte & ENC1_MASK) == ENC1_MASK_VAL) {
    uint32_t encVal = firstByte;
    *nativeDelta = encVal >> ENC1_NATIVE_DELTA_SHIFT;
    *pcDelta = (encVal & ENC1_PC_DELTA_MASK) >> ENC1_PC_DELTA_SHIFT;
    MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
    return;
  }

  // NNNN-NNNN BBBB-BB01
  const uint32_t secondByte = reader.readByte();
  if ((firstByte & ENC2_MASK) == ENC2_MASK_VAL) {
    uint32_t encVal = firstByte | secondByte << 8;
    *nativeDelta = encVal >> ENC2_NATIVE_DELTA_SHIFT;
    *pcDelta = (encVal & ENC2_PC_DELTA_MASK) >> ENC2_PC_DELTA_SHIFT;
    MOZ_ASSERT(*pcDelta != 0);
    MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
    return;
  }

  // NNNN-NNNN NNNB-BBBB BBBB-B011
  const uint32_t thirdByte = reader.readByte();
  if ((firstByte & ENC3_MASK) == ENC3_MASK_VAL) {
    uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16;
    *nativeDelta = encVal >> ENC3_NATIVE_DELTA_SHIFT;

    uint32_t pcDeltaU = (encVal & ENC3_PC_DELTA_MASK) >> ENC3_PC_DELTA_SHIFT;
    // Fix sign if necessary.
    if (pcDeltaU > static_cast<uint32_t>(ENC3_PC_DELTA_MAX)) {
      pcDeltaU |= ~ENC3_PC_DELTA_MAX;
    }
    *pcDelta = pcDeltaU;

    MOZ_ASSERT(*pcDelta != 0);
    MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
    return;
  }

  // NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
  MOZ_ASSERT((firstByte & ENC4_MASK) == ENC4_MASK_VAL);
  const uint32_t fourthByte = reader.readByte();
  uint32_t encVal =
      firstByte | secondByte << 8 | thirdByte << 16 | fourthByte << 24;
  *nativeDelta = encVal >> ENC4_NATIVE_DELTA_SHIFT;

  uint32_t pcDeltaU = (encVal & ENC4_PC_DELTA_MASK) >> ENC4_PC_DELTA_SHIFT;
  // Fix sign if necessary.
  if (pcDeltaU > static_cast<uint32_t>(ENC4_PC_DELTA_MAX)) {
    pcDeltaU |= ~ENC4_PC_DELTA_MAX;
  }
  *pcDelta = pcDeltaU;

  MOZ_ASSERT(*pcDelta != 0);
  MOZ_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
}

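// Sign-extension example (illustrative, assuming the 10-bit signed pcDelta
// field the ENC3 bit diagram suggests, i.e. ENC3_PC_DELTA_MAX = 0x1ff): a
// decoded pcDeltaU of 0x3ff exceeds 0x1ff, so pcDeltaU |= ~ENC3_PC_DELTA_MAX
// fills in the high bits and yields the two's-complement value -1. Values
// up to 0x1ff are non-negative and pass through unchanged.
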
uint32_t JitcodeRegionEntry::ExpectedRunLength(const NativeToBytecode* entry,
                                               const NativeToBytecode* end) {
  MOZ_ASSERT(entry < end);

  // We always use the first entry, so runLength starts at 1
  uint32_t runLength = 1;

  uint32_t curNativeOffset = entry->nativeOffset.offset();
  uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);

  for (auto nextEntry = entry + 1; nextEntry != end; nextEntry += 1) {
    // If the next run moves to a different inline site, stop the run.
    if (nextEntry->tree != entry->tree) {
      break;
    }

    uint32_t nextNativeOffset = nextEntry->nativeOffset.offset();
    uint32_t nextBytecodeOffset =
        nextEntry->tree->script()->pcToOffset(nextEntry->pc);
    MOZ_ASSERT(nextNativeOffset >= curNativeOffset);

    uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
    int32_t bytecodeDelta =
        int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);

    // If deltas are too large (very unlikely), stop the run.
    if (!IsDeltaEncodeable(nativeDelta, bytecodeDelta)) {
      break;
    }

    runLength++;

    // If the run has grown to its maximum length, stop the run.
    if (runLength == MAX_RUN_LENGTH) {
      break;
    }

    curNativeOffset = nextNativeOffset;
    curBytecodeOffset = nextBytecodeOffset;
  }

  return runLength;
}

struct JitcodeMapBufferWriteSpewer {
#ifdef JS_JITSPEW
  CompactBufferWriter* writer;
  uint32_t startPos;

  static const uint32_t DumpMaxBytes = 50;

  explicit JitcodeMapBufferWriteSpewer(CompactBufferWriter& w)
      : writer(&w), startPos(writer->length()) {}

  void spewAndAdvance(const char* name) {
    if (writer->oom()) {
      return;
    }

    uint32_t curPos = writer->length();
    const uint8_t* start = writer->buffer() + startPos;
    const uint8_t* end = writer->buffer() + curPos;
    const char* MAP = "0123456789ABCDEF";
    uint32_t bytes = end - start;

    char buffer[DumpMaxBytes * 3];
    for (uint32_t i = 0; i < bytes; i++) {
      buffer[i * 3] = MAP[(start[i] >> 4) & 0xf];
      buffer[i * 3 + 1] = MAP[(start[i] >> 0) & 0xf];
      buffer[i * 3 + 2] = ' ';
    }
    if (bytes >= DumpMaxBytes) {
      buffer[DumpMaxBytes * 3 - 1] = '\0';
    } else {
      buffer[bytes * 3 - 1] = '\0';
    }

    JitSpew(JitSpew_Profiling, "%s@%d[%d bytes] - %s", name, int(startPos),
            int(bytes), buffer);

    // Move to the end of the current buffer.
    startPos = writer->length();
  }
#else   // !JS_JITSPEW
  explicit JitcodeMapBufferWriteSpewer(CompactBufferWriter& w) {}
  void spewAndAdvance(const char* name) {}
#endif  // JS_JITSPEW
};

// Write a run, starting at the given NativeToBytecode entry, into the given
// buffer writer.
bool JitcodeRegionEntry::WriteRun(CompactBufferWriter& writer,
                                  JSScript** scriptList,
                                  uint32_t scriptListSize, uint32_t runLength,
                                  const NativeToBytecode* entry) {
  MOZ_ASSERT(runLength > 0);
  MOZ_ASSERT(runLength <= MAX_RUN_LENGTH);

  // Calculate script depth.
  MOZ_ASSERT(entry->tree->depth() <= 0xff);
  uint8_t scriptDepth = entry->tree->depth();
  uint32_t regionNativeOffset = entry->nativeOffset.offset();

  JitcodeMapBufferWriteSpewer spewer(writer);

  // Write the head info.
  JitSpew(JitSpew_Profiling, "    Head Info: nativeOffset=%d scriptDepth=%d",
          int(regionNativeOffset), int(scriptDepth));
  WriteHead(writer, regionNativeOffset, scriptDepth);
  spewer.spewAndAdvance("      ");

  // Write each script/pc pair.
  {
    InlineScriptTree* curTree = entry->tree;
    jsbytecode* curPc = entry->pc;
    for (uint8_t i = 0; i < scriptDepth; i++) {
      // Find the index of the script within the list.
      // NB: scriptList is guaranteed to contain curTree->script()
      uint32_t scriptIdx = 0;
      for (; scriptIdx < scriptListSize; scriptIdx++) {
        if (scriptList[scriptIdx] == curTree->script()) {
          break;
        }
      }
      MOZ_ASSERT(scriptIdx < scriptListSize);

      uint32_t pcOffset = curTree->script()->pcToOffset(curPc);

      JitSpew(JitSpew_Profiling, "    Script/PC %d: scriptIdx=%d pcOffset=%d",
              int(i), int(scriptIdx), int(pcOffset));
      WriteScriptPc(writer, scriptIdx, pcOffset);
      spewer.spewAndAdvance("      ");

      MOZ_ASSERT_IF(i < scriptDepth - 1, curTree->hasCaller());
      curPc = curTree->callerPc();
      curTree = curTree->caller();
    }
  }

  // Start writing runs.
  uint32_t curNativeOffset = entry->nativeOffset.offset();
  uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);

  JitSpew(JitSpew_Profiling,
          "  Writing Delta Run from nativeOffset=%d bytecodeOffset=%d",
          int(curNativeOffset), int(curBytecodeOffset));

  // Skip first entry because it is implicit in the header. Start at subsequent
  // entry.
  for (uint32_t i = 1; i < runLength; i++) {
    MOZ_ASSERT(entry[i].tree == entry->tree);

    uint32_t nextNativeOffset = entry[i].nativeOffset.offset();
    uint32_t nextBytecodeOffset =
        entry[i].tree->script()->pcToOffset(entry[i].pc);
    MOZ_ASSERT(nextNativeOffset >= curNativeOffset);

    uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
    int32_t bytecodeDelta =
        int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);
    MOZ_ASSERT(IsDeltaEncodeable(nativeDelta, bytecodeDelta));

    JitSpew(JitSpew_Profiling,
            "    RunEntry native: %d-%d [%d]  bytecode: %d-%d [%d]",
            int(curNativeOffset), int(nextNativeOffset), int(nativeDelta),
            int(curBytecodeOffset), int(nextBytecodeOffset),
            int(bytecodeDelta));
    WriteDelta(writer, nativeDelta, bytecodeDelta);

    // Spew the bytecode in these ranges.
    if (curBytecodeOffset < nextBytecodeOffset) {
      JitSpewStart(JitSpew_Profiling, "      OPS: ");
      uint32_t curBc = curBytecodeOffset;
      while (curBc < nextBytecodeOffset) {
        jsbytecode* pc = entry[i].tree->script()->offsetToPC(curBc);
#ifdef JS_JITSPEW
        JSOp op = JSOp(*pc);
        JitSpewCont(JitSpew_Profiling, "%s ", CodeName(op));
#endif
        curBc += GetBytecodeLength(pc);
      }
      JitSpewFin(JitSpew_Profiling);
    }
    spewer.spewAndAdvance("      ");

    curNativeOffset = nextNativeOffset;
    curBytecodeOffset = nextBytecodeOffset;
  }

  if (writer.oom()) {
    return false;
  }

  return true;
}

void JitcodeRegionEntry::unpack() {
  CompactBufferReader reader(data_, end_);
  ReadHead(reader, &nativeOffset_, &scriptDepth_);
  MOZ_ASSERT(scriptDepth_ > 0);

  scriptPcStack_ = reader.currentPosition();
  // Skip past script/pc stack
  for (unsigned i = 0; i < scriptDepth_; i++) {
    uint32_t scriptIdx, pcOffset;
    ReadScriptPc(reader, &scriptIdx, &pcOffset);
  }

  deltaRun_ = reader.currentPosition();
}

uint32_t JitcodeRegionEntry::findPcOffset(uint32_t queryNativeOffset,
                                          uint32_t startPcOffset) const {
  DeltaIterator iter = deltaIterator();
  uint32_t curNativeOffset = nativeOffset();
  uint32_t curPcOffset = startPcOffset;
  while (iter.hasMore()) {
    uint32_t nativeDelta;
    int32_t pcDelta;
    iter.readNext(&nativeDelta, &pcDelta);

    // The start address of the next delta-run entry is counted towards
    // the current delta-run entry, because return addresses should
    // associate with the bytecode op prior (the call) not the op after.
    if (queryNativeOffset <= curNativeOffset + nativeDelta) {
      break;
    }
    curNativeOffset += nativeDelta;
    curPcOffset += pcDelta;
  }
  return curPcOffset;
}

bool JitcodeIonTable::makeIonEntry(JSContext* cx, JitCode* code,
                                   uint32_t numScripts, JSScript** scripts,
                                   JitcodeGlobalEntry::IonEntry& out) {
  using SizedScriptList = JitcodeGlobalEntry::IonEntry::SizedScriptList;

  MOZ_ASSERT(numScripts > 0);

  // Create profiling strings for script, within vector.
  typedef js::Vector<char*, 32, SystemAllocPolicy> ProfilingStringVector;

  ProfilingStringVector profilingStrings;
  if (!profilingStrings.reserve(numScripts)) {
    return false;
  }

  // Cleanup allocations on failure.
  auto autoFreeProfilingStrings = mozilla::MakeScopeExit([&] {
    for (auto elem : profilingStrings) {
      js_free(elem);
    }
  });

  for (uint32_t i = 0; i < numScripts; i++) {
    UniqueChars str = GeckoProfilerRuntime::allocProfileString(cx, scripts[i]);
    if (!str) {
      return false;
    }
    if (!profilingStrings.append(str.release())) {
      return false;
    }
  }

  // Create SizedScriptList
  void* mem =
      (void*)cx->pod_malloc<uint8_t>(SizedScriptList::AllocSizeFor(numScripts));
  if (!mem) {
    return false;
  }

  // Keep allocated profiling strings.
  autoFreeProfilingStrings.release();

  SizedScriptList* scriptList =
      new (mem) SizedScriptList(numScripts, scripts, &profilingStrings[0]);
  out.init(code, code->raw(), code->rawEnd(), scriptList, this);
  return true;
}

uint32_t JitcodeIonTable::findRegionEntry(uint32_t nativeOffset) const {
  static const uint32_t LINEAR_SEARCH_THRESHOLD = 8;
  uint32_t regions = numRegions();
  MOZ_ASSERT(regions > 0);

  // For small region lists, just search linearly.
  if (regions <= LINEAR_SEARCH_THRESHOLD) {
    JitcodeRegionEntry previousEntry = regionEntry(0);
    for (uint32_t i = 1; i < regions; i++) {
      JitcodeRegionEntry nextEntry = regionEntry(i);
      MOZ_ASSERT(nextEntry.nativeOffset() >= previousEntry.nativeOffset());

      // See note in binary-search code below about why we use '<=' here
      // instead of '<'. Short explanation: regions are closed at their
      // ending addresses, and open at their starting addresses.
      if (nativeOffset <= nextEntry.nativeOffset()) {
        return i - 1;
      }

      previousEntry = nextEntry;
    }
    // If nothing found, assume it falls within last region.
    return regions - 1;
  }

  // For larger ones, binary search the region table.
  uint32_t idx = 0;
  uint32_t count = regions;
  while (count > 1) {
    uint32_t step = count / 2;
    uint32_t mid = idx + step;
    JitcodeRegionEntry midEntry = regionEntry(mid);

    // A region memory range is closed at its ending address, not starting
    // address. This is because the return address for calls must associate
    // with the call's bytecode PC, not the PC of the bytecode operator after
    // the call.
    //
    // So a query is < an entry if the query nativeOffset is <= the start
    // address of the entry, and a query is >= an entry if the query
    // nativeOffset is > the start address of an entry.
    if (nativeOffset <= midEntry.nativeOffset()) {
      // Target entry is below midEntry.
      count = step;
    } else {  // if (nativeOffset > midEntry.nativeOffset())
      // Target entry is at midEntry or above.
      idx = mid;
      count -= step;
    }
  }

  return idx;
}

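// Boundary example (illustrative): for regions starting at native offsets
// 0x00, 0x40, 0x80, a query of exactly 0x40 satisfies
// |nativeOffset <= nextEntry.nativeOffset()| and resolves to region 0, not
// region 1: offset 0x40 is a return address belonging to the call that ends
// region 0, because regions are closed at their ending addresses and open
// at their starting addresses.
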
bool JitcodeIonTable::WriteIonTable(
    CompactBufferWriter& writer, JSScript** scriptList, uint32_t scriptListSize,
    const NativeToBytecode* start, const NativeToBytecode* end,
    uint32_t* tableOffsetOut, uint32_t* numRegionsOut) {
  MOZ_ASSERT(tableOffsetOut != nullptr);
  MOZ_ASSERT(numRegionsOut != nullptr);
  MOZ_ASSERT(writer.length() == 0);
  MOZ_ASSERT(scriptListSize > 0);

  JitSpew(JitSpew_Profiling,
          "Writing native to bytecode map for %s:%u:%u (%zu entries)",
          scriptList[0]->filename(), scriptList[0]->lineno(),
          scriptList[0]->column(), mozilla::PointerRangeSize(start, end));

  JitSpew(JitSpew_Profiling, "  ScriptList of size %d", int(scriptListSize));
  for (uint32_t i = 0; i < scriptListSize; i++) {
    JitSpew(JitSpew_Profiling, "  Script %d - %s:%u:%u", int(i),
            scriptList[i]->filename(), scriptList[i]->lineno(),
            scriptList[i]->column());
  }

  // Write out runs first. Keep a vector tracking the positive offsets from
  // payload start to the run.
  const NativeToBytecode* curEntry = start;
  js::Vector<uint32_t, 32, SystemAllocPolicy> runOffsets;

  while (curEntry != end) {
    // Calculate the length of the next run.
    uint32_t runLength = JitcodeRegionEntry::ExpectedRunLength(curEntry, end);
    MOZ_ASSERT(runLength > 0);
    MOZ_ASSERT(runLength <= uintptr_t(end - curEntry));
    JitSpew(JitSpew_Profiling, "  Run at entry %d, length %d, buffer offset %d",
            int(curEntry - start), int(runLength), int(writer.length()));

    // Store the offset of the run.
    if (!runOffsets.append(writer.length())) {
      return false;
    }

    // Write the run.
    if (!JitcodeRegionEntry::WriteRun(writer, scriptList, scriptListSize,
                                      runLength, curEntry)) {
      return false;
    }

    curEntry += runLength;
  }

  // Done encoding regions. About to start table. Ensure we are aligned to 4
  // bytes since table is composed of uint32_t values.
  uint32_t padding = sizeof(uint32_t) - (writer.length() % sizeof(uint32_t));
  if (padding == sizeof(uint32_t)) {
    padding = 0;
  }
  JitSpew(JitSpew_Profiling, "  Padding %d bytes after run @%d", int(padding),
          int(writer.length()));
  for (uint32_t i = 0; i < padding; i++) {
    writer.writeByte(0);
  }

  // Now at start of table.
  uint32_t tableOffset = writer.length();

  // The table being written at this point will be accessed directly via
  // uint32_t pointers, so all writes below use native endianness.

  // Write out numRegions
  JitSpew(JitSpew_Profiling, "  Writing numRuns=%d", int(runOffsets.length()));
  writer.writeNativeEndianUint32_t(runOffsets.length());

  // Write out region offset table. The offsets in |runOffsets| are currently
  // forward offsets from the beginning of the buffer. We convert them to
  // backwards offsets from the start of the table before writing them into
  // their table entries.
  for (uint32_t i = 0; i < runOffsets.length(); i++) {
    JitSpew(JitSpew_Profiling, "  Run %d offset=%d backOffset=%d @%d", int(i),
            int(runOffsets[i]), int(tableOffset - runOffsets[i]),
            int(writer.length()));
    writer.writeNativeEndianUint32_t(tableOffset - runOffsets[i]);
  }

  if (writer.oom()) {
    return false;
  }

  *tableOffsetOut = tableOffset;
  *numRegionsOut = runOffsets.length();
  return true;
}

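// Resulting layout example (illustrative): suppose two runs are written at
// buffer offsets 0 and 15 and the payload ends at offset 23. One padding
// byte aligns the table to tableOffset=24, and the uint32_t words written
// are numRegions=2 followed by back-offsets 24-0=24 and 24-15=9. A reader
// positioned at the table start recovers each run by subtracting its
// back-offset from the table address.
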
}  // namespace jit
}  // namespace js

JS::ProfiledFrameHandle::ProfiledFrameHandle(JSRuntime* rt,
                                             js::jit::JitcodeGlobalEntry& entry,
                                             void* addr, const char* label,
                                             uint32_t depth)
    : rt_(rt),
      entry_(entry),
      addr_(addr),
      canonicalAddr_(nullptr),
      label_(label),
      depth_(depth) {
  if (!canonicalAddr_) {
    canonicalAddr_ = entry_.canonicalNativeAddrFor(rt_, addr_);
  }
}

JS_PUBLIC_API JS::ProfilingFrameIterator::FrameKind
JS::ProfiledFrameHandle::frameKind() const {
  if (entry_.isBaselineInterpreter()) {
    return JS::ProfilingFrameIterator::Frame_BaselineInterpreter;
  }
  if (entry_.isBaseline()) {
    return JS::ProfilingFrameIterator::Frame_Baseline;
  }
  return JS::ProfilingFrameIterator::Frame_Ion;
}

JS_PUBLIC_API uint64_t JS::ProfiledFrameHandle::realmID() const {
  return entry_.lookupRealmID(rt_, addr_);
}

JS_PUBLIC_API JS::ProfiledFrameRange JS::GetProfiledFrames(JSContext* cx,
                                                           void* addr) {
  JSRuntime* rt = cx->runtime();
  js::jit::JitcodeGlobalTable* table =
      rt->jitRuntime()->getJitcodeGlobalTable();
  js::jit::JitcodeGlobalEntry* entry = table->lookup(addr);

  ProfiledFrameRange result(rt, addr, entry);

  if (entry) {
    result.depth_ = entry->callStackAtAddr(rt, addr, result.labels_,
                                           MOZ_ARRAY_LENGTH(result.labels_));
  }
  return result;
}

JS::ProfiledFrameHandle JS::ProfiledFrameRange::Iter::operator*() const {
  // The iterator iterates in high depth to low depth order. index_ goes up,
  // and the depth we need to pass to ProfiledFrameHandle goes down.
  uint32_t depth = range_.depth_ - 1 - index_;
  return ProfiledFrameHandle(range_.rt_, *range_.entry_, range_.addr_,
                             range_.labels_[depth], depth);
}
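
// Usage sketch (illustrative, not part of the original file; assumes the
// ProfiledFrameHandle accessors declared in the public profiling headers):
// a profiler holding a JSContext* cx and a sampled native return address
// |addr| can walk the logical (inline-expanded) frames like so:
//
//   for (JS::ProfiledFrameHandle frame : JS::GetProfiledFrames(cx, addr)) {
//     const char* label = frame.label();  // profiling string for the frame
//     uint64_t realm = frame.realmID();
//     // frame.frameKind() distinguishes Ion / Baseline / interpreter.
//   }
//
// The range yields frames from highest depth value to lowest, matching the
// index_-to-depth mapping in Iter::operator*() above.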