1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/Snapshots.h"
9 #include "jit/JitSpewer.h"
10 #ifdef TRACK_SNAPSHOTS
14 #include "jit/Recover.h"
15 #include "js/Printer.h"
18 using namespace js::jit
;
20 // [SMDOC] IonMonkey Snapshot encoding
23 // [ptr] A fixed-size pointer.
24 // [vwu] A variable-width unsigned integer.
25 // [vws] A variable-width signed integer.
26 // [u8] An 8-bit unsigned integer.
27 // [u8'] An 8-bit unsigned integer which is potentially extended with packed
29 // [u8"] Packed data which is stored and packed in the previous [u8'].
30 // [vwu*] A list of variable-width unsigned integers.
31 // [pld] Payload of Recover Value Allocation:
33 // There is no payload.
36 // [vwu] Index, such as the constant pool index.
38 // PAYLOAD_STACK_OFFSET:
39 // [vws] Stack offset based on the base of the Ion frame.
42 // [u8] Code of the general register.
45 // [u8] Code of the FPU register.
47 // PAYLOAD_PACKED_TAG:
48 // [u8"] Bits 5-7: JSValueType is encoded on the low bits of the Mode
49 // of the RValueAllocation.
53 // [vwu] bits ((n+1)-31]: recover instruction offset
54 // bits [0,n): bailout kind (n = SNAPSHOT_BAILOUTKIND_BITS)
56 // Snapshot body, repeated "frame count" times, from oldest frame to newest
57 // frame. Note that the first frame doesn't have the "parent PC" field.
59 // [ptr] Debug only: JSScript*
61 // [vwu] # of RVA's indexes, including nargs
62 // [vwu*] List of indexes to R(ecover)ValueAllocation table. Contains
63 // nargs + nfixed + stackDepth items.
65 // Recover value allocations are encoded at the end of the Snapshot buffer, and
66 // they are padded on ALLOCATION_TABLE_ALIGNMENT. The encoding of each
67 // allocation is determined by the RValueAllocation::Layout, which can be
68 // obtained from the RValueAllocation::Mode with layoutFromMode function. The
// layout structure lists the types of payload which are used to serialize /
// deserialize / dump the content of the allocations.
72 // R(ecover)ValueAllocation items:
73 // [u8'] Mode, which defines the type of the payload as well as the
75 // [pld] first payload (packed tag, index, stack offset, register, ...)
76 // [pld] second payload (register, stack offset, none)
80 // Index into the constant pool.
// Constant value which corresponds to the "undefined" JS value.
// Constant value which corresponds to the "null" JS value.
88 // DOUBLE_REG [FPU_REG]
89 // Double value stored in a FPU register.
91 // ANY_FLOAT_REG [FPU_REG]
92 // Any Float value (float32, simd) stored in a FPU register.
94 // ANY_FLOAT_STACK [STACK_OFFSET]
95 // Any Float value (float32, simd) stored on the stack.
97 // UNTYPED_REG [GPR_REG]
98 // UNTYPED_STACK [STACK_OFFSET]
99 // UNTYPED_REG_REG [GPR_REG, GPR_REG]
100 // UNTYPED_REG_STACK [GPR_REG, STACK_OFFSET]
101 // UNTYPED_STACK_REG [STACK_OFFSET, GPR_REG]
102 // UNTYPED_STACK_STACK [STACK_OFFSET, STACK_OFFSET]
103 // Value with dynamically known type. On 32 bits architecture, the
104 // first register/stack-offset correspond to the holder of the type,
105 // and the second correspond to the payload of the JS Value.
107 // RECOVER_INSTRUCTION [INDEX]
108 // Index into the list of recovered instruction results.
110 // RI_WITH_DEFAULT_CST [INDEX] [INDEX]
111 // The first payload is the index into the list of recovered
112 // instruction results. The second payload is the index in the
115 // TYPED_REG [PACKED_TAG, GPR_REG]:
116 // Value with statically known type, which payload is stored in a
119 // TYPED_STACK [PACKED_TAG, STACK_OFFSET]:
120 // Value with statically known type, which payload is stored at an
121 // offset on the stack.
124 const RValueAllocation::Layout
& RValueAllocation::layoutFromMode(Mode mode
) {
127 static const RValueAllocation::Layout layout
= {PAYLOAD_INDEX
,
128 PAYLOAD_NONE
, "constant"};
132 case CST_UNDEFINED
: {
133 static const RValueAllocation::Layout layout
= {
134 PAYLOAD_NONE
, PAYLOAD_NONE
, "undefined"};
139 static const RValueAllocation::Layout layout
= {PAYLOAD_NONE
,
140 PAYLOAD_NONE
, "null"};
145 static const RValueAllocation::Layout layout
= {PAYLOAD_FPU
, PAYLOAD_NONE
,
149 case ANY_FLOAT_REG
: {
150 static const RValueAllocation::Layout layout
= {PAYLOAD_FPU
, PAYLOAD_NONE
,
151 "float register content"};
154 case ANY_FLOAT_STACK
: {
155 static const RValueAllocation::Layout layout
= {
156 PAYLOAD_STACK_OFFSET
, PAYLOAD_NONE
, "float register content"};
159 #if defined(JS_NUNBOX32)
160 case UNTYPED_REG_REG
: {
161 static const RValueAllocation::Layout layout
= {PAYLOAD_GPR
, PAYLOAD_GPR
,
165 case UNTYPED_REG_STACK
: {
166 static const RValueAllocation::Layout layout
= {
167 PAYLOAD_GPR
, PAYLOAD_STACK_OFFSET
, "value"};
170 case UNTYPED_STACK_REG
: {
171 static const RValueAllocation::Layout layout
= {PAYLOAD_STACK_OFFSET
,
172 PAYLOAD_GPR
, "value"};
175 case UNTYPED_STACK_STACK
: {
176 static const RValueAllocation::Layout layout
= {
177 PAYLOAD_STACK_OFFSET
, PAYLOAD_STACK_OFFSET
, "value"};
180 #elif defined(JS_PUNBOX64)
182 static const RValueAllocation::Layout layout
= {PAYLOAD_GPR
, PAYLOAD_NONE
,
186 case UNTYPED_STACK
: {
187 static const RValueAllocation::Layout layout
= {PAYLOAD_STACK_OFFSET
,
188 PAYLOAD_NONE
, "value"};
192 case RECOVER_INSTRUCTION
: {
193 static const RValueAllocation::Layout layout
= {
194 PAYLOAD_INDEX
, PAYLOAD_NONE
, "instruction"};
197 case RI_WITH_DEFAULT_CST
: {
198 static const RValueAllocation::Layout layout
= {
199 PAYLOAD_INDEX
, PAYLOAD_INDEX
, "instruction with default"};
204 static const RValueAllocation::Layout regLayout
= {
205 PAYLOAD_PACKED_TAG
, PAYLOAD_GPR
, "typed value"};
207 static const RValueAllocation::Layout stackLayout
= {
208 PAYLOAD_PACKED_TAG
, PAYLOAD_STACK_OFFSET
, "typed value"};
210 if (mode
>= TYPED_REG_MIN
&& mode
<= TYPED_REG_MAX
) {
213 if (mode
>= TYPED_STACK_MIN
&& mode
<= TYPED_STACK_MAX
) {
219 MOZ_CRASH_UNSAFE_PRINTF("Unexpected mode: 0x%x", uint32_t(mode
));
// Pad serialized RValueAllocations by a multiple of X bytes in the allocation
// buffer. By padding serialized value allocations, we are building an
// indexable table of elements of X bytes, and thus we can safely divide any
// offset within the buffer by X to obtain an index.
//
// By padding, we are losing space within the allocation buffer, but we
// multiply by X the number of indexes that we can store on one byte in each
// snapshot.
//
// Some value allocations are taking more than X bytes to be encoded, in which
// case we will pad to a multiple of X, and we are wasting indexes. The choice
// of X should be balanced between the wasted padding of serialized value
// allocation, and the saving made in snapshot indexes.
static const size_t ALLOCATION_TABLE_ALIGNMENT = 2; /* bytes */
237 void RValueAllocation::readPayload(CompactBufferReader
& reader
,
238 PayloadType type
, uint8_t* mode
,
244 p
->index
= reader
.readUnsigned();
246 case PAYLOAD_STACK_OFFSET
:
247 p
->stackOffset
= reader
.readSigned();
250 p
->gpr
= Register::FromCode(reader
.readByte());
253 p
->fpu
.data
= reader
.readByte();
255 case PAYLOAD_PACKED_TAG
:
256 p
->type
= JSValueType(*mode
& PACKED_TAG_MASK
);
257 *mode
= *mode
& ~PACKED_TAG_MASK
;
262 RValueAllocation
RValueAllocation::read(CompactBufferReader
& reader
) {
263 uint8_t mode
= reader
.readByte();
264 const Layout
& layout
= layoutFromMode(Mode(mode
& MODE_BITS_MASK
));
267 readPayload(reader
, layout
.type1
, &mode
, &arg1
);
268 readPayload(reader
, layout
.type2
, &mode
, &arg2
);
269 return RValueAllocation(Mode(mode
), arg1
, arg2
);
272 void RValueAllocation::writePayload(CompactBufferWriter
& writer
,
273 PayloadType type
, Payload p
) {
278 writer
.writeUnsigned(p
.index
);
280 case PAYLOAD_STACK_OFFSET
:
281 writer
.writeSigned(p
.stackOffset
);
284 static_assert(Registers::Total
<= 0x100,
285 "Not enough bytes to encode all registers.");
286 writer
.writeByte(p
.gpr
.code());
289 static_assert(FloatRegisters::Total
<= 0x100,
290 "Not enough bytes to encode all float registers.");
291 writer
.writeByte(p
.fpu
.code());
293 case PAYLOAD_PACKED_TAG
: {
294 // This code assumes that the PACKED_TAG payload is following the
295 // writeByte of the mode.
297 MOZ_ASSERT(writer
.length());
298 uint8_t* mode
= writer
.buffer() + (writer
.length() - 1);
299 MOZ_ASSERT((*mode
& PACKED_TAG_MASK
) == 0 &&
300 (p
.type
& ~PACKED_TAG_MASK
) == 0);
301 *mode
= *mode
| p
.type
;
308 void RValueAllocation::writePadding(CompactBufferWriter
& writer
) {
309 // Write 0x7f in all padding bytes.
310 while (writer
.length() % ALLOCATION_TABLE_ALIGNMENT
) {
311 writer
.writeByte(0x7f);
315 void RValueAllocation::write(CompactBufferWriter
& writer
) const {
316 const Layout
& layout
= layoutFromMode(mode());
317 MOZ_ASSERT(layout
.type2
!= PAYLOAD_PACKED_TAG
);
318 MOZ_ASSERT(writer
.length() % ALLOCATION_TABLE_ALIGNMENT
== 0);
320 writer
.writeByte(mode_
);
321 writePayload(writer
, layout
.type1
, arg1_
);
322 writePayload(writer
, layout
.type2
, arg2_
);
323 writePadding(writer
);
326 HashNumber
RValueAllocation::hash() const {
328 res
= HashNumber(mode_
);
329 res
= arg1_
.index
+ (res
<< 6) + (res
<< 16) - res
;
330 res
= arg2_
.index
+ (res
<< 6) + (res
<< 16) - res
;
335 void RValueAllocation::dumpPayload(GenericPrinter
& out
, PayloadType type
,
341 out
.printf("index %u", p
.index
);
343 case PAYLOAD_STACK_OFFSET
:
344 out
.printf("stack %d", p
.stackOffset
);
347 out
.printf("reg %s", p
.gpr
.name());
350 out
.printf("reg %s", p
.fpu
.name());
352 case PAYLOAD_PACKED_TAG
:
353 out
.printf("%s", ValTypeToString(p
.type
));
358 void RValueAllocation::dump(GenericPrinter
& out
) const {
359 const Layout
& layout
= layoutFromMode(mode());
360 out
.printf("%s", layout
.name
);
362 if (layout
.type1
!= PAYLOAD_NONE
) {
365 dumpPayload(out
, layout
.type1
, arg1_
);
366 if (layout
.type2
!= PAYLOAD_NONE
) {
369 dumpPayload(out
, layout
.type2
, arg2_
);
370 if (layout
.type1
!= PAYLOAD_NONE
) {
376 SnapshotReader::SnapshotReader(const uint8_t* snapshots
, uint32_t offset
,
377 uint32_t RVATableSize
, uint32_t listSize
)
378 : reader_(snapshots
+ offset
, snapshots
+ listSize
),
379 allocReader_(snapshots
+ listSize
, snapshots
+ listSize
+ RVATableSize
),
380 allocTable_(snapshots
+ listSize
),
385 JitSpew(JitSpew_IonSnapshots
, "Creating snapshot reader");
386 readSnapshotHeader();
389 #define COMPUTE_SHIFT_AFTER_(name) (name##_BITS + name##_SHIFT)
390 #define COMPUTE_MASK_(name) ((uint32_t(1 << name##_BITS) - 1) << name##_SHIFT)
392 // Details of snapshot header packing.
393 static const uint32_t SNAPSHOT_BAILOUTKIND_SHIFT
= 0;
394 static const uint32_t SNAPSHOT_BAILOUTKIND_BITS
= 6;
395 static const uint32_t SNAPSHOT_BAILOUTKIND_MASK
=
396 COMPUTE_MASK_(SNAPSHOT_BAILOUTKIND
);
398 static_assert((1 << SNAPSHOT_BAILOUTKIND_BITS
) - 1 >=
399 uint8_t(BailoutKind::Limit
),
400 "Not enough bits for BailoutKinds");
402 static const uint32_t SNAPSHOT_ROFFSET_SHIFT
=
403 COMPUTE_SHIFT_AFTER_(SNAPSHOT_BAILOUTKIND
);
404 static const uint32_t SNAPSHOT_ROFFSET_BITS
= 32 - SNAPSHOT_ROFFSET_SHIFT
;
405 static const uint32_t SNAPSHOT_ROFFSET_MASK
= COMPUTE_MASK_(SNAPSHOT_ROFFSET
);
408 #undef COMPUTE_SHIFT_AFTER_
410 void SnapshotReader::readSnapshotHeader() {
411 uint32_t bits
= reader_
.readUnsigned();
413 bailoutKind_
= BailoutKind((bits
& SNAPSHOT_BAILOUTKIND_MASK
) >>
414 SNAPSHOT_BAILOUTKIND_SHIFT
);
415 recoverOffset_
= (bits
& SNAPSHOT_ROFFSET_MASK
) >> SNAPSHOT_ROFFSET_SHIFT
;
417 JitSpew(JitSpew_IonSnapshots
, "Read snapshot header with bailout kind %u",
418 uint32_t(bailoutKind_
));
420 #ifdef TRACK_SNAPSHOTS
#ifdef TRACK_SNAPSHOTS
// Reads the extra debugging payload written by SnapshotWriter::trackSnapshot:
// the bytecode op plus the MIR/LIR opcodes and ids the snapshot was taken at.
void SnapshotReader::readTrackSnapshot() {
  pcOpcode_ = reader_.readUnsigned();
  mirOpcode_ = reader_.readUnsigned();
  mirId_ = reader_.readUnsigned();
  lirOpcode_ = reader_.readUnsigned();
  lirId_ = reader_.readUnsigned();
}

// Spews the tracked origin of this snapshot when a bailout occurs.
void SnapshotReader::spewBailingFrom() const {
#  ifdef JS_JITSPEW
  if (JitSpewEnabled(JitSpew_IonBailouts)) {
    JitSpewHeader(JitSpew_IonBailouts);
    Fprinter& out = JitSpewPrinter();
    out.printf(" bailing from bytecode: %s, MIR: ", CodeName(JSOp(pcOpcode_)));
    MDefinition::PrintOpcodeName(out, MDefinition::Opcode(mirOpcode_));
    out.printf(" [%u], LIR: ", mirId_);
    LInstruction::printName(out, LInstruction::Opcode(lirOpcode_));
    out.printf(" [%u]", lirId_);
    out.printf("\n");
  }
#  endif
}
#endif  // TRACK_SNAPSHOTS
450 uint32_t SnapshotReader::readAllocationIndex() {
452 return reader_
.readUnsigned();
455 RValueAllocation
SnapshotReader::readAllocation() {
456 JitSpew(JitSpew_IonSnapshots
, "Reading slot %u", allocRead_
);
457 uint32_t offset
= readAllocationIndex() * ALLOCATION_TABLE_ALIGNMENT
;
458 allocReader_
.seek(allocTable_
, offset
);
459 return RValueAllocation::read(allocReader_
);
462 SnapshotWriter::SnapshotWriter()
463 // Based on the measurements made in Bug 962555 comment 20, this length
464 // should be enough to prevent the reallocation of the hash table for at
465 // least half of the compilations.
468 RecoverReader::RecoverReader(SnapshotReader
& snapshot
, const uint8_t* recovers
,
470 : reader_(nullptr, nullptr), numInstructions_(0), numInstructionsRead_(0) {
475 CompactBufferReader(recovers
+ snapshot
.recoverOffset(), recovers
+ size
);
480 RecoverReader::RecoverReader(const RecoverReader
& rr
)
481 : reader_(rr
.reader_
),
482 numInstructions_(rr
.numInstructions_
),
483 numInstructionsRead_(rr
.numInstructionsRead_
) {
484 if (reader_
.currentPosition()) {
485 rr
.instruction()->cloneInto(&rawData_
);
489 RecoverReader
& RecoverReader::operator=(const RecoverReader
& rr
) {
490 reader_
= rr
.reader_
;
491 numInstructions_
= rr
.numInstructions_
;
492 numInstructionsRead_
= rr
.numInstructionsRead_
;
493 if (reader_
.currentPosition()) {
494 rr
.instruction()->cloneInto(&rawData_
);
499 void RecoverReader::readRecoverHeader() {
500 numInstructions_
= reader_
.readUnsigned();
501 MOZ_ASSERT(numInstructions_
);
503 JitSpew(JitSpew_IonSnapshots
, "Read recover header with instructionCount %u",
507 void RecoverReader::readInstruction() {
508 MOZ_ASSERT(moreInstructions());
509 RInstruction::readRecoverData(reader_
, &rawData_
);
510 numInstructionsRead_
++;
513 SnapshotOffset
SnapshotWriter::startSnapshot(RecoverOffset recoverOffset
,
515 lastStart_
= writer_
.length();
518 JitSpew(JitSpew_IonSnapshots
,
519 "starting snapshot with recover offset %u, bailout kind %u",
520 recoverOffset
, uint32_t(kind
));
522 MOZ_ASSERT(uint32_t(kind
) < (1 << SNAPSHOT_BAILOUTKIND_BITS
));
523 MOZ_ASSERT(recoverOffset
< (1 << SNAPSHOT_ROFFSET_BITS
));
524 uint32_t bits
= (uint32_t(kind
) << SNAPSHOT_BAILOUTKIND_SHIFT
) |
525 (recoverOffset
<< SNAPSHOT_ROFFSET_SHIFT
);
527 writer_
.writeUnsigned(bits
);
#ifdef TRACK_SNAPSHOTS
// Records the code locations (bytecode op, MIR/LIR opcode and id) this
// snapshot was generated from; read back by readTrackSnapshot.
void SnapshotWriter::trackSnapshot(uint32_t pcOpcode, uint32_t mirOpcode,
                                   uint32_t mirId, uint32_t lirOpcode,
                                   uint32_t lirId) {
  writer_.writeUnsigned(pcOpcode);
  writer_.writeUnsigned(mirOpcode);
  writer_.writeUnsigned(mirId);
  writer_.writeUnsigned(lirOpcode);
  writer_.writeUnsigned(lirId);
}
#endif
543 bool SnapshotWriter::add(const RValueAllocation
& alloc
) {
545 RValueAllocMap::AddPtr p
= allocMap_
.lookupForAdd(alloc
);
547 offset
= allocWriter_
.length();
548 alloc
.write(allocWriter_
);
549 if (!allocMap_
.add(p
, alloc
, offset
)) {
550 allocWriter_
.setOOM();
558 if (JitSpewEnabled(JitSpew_IonSnapshots
)) {
559 JitSpewHeader(JitSpew_IonSnapshots
);
560 Fprinter
& out
= JitSpewPrinter();
561 out
.printf(" slot %u (%u): ", allocWritten_
, offset
);
568 writer_
.writeUnsigned(offset
/ ALLOCATION_TABLE_ALIGNMENT
);
572 void SnapshotWriter::endSnapshot() {
573 // Place a sentinel for asserting on the other end.
575 writer_
.writeSigned(-1);
578 JitSpew(JitSpew_IonSnapshots
,
579 "ending snapshot total size: %u bytes (start %u)",
580 uint32_t(writer_
.length() - lastStart_
), lastStart_
);
583 RecoverOffset
RecoverWriter::startRecover(uint32_t instructionCount
) {
584 MOZ_ASSERT(instructionCount
);
585 instructionCount_
= instructionCount
;
586 instructionsWritten_
= 0;
588 JitSpew(JitSpew_IonSnapshots
, "starting recover with %u instruction(s)",
591 RecoverOffset recoverOffset
= writer_
.length();
592 writer_
.writeUnsigned(instructionCount
);
593 return recoverOffset
;
596 void RecoverWriter::writeInstruction(const MNode
* rp
) {
597 if (!rp
->writeRecoverData(writer_
)) {
600 instructionsWritten_
++;
603 void RecoverWriter::endRecover() {
604 MOZ_ASSERT(instructionCount_
== instructionsWritten_
);