// Bug 1890750 - Part 1: Include NATIVE_JIT_ENTRY in FunctionFlags::HasJitEntryFlags...
// [gecko.git] / js / src / jit / Snapshots.cpp
// blob 2b6c2a945b856f49d25ee433f0fdf5d42c0f1d00
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/Snapshots.h"

#include "jit/JitSpewer.h"
#ifdef TRACK_SNAPSHOTS
#  include "jit/LIR.h"
#endif
#include "jit/MIR.h"
#include "jit/Recover.h"
#include "js/Printer.h"

using namespace js;
using namespace js::jit;
// [SMDOC] IonMonkey Snapshot encoding
//
// Encodings:
//   [ptr] A fixed-size pointer.
//   [vwu] A variable-width unsigned integer.
//   [vws] A variable-width signed integer.
//   [u8] An 8-bit unsigned integer.
//   [u8'] An 8-bit unsigned integer which is potentially extended with packed
//         data.
//   [u8"] Packed data which is stored and packed in the previous [u8'].
//   [vwu*] A list of variable-width unsigned integers.
//   [pld] Payload of Recover Value Allocation:
//         PAYLOAD_NONE:
//           There is no payload.
//
//         PAYLOAD_INDEX:
//           [vwu] Index, such as the constant pool index.
//
//         PAYLOAD_STACK_OFFSET:
//           [vws] Stack offset based on the base of the Ion frame.
//
//         PAYLOAD_GPR:
//           [u8] Code of the general register.
//
//         PAYLOAD_FPU:
//           [u8] Code of the FPU register.
//
//         PAYLOAD_PACKED_TAG:
//           [u8"] Bits 5-7: JSValueType is encoded on the low bits of the
//                 Mode of the RValueAllocation.
//
// Snapshot header:
//
//   [vwu] bits ((n+1)-31]: recover instruction offset
//         bits [0,n): bailout kind (n = SNAPSHOT_BAILOUTKIND_BITS)
//
// Snapshot body, repeated "frame count" times, from oldest frame to newest
// frame. Note that the first frame doesn't have the "parent PC" field.
//
//   [ptr] Debug only: JSScript*
//   [vwu] pc offset
//   [vwu] # of RVA's indexes, including nargs
//   [vwu*] List of indexes to R(ecover)ValueAllocation table. Contains
//          nargs + nfixed + stackDepth items.
//
// Recover value allocations are encoded at the end of the Snapshot buffer, and
// they are padded on ALLOCATION_TABLE_ALIGNMENT. The encoding of each
// allocation is determined by the RValueAllocation::Layout, which can be
// obtained from the RValueAllocation::Mode with the layoutFromMode function.
// The layout structure lists the types of payload which are used to serialize
// / deserialize / dump the content of the allocations.
//
// R(ecover)ValueAllocation items:
//   [u8'] Mode, which defines the type of the payload as well as the
//         interpretation.
//   [pld] first payload (packed tag, index, stack offset, register, ...)
//   [pld] second payload (register, stack offset, none)
//
//       Modes:
//         CONSTANT [INDEX]
//           Index into the constant pool.
//
//         CST_UNDEFINED []
//           Constant value which corresponds to the "undefined" JS value.
//
//         CST_NULL []
//           Constant value which corresponds to the "null" JS value.
//
//         DOUBLE_REG [FPU_REG]
//           Double value stored in a FPU register.
//
//         ANY_FLOAT_REG [FPU_REG]
//           Any Float value (float32, simd) stored in a FPU register.
//
//         ANY_FLOAT_STACK [STACK_OFFSET]
//           Any Float value (float32, simd) stored on the stack.
//
//         UNTYPED_REG [GPR_REG]
//         UNTYPED_STACK [STACK_OFFSET]
//         UNTYPED_REG_REG [GPR_REG, GPR_REG]
//         UNTYPED_REG_STACK [GPR_REG, STACK_OFFSET]
//         UNTYPED_STACK_REG [STACK_OFFSET, GPR_REG]
//         UNTYPED_STACK_STACK [STACK_OFFSET, STACK_OFFSET]
//           Value with dynamically known type. On 32 bits architecture, the
//           first register/stack-offset corresponds to the holder of the type,
//           and the second corresponds to the payload of the JS Value.
//
//         RECOVER_INSTRUCTION [INDEX]
//           Index into the list of recovered instruction results.
//
//         RI_WITH_DEFAULT_CST [INDEX] [INDEX]
//           The first payload is the index into the list of recovered
//           instruction results. The second payload is the index in the
//           constant pool.
//
//         TYPED_REG [PACKED_TAG, GPR_REG]:
//           Value with statically known type, whose payload is stored in a
//           register.
//
//         TYPED_STACK [PACKED_TAG, STACK_OFFSET]:
//           Value with statically known type, whose payload is stored at an
//           offset on the stack.
//
124 const RValueAllocation::Layout& RValueAllocation::layoutFromMode(Mode mode) {
125 switch (mode) {
126 case CONSTANT: {
127 static const RValueAllocation::Layout layout = {PAYLOAD_INDEX,
128 PAYLOAD_NONE, "constant"};
129 return layout;
132 case CST_UNDEFINED: {
133 static const RValueAllocation::Layout layout = {
134 PAYLOAD_NONE, PAYLOAD_NONE, "undefined"};
135 return layout;
138 case CST_NULL: {
139 static const RValueAllocation::Layout layout = {PAYLOAD_NONE,
140 PAYLOAD_NONE, "null"};
141 return layout;
144 case DOUBLE_REG: {
145 static const RValueAllocation::Layout layout = {PAYLOAD_FPU, PAYLOAD_NONE,
146 "double"};
147 return layout;
149 case ANY_FLOAT_REG: {
150 static const RValueAllocation::Layout layout = {PAYLOAD_FPU, PAYLOAD_NONE,
151 "float register content"};
152 return layout;
154 case ANY_FLOAT_STACK: {
155 static const RValueAllocation::Layout layout = {
156 PAYLOAD_STACK_OFFSET, PAYLOAD_NONE, "float register content"};
157 return layout;
159 #if defined(JS_NUNBOX32)
160 case UNTYPED_REG_REG: {
161 static const RValueAllocation::Layout layout = {PAYLOAD_GPR, PAYLOAD_GPR,
162 "value"};
163 return layout;
165 case UNTYPED_REG_STACK: {
166 static const RValueAllocation::Layout layout = {
167 PAYLOAD_GPR, PAYLOAD_STACK_OFFSET, "value"};
168 return layout;
170 case UNTYPED_STACK_REG: {
171 static const RValueAllocation::Layout layout = {PAYLOAD_STACK_OFFSET,
172 PAYLOAD_GPR, "value"};
173 return layout;
175 case UNTYPED_STACK_STACK: {
176 static const RValueAllocation::Layout layout = {
177 PAYLOAD_STACK_OFFSET, PAYLOAD_STACK_OFFSET, "value"};
178 return layout;
180 #elif defined(JS_PUNBOX64)
181 case UNTYPED_REG: {
182 static const RValueAllocation::Layout layout = {PAYLOAD_GPR, PAYLOAD_NONE,
183 "value"};
184 return layout;
186 case UNTYPED_STACK: {
187 static const RValueAllocation::Layout layout = {PAYLOAD_STACK_OFFSET,
188 PAYLOAD_NONE, "value"};
189 return layout;
191 #endif
192 case RECOVER_INSTRUCTION: {
193 static const RValueAllocation::Layout layout = {
194 PAYLOAD_INDEX, PAYLOAD_NONE, "instruction"};
195 return layout;
197 case RI_WITH_DEFAULT_CST: {
198 static const RValueAllocation::Layout layout = {
199 PAYLOAD_INDEX, PAYLOAD_INDEX, "instruction with default"};
200 return layout;
203 default: {
204 static const RValueAllocation::Layout regLayout = {
205 PAYLOAD_PACKED_TAG, PAYLOAD_GPR, "typed value"};
207 static const RValueAllocation::Layout stackLayout = {
208 PAYLOAD_PACKED_TAG, PAYLOAD_STACK_OFFSET, "typed value"};
210 if (mode >= TYPED_REG_MIN && mode <= TYPED_REG_MAX) {
211 return regLayout;
213 if (mode >= TYPED_STACK_MIN && mode <= TYPED_STACK_MAX) {
214 return stackLayout;
219 MOZ_CRASH_UNSAFE_PRINTF("Unexpected mode: 0x%x", uint32_t(mode));
// Pad serialized RValueAllocations by a multiple of X bytes in the allocation
// buffer. By padding serialized value allocations, we are building an
// indexable table of elements of X bytes, and thus we can safely divide any
// offset within the buffer by X to obtain an index.
//
// By padding, we are losing space within the allocation buffer, but we
// multiply by X the number of indexes that we can store on one byte in each
// snapshot.
//
// Some value allocations are taking more than X bytes to be encoded, in which
// case we will pad to a multiple of X, and we are wasting indexes. The choice
// of X should be balanced between the wasted padding of serialized value
// allocation, and the saving made in snapshot indexes.
static const size_t ALLOCATION_TABLE_ALIGNMENT = 2; /* bytes */
237 void RValueAllocation::readPayload(CompactBufferReader& reader,
238 PayloadType type, uint8_t* mode,
239 Payload* p) {
240 switch (type) {
241 case PAYLOAD_NONE:
242 break;
243 case PAYLOAD_INDEX:
244 p->index = reader.readUnsigned();
245 break;
246 case PAYLOAD_STACK_OFFSET:
247 p->stackOffset = reader.readSigned();
248 break;
249 case PAYLOAD_GPR:
250 p->gpr = Register::FromCode(reader.readByte());
251 break;
252 case PAYLOAD_FPU:
253 p->fpu.data = reader.readByte();
254 break;
255 case PAYLOAD_PACKED_TAG:
256 p->type = JSValueType(*mode & PACKED_TAG_MASK);
257 *mode = *mode & ~PACKED_TAG_MASK;
258 break;
262 RValueAllocation RValueAllocation::read(CompactBufferReader& reader) {
263 uint8_t mode = reader.readByte();
264 const Layout& layout = layoutFromMode(Mode(mode & MODE_BITS_MASK));
265 Payload arg1, arg2;
267 readPayload(reader, layout.type1, &mode, &arg1);
268 readPayload(reader, layout.type2, &mode, &arg2);
269 return RValueAllocation(Mode(mode), arg1, arg2);
272 void RValueAllocation::writePayload(CompactBufferWriter& writer,
273 PayloadType type, Payload p) {
274 switch (type) {
275 case PAYLOAD_NONE:
276 break;
277 case PAYLOAD_INDEX:
278 writer.writeUnsigned(p.index);
279 break;
280 case PAYLOAD_STACK_OFFSET:
281 writer.writeSigned(p.stackOffset);
282 break;
283 case PAYLOAD_GPR:
284 static_assert(Registers::Total <= 0x100,
285 "Not enough bytes to encode all registers.");
286 writer.writeByte(p.gpr.code());
287 break;
288 case PAYLOAD_FPU:
289 static_assert(FloatRegisters::Total <= 0x100,
290 "Not enough bytes to encode all float registers.");
291 writer.writeByte(p.fpu.code());
292 break;
293 case PAYLOAD_PACKED_TAG: {
294 // This code assumes that the PACKED_TAG payload is following the
295 // writeByte of the mode.
296 if (!writer.oom()) {
297 MOZ_ASSERT(writer.length());
298 uint8_t* mode = writer.buffer() + (writer.length() - 1);
299 MOZ_ASSERT((*mode & PACKED_TAG_MASK) == 0 &&
300 (p.type & ~PACKED_TAG_MASK) == 0);
301 *mode = *mode | p.type;
303 break;
308 void RValueAllocation::writePadding(CompactBufferWriter& writer) {
309 // Write 0x7f in all padding bytes.
310 while (writer.length() % ALLOCATION_TABLE_ALIGNMENT) {
311 writer.writeByte(0x7f);
315 void RValueAllocation::write(CompactBufferWriter& writer) const {
316 const Layout& layout = layoutFromMode(mode());
317 MOZ_ASSERT(layout.type2 != PAYLOAD_PACKED_TAG);
318 MOZ_ASSERT(writer.length() % ALLOCATION_TABLE_ALIGNMENT == 0);
320 writer.writeByte(mode_);
321 writePayload(writer, layout.type1, arg1_);
322 writePayload(writer, layout.type2, arg2_);
323 writePadding(writer);
326 HashNumber RValueAllocation::hash() const {
327 HashNumber res = 0;
328 res = HashNumber(mode_);
329 res = arg1_.index + (res << 6) + (res << 16) - res;
330 res = arg2_.index + (res << 6) + (res << 16) - res;
331 return res;
334 #ifdef JS_JITSPEW
335 void RValueAllocation::dumpPayload(GenericPrinter& out, PayloadType type,
336 Payload p) {
337 switch (type) {
338 case PAYLOAD_NONE:
339 break;
340 case PAYLOAD_INDEX:
341 out.printf("index %u", p.index);
342 break;
343 case PAYLOAD_STACK_OFFSET:
344 out.printf("stack %d", p.stackOffset);
345 break;
346 case PAYLOAD_GPR:
347 out.printf("reg %s", p.gpr.name());
348 break;
349 case PAYLOAD_FPU:
350 out.printf("reg %s", p.fpu.name());
351 break;
352 case PAYLOAD_PACKED_TAG:
353 out.printf("%s", ValTypeToString(p.type));
354 break;
358 void RValueAllocation::dump(GenericPrinter& out) const {
359 const Layout& layout = layoutFromMode(mode());
360 out.printf("%s", layout.name);
362 if (layout.type1 != PAYLOAD_NONE) {
363 out.printf(" (");
365 dumpPayload(out, layout.type1, arg1_);
366 if (layout.type2 != PAYLOAD_NONE) {
367 out.printf(", ");
369 dumpPayload(out, layout.type2, arg2_);
370 if (layout.type1 != PAYLOAD_NONE) {
371 out.printf(")");
374 #endif // JS_JITSPEW
376 SnapshotReader::SnapshotReader(const uint8_t* snapshots, uint32_t offset,
377 uint32_t RVATableSize, uint32_t listSize)
378 : reader_(snapshots + offset, snapshots + listSize),
379 allocReader_(snapshots + listSize, snapshots + listSize + RVATableSize),
380 allocTable_(snapshots + listSize),
381 allocRead_(0) {
382 if (!snapshots) {
383 return;
385 JitSpew(JitSpew_IonSnapshots, "Creating snapshot reader");
386 readSnapshotHeader();
389 #define COMPUTE_SHIFT_AFTER_(name) (name##_BITS + name##_SHIFT)
390 #define COMPUTE_MASK_(name) ((uint32_t(1 << name##_BITS) - 1) << name##_SHIFT)
392 // Details of snapshot header packing.
393 static const uint32_t SNAPSHOT_BAILOUTKIND_SHIFT = 0;
394 static const uint32_t SNAPSHOT_BAILOUTKIND_BITS = 6;
395 static const uint32_t SNAPSHOT_BAILOUTKIND_MASK =
396 COMPUTE_MASK_(SNAPSHOT_BAILOUTKIND);
398 static_assert((1 << SNAPSHOT_BAILOUTKIND_BITS) - 1 >=
399 uint8_t(BailoutKind::Limit),
400 "Not enough bits for BailoutKinds");
402 static const uint32_t SNAPSHOT_ROFFSET_SHIFT =
403 COMPUTE_SHIFT_AFTER_(SNAPSHOT_BAILOUTKIND);
404 static const uint32_t SNAPSHOT_ROFFSET_BITS = 32 - SNAPSHOT_ROFFSET_SHIFT;
405 static const uint32_t SNAPSHOT_ROFFSET_MASK = COMPUTE_MASK_(SNAPSHOT_ROFFSET);
407 #undef COMPUTE_MASK_
408 #undef COMPUTE_SHIFT_AFTER_
410 void SnapshotReader::readSnapshotHeader() {
411 uint32_t bits = reader_.readUnsigned();
413 bailoutKind_ = BailoutKind((bits & SNAPSHOT_BAILOUTKIND_MASK) >>
414 SNAPSHOT_BAILOUTKIND_SHIFT);
415 recoverOffset_ = (bits & SNAPSHOT_ROFFSET_MASK) >> SNAPSHOT_ROFFSET_SHIFT;
417 JitSpew(JitSpew_IonSnapshots, "Read snapshot header with bailout kind %u",
418 uint32_t(bailoutKind_));
420 #ifdef TRACK_SNAPSHOTS
421 readTrackSnapshot();
422 #endif
#ifdef TRACK_SNAPSHOTS
// Reads the debugging payload written by SnapshotWriter::trackSnapshot:
// bytecode op and MIR/LIR opcode/ids of the snapshot point.
void SnapshotReader::readTrackSnapshot() {
  pcOpcode_ = reader_.readUnsigned();
  mirOpcode_ = reader_.readUnsigned();
  mirId_ = reader_.readUnsigned();
  lirOpcode_ = reader_.readUnsigned();
  lirId_ = reader_.readUnsigned();
}

// Spews the tracked bytecode/MIR/LIR location we are bailing from.
void SnapshotReader::spewBailingFrom() const {
#  ifdef JS_JITSPEW
  if (JitSpewEnabled(JitSpew_IonBailouts)) {
    JitSpewHeader(JitSpew_IonBailouts);
    Fprinter& out = JitSpewPrinter();
    out.printf(" bailing from bytecode: %s, MIR: ", CodeName(JSOp(pcOpcode_)));
    MDefinition::PrintOpcodeName(out, MDefinition::Opcode(mirOpcode_));
    out.printf(" [%u], LIR: ", mirId_);
    LInstruction::printName(out, LInstruction::Opcode(lirOpcode_));
    out.printf(" [%u]", lirId_);
    out.printf("\n");
  }
#  endif
}
#endif
450 uint32_t SnapshotReader::readAllocationIndex() {
451 allocRead_++;
452 return reader_.readUnsigned();
455 RValueAllocation SnapshotReader::readAllocation() {
456 JitSpew(JitSpew_IonSnapshots, "Reading slot %u", allocRead_);
457 uint32_t offset = readAllocationIndex() * ALLOCATION_TABLE_ALIGNMENT;
458 allocReader_.seek(allocTable_, offset);
459 return RValueAllocation::read(allocReader_);
462 SnapshotWriter::SnapshotWriter()
463 // Based on the measurements made in Bug 962555 comment 20, this length
464 // should be enough to prevent the reallocation of the hash table for at
465 // least half of the compilations.
466 : allocMap_(32) {}
468 RecoverReader::RecoverReader(SnapshotReader& snapshot, const uint8_t* recovers,
469 uint32_t size)
470 : reader_(nullptr, nullptr), numInstructions_(0), numInstructionsRead_(0) {
471 if (!recovers) {
472 return;
474 reader_ =
475 CompactBufferReader(recovers + snapshot.recoverOffset(), recovers + size);
476 readRecoverHeader();
477 readInstruction();
480 RecoverReader::RecoverReader(const RecoverReader& rr)
481 : reader_(rr.reader_),
482 numInstructions_(rr.numInstructions_),
483 numInstructionsRead_(rr.numInstructionsRead_) {
484 if (reader_.currentPosition()) {
485 rr.instruction()->cloneInto(&rawData_);
489 RecoverReader& RecoverReader::operator=(const RecoverReader& rr) {
490 reader_ = rr.reader_;
491 numInstructions_ = rr.numInstructions_;
492 numInstructionsRead_ = rr.numInstructionsRead_;
493 if (reader_.currentPosition()) {
494 rr.instruction()->cloneInto(&rawData_);
496 return *this;
499 void RecoverReader::readRecoverHeader() {
500 numInstructions_ = reader_.readUnsigned();
501 MOZ_ASSERT(numInstructions_);
503 JitSpew(JitSpew_IonSnapshots, "Read recover header with instructionCount %u",
504 numInstructions_);
507 void RecoverReader::readInstruction() {
508 MOZ_ASSERT(moreInstructions());
509 RInstruction::readRecoverData(reader_, &rawData_);
510 numInstructionsRead_++;
513 SnapshotOffset SnapshotWriter::startSnapshot(RecoverOffset recoverOffset,
514 BailoutKind kind) {
515 lastStart_ = writer_.length();
516 allocWritten_ = 0;
518 JitSpew(JitSpew_IonSnapshots,
519 "starting snapshot with recover offset %u, bailout kind %u",
520 recoverOffset, uint32_t(kind));
522 MOZ_ASSERT(uint32_t(kind) < (1 << SNAPSHOT_BAILOUTKIND_BITS));
523 MOZ_ASSERT(recoverOffset < (1 << SNAPSHOT_ROFFSET_BITS));
524 uint32_t bits = (uint32_t(kind) << SNAPSHOT_BAILOUTKIND_SHIFT) |
525 (recoverOffset << SNAPSHOT_ROFFSET_SHIFT);
527 writer_.writeUnsigned(bits);
528 return lastStart_;
#ifdef TRACK_SNAPSHOTS
// Records debugging info (bytecode op, MIR/LIR opcode/ids); read back by
// SnapshotReader::readTrackSnapshot.
void SnapshotWriter::trackSnapshot(uint32_t pcOpcode, uint32_t mirOpcode,
                                   uint32_t mirId, uint32_t lirOpcode,
                                   uint32_t lirId) {
  writer_.writeUnsigned(pcOpcode);
  writer_.writeUnsigned(mirOpcode);
  writer_.writeUnsigned(mirId);
  writer_.writeUnsigned(lirOpcode);
  writer_.writeUnsigned(lirId);
}
#endif
543 bool SnapshotWriter::add(const RValueAllocation& alloc) {
544 uint32_t offset;
545 RValueAllocMap::AddPtr p = allocMap_.lookupForAdd(alloc);
546 if (!p) {
547 offset = allocWriter_.length();
548 alloc.write(allocWriter_);
549 if (!allocMap_.add(p, alloc, offset)) {
550 allocWriter_.setOOM();
551 return false;
553 } else {
554 offset = p->value();
557 #ifdef JS_JITSPEW
558 if (JitSpewEnabled(JitSpew_IonSnapshots)) {
559 JitSpewHeader(JitSpew_IonSnapshots);
560 Fprinter& out = JitSpewPrinter();
561 out.printf(" slot %u (%u): ", allocWritten_, offset);
562 alloc.dump(out);
563 out.printf("\n");
565 #endif
567 allocWritten_++;
568 writer_.writeUnsigned(offset / ALLOCATION_TABLE_ALIGNMENT);
569 return true;
572 void SnapshotWriter::endSnapshot() {
573 // Place a sentinel for asserting on the other end.
574 #ifdef DEBUG
575 writer_.writeSigned(-1);
576 #endif
578 JitSpew(JitSpew_IonSnapshots,
579 "ending snapshot total size: %u bytes (start %u)",
580 uint32_t(writer_.length() - lastStart_), lastStart_);
583 RecoverOffset RecoverWriter::startRecover(uint32_t instructionCount) {
584 MOZ_ASSERT(instructionCount);
585 instructionCount_ = instructionCount;
586 instructionsWritten_ = 0;
588 JitSpew(JitSpew_IonSnapshots, "starting recover with %u instruction(s)",
589 instructionCount);
591 RecoverOffset recoverOffset = writer_.length();
592 writer_.writeUnsigned(instructionCount);
593 return recoverOffset;
596 void RecoverWriter::writeInstruction(const MNode* rp) {
597 if (!rp->writeRecoverData(writer_)) {
598 writer_.setOOM();
600 instructionsWritten_++;
603 void RecoverWriter::endRecover() {
604 MOZ_ASSERT(instructionCount_ == instructionsWritten_);