1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
9 #include "mozilla/ScopeExit.h"
11 #include <type_traits>
13 #include "jit/JitSpewer.h"
15 #include "jit/MIRGenerator.h"
16 #include "js/Printf.h"
17 #include "util/Unicode.h"
20 using namespace js::jit
;
// Human-readable names for every LIR opcode, indexed by LNode::Opcode.
// Built by stringifying each entry of LIR_OPCODE_LIST.
// (Trailing #undef / closing brace elided in this view.)
22 const char* const js::jit::LIROpNames
[] = {
23 #define OPNAME(op, ...) #op,
24 LIR_OPCODE_LIST(OPNAME
)
// Construct an empty LIR graph over `mir`. All side tables (constant pool,
// safepoint lists) allocate from the MIR graph's allocator. Instruction ids
// start at 1 so that 0 can mean "no id assigned".
// (Remaining member initializers elided in this view.)
28 LIRGraph::LIRGraph(MIRGraph
* mir
)
29 : constantPool_(mir
->alloc()),
30 constantPoolMap_(mir
->alloc()),
31 safepoints_(mir
->alloc()),
32 nonCallSafepoints_(mir
->alloc()),
33 numVirtualRegisters_(0),
34 numInstructions_(1), // First id is 1.
36 argumentSlotCount_(0),
// Intern `v` in the constant pool and store its pool index in *index.
// Returns false on OOM. (The early-return path for a value already present
// in the map is elided in this view.)
39 bool LIRGraph::addConstantToPool(const Value
& v
, uint32_t* index
) {
40 ConstantPoolMap::AddPtr p
= constantPoolMap_
.lookupForAdd(v
);
// Not present yet: append to the pool and record the value -> index mapping.
45 *index
= constantPool_
.length();
46 return constantPool_
.append(v
) && constantPoolMap_
.add(p
, v
, *index
);
// Record that `ins` requires a safepoint. Non-call instructions are also
// tracked in the separate nonCallSafepoints_ list. Returns false on OOM.
49 bool LIRGraph::noteNeedsSafepoint(LInstruction
* ins
) {
50 // Instructions with safepoints must be in linear order.
51 MOZ_ASSERT_IF(!safepoints_
.empty(), safepoints_
.back()->id() < ins
->id());
52 if (!ins
->isCall() && !nonCallSafepoints_
.append(ins
)) {
// (OOM return elided in this view.)
55 return safepoints_
.append(ins
);
// Dump every block of the graph, in block order, to `out`.
59 void LIRGraph::dump(GenericPrinter
& out
) {
60 for (size_t i
= 0; i
< numBlocks(); i
++) {
61 getBlock(i
)->dump(out
);
// Convenience overload: dump the whole graph. (Body elided in this view;
// presumably dumps to stderr like the other dump() overloads in this file.)
66 void LIRGraph::dump() {
// Wrap MIR block `from` in an LBlock and link the two: the MIR block
// records this LBlock as its lowered counterpart. Entry/exit move groups
// are created lazily (see getEntryMoveGroup/getExitMoveGroup).
73 LBlock::LBlock(MBasicBlock
* from
)
74 : block_(from
), entryMoveGroup_(nullptr), exitMoveGroup_(nullptr) {
75 from
->assignLir(this);
// Create this block's LIR phis from its MIR phis. Returns false on OOM.
// First pass counts how many LPhis are needed, second pass constructs them
// in place. (Several case labels and OOM returns elided in this view.)
78 bool LBlock::init(TempAllocator
& alloc
) {
79 // Count the number of LPhis we'll need.
81 for (MPhiIterator
i(block_
->phisBegin()), e(block_
->phisEnd()); i
!= e
; ++i
) {
// One MIR phi may require several LIR phis (BOX_PIECES / INT64_PIECES
// pieces, depending on the phi's type).
83 switch (phi
->type()) {
85 numLPhis
+= BOX_PIECES
;
88 numLPhis
+= INT64_PIECES
;
96 // Allocate space for the LPhis.
97 if (!phis_
.init(alloc
, numLPhis
)) {
101 // For each MIR phi, set up LIR phis as appropriate. We'll fill in their
102 // operands on each incoming edge, and set their definitions at the start of
103 // their defining block.
105 size_t numPreds
= block_
->numPredecessors();
106 for (MPhiIterator
i(block_
->phisBegin()), e(block_
->phisEnd()); i
!= e
; ++i
) {
108 MOZ_ASSERT(phi
->numOperands() == numPreds
);
111 switch (phi
->type()) {
113 numPhis
= BOX_PIECES
;
116 numPhis
= INT64_PIECES
;
122 for (int i
= 0; i
< numPhis
; i
++) {
123 LAllocation
* inputs
= alloc
.allocateArray
<LAllocation
>(numPreds
);
// Construct each LPhi directly into its preallocated slot.
128 void* addr
= &phis_
[phiIndex
++];
129 LPhi
* lphi
= new (addr
) LPhi(phi
, inputs
);
130 lphi
->setBlock(this);
// Find the first instruction of this block that has been assigned an id.
// (Loop body and return elided in this view.)
136 const LInstruction
* LBlock::firstInstructionWithId() const {
137 for (LInstructionIterator
i(instructions_
.begin()); i
!= instructions_
.end();
146 LMoveGroup
* LBlock::getEntryMoveGroup(TempAllocator
& alloc
) {
147 if (entryMoveGroup_
) {
148 return entryMoveGroup_
;
150 entryMoveGroup_
= LMoveGroup::New(alloc
);
151 insertBefore(*begin(), entryMoveGroup_
);
152 return entryMoveGroup_
;
155 LMoveGroup
* LBlock::getExitMoveGroup(TempAllocator
& alloc
) {
156 if (exitMoveGroup_
) {
157 return exitMoveGroup_
;
159 exitMoveGroup_
= LMoveGroup::New(alloc
);
160 insertBefore(*rbegin(), exitMoveGroup_
);
161 return exitMoveGroup_
;
// Print this block to `out`: a "blockN:" header, then each phi, then each
// instruction (annotating instructions that carry a safepoint).
165 void LBlock::dump(GenericPrinter
& out
) {
166 out
.printf("block%u:\n", mir()->id());
167 for (size_t i
= 0; i
< numPhis(); ++i
) {
168 getPhi(i
)->dump(out
);
171 for (LInstructionIterator iter
= begin(); iter
!= end(); iter
++) {
173 if (iter
->safepoint()) {
174 out
.printf(" SAFEPOINT(0x%p) ", iter
->safepoint());
// Dump this block to stderr. (Remaining body elided in this view.)
180 void LBlock::dump() {
181 Fprinter
out(stderr
);
// Count the operands of `recoverInfo` that are NOT recovered on bailout,
// i.e. those that will need a snapshot slot.
// (Accumulator increment and return elided in this view.)
187 static size_t TotalOperandCount(LRecoverInfo
* recoverInfo
) {
189 for (LRecoverInfo::OperandIter
it(recoverInfo
); !it
; ++it
) {
190 if (!it
->isRecoveredOnBailout()) {
// Construct an empty recover info. The recover offset starts out invalid;
// it is filled in later when the recover data is encoded.
197 LRecoverInfo::LRecoverInfo(TempAllocator
& alloc
)
198 : instructions_(alloc
), recoverOffset_(INVALID_RECOVER_OFFSET
) {}
// Allocate a recover info from the generator's allocator and populate it
// from resume point `mir`. (Failure return paths elided in this view.)
200 LRecoverInfo
* LRecoverInfo::New(MIRGenerator
* gen
, MResumePoint
* mir
) {
201 LRecoverInfo
* recoverInfo
= new (gen
->alloc()) LRecoverInfo(gen
->alloc());
202 if (!recoverInfo
|| !recoverInfo
->init(mir
)) {
206 JitSpew(JitSpew_IonSnapshots
, "Generating LIR recover info %p from MIR (%p)",
207 (void*)recoverInfo
, (void*)mir
);
// Templated over the node type (MResumePoint or MDefinition) to
212 // de-virtualise MResumePoint::getOperand calls.
213 template <typename Node
>
// Append, via appendDefinition, every recover-on-bailout operand of `ins`
// that is not already queued. Returns false on OOM.
214 bool LRecoverInfo::appendOperands(Node
* ins
) {
215 for (size_t i
= 0, end
= ins
->numOperands(); i
< end
; i
++) {
216 MDefinition
* def
= ins
->getOperand(i
);
218 // As there is no cycle in the data-flow (without MPhi), checking for
219 // isInWorkList implies that the definition is already in the
220 // instruction vector, and not processed by a caller of the current
222 if (def
->isRecoveredOnBailout() && !def
->isInWorklist()) {
223 if (!appendDefinition(def
)) {
// Append `def` (after its own recoverable operands) to instructions_.
// The in-worklist flag marks it as queued; a scope-exit guard clears the
// flag again if anything below fails, and is released on success.
232 bool LRecoverInfo::appendDefinition(MDefinition
* def
) {
233 MOZ_ASSERT(def
->isRecoveredOnBailout());
234 def
->setInWorklist();
235 auto clearWorklistFlagOnFailure
=
236 mozilla::MakeScopeExit([&] { def
->setNotInWorklist(); });
238 if (!appendOperands(def
)) {
242 if (!instructions_
.append(def
)) {
// Success: keep the flag set; init()'s scope-exit strips it at the end.
246 clearWorklistFlagOnFailure
.release();
// Append everything needed to recover `rp`: its recoverable stores first,
// then outer (caller) resume points recursively, then the resume point's
// operands, and finally the resume point itself. Returns false on OOM.
250 bool LRecoverInfo::appendResumePoint(MResumePoint
* rp
) {
251 // Stores should be recovered first.
252 if (!rp
->storesEmpty()) {
253 hasSideEffects_
= true;
255 for (auto iter(rp
->storesBegin()), end(rp
->storesEnd()); iter
!= end
;
257 if (!appendDefinition(iter
->operand
)) {
// Outer frames must be restored before the current frame.
262 if (rp
->caller() && !appendResumePoint(rp
->caller())) {
266 if (!appendOperands(rp
)) {
270 return instructions_
.append(rp
);
// Populate instructions_ for resume point `rp`. Returns false on OOM.
273 bool LRecoverInfo::init(MResumePoint
* rp
) {
274 // Before exiting this function, remove temporary flags from all definitions
275 // added in the vector.
276 auto clearWorklistFlags
= mozilla::MakeScopeExit([&] {
277 for (MNode
** it
= begin(); it
!= end(); it
++) {
278 if (!(*it
)->isDefinition()) {
281 (*it
)->toDefinition()->setNotInWorklist();
285 // Sort operations in the order in which we need to restore the stack. This
286 // implies that outer frames, as well as operations needed to recover the
287 // current frame, are located before the current frame. The inner-most
288 // resume point should be the last element in the list.
289 if (!appendResumePoint(rp
)) {
// The innermost resume point must end up last (mir() reads the back).
293 MOZ_ASSERT(mir() == rp
);
// Construct a snapshot over `recoverInfo`: BOX_PIECES allocation slots per
// non-recovered operand. The snapshot offset starts out invalid and is set
// during encoding. (One elided initializer precedes recoverInfo_.)
297 LSnapshot::LSnapshot(LRecoverInfo
* recoverInfo
, BailoutKind kind
)
299 recoverInfo_(recoverInfo
),
300 snapshotOffset_(INVALID_SNAPSHOT_OFFSET
),
301 numSlots_(TotalOperandCount(recoverInfo
) * BOX_PIECES
),
302 bailoutKind_(kind
) {}
// Allocate the snapshot's slot array from the generator's allocator.
// (Success check and return elided in this view.)
304 bool LSnapshot::init(MIRGenerator
* gen
) {
305 slots_
= gen
->allocate
<LAllocation
>(numSlots_
);
// Allocate and initialize a snapshot for `recover`.
// (Failure return paths and trailing return elided in this view.)
309 LSnapshot
* LSnapshot::New(MIRGenerator
* gen
, LRecoverInfo
* recover
,
311 LSnapshot
* snapshot
= new (gen
->alloc()) LSnapshot(recover
, kind
);
312 if (!snapshot
|| !snapshot
->init(gen
)) {
316 JitSpew(JitSpew_IonSnapshots
, "Generating LIR snapshot %p from recover (%p)",
317 (void*)snapshot
, (void*)recover
);
322 void LSnapshot::rewriteRecoveredInput(LUse input
) {
323 // Mark any operands to this snapshot with the same value as input as being
324 // equal to the instruction's result.
325 for (size_t i
= 0; i
< numEntries(); i
++) {
326 if (getEntry(i
)->isUse() &&
327 getEntry(i
)->toUse()->virtualRegister() == input
.virtualRegister()) {
328 setEntry(i
, LUse(input
.virtualRegister(), LUse::RECOVERED_INPUT
));
// Print the name of opcode `op`, lowercased character by character, to
// `out`.
334 void LNode::printName(GenericPrinter
& out
, Opcode op
) {
// Static table of opcode name strings generated from LIR_OPCODE_LIST.
// (Trailing #undef / closing brace elided in this view.)
335 static const char* const names
[] = {
336 # define LIROP(x) #x,
337 LIR_OPCODE_LIST(LIROP
)
340 const char* name
= names
[uint32_t(op
)];
341 size_t len
= strlen(name
);
342 for (size_t i
= 0; i
< len
; i
++) {
343 out
.printf("%c", unicode::ToLowerCase(name
[i
]));
347 void LNode::printName(GenericPrinter
& out
) { printName(out
, op()); }
350 bool LAllocation::aliases(const LAllocation
& other
) const {
351 if (isFloatReg() && other
.isFloatReg()) {
352 return toFloatReg()->reg().aliases(other
.toFloatReg()->reg());
354 return *this == other
;
// Map an LDefinition::Type to its printable name; crashes on an invalid
// type. (Most of the returned string literals are elided in this view.)
358 static const char* DefTypeName(LDefinition::Type type
) {
360 case LDefinition::GENERAL
:
362 case LDefinition::INT32
:
364 case LDefinition::OBJECT
:
366 case LDefinition::SLOTS
:
368 case LDefinition::WASM_ANYREF
:
370 case LDefinition::FLOAT32
:
372 case LDefinition::DOUBLE
:
374 case LDefinition::SIMD128
:
376 case LDefinition::STACKRESULTS
:
377 return "stackresults";
379 case LDefinition::TYPE
:
381 case LDefinition::PAYLOAD
:
384 case LDefinition::BOX
:
388 MOZ_CRASH("Invalid type");
// Render this definition as "v<vreg><type>" plus a policy suffix (":reg"
// for FIXED, ":tied(n)" for MUST_REUSE_INPUT). Infallible: crashes via
// oomUnsafe if string allocation fails. (OOM checks elided in this view.)
391 UniqueChars
LDefinition::toString() const {
392 AutoEnterOOMUnsafeRegion oomUnsafe
;
396 buf
= JS_smprintf("bogus");
398 buf
= JS_smprintf("v%u<%s>", virtualRegister(), DefTypeName(type()));
400 if (policy() == LDefinition::FIXED
) {
401 buf
= JS_sprintf_append(std::move(buf
), ":%s",
402 output()->toString().get());
403 } else if (policy() == LDefinition::MUST_REUSE_INPUT
) {
404 buf
= JS_sprintf_append(std::move(buf
), ":tied(%u)", getReusedInput());
410 oomUnsafe
.crash("LDefinition::toString()");
// Render a use as "v<vreg>:<policy tag>" (R/F/A/KA/S/RI); crashes on an
// invalid policy. (Some case labels are elided in this view.)
416 static UniqueChars
PrintUse(const LUse
* use
) {
417 switch (use
->policy()) {
419 return JS_smprintf("v%u:R", use
->virtualRegister());
421 return JS_smprintf("v%u:F:%s", use
->virtualRegister(),
422 AnyRegister::FromCode(use
->registerCode()).name());
424 return JS_smprintf("v%u:A", use
->virtualRegister());
425 case LUse::KEEPALIVE
:
426 return JS_smprintf("v%u:KA", use
->virtualRegister());
428 return JS_smprintf("v%u:S", use
->virtualRegister());
429 case LUse::RECOVERED_INPUT
:
430 return JS_smprintf("v%u:RI", use
->virtualRegister());
432 MOZ_CRASH("invalid use policy");
// Render this allocation for debugging: constants by value (dispatching on
// the MConstant's MIRType), registers by name, stack/argument slots by
// index, and uses via PrintUse. Infallible: crashes via oomUnsafe if
// string allocation fails. (Several case labels and the switch headers are
// elided in this view.)
436 UniqueChars
LAllocation::toString() const {
437 AutoEnterOOMUnsafeRegion oomUnsafe
;
441 buf
= JS_smprintf("bogus");
444 case LAllocation::CONSTANT_VALUE
:
445 case LAllocation::CONSTANT_INDEX
: {
446 const MConstant
* c
= toConstant();
449 buf
= JS_smprintf("%d", c
->toInt32());
452 buf
= JS_smprintf("%" PRId64
, c
->toInt64());
454 case MIRType::IntPtr
:
455 buf
= JS_smprintf("%" PRIxPTR
, c
->toIntPtr());
457 case MIRType::String
:
458 // If a JSContext is available, print the actual string contents.
459 if (JSContext
* cx
= TlsContext
.get()) {
462 oomUnsafe
.crash("LAllocation::toString()");
464 spr
.putString(cx
, c
->toString());
467 buf
= JS_smprintf("string");
470 case MIRType::Symbol
:
471 buf
= JS_smprintf("sym");
473 case MIRType::Object
:
475 buf
= JS_smprintf("obj %p", c
->toObjectOrNull());
478 buf
= JS_smprintf("shape");
481 if (c
->isTypeRepresentableAsDouble()) {
482 buf
= JS_smprintf("%g", c
->numberToDouble());
484 buf
= JS_smprintf("const");
488 case LAllocation::GPR
:
489 buf
= JS_smprintf("%s", toGeneralReg()->reg().name());
491 case LAllocation::FPU
:
492 buf
= JS_smprintf("%s", toFloatReg()->reg().name());
494 case LAllocation::STACK_SLOT
:
495 buf
= JS_smprintf("stack:%u", toStackSlot()->slot());
497 case LAllocation::ARGUMENT_SLOT
:
498 buf
= JS_smprintf("arg:%u", toArgument()->index());
500 case LAllocation::STACK_AREA
:
501 buf
= JS_smprintf("stackarea:%u+%u", toStackArea()->base(),
502 toStackArea()->size());
504 case LAllocation::USE
:
505 buf
= PrintUse(toUse());
513 oomUnsafe
.crash("LAllocation::toString()");
519 void LAllocation::dump() const { fprintf(stderr
, "%s\n", toString().get()); }
521 void LDefinition::dump() const { fprintf(stderr
, "%s\n", toString().get()); }
// Print `node`'s operands to `out` as " (op0), (op1), ...".
// (Separator printing between operands is elided in this view.)
523 template <typename T
>
524 static void PrintOperands(GenericPrinter
& out
, T
* node
) {
525 size_t numOperands
= node
->numOperands();
527 for (size_t i
= 0; i
< numOperands
; i
++) {
528 out
.printf(" (%s)", node
->getOperand(i
)->toString().get());
529 if (i
!= numOperands
- 1) {
// Print operands by dispatching on the node's concrete kind: move groups
// print themselves, integer constants print their value, and phis/other
// instructions go through the generic PrintOperands helper.
// (The dispatch conditions themselves are elided in this view.)
535 void LNode::printOperands(GenericPrinter
& out
) {
537 toMoveGroup()->printOperands(out
);
541 out
.printf(" (%d)", toInteger()->i32());
545 out
.printf(" (%" PRId64
")", toInteger64()->i64());
550 PrintOperands(out
, toPhi());
552 PrintOperands(out
, toInstruction());
// Attach `snapshot` to this instruction; each instruction may carry at
// most one snapshot. Logs the assignment when snapshot spew is enabled.
// (The tail of the spew output is elided in this view.)
557 void LInstruction::assignSnapshot(LSnapshot
* snapshot
) {
558 MOZ_ASSERT(!snapshot_
);
559 snapshot_
= snapshot
;
562 if (JitSpewEnabled(JitSpew_IonSnapshots
)) {
563 JitSpewHeader(JitSpew_IonSnapshots
);
564 Fprinter
& out
= JitSpewPrinter();
565 out
.printf("Assigning snapshot %p to instruction %p (", (void*)snapshot
,
574 static size_t NumSuccessorsHelper(const LNode
* ins
) { return 0; }
// Overload for control instructions, which carry their successor count in
// the Succs template parameter. (Body elided in this view; presumably
// returns Succs.)
576 template <size_t Succs
, size_t Operands
, size_t Temps
>
577 static size_t NumSuccessorsHelper(
578 const LControlInstructionHelper
<Succs
, Operands
, Temps
>* ins
) {
// Return the number of successor blocks of `ins` by switching on its
// concrete opcode (switch generated from LIR_OPCODE_LIST) and calling the
// matching NumSuccessorsHelper overload.
582 static size_t NumSuccessors(const LInstruction
* ins
) {
585 MOZ_CRASH("Unexpected LIR op");
587 case LNode::Opcode::x: \
588 return NumSuccessorsHelper(ins->to##x());
589 LIR_OPCODE_LIST(LIROP
)
// Overload-resolution fallback: non-control nodes must never be asked for
// a successor.
594 static MBasicBlock
* GetSuccessorHelper(const LNode
* ins
, size_t i
) {
595 MOZ_CRASH("Unexpected instruction with successors");
598 template <size_t Succs
, size_t Operands
, size_t Temps
>
599 static MBasicBlock
* GetSuccessorHelper(
600 const LControlInstructionHelper
<Succs
, Operands
, Temps
>* ins
, size_t i
) {
601 return ins
->getSuccessor(i
);
// Return successor `i` of `ins` by switching on its concrete opcode
// (switch generated from LIR_OPCODE_LIST); asserts `i` is in range.
604 static MBasicBlock
* GetSuccessor(const LInstruction
* ins
, size_t i
) {
605 MOZ_ASSERT(i
< NumSuccessors(ins
));
609 MOZ_CRASH("Unexpected LIR op");
611 case LNode::Opcode::x: \
612 return GetSuccessorHelper(ins->to##x(), i);
613 LIR_OPCODE_LIST(LIROP
)
// Print this node to `out`: its definitions, then (for instructions) its
// temps and any successor blocks. (Separator/punctuation printing between
// the sections is elided in this view.)
620 void LNode::dump(GenericPrinter
& out
) {
621 if (numDefs() != 0) {
623 for (size_t i
= 0; i
< numDefs(); i
++) {
// Defs live on the concrete subclass: fetch via the phi or instruction view.
624 const LDefinition
* def
=
625 isPhi() ? toPhi()->getDef(i
) : toInstruction()->getDef(i
);
626 out
.printf("%s", def
->toString().get());
627 if (i
!= numDefs() - 1) {
637 if (isInstruction()) {
638 LInstruction
* ins
= toInstruction();
639 size_t numTemps
= ins
->numTemps();
642 for (size_t i
= 0; i
< numTemps
; i
++) {
643 out
.printf("%s", ins
->getTemp(i
)->toString().get());
644 if (i
!= numTemps
- 1) {
651 size_t numSuccessors
= NumSuccessors(ins
);
652 if (numSuccessors
> 0) {
654 for (size_t i
= 0; i
< numSuccessors
; i
++) {
655 MBasicBlock
* succ
= GetSuccessor(ins
, i
);
656 out
.printf("block%u", succ
->id());
657 if (i
!= numSuccessors
- 1) {
// NOTE(review): this appears to be the body of the no-argument
// LNode::dump() overload (its signature is elided in this view); it
// constructs an Fprinter over stderr — confirm against the full file.
667 Fprinter
out(stderr
);
// Return this node's extra debug name by switching on its concrete opcode
// (switch generated from LIR_OPCODE_LIST) and forwarding to the
// subclass's extraName().
673 const char* LNode::getExtraName() const {
676 MOZ_CRASH("Unexpected LIR op");
678 case LNode::Opcode::x: \
679 return to##x()->extraName();
680 LIR_OPCODE_LIST(LIROP
)
// Allocate this instruction's safepoint from `alloc`. Asserts it has not
// already been created, and that the allocation produced a safepoint.
686 void LInstruction::initSafepoint(TempAllocator
& alloc
) {
687 MOZ_ASSERT(!safepoint_
);
688 safepoint_
= new (alloc
) LSafepoint(alloc
);
689 MOZ_ASSERT(safepoint_
);
// Append a move from -> to (of register class `type`) to this group.
// Asserts the move is not a self-move and that no move already in the
// group writes the same destination. Returns false on OOM.
692 bool LMoveGroup::add(LAllocation from
, LAllocation to
, LDefinition::Type type
) {
694 MOZ_ASSERT(from
!= to
);
695 for (size_t i
= 0; i
< moves_
.length(); i
++) {
696 MOZ_ASSERT(to
!= moves_
[i
].to());
699 // Check that SIMD moves are aligned according to ABI requirements.
701 # ifdef ENABLE_WASM_SIMD
702 // Alignment is not currently required for SIMD on x86/x64/arm64. See also
703 // CodeGeneratorShared::CodeGeneratorShared and in general everywhere
704 // SimdMemoryAlignment is used. Likely, alignment requirements will return.
705 # if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || \
706 defined(JS_CODEGEN_ARM64)
707 // No need for any check on x86/x64/arm64.
709 # error "Need to consider SIMD alignment on this target."
710 // The following code may be of use if we need alignment checks on
711 // some future target.
712 //if (LDefinition(type).type() == LDefinition::SIMD128) {
713 // MOZ_ASSERT(from.isMemory() || from.isFloatReg());
714 // if (from.isMemory()) {
715 // if (from.isArgument()) {
716 // MOZ_ASSERT(from.toArgument()->index() % SimdMemoryAlignment == 0);
718 // MOZ_ASSERT(from.toStackSlot()->slot() % SimdMemoryAlignment == 0);
721 // MOZ_ASSERT(to.isMemory() || to.isFloatReg());
722 // if (to.isMemory()) {
723 // if (to.isArgument()) {
724 // MOZ_ASSERT(to.toArgument()->index() % SimdMemoryAlignment == 0);
726 // MOZ_ASSERT(to.toStackSlot()->slot() % SimdMemoryAlignment == 0);
735 return moves_
.append(LMove(from
, to
, type
));
// Add a move from -> to such that executing the group's moves in parallel
// behaves as if this move ran after the existing ones.
738 bool LMoveGroup::addAfter(LAllocation from
, LAllocation to
,
739 LDefinition::Type type
) {
740 // Transform the operands to this move so that performing the result
741 // simultaneously with existing moves in the group will have the same
742 // effect as if the original move took place after the existing moves.
744 for (size_t i
= 0; i
< moves_
.length(); i
++) {
745 if (moves_
[i
].to() == from
) {
746 from
= moves_
[i
].from();
// If an existing move already writes `to`, overwrite it in place instead
// of appending a second writer.
755 for (size_t i
= 0; i
< moves_
.length(); i
++) {
756 if (to
== moves_
[i
].to()) {
757 moves_
[i
] = LMove(from
, to
, type
);
762 return add(from
, to
, type
);
// Print each move in the group as " [from -> to, type]".
// (Closing-bracket/separator printing is elided in this view.)
766 void LMoveGroup::printOperands(GenericPrinter
& out
) {
767 for (size_t i
= 0; i
< numMoves(); i
++) {
768 const LMove
& move
= getMove(i
);
769 out
.printf(" [%s -> %s", move
.from().toString().get(),
770 move
.to().toString().get());
771 out
.printf(", %s", DefTypeName(move
.type()));
773 if (i
!= numMoves() - 1) {
781 static_assert(!std::is_polymorphic_v<L##x>, \
782 "LIR instructions should not have virtual methods");
783 LIR_OPCODE_LIST(LIROP
)