Backed out changeset f89c9403564e (bug 1913161) for causing multiple failures. CLOSED...
[gecko.git] / js / src / jit / LIR.cpp
blobff188017fd4793f6e3a552a0d347284fabae61cf
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/LIR.h"
9 #include "mozilla/ScopeExit.h"
11 #include <type_traits>
13 #include "jit/JitSpewer.h"
14 #include "jit/MIR-wasm.h"
15 #include "jit/MIR.h"
16 #include "jit/MIRGenerator.h"
17 #include "js/Printf.h"
18 #include "util/Unicode.h"
20 using namespace js;
21 using namespace js::jit;
23 const char* const js::jit::LIROpNames[] = {
24 #define OPNAME(op, ...) #op,
25 LIR_OPCODE_LIST(OPNAME)
26 #undef OPNAME
29 LIRGraph::LIRGraph(MIRGraph* mir)
30 : constantPool_(mir->alloc()),
31 constantPoolMap_(mir->alloc()),
32 safepoints_(mir->alloc()),
33 nonCallSafepoints_(mir->alloc()),
34 numVirtualRegisters_(0),
35 numInstructions_(1), // First id is 1.
36 localSlotsSize_(0),
37 argumentSlotCount_(0),
38 mir_(*mir) {}
40 bool LIRGraph::addConstantToPool(const Value& v, uint32_t* index) {
41 ConstantPoolMap::AddPtr p = constantPoolMap_.lookupForAdd(v);
42 if (p) {
43 *index = p->value();
44 return true;
46 *index = constantPool_.length();
47 return constantPool_.append(v) && constantPoolMap_.add(p, v, *index);
50 bool LIRGraph::noteNeedsSafepoint(LInstruction* ins) {
51 // Instructions with safepoints must be in linear order.
52 MOZ_ASSERT_IF(!safepoints_.empty(), safepoints_.back()->id() < ins->id());
53 if (!ins->isCall() && !nonCallSafepoints_.append(ins)) {
54 return false;
56 return safepoints_.append(ins);
#ifdef JS_JITSPEW
// Print each block of the graph, in order, separated by blank lines.
void LIRGraph::dump(GenericPrinter& out) {
  for (size_t blockIndex = 0; blockIndex < numBlocks(); blockIndex++) {
    getBlock(blockIndex)->dump(out);
    out.printf("\n");
  }
}

// Convenience overload: dump the whole graph to stderr.
void LIRGraph::dump() {
  Fprinter out(stderr);
  dump(out);
  out.finish();
}
#endif
74 LBlock::LBlock(MBasicBlock* from)
75 : block_(from), entryMoveGroup_(nullptr), exitMoveGroup_(nullptr) {
76 from->assignLir(this);
79 bool LBlock::init(TempAllocator& alloc) {
80 // Count the number of LPhis we'll need.
81 size_t numLPhis = 0;
82 for (MPhiIterator i(block_->phisBegin()), e(block_->phisEnd()); i != e; ++i) {
83 MPhi* phi = *i;
84 switch (phi->type()) {
85 case MIRType::Value:
86 numLPhis += BOX_PIECES;
87 break;
88 case MIRType::Int64:
89 numLPhis += INT64_PIECES;
90 break;
91 default:
92 numLPhis += 1;
93 break;
97 // Allocate space for the LPhis.
98 if (!phis_.init(alloc, numLPhis)) {
99 return false;
102 // For each MIR phi, set up LIR phis as appropriate. We'll fill in their
103 // operands on each incoming edge, and set their definitions at the start of
104 // their defining block.
105 size_t phiIndex = 0;
106 size_t numPreds = block_->numPredecessors();
107 for (MPhiIterator i(block_->phisBegin()), e(block_->phisEnd()); i != e; ++i) {
108 MPhi* phi = *i;
109 MOZ_ASSERT(phi->numOperands() == numPreds);
111 int numPhis;
112 switch (phi->type()) {
113 case MIRType::Value:
114 numPhis = BOX_PIECES;
115 break;
116 case MIRType::Int64:
117 numPhis = INT64_PIECES;
118 break;
119 default:
120 numPhis = 1;
121 break;
123 for (int i = 0; i < numPhis; i++) {
124 LAllocation* inputs = alloc.allocateArray<LAllocation>(numPreds);
125 if (!inputs) {
126 return false;
129 void* addr = &phis_[phiIndex++];
130 LPhi* lphi = new (addr) LPhi(phi, inputs);
131 lphi->setBlock(this);
134 return true;
137 const LInstruction* LBlock::firstInstructionWithId() const {
138 for (LInstructionIterator i(instructions_.begin()); i != instructions_.end();
139 ++i) {
140 if (i->id()) {
141 return *i;
144 return 0;
147 LMoveGroup* LBlock::getEntryMoveGroup(TempAllocator& alloc) {
148 if (entryMoveGroup_) {
149 return entryMoveGroup_;
151 entryMoveGroup_ = LMoveGroup::New(alloc);
152 insertBefore(*begin(), entryMoveGroup_);
153 return entryMoveGroup_;
156 LMoveGroup* LBlock::getExitMoveGroup(TempAllocator& alloc) {
157 if (exitMoveGroup_) {
158 return exitMoveGroup_;
160 exitMoveGroup_ = LMoveGroup::New(alloc);
161 insertBefore(*rbegin(), exitMoveGroup_);
162 return exitMoveGroup_;
#ifdef JS_JITSPEW
// Print this block: its label, its phis, then each instruction (with a
// safepoint marker where present).
void LBlock::dump(GenericPrinter& out) {
  out.printf("block%u:\n", mir()->id());
  for (size_t phiIndex = 0; phiIndex < numPhis(); ++phiIndex) {
    getPhi(phiIndex)->dump(out);
    out.printf("\n");
  }
  for (LInstructionIterator iter = begin(); iter != end(); iter++) {
    iter->dump(out);
    if (iter->safepoint()) {
      out.printf(" SAFEPOINT(0x%p) ", iter->safepoint());
    }
    out.printf("\n");
  }
}

// Convenience overload: dump to stderr.
void LBlock::dump() {
  Fprinter out(stderr);
  dump(out);
  out.finish();
}
#endif
188 static size_t TotalOperandCount(LRecoverInfo* recoverInfo) {
189 size_t accum = 0;
190 for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
191 if (!it->isRecoveredOnBailout()) {
192 accum++;
195 return accum;
198 LRecoverInfo::LRecoverInfo(TempAllocator& alloc)
199 : instructions_(alloc), recoverOffset_(INVALID_RECOVER_OFFSET) {}
201 LRecoverInfo* LRecoverInfo::New(MIRGenerator* gen, MResumePoint* mir) {
202 LRecoverInfo* recoverInfo = new (gen->alloc()) LRecoverInfo(gen->alloc());
203 if (!recoverInfo || !recoverInfo->init(mir)) {
204 return nullptr;
207 JitSpew(JitSpew_IonSnapshots, "Generating LIR recover info %p from MIR (%p)",
208 (void*)recoverInfo, (void*)mir);
210 return recoverInfo;
213 // de-virtualise MResumePoint::getOperand calls.
214 template <typename Node>
215 bool LRecoverInfo::appendOperands(Node* ins) {
216 for (size_t i = 0, end = ins->numOperands(); i < end; i++) {
217 MDefinition* def = ins->getOperand(i);
219 // As there is no cycle in the data-flow (without MPhi), checking for
220 // isInWorkList implies that the definition is already in the
221 // instruction vector, and not processed by a caller of the current
222 // function.
223 if (def->isRecoveredOnBailout() && !def->isInWorklist()) {
224 if (!appendDefinition(def)) {
225 return false;
230 return true;
233 bool LRecoverInfo::appendDefinition(MDefinition* def) {
234 MOZ_ASSERT(def->isRecoveredOnBailout());
235 def->setInWorklist();
236 auto clearWorklistFlagOnFailure =
237 mozilla::MakeScopeExit([&] { def->setNotInWorklist(); });
239 if (!appendOperands(def)) {
240 return false;
243 if (!instructions_.append(def)) {
244 return false;
247 clearWorklistFlagOnFailure.release();
248 return true;
251 bool LRecoverInfo::appendResumePoint(MResumePoint* rp) {
252 // Stores should be recovered first.
253 if (!rp->storesEmpty()) {
254 hasSideEffects_ = true;
256 for (auto iter(rp->storesBegin()), end(rp->storesEnd()); iter != end;
257 ++iter) {
258 if (!appendDefinition(iter->operand)) {
259 return false;
263 if (rp->caller() && !appendResumePoint(rp->caller())) {
264 return false;
267 if (!appendOperands(rp)) {
268 return false;
271 return instructions_.append(rp);
274 bool LRecoverInfo::init(MResumePoint* rp) {
275 // Before exiting this function, remove temporary flags from all definitions
276 // added in the vector.
277 auto clearWorklistFlags = mozilla::MakeScopeExit([&] {
278 for (MNode** it = begin(); it != end(); it++) {
279 if (!(*it)->isDefinition()) {
280 continue;
282 (*it)->toDefinition()->setNotInWorklist();
286 // Sort operations in the order in which we need to restore the stack. This
287 // implies that outer frames, as well as operations needed to recover the
288 // current frame, are located before the current frame. The inner-most
289 // resume point should be the last element in the list.
290 if (!appendResumePoint(rp)) {
291 return false;
294 MOZ_ASSERT(mir() == rp);
295 return true;
298 LSnapshot::LSnapshot(LRecoverInfo* recoverInfo, BailoutKind kind)
299 : slots_(nullptr),
300 recoverInfo_(recoverInfo),
301 snapshotOffset_(INVALID_SNAPSHOT_OFFSET),
302 numSlots_(TotalOperandCount(recoverInfo) * BOX_PIECES),
303 bailoutKind_(kind) {}
305 bool LSnapshot::init(MIRGenerator* gen) {
306 slots_ = gen->allocate<LAllocation>(numSlots_);
307 return !!slots_;
310 LSnapshot* LSnapshot::New(MIRGenerator* gen, LRecoverInfo* recover,
311 BailoutKind kind) {
312 LSnapshot* snapshot = new (gen->alloc()) LSnapshot(recover, kind);
313 if (!snapshot || !snapshot->init(gen)) {
314 return nullptr;
317 JitSpew(JitSpew_IonSnapshots, "Generating LIR snapshot %p from recover (%p)",
318 (void*)snapshot, (void*)recover);
320 return snapshot;
323 void LSnapshot::rewriteRecoveredInput(LUse input) {
324 // Mark any operands to this snapshot with the same value as input as being
325 // equal to the instruction's result.
326 for (size_t i = 0; i < numEntries(); i++) {
327 if (getEntry(i)->isUse() &&
328 getEntry(i)->toUse()->virtualRegister() == input.virtualRegister()) {
329 setEntry(i, LUse(input.virtualRegister(), LUse::RECOVERED_INPUT));
#ifdef JS_JITSPEW
// Print the lower-cased name of opcode |op| to |out|.
void LNode::printName(GenericPrinter& out, Opcode op) {
  static const char* const names[] = {
#  define LIROP(x) #x,
      LIR_OPCODE_LIST(LIROP)
#  undef LIROP
  };
  const char* name = names[uint32_t(op)];
  for (size_t i = 0, len = strlen(name); i < len; i++) {
    out.printf("%c", unicode::ToLowerCase(name[i]));
  }
}

void LNode::printName(GenericPrinter& out) { printName(out, op()); }
#endif
351 bool LAllocation::aliases(const LAllocation& other) const {
352 if (isFloatReg() && other.isFloatReg()) {
353 return toFloatReg()->reg().aliases(other.toFloatReg()->reg());
355 return *this == other;
358 #ifdef JS_JITSPEW
359 static const char* DefTypeName(LDefinition::Type type) {
360 switch (type) {
361 case LDefinition::GENERAL:
362 return "g";
363 case LDefinition::INT32:
364 return "i";
365 case LDefinition::OBJECT:
366 return "o";
367 case LDefinition::SLOTS:
368 return "s";
369 case LDefinition::WASM_ANYREF:
370 return "wr";
371 case LDefinition::FLOAT32:
372 return "f";
373 case LDefinition::DOUBLE:
374 return "d";
375 case LDefinition::SIMD128:
376 return "simd128";
377 case LDefinition::STACKRESULTS:
378 return "stackresults";
379 # ifdef JS_NUNBOX32
380 case LDefinition::TYPE:
381 return "t";
382 case LDefinition::PAYLOAD:
383 return "p";
384 # else
385 case LDefinition::BOX:
386 return "x";
387 # endif
389 MOZ_CRASH("Invalid type");
392 UniqueChars LDefinition::toString() const {
393 AutoEnterOOMUnsafeRegion oomUnsafe;
395 UniqueChars buf;
396 if (isBogusTemp()) {
397 buf = JS_smprintf("bogus");
398 } else {
399 buf = JS_smprintf("v%u<%s>", virtualRegister(), DefTypeName(type()));
400 if (buf) {
401 if (policy() == LDefinition::FIXED) {
402 buf = JS_sprintf_append(std::move(buf), ":%s",
403 output()->toString().get());
404 } else if (policy() == LDefinition::MUST_REUSE_INPUT) {
405 buf = JS_sprintf_append(std::move(buf), ":tied(%u)", getReusedInput());
410 if (!buf) {
411 oomUnsafe.crash("LDefinition::toString()");
414 return buf;
417 static UniqueChars PrintUse(const LUse* use) {
418 switch (use->policy()) {
419 case LUse::REGISTER:
420 return JS_smprintf("v%u:R", use->virtualRegister());
421 case LUse::FIXED:
422 return JS_smprintf("v%u:F:%s", use->virtualRegister(),
423 AnyRegister::FromCode(use->registerCode()).name());
424 case LUse::ANY:
425 return JS_smprintf("v%u:A", use->virtualRegister());
426 case LUse::KEEPALIVE:
427 return JS_smprintf("v%u:KA", use->virtualRegister());
428 case LUse::STACK:
429 return JS_smprintf("v%u:S", use->virtualRegister());
430 case LUse::RECOVERED_INPUT:
431 return JS_smprintf("v%u:RI", use->virtualRegister());
432 default:
433 MOZ_CRASH("invalid use policy");
437 UniqueChars LAllocation::toString() const {
438 AutoEnterOOMUnsafeRegion oomUnsafe;
440 UniqueChars buf;
441 if (isBogus()) {
442 buf = JS_smprintf("bogus");
443 } else {
444 switch (kind()) {
445 case LAllocation::CONSTANT_VALUE:
446 case LAllocation::CONSTANT_INDEX: {
447 const MConstant* c = toConstant();
448 switch (c->type()) {
449 case MIRType::Int32:
450 buf = JS_smprintf("%d", c->toInt32());
451 break;
452 case MIRType::Int64:
453 buf = JS_smprintf("%" PRId64, c->toInt64());
454 break;
455 case MIRType::IntPtr:
456 buf = JS_smprintf("%" PRIxPTR, c->toIntPtr());
457 break;
458 case MIRType::String:
459 // If a JSContext is a available, output the actual string
460 if (JSContext* cx = TlsContext.get()) {
461 Sprinter spr(cx);
462 if (!spr.init()) {
463 oomUnsafe.crash("LAllocation::toString()");
465 spr.putString(cx, c->toString());
466 buf = spr.release();
467 } else {
468 buf = JS_smprintf("string");
470 break;
471 case MIRType::Symbol:
472 buf = JS_smprintf("sym");
473 break;
474 case MIRType::Object:
475 case MIRType::Null:
476 buf = JS_smprintf("obj %p", c->toObjectOrNull());
477 break;
478 case MIRType::Shape:
479 buf = JS_smprintf("shape");
480 break;
481 default:
482 if (c->isTypeRepresentableAsDouble()) {
483 buf = JS_smprintf("%g", c->numberToDouble());
484 } else {
485 buf = JS_smprintf("const");
488 } break;
489 case LAllocation::GPR:
490 buf = JS_smprintf("%s", toGeneralReg()->reg().name());
491 break;
492 case LAllocation::FPU:
493 buf = JS_smprintf("%s", toFloatReg()->reg().name());
494 break;
495 case LAllocation::STACK_SLOT:
496 buf = JS_smprintf("stack:%u", toStackSlot()->slot());
497 break;
498 case LAllocation::ARGUMENT_SLOT:
499 buf = JS_smprintf("arg:%u", toArgument()->index());
500 break;
501 case LAllocation::STACK_AREA:
502 buf = JS_smprintf("stackarea:%u+%u", toStackArea()->base(),
503 toStackArea()->size());
504 break;
505 case LAllocation::USE:
506 buf = PrintUse(toUse());
507 break;
508 default:
509 MOZ_CRASH("what?");
513 if (!buf) {
514 oomUnsafe.crash("LAllocation::toString()");
517 return buf;
520 void LAllocation::dump() const { fprintf(stderr, "%s\n", toString().get()); }
522 void LDefinition::dump() const { fprintf(stderr, "%s\n", toString().get()); }
524 template <typename T>
525 static void PrintOperands(GenericPrinter& out, T* node) {
526 size_t numOperands = node->numOperands();
528 for (size_t i = 0; i < numOperands; i++) {
529 out.printf(" (%s)", node->getOperand(i)->toString().get());
530 if (i != numOperands - 1) {
531 out.printf(",");
536 void LNode::printOperands(GenericPrinter& out) {
537 if (isMoveGroup()) {
538 toMoveGroup()->printOperands(out);
539 return;
541 if (isInteger()) {
542 out.printf(" (%d)", toInteger()->i32());
543 return;
545 if (isInteger64()) {
546 out.printf(" (%" PRId64 ")", toInteger64()->i64());
547 return;
550 if (isPhi()) {
551 PrintOperands(out, toPhi());
552 } else {
553 PrintOperands(out, toInstruction());
556 #endif
558 void LInstruction::assignSnapshot(LSnapshot* snapshot) {
559 MOZ_ASSERT(!snapshot_);
560 snapshot_ = snapshot;
562 #ifdef JS_JITSPEW
563 if (JitSpewEnabled(JitSpew_IonSnapshots)) {
564 JitSpewHeader(JitSpew_IonSnapshots);
565 Fprinter& out = JitSpewPrinter();
566 out.printf("Assigning snapshot %p to instruction %p (", (void*)snapshot,
567 (void*)this);
568 printName(out);
569 out.printf(")\n");
571 #endif
#ifdef JS_JITSPEW
// Overload for nodes that are not control instructions: no successors.
static size_t NumSuccessorsHelper(const LNode* ins) { return 0; }

// Control instructions carry their successor count as a template argument.
template <size_t Succs, size_t Operands, size_t Temps>
static size_t NumSuccessorsHelper(
    const LControlInstructionHelper<Succs, Operands, Temps>* ins) {
  return Succs;
}

// Dispatch on the concrete opcode so overload resolution selects the right
// helper above.
static size_t NumSuccessors(const LInstruction* ins) {
  switch (ins->op()) {
    default:
      MOZ_CRASH("Unexpected LIR op");
#  define LIROP(x)         \
    case LNode::Opcode::x: \
      return NumSuccessorsHelper(ins->to##x());
      LIR_OPCODE_LIST(LIROP)
#  undef LIROP
  }
}

// Overload for nodes that are not control instructions: must not be asked
// for a successor.
static MBasicBlock* GetSuccessorHelper(const LNode* ins, size_t i) {
  MOZ_CRASH("Unexpected instruction with successors");
}

template <size_t Succs, size_t Operands, size_t Temps>
static MBasicBlock* GetSuccessorHelper(
    const LControlInstructionHelper<Succs, Operands, Temps>* ins, size_t i) {
  return ins->getSuccessor(i);
}

// Return the i-th successor block of |ins|.
static MBasicBlock* GetSuccessor(const LInstruction* ins, size_t i) {
  MOZ_ASSERT(i < NumSuccessors(ins));

  switch (ins->op()) {
    default:
      MOZ_CRASH("Unexpected LIR op");
#  define LIROP(x)         \
    case LNode::Opcode::x: \
      return GetSuccessorHelper(ins->to##x(), i);
      LIR_OPCODE_LIST(LIROP)
#  undef LIROP
  }
}
#endif
#ifdef JS_JITSPEW
void LNode::dump(GenericPrinter& out) {
  // Definitions first: "{v1<g>, v2<i>} <- ".
  if (numDefs() != 0) {
    out.printf("{");
    for (size_t i = 0; i < numDefs(); i++) {
      const LDefinition* def =
          isPhi() ? toPhi()->getDef(i) : toInstruction()->getDef(i);
      out.printf("%s", def->toString().get());
      if (i + 1 != numDefs()) {
        out.printf(", ");
      }
    }
    out.printf("} <- ");
  }

  printName(out);
  printOperands(out);

  if (isInstruction()) {
    LInstruction* ins = toInstruction();

    // Temporaries: " t=(...)".
    size_t numTemps = ins->numTemps();
    if (numTemps > 0) {
      out.printf(" t=(");
      for (size_t i = 0; i < numTemps; i++) {
        out.printf("%s", ins->getTemp(i)->toString().get());
        if (i + 1 != numTemps) {
          out.printf(", ");
        }
      }
      out.printf(")");
    }

    // Successor blocks: " s=(block1, block2)".
    size_t numSuccessors = NumSuccessors(ins);
    if (numSuccessors > 0) {
      out.printf(" s=(");
      for (size_t i = 0; i < numSuccessors; i++) {
        MBasicBlock* succ = GetSuccessor(ins, i);
        out.printf("block%u", succ->id());
        if (i + 1 != numSuccessors) {
          out.printf(", ");
        }
      }
      out.printf(")");
    }
  }
}

// Convenience overload: dump to stderr, newline-terminated.
void LNode::dump() {
  Fprinter out(stderr);
  dump(out);
  out.printf("\n");
  out.finish();
}

// Return the opcode-specific extra name for this node, dispatching on the
// concrete opcode.
const char* LNode::getExtraName() const {
  switch (op()) {
    default:
      MOZ_CRASH("Unexpected LIR op");
#  define LIROP(x)         \
    case LNode::Opcode::x: \
      return to##x()->extraName();
      LIR_OPCODE_LIST(LIROP)
#  undef LIROP
  }
}
#endif
687 void LInstruction::initSafepoint(TempAllocator& alloc) {
688 MOZ_ASSERT(!safepoint_);
689 safepoint_ = new (alloc) LSafepoint(alloc);
690 MOZ_ASSERT(safepoint_);
693 bool LMoveGroup::add(LAllocation from, LAllocation to, LDefinition::Type type) {
694 #ifdef DEBUG
695 MOZ_ASSERT(from != to);
696 for (size_t i = 0; i < moves_.length(); i++) {
697 MOZ_ASSERT(to != moves_[i].to());
700 // Check that SIMD moves are aligned according to ABI requirements.
701 // clang-format off
702 # ifdef ENABLE_WASM_SIMD
703 // Alignment is not currently required for SIMD on x86/x64/arm64. See also
704 // CodeGeneratorShared::CodeGeneratorShared and in general everywhere
705 // SimdMemoryAignment is used. Likely, alignment requirements will return.
706 # if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || \
707 defined(JS_CODEGEN_ARM64)
708 // No need for any check on x86/x64/arm64.
709 # else
710 # error "Need to consider SIMD alignment on this target."
711 // The following code may be of use if we need alignment checks on
712 // some future target.
713 //if (LDefinition(type).type() == LDefinition::SIMD128) {
714 // MOZ_ASSERT(from.isMemory() || from.isFloatReg());
715 // if (from.isMemory()) {
716 // if (from.isArgument()) {
717 // MOZ_ASSERT(from.toArgument()->index() % SimdMemoryAlignment == 0);
718 // } else {
719 // MOZ_ASSERT(from.toStackSlot()->slot() % SimdMemoryAlignment == 0);
720 // }
721 // }
722 // MOZ_ASSERT(to.isMemory() || to.isFloatReg());
723 // if (to.isMemory()) {
724 // if (to.isArgument()) {
725 // MOZ_ASSERT(to.toArgument()->index() % SimdMemoryAlignment == 0);
726 // } else {
727 // MOZ_ASSERT(to.toStackSlot()->slot() % SimdMemoryAlignment == 0);
728 // }
729 // }
731 # endif
732 # endif
733 // clang-format on
735 #endif
736 return moves_.append(LMove(from, to, type));
739 bool LMoveGroup::addAfter(LAllocation from, LAllocation to,
740 LDefinition::Type type) {
741 // Transform the operands to this move so that performing the result
742 // simultaneously with existing moves in the group will have the same
743 // effect as if the original move took place after the existing moves.
745 for (size_t i = 0; i < moves_.length(); i++) {
746 if (moves_[i].to() == from) {
747 from = moves_[i].from();
748 break;
752 if (from == to) {
753 return true;
756 for (size_t i = 0; i < moves_.length(); i++) {
757 if (to == moves_[i].to()) {
758 moves_[i] = LMove(from, to, type);
759 return true;
763 return add(from, to, type);
#ifdef JS_JITSPEW
// Print each move as " [src -> dst, type]", comma-separated.
void LMoveGroup::printOperands(GenericPrinter& out) {
  for (size_t i = 0; i < numMoves(); i++) {
    const LMove& move = getMove(i);
    out.printf(" [%s -> %s", move.from().toString().get(),
               move.to().toString().get());
    out.printf(", %s", DefTypeName(move.type()));
    out.printf("]");
    if (i + 1 != numMoves()) {
      out.printf(",");
    }
  }
}
#endif
781 #define LIROP(x) \
782 static_assert(!std::is_polymorphic_v<L##x>, \
783 "LIR instructions should not have virtual methods");
784 LIR_OPCODE_LIST(LIROP)
785 #undef LIROP