Backed out changeset 2450366cf7ca (bug 1891629) for causing win msix mochitest failures
[gecko.git] / js / src / jit / LIR.cpp
blob1e3d7af0f0d99815fce50fdd929e37d5b2843890
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/LIR.h"
9 #include "mozilla/ScopeExit.h"
11 #include <type_traits>
13 #include "jit/JitSpewer.h"
14 #include "jit/MIR.h"
15 #include "jit/MIRGenerator.h"
16 #include "js/Printf.h"
17 #include "util/Unicode.h"
19 using namespace js;
20 using namespace js::jit;
// Table mapping each LIR opcode to its printable name, generated from
// LIR_OPCODE_LIST so it stays in sync with the opcode enumeration.
const char* const js::jit::LIROpNames[] = {
#define OPNAME(op, ...) #op,
    LIR_OPCODE_LIST(OPNAME)
#undef OPNAME
};
// Construct an empty LIR graph mirroring |mir|; all pools and vectors draw
// from the MIR graph's temp allocator.
LIRGraph::LIRGraph(MIRGraph* mir)
    : constantPool_(mir->alloc()),
      constantPoolMap_(mir->alloc()),
      safepoints_(mir->alloc()),
      nonCallSafepoints_(mir->alloc()),
      numVirtualRegisters_(0),
      numInstructions_(1),  // First id is 1.
      localSlotsSize_(0),
      argumentSlotCount_(0),
      mir_(*mir) {}
39 bool LIRGraph::addConstantToPool(const Value& v, uint32_t* index) {
40 ConstantPoolMap::AddPtr p = constantPoolMap_.lookupForAdd(v);
41 if (p) {
42 *index = p->value();
43 return true;
45 *index = constantPool_.length();
46 return constantPool_.append(v) && constantPoolMap_.add(p, v, *index);
49 bool LIRGraph::noteNeedsSafepoint(LInstruction* ins) {
50 // Instructions with safepoints must be in linear order.
51 MOZ_ASSERT_IF(!safepoints_.empty(), safepoints_.back()->id() < ins->id());
52 if (!ins->isCall() && !nonCallSafepoints_.append(ins)) {
53 return false;
55 return safepoints_.append(ins);
#ifdef JS_JITSPEW
// Print every block of the graph to |out|, one blank line between blocks.
void LIRGraph::dump(GenericPrinter& out) {
  for (size_t blockIndex = 0; blockIndex < numBlocks(); blockIndex++) {
    getBlock(blockIndex)->dump(out);
    out.printf("\n");
  }
}

// Convenience overload: dump the whole graph to stderr.
void LIRGraph::dump() {
  Fprinter out(stderr);
  dump(out);
  out.finish();
}
#endif
// Create the LIR block mirroring |from| and link the MIR block back to it.
LBlock::LBlock(MBasicBlock* from)
    : block_(from), entryMoveGroup_(nullptr), exitMoveGroup_(nullptr) {
  from->assignLir(this);
}
78 bool LBlock::init(TempAllocator& alloc) {
79 // Count the number of LPhis we'll need.
80 size_t numLPhis = 0;
81 for (MPhiIterator i(block_->phisBegin()), e(block_->phisEnd()); i != e; ++i) {
82 MPhi* phi = *i;
83 switch (phi->type()) {
84 case MIRType::Value:
85 numLPhis += BOX_PIECES;
86 break;
87 case MIRType::Int64:
88 numLPhis += INT64_PIECES;
89 break;
90 default:
91 numLPhis += 1;
92 break;
96 // Allocate space for the LPhis.
97 if (!phis_.init(alloc, numLPhis)) {
98 return false;
101 // For each MIR phi, set up LIR phis as appropriate. We'll fill in their
102 // operands on each incoming edge, and set their definitions at the start of
103 // their defining block.
104 size_t phiIndex = 0;
105 size_t numPreds = block_->numPredecessors();
106 for (MPhiIterator i(block_->phisBegin()), e(block_->phisEnd()); i != e; ++i) {
107 MPhi* phi = *i;
108 MOZ_ASSERT(phi->numOperands() == numPreds);
110 int numPhis;
111 switch (phi->type()) {
112 case MIRType::Value:
113 numPhis = BOX_PIECES;
114 break;
115 case MIRType::Int64:
116 numPhis = INT64_PIECES;
117 break;
118 default:
119 numPhis = 1;
120 break;
122 for (int i = 0; i < numPhis; i++) {
123 LAllocation* inputs = alloc.allocateArray<LAllocation>(numPreds);
124 if (!inputs) {
125 return false;
128 void* addr = &phis_[phiIndex++];
129 LPhi* lphi = new (addr) LPhi(phi, inputs);
130 lphi->setBlock(this);
133 return true;
136 const LInstruction* LBlock::firstInstructionWithId() const {
137 for (LInstructionIterator i(instructions_.begin()); i != instructions_.end();
138 ++i) {
139 if (i->id()) {
140 return *i;
143 return 0;
146 LMoveGroup* LBlock::getEntryMoveGroup(TempAllocator& alloc) {
147 if (entryMoveGroup_) {
148 return entryMoveGroup_;
150 entryMoveGroup_ = LMoveGroup::New(alloc);
151 insertBefore(*begin(), entryMoveGroup_);
152 return entryMoveGroup_;
155 LMoveGroup* LBlock::getExitMoveGroup(TempAllocator& alloc) {
156 if (exitMoveGroup_) {
157 return exitMoveGroup_;
159 exitMoveGroup_ = LMoveGroup::New(alloc);
160 insertBefore(*rbegin(), exitMoveGroup_);
161 return exitMoveGroup_;
#ifdef JS_JITSPEW
// Print this block's label, its phis, then each instruction (annotating any
// attached safepoint).
void LBlock::dump(GenericPrinter& out) {
  out.printf("block%u:\n", mir()->id());

  for (size_t phiIdx = 0; phiIdx < numPhis(); ++phiIdx) {
    getPhi(phiIdx)->dump(out);
    out.printf("\n");
  }

  for (LInstructionIterator iter = begin(); iter != end(); iter++) {
    iter->dump(out);
    if (iter->safepoint()) {
      out.printf(" SAFEPOINT(0x%p) ", iter->safepoint());
    }
    out.printf("\n");
  }
}

// Convenience overload: dump the block to stderr.
void LBlock::dump() {
  Fprinter out(stderr);
  dump(out);
  out.finish();
}
#endif
// Count the operands of |recoverInfo| that are NOT recovered on bailout,
// i.e. the operands that will need snapshot slots.
// NOTE(review): the loop condition |!it| looks inverted, but OperandIter
// appears to use an unusual convention where its boolean conversion means
// "exhausted" — confirm against LRecoverInfo::OperandIter before changing.
static size_t TotalOperandCount(LRecoverInfo* recoverInfo) {
  size_t accum = 0;
  for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
    if (!it->isRecoveredOnBailout()) {
      accum++;
    }
  }
  return accum;
}
// Start with an empty instruction vector and no recover offset assigned yet.
LRecoverInfo::LRecoverInfo(TempAllocator& alloc)
    : instructions_(alloc), recoverOffset_(INVALID_RECOVER_OFFSET) {}
200 LRecoverInfo* LRecoverInfo::New(MIRGenerator* gen, MResumePoint* mir) {
201 LRecoverInfo* recoverInfo = new (gen->alloc()) LRecoverInfo(gen->alloc());
202 if (!recoverInfo || !recoverInfo->init(mir)) {
203 return nullptr;
206 JitSpew(JitSpew_IonSnapshots, "Generating LIR recover info %p from MIR (%p)",
207 (void*)recoverInfo, (void*)mir);
209 return recoverInfo;
212 // de-virtualise MResumePoint::getOperand calls.
213 template <typename Node>
214 bool LRecoverInfo::appendOperands(Node* ins) {
215 for (size_t i = 0, end = ins->numOperands(); i < end; i++) {
216 MDefinition* def = ins->getOperand(i);
218 // As there is no cycle in the data-flow (without MPhi), checking for
219 // isInWorkList implies that the definition is already in the
220 // instruction vector, and not processed by a caller of the current
221 // function.
222 if (def->isRecoveredOnBailout() && !def->isInWorklist()) {
223 if (!appendDefinition(def)) {
224 return false;
229 return true;
232 bool LRecoverInfo::appendDefinition(MDefinition* def) {
233 MOZ_ASSERT(def->isRecoveredOnBailout());
234 def->setInWorklist();
235 auto clearWorklistFlagOnFailure =
236 mozilla::MakeScopeExit([&] { def->setNotInWorklist(); });
238 if (!appendOperands(def)) {
239 return false;
242 if (!instructions_.append(def)) {
243 return false;
246 clearWorklistFlagOnFailure.release();
247 return true;
250 bool LRecoverInfo::appendResumePoint(MResumePoint* rp) {
251 // Stores should be recovered first.
252 if (!rp->storesEmpty()) {
253 hasSideEffects_ = true;
255 for (auto iter(rp->storesBegin()), end(rp->storesEnd()); iter != end;
256 ++iter) {
257 if (!appendDefinition(iter->operand)) {
258 return false;
262 if (rp->caller() && !appendResumePoint(rp->caller())) {
263 return false;
266 if (!appendOperands(rp)) {
267 return false;
270 return instructions_.append(rp);
273 bool LRecoverInfo::init(MResumePoint* rp) {
274 // Before exiting this function, remove temporary flags from all definitions
275 // added in the vector.
276 auto clearWorklistFlags = mozilla::MakeScopeExit([&] {
277 for (MNode** it = begin(); it != end(); it++) {
278 if (!(*it)->isDefinition()) {
279 continue;
281 (*it)->toDefinition()->setNotInWorklist();
285 // Sort operations in the order in which we need to restore the stack. This
286 // implies that outer frames, as well as operations needed to recover the
287 // current frame, are located before the current frame. The inner-most
288 // resume point should be the last element in the list.
289 if (!appendResumePoint(rp)) {
290 return false;
293 MOZ_ASSERT(mir() == rp);
294 return true;
// The slot array itself is allocated separately in init(); each operand
// that survives bailout occupies BOX_PIECES allocation slots.
LSnapshot::LSnapshot(LRecoverInfo* recoverInfo, BailoutKind kind)
    : slots_(nullptr),
      recoverInfo_(recoverInfo),
      snapshotOffset_(INVALID_SNAPSHOT_OFFSET),
      numSlots_(TotalOperandCount(recoverInfo) * BOX_PIECES),
      bailoutKind_(kind) {}
304 bool LSnapshot::init(MIRGenerator* gen) {
305 slots_ = gen->allocate<LAllocation>(numSlots_);
306 return !!slots_;
309 LSnapshot* LSnapshot::New(MIRGenerator* gen, LRecoverInfo* recover,
310 BailoutKind kind) {
311 LSnapshot* snapshot = new (gen->alloc()) LSnapshot(recover, kind);
312 if (!snapshot || !snapshot->init(gen)) {
313 return nullptr;
316 JitSpew(JitSpew_IonSnapshots, "Generating LIR snapshot %p from recover (%p)",
317 (void*)snapshot, (void*)recover);
319 return snapshot;
322 void LSnapshot::rewriteRecoveredInput(LUse input) {
323 // Mark any operands to this snapshot with the same value as input as being
324 // equal to the instruction's result.
325 for (size_t i = 0; i < numEntries(); i++) {
326 if (getEntry(i)->isUse() &&
327 getEntry(i)->toUse()->virtualRegister() == input.virtualRegister()) {
328 setEntry(i, LUse(input.virtualRegister(), LUse::RECOVERED_INPUT));
#ifdef JS_JITSPEW
// Print the name of |op| in lowercase.
void LNode::printName(GenericPrinter& out, Opcode op) {
  static const char* const names[] = {
# define LIROP(x) #x,
      LIR_OPCODE_LIST(LIROP)
# undef LIROP
  };
  for (const char* p = names[uint32_t(op)]; *p; p++) {
    out.printf("%c", unicode::ToLowerCase(*p));
  }
}

// Print this node's own opcode name.
void LNode::printName(GenericPrinter& out) { printName(out, op()); }
#endif
350 bool LAllocation::aliases(const LAllocation& other) const {
351 if (isFloatReg() && other.isFloatReg()) {
352 return toFloatReg()->reg().aliases(other.toFloatReg()->reg());
354 return *this == other;
#ifdef JS_JITSPEW
// Short printable tag for an LDefinition type; used by the toString
// methods below.
static const char* DefTypeName(LDefinition::Type type) {
  switch (type) {
    case LDefinition::GENERAL:
      return "g";
    case LDefinition::INT32:
      return "i";
    case LDefinition::OBJECT:
      return "o";
    case LDefinition::SLOTS:
      return "s";
    case LDefinition::WASM_ANYREF:
      return "wr";
    case LDefinition::FLOAT32:
      return "f";
    case LDefinition::DOUBLE:
      return "d";
    case LDefinition::SIMD128:
      return "simd128";
    case LDefinition::STACKRESULTS:
      return "stackresults";
# ifdef JS_NUNBOX32
    // Nunbox platforms split a Value into separate type/payload halves.
    case LDefinition::TYPE:
      return "t";
    case LDefinition::PAYLOAD:
      return "p";
# else
    case LDefinition::BOX:
      return "x";
# endif
  }
  MOZ_CRASH("Invalid type");
}
391 UniqueChars LDefinition::toString() const {
392 AutoEnterOOMUnsafeRegion oomUnsafe;
394 UniqueChars buf;
395 if (isBogusTemp()) {
396 buf = JS_smprintf("bogus");
397 } else {
398 buf = JS_smprintf("v%u<%s>", virtualRegister(), DefTypeName(type()));
399 if (buf) {
400 if (policy() == LDefinition::FIXED) {
401 buf = JS_sprintf_append(std::move(buf), ":%s",
402 output()->toString().get());
403 } else if (policy() == LDefinition::MUST_REUSE_INPUT) {
404 buf = JS_sprintf_append(std::move(buf), ":tied(%u)", getReusedInput());
409 if (!buf) {
410 oomUnsafe.crash("LDefinition::toString()");
413 return buf;
// Render a use as "v<reg>:<policy tag>": R=register, F=fixed (plus the
// register name), A=any, KA=keepalive, S=stack, RI=recovered-input.
static UniqueChars PrintUse(const LUse* use) {
  switch (use->policy()) {
    case LUse::REGISTER:
      return JS_smprintf("v%u:R", use->virtualRegister());
    case LUse::FIXED:
      return JS_smprintf("v%u:F:%s", use->virtualRegister(),
                         AnyRegister::FromCode(use->registerCode()).name());
    case LUse::ANY:
      return JS_smprintf("v%u:A", use->virtualRegister());
    case LUse::KEEPALIVE:
      return JS_smprintf("v%u:KA", use->virtualRegister());
    case LUse::STACK:
      return JS_smprintf("v%u:S", use->virtualRegister());
    case LUse::RECOVERED_INPUT:
      return JS_smprintf("v%u:RI", use->virtualRegister());
    default:
      MOZ_CRASH("invalid use policy");
  }
}
// Render this allocation for debug output: constants print their value,
// registers their name, stack/argument slots their offsets, uses their
// policy. Debug printing only; crashes rather than returning null on OOM.
UniqueChars LAllocation::toString() const {
  AutoEnterOOMUnsafeRegion oomUnsafe;

  UniqueChars buf;
  if (isBogus()) {
    buf = JS_smprintf("bogus");
  } else {
    switch (kind()) {
      case LAllocation::CONSTANT_VALUE:
      case LAllocation::CONSTANT_INDEX: {
        const MConstant* c = toConstant();
        switch (c->type()) {
          case MIRType::Int32:
            buf = JS_smprintf("%d", c->toInt32());
            break;
          case MIRType::Int64:
            buf = JS_smprintf("%" PRId64, c->toInt64());
            break;
          case MIRType::IntPtr:
            buf = JS_smprintf("%" PRIxPTR, c->toIntPtr());
            break;
          case MIRType::String:
            // If a JSContext is available, output the actual string contents.
            if (JSContext* cx = TlsContext.get()) {
              Sprinter spr(cx);
              if (!spr.init()) {
                oomUnsafe.crash("LAllocation::toString()");
              }
              // NOTE(review): putString's return value is ignored here;
              // presumably a failure surfaces as a null |buf| from release()
              // and is caught below — confirm against Sprinter's contract.
              spr.putString(cx, c->toString());
              buf = spr.release();
            } else {
              buf = JS_smprintf("string");
            }
            break;
          case MIRType::Symbol:
            buf = JS_smprintf("sym");
            break;
          case MIRType::Object:
          case MIRType::Null:
            buf = JS_smprintf("obj %p", c->toObjectOrNull());
            break;
          case MIRType::Shape:
            buf = JS_smprintf("shape");
            break;
          default:
            // Anything double-representable prints numerically; otherwise
            // fall back to an opaque tag.
            if (c->isTypeRepresentableAsDouble()) {
              buf = JS_smprintf("%g", c->numberToDouble());
            } else {
              buf = JS_smprintf("const");
            }
        }
      } break;
      case LAllocation::GPR:
        buf = JS_smprintf("%s", toGeneralReg()->reg().name());
        break;
      case LAllocation::FPU:
        buf = JS_smprintf("%s", toFloatReg()->reg().name());
        break;
      case LAllocation::STACK_SLOT:
        buf = JS_smprintf("stack:%u", toStackSlot()->slot());
        break;
      case LAllocation::ARGUMENT_SLOT:
        buf = JS_smprintf("arg:%u", toArgument()->index());
        break;
      case LAllocation::STACK_AREA:
        buf = JS_smprintf("stackarea:%u+%u", toStackArea()->base(),
                          toStackArea()->size());
        break;
      case LAllocation::USE:
        buf = PrintUse(toUse());
        break;
      default:
        MOZ_CRASH("what?");
    }
  }

  if (!buf) {
    oomUnsafe.crash("LAllocation::toString()");
  }

  return buf;
}
// Debug helper: print this allocation to stderr.
void LAllocation::dump() const { fprintf(stderr, "%s\n", toString().get()); }
// Debug helper: print this definition to stderr.
void LDefinition::dump() const { fprintf(stderr, "%s\n", toString().get()); }
523 template <typename T>
524 static void PrintOperands(GenericPrinter& out, T* node) {
525 size_t numOperands = node->numOperands();
527 for (size_t i = 0; i < numOperands; i++) {
528 out.printf(" (%s)", node->getOperand(i)->toString().get());
529 if (i != numOperands - 1) {
530 out.printf(",");
535 void LNode::printOperands(GenericPrinter& out) {
536 if (isMoveGroup()) {
537 toMoveGroup()->printOperands(out);
538 return;
540 if (isInteger()) {
541 out.printf(" (%d)", toInteger()->i32());
542 return;
544 if (isInteger64()) {
545 out.printf(" (%" PRId64 ")", toInteger64()->i64());
546 return;
549 if (isPhi()) {
550 PrintOperands(out, toPhi());
551 } else {
552 PrintOperands(out, toInstruction());
555 #endif
// Attach |snapshot| to this instruction; an instruction may carry at most
// one snapshot.
void LInstruction::assignSnapshot(LSnapshot* snapshot) {
  MOZ_ASSERT(!snapshot_);
  snapshot_ = snapshot;

#ifdef JS_JITSPEW
  if (JitSpewEnabled(JitSpew_IonSnapshots)) {
    JitSpewHeader(JitSpew_IonSnapshots);
    Fprinter& out = JitSpewPrinter();
    out.printf("Assigning snapshot %p to instruction %p (", (void*)snapshot,
               (void*)this);
    printName(out);
    out.printf(")\n");
  }
#endif
}
#ifdef JS_JITSPEW
// Fallback overload: a non-control LIR node has no successors.
static size_t NumSuccessorsHelper(const LNode* ins) { return 0; }

// Control instructions carry their successor count as a template parameter.
template <size_t Succs, size_t Operands, size_t Temps>
static size_t NumSuccessorsHelper(
    const LControlInstructionHelper<Succs, Operands, Temps>* ins) {
  return Succs;
}

// Dispatch on the opcode so overload resolution sees the concrete LIR type
// and picks the right NumSuccessorsHelper above.
static size_t NumSuccessors(const LInstruction* ins) {
  switch (ins->op()) {
    default:
      MOZ_CRASH("Unexpected LIR op");
# define LIROP(x)          \
    case LNode::Opcode::x: \
      return NumSuccessorsHelper(ins->to##x());
      LIR_OPCODE_LIST(LIROP)
# undef LIROP
  }
}

// Fallback overload: only control instructions have successors to fetch.
static MBasicBlock* GetSuccessorHelper(const LNode* ins, size_t i) {
  MOZ_CRASH("Unexpected instruction with successors");
}

template <size_t Succs, size_t Operands, size_t Temps>
static MBasicBlock* GetSuccessorHelper(
    const LControlInstructionHelper<Succs, Operands, Temps>* ins, size_t i) {
  return ins->getSuccessor(i);
}

// Same opcode-dispatch trick as NumSuccessors, for fetching successor |i|.
static MBasicBlock* GetSuccessor(const LInstruction* ins, size_t i) {
  MOZ_ASSERT(i < NumSuccessors(ins));

  switch (ins->op()) {
    default:
      MOZ_CRASH("Unexpected LIR op");
# define LIROP(x)          \
    case LNode::Opcode::x: \
      return GetSuccessorHelper(ins->to##x(), i);
      LIR_OPCODE_LIST(LIROP)
# undef LIROP
  }
}
#endif
#ifdef JS_JITSPEW
// Print this node: "{defs} <- name operands t=(temps) s=(successors)".
// The defs, temps, and successors sections are omitted when empty.
void LNode::dump(GenericPrinter& out) {
  if (numDefs() != 0) {
    out.printf("{");
    for (size_t i = 0; i < numDefs(); i++) {
      // Phis and instructions store their definitions differently.
      const LDefinition* def =
          isPhi() ? toPhi()->getDef(i) : toInstruction()->getDef(i);
      out.printf("%s", def->toString().get());
      if (i != numDefs() - 1) {
        out.printf(", ");
      }
    }
    out.printf("} <- ");
  }

  printName(out);
  printOperands(out);

  // Temps and successors only exist on real instructions, not phis.
  if (isInstruction()) {
    LInstruction* ins = toInstruction();
    size_t numTemps = ins->numTemps();
    if (numTemps > 0) {
      out.printf(" t=(");
      for (size_t i = 0; i < numTemps; i++) {
        out.printf("%s", ins->getTemp(i)->toString().get());
        if (i != numTemps - 1) {
          out.printf(", ");
        }
      }
      out.printf(")");
    }

    size_t numSuccessors = NumSuccessors(ins);
    if (numSuccessors > 0) {
      out.printf(" s=(");
      for (size_t i = 0; i < numSuccessors; i++) {
        MBasicBlock* succ = GetSuccessor(ins, i);
        out.printf("block%u", succ->id());
        if (i != numSuccessors - 1) {
          out.printf(", ");
        }
      }
      out.printf(")");
    }
  }
}
// Convenience overload: dump this node to stderr with a trailing newline.
void LNode::dump() {
  Fprinter out(stderr);
  dump(out);
  out.printf("\n");
  out.finish();
}
// Forward to the concrete instruction's extraName() by dispatching on the
// opcode (same macro technique as NumSuccessors above).
const char* LNode::getExtraName() const {
  switch (op()) {
    default:
      MOZ_CRASH("Unexpected LIR op");
# define LIROP(x)          \
    case LNode::Opcode::x: \
      return to##x()->extraName();
      LIR_OPCODE_LIST(LIROP)
# undef LIROP
  }
}
#endif
686 void LInstruction::initSafepoint(TempAllocator& alloc) {
687 MOZ_ASSERT(!safepoint_);
688 safepoint_ = new (alloc) LSafepoint(alloc);
689 MOZ_ASSERT(safepoint_);
// Append a move from |from| to |to| of the given type. DEBUG builds verify
// the move is not a no-op and that no earlier move in this group already
// writes the same destination.
bool LMoveGroup::add(LAllocation from, LAllocation to, LDefinition::Type type) {
#ifdef DEBUG
  MOZ_ASSERT(from != to);
  for (size_t i = 0; i < moves_.length(); i++) {
    MOZ_ASSERT(to != moves_[i].to());
  }

  // Check that SIMD moves are aligned according to ABI requirements.
  // clang-format off
#  ifdef ENABLE_WASM_SIMD
  // Alignment is not currently required for SIMD on x86/x64/arm64. See also
  // CodeGeneratorShared::CodeGeneratorShared and in general everywhere
  // SimdMemoryAignment is used. Likely, alignment requirements will return.
#    if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || \
        defined(JS_CODEGEN_ARM64)
  // No need for any check on x86/x64/arm64.
#    else
#      error "Need to consider SIMD alignment on this target."
  // The following code may be of use if we need alignment checks on
  // some future target.
  //if (LDefinition(type).type() == LDefinition::SIMD128) {
  //  MOZ_ASSERT(from.isMemory() || from.isFloatReg());
  //  if (from.isMemory()) {
  //    if (from.isArgument()) {
  //      MOZ_ASSERT(from.toArgument()->index() % SimdMemoryAlignment == 0);
  //    } else {
  //      MOZ_ASSERT(from.toStackSlot()->slot() % SimdMemoryAlignment == 0);
  //    }
  //  }
  //  MOZ_ASSERT(to.isMemory() || to.isFloatReg());
  //  if (to.isMemory()) {
  //    if (to.isArgument()) {
  //      MOZ_ASSERT(to.toArgument()->index() % SimdMemoryAlignment == 0);
  //    } else {
  //      MOZ_ASSERT(to.toStackSlot()->slot() % SimdMemoryAlignment == 0);
  //    }
  //  }
#    endif
#  endif
  // clang-format on
#endif
  return moves_.append(LMove(from, to, type));
}
738 bool LMoveGroup::addAfter(LAllocation from, LAllocation to,
739 LDefinition::Type type) {
740 // Transform the operands to this move so that performing the result
741 // simultaneously with existing moves in the group will have the same
742 // effect as if the original move took place after the existing moves.
744 for (size_t i = 0; i < moves_.length(); i++) {
745 if (moves_[i].to() == from) {
746 from = moves_[i].from();
747 break;
751 if (from == to) {
752 return true;
755 for (size_t i = 0; i < moves_.length(); i++) {
756 if (to == moves_[i].to()) {
757 moves_[i] = LMove(from, to, type);
758 return true;
762 return add(from, to, type);
#ifdef JS_JITSPEW
// Print each move as " [src -> dst, type]", comma-separated.
void LMoveGroup::printOperands(GenericPrinter& out) {
  for (size_t i = 0; i < numMoves(); i++) {
    const LMove& move = getMove(i);
    out.printf(" [%s -> %s, %s]", move.from().toString().get(),
               move.to().toString().get(), DefTypeName(move.type()));
    if (i + 1 != numMoves()) {
      out.printf(",");
    }
  }
}
#endif
// Statically verify that no generated LIR instruction class declares
// virtual methods, as the static_assert message requires.
#define LIROP(x)                              \
  static_assert(!std::is_polymorphic_v<L##x>, \
                "LIR instructions should not have virtual methods");
LIR_OPCODE_LIST(LIROP)
#undef LIROP