js/src/wasm/WasmIonCompile.cpp (gecko.git)
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
4 * Copyright 2015 Mozilla Foundation
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
19 #include "wasm/WasmIonCompile.h"
21 #include "mozilla/DebugOnly.h"
22 #include "mozilla/MathAlgorithms.h"
24 #include <algorithm>
26 #include "jit/ABIArgGenerator.h"
27 #include "jit/CodeGenerator.h"
28 #include "jit/CompileInfo.h"
29 #include "jit/Ion.h"
30 #include "jit/IonOptimizationLevels.h"
31 #include "jit/MIR.h"
32 #include "jit/ShuffleAnalysis.h"
33 #include "js/ScalarType.h" // js::Scalar::Type
34 #include "wasm/WasmBaselineCompile.h"
35 #include "wasm/WasmBuiltinModule.h"
36 #include "wasm/WasmBuiltins.h"
37 #include "wasm/WasmCodegenTypes.h"
38 #include "wasm/WasmGC.h"
39 #include "wasm/WasmGcObject.h"
40 #include "wasm/WasmGenerator.h"
41 #include "wasm/WasmOpIter.h"
42 #include "wasm/WasmSignalHandlers.h"
43 #include "wasm/WasmStubs.h"
44 #include "wasm/WasmValidate.h"
46 using namespace js;
47 using namespace js::jit;
48 using namespace js::wasm;
50 using mozilla::IsPowerOfTwo;
51 using mozilla::Maybe;
52 using mozilla::Nothing;
53 using mozilla::Some;
55 namespace {
57 using BlockVector = Vector<MBasicBlock*, 8, SystemAllocPolicy>;
58 using DefVector = Vector<MDefinition*, 8, SystemAllocPolicy>;
60 // To compile try-catch blocks, we extend the IonCompilePolicy's ControlItem
61 // from being just an MBasicBlock* to a Control structure collecting additional
62 // information.
63 using ControlInstructionVector =
64 Vector<MControlInstruction*, 8, SystemAllocPolicy>;
66 struct Control {
67 MBasicBlock* block;
68 // For a try-catch ControlItem, when its block's LabelKind is Try, this
69 // collects branches to later bind and create the try's landing pad.
70 ControlInstructionVector tryPadPatches;
72 Control() : block(nullptr) {}
74 explicit Control(MBasicBlock* block) : block(block) {}
76 public:
77 void setBlock(MBasicBlock* newBlock) { block = newBlock; }
80 // [SMDOC] WebAssembly Exception Handling in Ion
81 // =======================================================
83 // ## Throwing instructions
85 // Wasm exceptions can be thrown by either a throw instruction (local throw),
86 // or by a wasm call.
88 // ## The "catching try control"
90 // We know we are in try-code if there is a surrounding ControlItem with
91 // LabelKind::Try. The innermost such control is called the
92 // "catching try control".
94 // ## Throws without a catching try control
96 // Such throws are implemented with an instance call that triggers the exception
97 // unwinding runtime. The exception unwinding runtime will not return to the
98 // function.
100 // ## "landing pad" and "pre-pad" blocks
102 // When an exception is thrown, the unwinder will search for the nearest
103 // enclosing try block and redirect control flow to it. The code that executes
104 // before any catch blocks is called the 'landing pad'. The 'landing pad' is
105 // responsible for:
106 // 1. Consume the pending exception state from
107 // Instance::pendingException(Tag)
108 // 2. Branch to the correct catch block, or else rethrow
110 // There is one landing pad for each try block. The immediate predecessors of
111 // the landing pad are called 'pre-pad' blocks. There is one pre-pad block per
112 // throwing instruction.
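//
// Roughly, the blocks relate as follows (one pre-pad per throwing
// instruction, all feeding the try's single landing pad):
//
//   throwing call ---> pre-pad ---\
//                                  +--> landing pad ---> catch dispatch / rethrow
//   local throw  ----> pre-pad ---/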
114 // ## Creating pre-pad blocks
116 // There are two possible sorts of pre-pad blocks, depending on whether we
117 // are branching after a local throw instruction, or after a wasm call:
119 // - If we encounter a local throw, we create the exception and tag objects,
120 // store them to Instance::pendingException(Tag), and then jump to the
121 // landing pad.
123 // - If we encounter a wasm call, we construct a MWasmCallCatchable which is a
124 // control instruction with either a branch to a fallthrough block or
125 // to a pre-pad block.
127 // The pre-pad block for a wasm call is empty except for a jump to the
128 // landing pad. It only exists to avoid critical edges which when split would
129 // violate the invariants of MWasmCallCatchable. The pending exception state
130 // is taken care of by the unwinder.
132 // Each pre-pad ends with a pending jump to the landing pad. The pending jumps
133 // to the landing pad are tracked in `tryPadPatches`. These are called
134 // "pad patches".
136 // ## Creating the landing pad
138 // When we exit try-code, we check if tryPadPatches has captured any control
139 // instructions (pad patches). If not, we don't compile any catches and we mark
140 // the rest as dead code.
142 // If there are pre-pad blocks, we join them to create a landing pad (or just
143 // "pad"). The pad's last two slots are the caught exception, and the
144 // exception's tag object.
146 // There are three different forms of try-catch/catch_all Wasm instructions,
147 // which result in different forms of landing pads.
149 // 1. A catchless try, so a Wasm instruction of the form "try ... end".
150 // - In this case, we end the pad by rethrowing the caught exception.
152 // 2. A single catch_all after a try.
153 // - If the first catch after a try is a catch_all, then there won't be
154 // any more catches, but we need the exception and its tag object, in
155 // case the code in a catch_all contains "rethrow" instructions.
156 // - The Wasm instruction "rethrow" gets the exception and tag object to
157 // rethrow from the last two slots of the landing pad which, due to
158 // validation, is the l'th surrounding ControlItem.
159 // - We immediately emit a GoTo to a new block after the pad and pop both the
160 // exception and tag object, as we don't need them anymore in this case.
162 // 3. Otherwise, there are one or more catch code blocks following.
163 // - In this case, we construct the landing pad by creating a sequence
164 // of compare and branch blocks that compare the pending exception tag
165 // object to the tag object of the current tagged catch block. This is
166 // done incrementally as we visit each tagged catch block in the bytecode
167 // stream. At every step, we update the ControlItem's block to point to
168 // the next block to be created in the landing pad sequence. The final
169 // block will either be a rethrow, if there is no catch_all, or else a
170 // jump to a catch_all block.
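//
// For orientation, the three forms correspond roughly to the following
// (linear) wasm text:
//
//   try ... end                                  (1. catchless try)
//   try ... catch_all ... end                    (2. single catch_all)
//   try ... catch $t ... catch_all ... end       (3. tagged catches, then an
//                                                    optional catch_all)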
172 struct IonCompilePolicy {
173 // We store SSA definitions in the value stack.
174 using Value = MDefinition*;
175 using ValueVector = DefVector;
177 // We store loop headers and then/else blocks in the control flow stack.
178 // In the case of try-catch control blocks, we collect additional information
179 // regarding the possible paths from throws and calls to a landing pad, as
180 // well as information on the landing pad's handlers (its catches).
181 using ControlItem = Control;
184 using IonOpIter = OpIter<IonCompilePolicy>;
186 class FunctionCompiler;
188 // CallCompileState describes a call that is being compiled.
190 class CallCompileState {
191 // A generator object that is passed each argument as it is compiled.
192 WasmABIArgGenerator abi_;
194 // Accumulates the register arguments while compiling arguments.
195 MWasmCallBase::Args regArgs_;
197 // Reserved argument for passing Instance* to builtin instance method calls.
198 ABIArg instanceArg_;
200 // The stack area in which the callee will write stack return values, or
201 // nullptr if no stack results.
202 MWasmStackResultArea* stackResultArea_ = nullptr;
204 // Indicates that the call is a return/tail call.
205 bool returnCall = false;
207 // Only FunctionCompiler should be directly manipulating CallCompileState.
208 friend class FunctionCompiler;
211 // Encapsulates the compilation of a single function in an asm.js module. The
212 // function compiler handles the creation and final backend compilation of the
213 // MIR graph.
214 class FunctionCompiler {
215 struct ControlFlowPatch {
216 MControlInstruction* ins;
217 uint32_t index;
218 ControlFlowPatch(MControlInstruction* ins, uint32_t index)
219 : ins(ins), index(index) {}
222 using ControlFlowPatchVector = Vector<ControlFlowPatch, 0, SystemAllocPolicy>;
223 using ControlFlowPatchVectorVector =
224 Vector<ControlFlowPatchVector, 0, SystemAllocPolicy>;
226 const ModuleEnvironment& moduleEnv_;
227 IonOpIter iter_;
228 const FuncCompileInput& func_;
229 const ValTypeVector& locals_;
230 size_t lastReadCallSite_;
232 TempAllocator& alloc_;
233 MIRGraph& graph_;
234 const CompileInfo& info_;
235 MIRGenerator& mirGen_;
237 MBasicBlock* curBlock_;
238 uint32_t maxStackArgBytes_;
240 uint32_t loopDepth_;
241 uint32_t blockDepth_;
242 ControlFlowPatchVectorVector blockPatches_;
244 // Instance pointer argument to the current function.
245 MWasmParameter* instancePointer_;
246 MWasmParameter* stackResultPointer_;
248 // Reference to masm.tryNotes_
249 wasm::TryNoteVector& tryNotes_;
251 public:
252 FunctionCompiler(const ModuleEnvironment& moduleEnv, Decoder& decoder,
253 const FuncCompileInput& func, const ValTypeVector& locals,
254 MIRGenerator& mirGen, TryNoteVector& tryNotes)
255 : moduleEnv_(moduleEnv),
256 iter_(moduleEnv, decoder),
257 func_(func),
258 locals_(locals),
259 lastReadCallSite_(0),
260 alloc_(mirGen.alloc()),
261 graph_(mirGen.graph()),
262 info_(mirGen.outerInfo()),
263 mirGen_(mirGen),
264 curBlock_(nullptr),
265 maxStackArgBytes_(0),
266 loopDepth_(0),
267 blockDepth_(0),
268 instancePointer_(nullptr),
269 stackResultPointer_(nullptr),
270 tryNotes_(tryNotes) {}
272 const ModuleEnvironment& moduleEnv() const { return moduleEnv_; }
274 IonOpIter& iter() { return iter_; }
275 TempAllocator& alloc() const { return alloc_; }
276 // FIXME(1401675): Replace with BlockType.
277 uint32_t funcIndex() const { return func_.index; }
278 const FuncType& funcType() const {
279 return *moduleEnv_.funcs[func_.index].type;
282 BytecodeOffset bytecodeOffset() const { return iter_.bytecodeOffset(); }
283 BytecodeOffset bytecodeIfNotAsmJS() const {
284 return moduleEnv_.isAsmJS() ? BytecodeOffset() : iter_.bytecodeOffset();
287 [[nodiscard]] bool init() {
288 // Prepare the entry block for MIR generation:
290 const ArgTypeVector args(funcType());
292 if (!mirGen_.ensureBallast()) {
293 return false;
295 if (!newBlock(/* prev */ nullptr, &curBlock_)) {
296 return false;
299 for (WasmABIArgIter i(args); !i.done(); i++) {
300 MWasmParameter* ins = MWasmParameter::New(alloc(), *i, i.mirType());
301 curBlock_->add(ins);
302 if (args.isSyntheticStackResultPointerArg(i.index())) {
303 MOZ_ASSERT(stackResultPointer_ == nullptr);
304 stackResultPointer_ = ins;
305 } else {
306 curBlock_->initSlot(info().localSlot(args.naturalIndex(i.index())),
307 ins);
309 if (!mirGen_.ensureBallast()) {
310 return false;
314 // Set up a parameter that receives the hidden instance pointer argument.
315 instancePointer_ =
316 MWasmParameter::New(alloc(), ABIArg(InstanceReg), MIRType::Pointer);
317 curBlock_->add(instancePointer_);
318 if (!mirGen_.ensureBallast()) {
319 return false;
322 for (size_t i = args.lengthWithoutStackResults(); i < locals_.length();
323 i++) {
324 ValType slotValType = locals_[i];
325 #ifndef ENABLE_WASM_SIMD
326 if (slotValType == ValType::V128) {
327 return iter().fail("Ion has no SIMD support yet");
329 #endif
330 MDefinition* zero = constantZeroOfValType(slotValType);
331 curBlock_->initSlot(info().localSlot(i), zero);
332 if (!mirGen_.ensureBallast()) {
333 return false;
337 return true;
340 void finish() {
341 mirGen().initWasmMaxStackArgBytes(maxStackArgBytes_);
343 MOZ_ASSERT(loopDepth_ == 0);
344 MOZ_ASSERT(blockDepth_ == 0);
345 #ifdef DEBUG
346 for (ControlFlowPatchVector& patches : blockPatches_) {
347 MOZ_ASSERT(patches.empty());
349 #endif
350 MOZ_ASSERT(inDeadCode());
351 MOZ_ASSERT(done(), "all bytes must be consumed");
352 MOZ_ASSERT(func_.callSiteLineNums.length() == lastReadCallSite_);
355 /************************* Read-only interface (after local scope setup) */
357 MIRGenerator& mirGen() const { return mirGen_; }
358 MIRGraph& mirGraph() const { return graph_; }
359 const CompileInfo& info() const { return info_; }
361 MDefinition* getLocalDef(unsigned slot) {
362 if (inDeadCode()) {
363 return nullptr;
365 return curBlock_->getSlot(info().localSlot(slot));
368 const ValTypeVector& locals() const { return locals_; }
370 /*********************************************************** Constants ***/
372 MDefinition* constantF32(float f) {
373 if (inDeadCode()) {
374 return nullptr;
376 auto* cst = MWasmFloatConstant::NewFloat32(alloc(), f);
377 curBlock_->add(cst);
378 return cst;
380 // Hide all other overloads, to guarantee no implicit argument conversion.
381 template <typename T>
382 MDefinition* constantF32(T) = delete;
384 MDefinition* constantF64(double d) {
385 if (inDeadCode()) {
386 return nullptr;
388 auto* cst = MWasmFloatConstant::NewDouble(alloc(), d);
389 curBlock_->add(cst);
390 return cst;
392 template <typename T>
393 MDefinition* constantF64(T) = delete;
395 MDefinition* constantI32(int32_t i) {
396 if (inDeadCode()) {
397 return nullptr;
399 MConstant* constant =
400 MConstant::New(alloc(), Int32Value(i), MIRType::Int32);
401 curBlock_->add(constant);
402 return constant;
404 template <typename T>
405 MDefinition* constantI32(T) = delete;
407 MDefinition* constantI64(int64_t i) {
408 if (inDeadCode()) {
409 return nullptr;
411 MConstant* constant = MConstant::NewInt64(alloc(), i);
412 curBlock_->add(constant);
413 return constant;
415 template <typename T>
416 MDefinition* constantI64(T) = delete;
418 // Produce an MConstant of the machine's target int type (Int32 or Int64).
419 MDefinition* constantTargetWord(intptr_t n) {
420 return targetIs64Bit() ? constantI64(int64_t(n)) : constantI32(int32_t(n));
422 template <typename T>
423 MDefinition* constantTargetWord(T) = delete;
425 #ifdef ENABLE_WASM_SIMD
426 MDefinition* constantV128(V128 v) {
427 if (inDeadCode()) {
428 return nullptr;
430 MWasmFloatConstant* constant = MWasmFloatConstant::NewSimd128(
431 alloc(), SimdConstant::CreateSimd128((int8_t*)v.bytes));
432 curBlock_->add(constant);
433 return constant;
435 template <typename T>
436 MDefinition* constantV128(T) = delete;
437 #endif
439 MDefinition* constantNullRef() {
440 if (inDeadCode()) {
441 return nullptr;
443 // MConstant has a lot of baggage so we don't use that here.
444 MWasmNullConstant* constant = MWasmNullConstant::New(alloc());
445 curBlock_->add(constant);
446 return constant;
449 // Produce a zero constant for the specified ValType.
450 MDefinition* constantZeroOfValType(ValType valType) {
451 switch (valType.kind()) {
452 case ValType::I32:
453 return constantI32(0);
454 case ValType::I64:
455 return constantI64(int64_t(0));
456 #ifdef ENABLE_WASM_SIMD
457 case ValType::V128:
458 return constantV128(V128(0));
459 #endif
460 case ValType::F32:
461 return constantF32(0.0f);
462 case ValType::F64:
463 return constantF64(0.0);
464 case ValType::Ref:
465 return constantNullRef();
466 default:
467 MOZ_CRASH();
471 /***************************** Code generation (after local scope setup) */
473 void fence() {
474 if (inDeadCode()) {
475 return;
477 MWasmFence* ins = MWasmFence::New(alloc());
478 curBlock_->add(ins);
481 template <class T>
482 MDefinition* unary(MDefinition* op) {
483 if (inDeadCode()) {
484 return nullptr;
486 T* ins = T::New(alloc(), op);
487 curBlock_->add(ins);
488 return ins;
491 template <class T>
492 MDefinition* unary(MDefinition* op, MIRType type) {
493 if (inDeadCode()) {
494 return nullptr;
496 T* ins = T::New(alloc(), op, type);
497 curBlock_->add(ins);
498 return ins;
501 template <class T>
502 MDefinition* binary(MDefinition* lhs, MDefinition* rhs) {
503 if (inDeadCode()) {
504 return nullptr;
506 T* ins = T::New(alloc(), lhs, rhs);
507 curBlock_->add(ins);
508 return ins;
511 template <class T>
512 MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type) {
513 if (inDeadCode()) {
514 return nullptr;
516 T* ins = T::New(alloc(), lhs, rhs, type);
517 curBlock_->add(ins);
518 return ins;
521 template <class T>
522 MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type,
523 MWasmBinaryBitwise::SubOpcode subOpc) {
524 if (inDeadCode()) {
525 return nullptr;
527 T* ins = T::New(alloc(), lhs, rhs, type, subOpc);
528 curBlock_->add(ins);
529 return ins;
532 MDefinition* ursh(MDefinition* lhs, MDefinition* rhs, MIRType type) {
533 if (inDeadCode()) {
534 return nullptr;
536 auto* ins = MUrsh::NewWasm(alloc(), lhs, rhs, type);
537 curBlock_->add(ins);
538 return ins;
541 MDefinition* add(MDefinition* lhs, MDefinition* rhs, MIRType type) {
542 if (inDeadCode()) {
543 return nullptr;
545 auto* ins = MAdd::NewWasm(alloc(), lhs, rhs, type);
546 curBlock_->add(ins);
547 return ins;
550 bool mustPreserveNaN(MIRType type) {
551 return IsFloatingPointType(type) && !moduleEnv().isAsmJS();
554 MDefinition* sub(MDefinition* lhs, MDefinition* rhs, MIRType type) {
555 if (inDeadCode()) {
556 return nullptr;
559 // wasm can't fold x - 0.0 because of NaN with custom payloads.
560 MSub* ins = MSub::NewWasm(alloc(), lhs, rhs, type, mustPreserveNaN(type));
561 curBlock_->add(ins);
562 return ins;
565 MDefinition* nearbyInt(MDefinition* input, RoundingMode roundingMode) {
566 if (inDeadCode()) {
567 return nullptr;
570 auto* ins = MNearbyInt::New(alloc(), input, input->type(), roundingMode);
571 curBlock_->add(ins);
572 return ins;
575 MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type,
576 bool isMax) {
577 if (inDeadCode()) {
578 return nullptr;
581 if (mustPreserveNaN(type)) {
582 // Convert signaling NaNs to quiet NaNs.
583 MDefinition* zero = constantZeroOfValType(ValType::fromMIRType(type));
584 lhs = sub(lhs, zero, type);
585 rhs = sub(rhs, zero, type);
588 MMinMax* ins = MMinMax::NewWasm(alloc(), lhs, rhs, type, isMax);
589 curBlock_->add(ins);
590 return ins;
593 MDefinition* mul(MDefinition* lhs, MDefinition* rhs, MIRType type,
594 MMul::Mode mode) {
595 if (inDeadCode()) {
596 return nullptr;
599 // wasm can't fold x * 1.0 because of NaN with custom payloads.
600 auto* ins =
601 MMul::NewWasm(alloc(), lhs, rhs, type, mode, mustPreserveNaN(type));
602 curBlock_->add(ins);
603 return ins;
606 MDefinition* div(MDefinition* lhs, MDefinition* rhs, MIRType type,
607 bool unsignd) {
608 if (inDeadCode()) {
609 return nullptr;
611 bool trapOnError = !moduleEnv().isAsmJS();
612 if (!unsignd && type == MIRType::Int32) {
613 // Enforce the signedness of the operation by coercing the operands
614 // to signed. Otherwise, operands that "look" unsigned to Ion but
615 // are not unsigned to Baldr (eg, unsigned right shifts) may lead to
616 // the operation being executed unsigned. Applies to mod() as well.
618 // Do this for Int32 only since Int64 is not subject to the same
619 // issues.
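// For instance, an i32.div_s whose lhs is the result of an i32.shr_u could
// otherwise be lowered as an unsigned divide; the truncations below coerce
// both operands back to signed int32.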
621 // Note the offsets passed to MWasmBuiltinTruncateToInt32 are wrong here,
622 // but it doesn't matter: they're not codegen'd to calls since inputs
623 // already are int32.
624 auto* lhs2 = createTruncateToInt32(lhs);
625 curBlock_->add(lhs2);
626 lhs = lhs2;
627 auto* rhs2 = createTruncateToInt32(rhs);
628 curBlock_->add(rhs2);
629 rhs = rhs2;
632 // For x86 and ARM we implement i64 div via a C++ builtin.
633 // A call to a C++ builtin requires the instance pointer.
634 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
635 if (type == MIRType::Int64) {
636 auto* ins =
637 MWasmBuiltinDivI64::New(alloc(), lhs, rhs, instancePointer_, unsignd,
638 trapOnError, bytecodeOffset());
639 curBlock_->add(ins);
640 return ins;
642 #endif
644 auto* ins = MDiv::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
645 bytecodeOffset(), mustPreserveNaN(type));
646 curBlock_->add(ins);
647 return ins;
650 MInstruction* createTruncateToInt32(MDefinition* op) {
651 if (op->type() == MIRType::Double || op->type() == MIRType::Float32) {
652 return MWasmBuiltinTruncateToInt32::New(alloc(), op, instancePointer_);
655 return MTruncateToInt32::New(alloc(), op);
658 MDefinition* mod(MDefinition* lhs, MDefinition* rhs, MIRType type,
659 bool unsignd) {
660 if (inDeadCode()) {
661 return nullptr;
663 bool trapOnError = !moduleEnv().isAsmJS();
664 if (!unsignd && type == MIRType::Int32) {
665 // See block comment in div().
666 auto* lhs2 = createTruncateToInt32(lhs);
667 curBlock_->add(lhs2);
668 lhs = lhs2;
669 auto* rhs2 = createTruncateToInt32(rhs);
670 curBlock_->add(rhs2);
671 rhs = rhs2;
674 // For x86 and ARM we implement i64 mod via a C++ builtin.
675 // A call to a C++ builtin requires the instance pointer.
676 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
677 if (type == MIRType::Int64) {
678 auto* ins =
679 MWasmBuiltinModI64::New(alloc(), lhs, rhs, instancePointer_, unsignd,
680 trapOnError, bytecodeOffset());
681 curBlock_->add(ins);
682 return ins;
684 #endif
686 // Should be handled separately because we call BuiltinThunk for this case
687 // and so need to add the dependency on instancePointer.
688 if (type == MIRType::Double) {
689 auto* ins = MWasmBuiltinModD::New(alloc(), lhs, rhs, instancePointer_,
690 type, bytecodeOffset());
691 curBlock_->add(ins);
692 return ins;
695 auto* ins = MMod::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
696 bytecodeOffset());
697 curBlock_->add(ins);
698 return ins;
701 MDefinition* bitnot(MDefinition* op) {
702 if (inDeadCode()) {
703 return nullptr;
705 auto* ins = MBitNot::New(alloc(), op);
706 curBlock_->add(ins);
707 return ins;
710 MDefinition* select(MDefinition* trueExpr, MDefinition* falseExpr,
711 MDefinition* condExpr) {
712 if (inDeadCode()) {
713 return nullptr;
715 auto* ins = MWasmSelect::New(alloc(), trueExpr, falseExpr, condExpr);
716 curBlock_->add(ins);
717 return ins;
720 MDefinition* extendI32(MDefinition* op, bool isUnsigned) {
721 if (inDeadCode()) {
722 return nullptr;
724 auto* ins = MExtendInt32ToInt64::New(alloc(), op, isUnsigned);
725 curBlock_->add(ins);
726 return ins;
729 MDefinition* signExtend(MDefinition* op, uint32_t srcSize,
730 uint32_t targetSize) {
731 if (inDeadCode()) {
732 return nullptr;
734 MInstruction* ins;
735 switch (targetSize) {
736 case 4: {
737 MSignExtendInt32::Mode mode;
738 switch (srcSize) {
739 case 1:
740 mode = MSignExtendInt32::Byte;
741 break;
742 case 2:
743 mode = MSignExtendInt32::Half;
744 break;
745 default:
746 MOZ_CRASH("Bad sign extension");
748 ins = MSignExtendInt32::New(alloc(), op, mode);
749 break;
751 case 8: {
752 MSignExtendInt64::Mode mode;
753 switch (srcSize) {
754 case 1:
755 mode = MSignExtendInt64::Byte;
756 break;
757 case 2:
758 mode = MSignExtendInt64::Half;
759 break;
760 case 4:
761 mode = MSignExtendInt64::Word;
762 break;
763 default:
764 MOZ_CRASH("Bad sign extension");
766 ins = MSignExtendInt64::New(alloc(), op, mode);
767 break;
769 default: {
770 MOZ_CRASH("Bad sign extension");
773 curBlock_->add(ins);
774 return ins;
777 MDefinition* convertI64ToFloatingPoint(MDefinition* op, MIRType type,
778 bool isUnsigned) {
779 if (inDeadCode()) {
780 return nullptr;
782 #if defined(JS_CODEGEN_ARM)
783 auto* ins = MBuiltinInt64ToFloatingPoint::New(
784 alloc(), op, instancePointer_, type, bytecodeOffset(), isUnsigned);
785 #else
786 auto* ins = MInt64ToFloatingPoint::New(alloc(), op, type, bytecodeOffset(),
787 isUnsigned);
788 #endif
789 curBlock_->add(ins);
790 return ins;
793 MDefinition* rotate(MDefinition* input, MDefinition* count, MIRType type,
794 bool left) {
795 if (inDeadCode()) {
796 return nullptr;
798 auto* ins = MRotate::New(alloc(), input, count, type, left);
799 curBlock_->add(ins);
800 return ins;
803 template <class T>
804 MDefinition* truncate(MDefinition* op, TruncFlags flags) {
805 if (inDeadCode()) {
806 return nullptr;
808 auto* ins = T::New(alloc(), op, flags, bytecodeOffset());
809 curBlock_->add(ins);
810 return ins;
813 #if defined(JS_CODEGEN_ARM)
814 MDefinition* truncateWithInstance(MDefinition* op, TruncFlags flags) {
815 if (inDeadCode()) {
816 return nullptr;
818 auto* ins = MWasmBuiltinTruncateToInt64::New(alloc(), op, instancePointer_,
819 flags, bytecodeOffset());
820 curBlock_->add(ins);
821 return ins;
823 #endif
825 MDefinition* compare(MDefinition* lhs, MDefinition* rhs, JSOp op,
826 MCompare::CompareType type) {
827 if (inDeadCode()) {
828 return nullptr;
830 auto* ins = MCompare::NewWasm(alloc(), lhs, rhs, op, type);
831 curBlock_->add(ins);
832 return ins;
835 void assign(unsigned slot, MDefinition* def) {
836 if (inDeadCode()) {
837 return;
839 curBlock_->setSlot(info().localSlot(slot), def);
842 MDefinition* compareIsNull(MDefinition* ref, JSOp compareOp) {
843 MDefinition* nullVal = constantNullRef();
844 if (!nullVal) {
845 return nullptr;
847 return compare(ref, nullVal, compareOp, MCompare::Compare_WasmAnyRef);
850 [[nodiscard]] bool refAsNonNull(MDefinition* ref) {
851 if (inDeadCode()) {
852 return true;
855 auto* ins = MWasmTrapIfNull::New(
856 alloc(), ref, wasm::Trap::NullPointerDereference, bytecodeOffset());
858 curBlock_->add(ins);
859 return true;
862 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
863 [[nodiscard]] bool brOnNull(uint32_t relativeDepth, const DefVector& values,
864 const ResultType& type, MDefinition* condition) {
865 if (inDeadCode()) {
866 return true;
869 MBasicBlock* fallthroughBlock = nullptr;
870 if (!newBlock(curBlock_, &fallthroughBlock)) {
871 return false;
874 MDefinition* check = compareIsNull(condition, JSOp::Eq);
875 if (!check) {
876 return false;
878 MTest* test = MTest::New(alloc(), check, nullptr, fallthroughBlock);
879 if (!test ||
880 !addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
881 return false;
884 if (!pushDefs(values)) {
885 return false;
888 curBlock_->end(test);
889 curBlock_ = fallthroughBlock;
890 return true;
893 [[nodiscard]] bool brOnNonNull(uint32_t relativeDepth,
894 const DefVector& values,
895 const ResultType& type,
896 MDefinition* condition) {
897 if (inDeadCode()) {
898 return true;
901 MBasicBlock* fallthroughBlock = nullptr;
902 if (!newBlock(curBlock_, &fallthroughBlock)) {
903 return false;
906 MDefinition* check = compareIsNull(condition, JSOp::Ne);
907 if (!check) {
908 return false;
910 MTest* test = MTest::New(alloc(), check, nullptr, fallthroughBlock);
911 if (!test ||
912 !addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
913 return false;
916 if (!pushDefs(values)) {
917 return false;
920 curBlock_->end(test);
921 curBlock_ = fallthroughBlock;
922 return true;
925 #endif // ENABLE_WASM_FUNCTION_REFERENCES
927 #ifdef ENABLE_WASM_GC
928 MDefinition* refI31(MDefinition* input) {
929 auto* ins = MWasmNewI31Ref::New(alloc(), input);
930 curBlock_->add(ins);
931 return ins;
934 MDefinition* i31Get(MDefinition* input, FieldWideningOp wideningOp) {
935 auto* ins = MWasmI31RefGet::New(alloc(), input, wideningOp);
936 curBlock_->add(ins);
937 return ins;
939 #endif // ENABLE_WASM_GC
941 #ifdef ENABLE_WASM_SIMD
942 // About Wasm SIMD as supported by Ion:
944 // The expectation is that Ion will only ever support SIMD on x86 and x64,
945 // since ARMv7 will cease to be a tier-1 platform soon, and MIPS64 will never
946 // implement SIMD.
948 // The division of the operations into MIR nodes reflects that expectation,
949 // and is a good fit for x86/x64. Should the expectation change we'll
950 // possibly want to re-architect the SIMD support to be a little more general.
952 // Most SIMD operations map directly to a single MIR node that ultimately ends
953 // up being expanded in the macroassembler.
955 // Some SIMD operations that do have a complete macroassembler expansion are
956 // open-coded into multiple MIR nodes here; in some cases that's just
957 // convenience, in other cases it may also allow them to benefit from Ion
958 // optimizations. The reason for the expansions will be documented by a
959 // comment.
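// One such expansion is visible below: shiftSimd128() may emit an explicit
// MBitAnd to mask the shift count before the MWasmShiftSimd128 node, rather
// than leaving the masking entirely to the macroassembler.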
961 // (v128,v128) -> v128 effect-free binary operations
962 MDefinition* binarySimd128(MDefinition* lhs, MDefinition* rhs,
963 bool commutative, SimdOp op) {
964 if (inDeadCode()) {
965 return nullptr;
968 MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
969 rhs->type() == MIRType::Simd128);
971 auto* ins = MWasmBinarySimd128::New(alloc(), lhs, rhs, commutative, op);
972 curBlock_->add(ins);
973 return ins;
976 // (v128,i32) -> v128 effect-free shift operations
977 MDefinition* shiftSimd128(MDefinition* lhs, MDefinition* rhs, SimdOp op) {
978 if (inDeadCode()) {
979 return nullptr;
982 MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
983 rhs->type() == MIRType::Int32);
985 int32_t maskBits;
986 if (MacroAssembler::MustMaskShiftCountSimd128(op, &maskBits)) {
987 MDefinition* mask = constantI32(maskBits);
988 auto* rhs2 = MBitAnd::New(alloc(), rhs, mask, MIRType::Int32);
989 curBlock_->add(rhs2);
990 rhs = rhs2;
993 auto* ins = MWasmShiftSimd128::New(alloc(), lhs, rhs, op);
994 curBlock_->add(ins);
995 return ins;
998 // (v128,scalar,imm) -> v128
999 MDefinition* replaceLaneSimd128(MDefinition* lhs, MDefinition* rhs,
1000 uint32_t laneIndex, SimdOp op) {
1001 if (inDeadCode()) {
1002 return nullptr;
1005 MOZ_ASSERT(lhs->type() == MIRType::Simd128);
1007 auto* ins = MWasmReplaceLaneSimd128::New(alloc(), lhs, rhs, laneIndex, op);
1008 curBlock_->add(ins);
1009 return ins;
1012 // (scalar) -> v128 effect-free unary operations
1013 MDefinition* scalarToSimd128(MDefinition* src, SimdOp op) {
1014 if (inDeadCode()) {
1015 return nullptr;
1018 auto* ins = MWasmScalarToSimd128::New(alloc(), src, op);
1019 curBlock_->add(ins);
1020 return ins;
1023 // (v128) -> v128 effect-free unary operations
1024 MDefinition* unarySimd128(MDefinition* src, SimdOp op) {
1025 if (inDeadCode()) {
1026 return nullptr;
1029 MOZ_ASSERT(src->type() == MIRType::Simd128);
1030 auto* ins = MWasmUnarySimd128::New(alloc(), src, op);
1031 curBlock_->add(ins);
1032 return ins;
1035 // (v128, imm) -> scalar effect-free unary operations
1036 MDefinition* reduceSimd128(MDefinition* src, SimdOp op, ValType outType,
1037 uint32_t imm = 0) {
1038 if (inDeadCode()) {
1039 return nullptr;
1042 MOZ_ASSERT(src->type() == MIRType::Simd128);
1043 auto* ins =
1044 MWasmReduceSimd128::New(alloc(), src, op, outType.toMIRType(), imm);
1045 curBlock_->add(ins);
1046 return ins;
1049 // (v128, v128, v128) -> v128 effect-free operations
1050 MDefinition* ternarySimd128(MDefinition* v0, MDefinition* v1, MDefinition* v2,
1051 SimdOp op) {
1052 if (inDeadCode()) {
1053 return nullptr;
1056 MOZ_ASSERT(v0->type() == MIRType::Simd128 &&
1057 v1->type() == MIRType::Simd128 &&
1058 v2->type() == MIRType::Simd128);
1060 auto* ins = MWasmTernarySimd128::New(alloc(), v0, v1, v2, op);
1061 curBlock_->add(ins);
1062 return ins;
1065 // (v128, v128, imm_v128) -> v128 effect-free operations
1066 MDefinition* shuffleSimd128(MDefinition* v1, MDefinition* v2, V128 control) {
1067 if (inDeadCode()) {
1068 return nullptr;
1071 MOZ_ASSERT(v1->type() == MIRType::Simd128);
1072 MOZ_ASSERT(v2->type() == MIRType::Simd128);
1073 auto* ins = BuildWasmShuffleSimd128(
1074 alloc(), reinterpret_cast<int8_t*>(control.bytes), v1, v2);
1075 curBlock_->add(ins);
1076 return ins;
1079 // Also see below for SIMD memory references
1081 #endif // ENABLE_WASM_SIMD
1083 /************************************************ Linear memory accesses */
1085 // For detailed information about memory accesses, see "Linear memory
1086 // addresses and bounds checking" in WasmMemory.cpp.
1088 private:
1089 // If the platform does not have a HeapReg, load the memory base from
1090 // instance.
1091 MDefinition* maybeLoadMemoryBase(uint32_t memoryIndex) {
1092 #ifdef WASM_HAS_HEAPREG
1093 if (memoryIndex == 0) {
1094 return nullptr;
1096 #endif
1097 return memoryBase(memoryIndex);
1100 public:
1101 // A value holding the memory base, whether that's HeapReg or some other
1102 // register.
1103 MDefinition* memoryBase(uint32_t memoryIndex) {
1104 AliasSet aliases = !moduleEnv_.memories[memoryIndex].canMovingGrow()
1105 ? AliasSet::None()
1106 : AliasSet::Load(AliasSet::WasmHeapMeta);
1107 #ifdef WASM_HAS_HEAPREG
1108 if (memoryIndex == 0) {
1109 MWasmHeapReg* base = MWasmHeapReg::New(alloc(), aliases);
1110 curBlock_->add(base);
1111 return base;
1113 #endif
1114 uint32_t offset =
1115 memoryIndex == 0
1116 ? Instance::offsetOfMemory0Base()
1117 : (Instance::offsetInData(
1118 moduleEnv_.offsetOfMemoryInstanceData(memoryIndex) +
1119 offsetof(MemoryInstanceData, base)));
1120 MWasmLoadInstance* base = MWasmLoadInstance::New(
1121 alloc(), instancePointer_, offset, MIRType::Pointer, aliases);
1122 curBlock_->add(base);
1123 return base;
1126 private:
1127 // If the bounds checking strategy requires it, load the bounds check limit
1128 // from the instance.
1129 MWasmLoadInstance* maybeLoadBoundsCheckLimit(uint32_t memoryIndex,
1130 MIRType type) {
1131 MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Int64);
1132 if (moduleEnv_.hugeMemoryEnabled(memoryIndex)) {
1133 return nullptr;
1135 uint32_t offset =
1136 memoryIndex == 0
1137 ? Instance::offsetOfMemory0BoundsCheckLimit()
1138 : (Instance::offsetInData(
1139 moduleEnv_.offsetOfMemoryInstanceData(memoryIndex) +
1140 offsetof(MemoryInstanceData, boundsCheckLimit)));
1141 AliasSet aliases = !moduleEnv_.memories[memoryIndex].canMovingGrow()
1142 ? AliasSet::None()
1143 : AliasSet::Load(AliasSet::WasmHeapMeta);
1144 auto* load = MWasmLoadInstance::New(alloc(), instancePointer_, offset, type,
1145 aliases);
1146 curBlock_->add(load);
1147 return load;
1150 // Return true if the access requires an alignment check. If so, sets
1151 // *mustAdd to true if the offset must be added to the pointer before
1152 // checking.
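// For example (assuming a non-constant base): an atomic 4-byte access with
// offset 2 returns true with *mustAdd = true, since only the full EA can be
// checked; the same access with offset 0 returns true with *mustAdd = false,
// since checking the pointer alone suffices.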
1153 bool needAlignmentCheck(MemoryAccessDesc* access, MDefinition* base,
1154 bool* mustAdd) {
1155 MOZ_ASSERT(!*mustAdd);
1157 // asm.js accesses are always aligned and need no checks.
1158 if (moduleEnv_.isAsmJS() || !access->isAtomic()) {
1159 return false;
1162 // If the EA is known and aligned it will need no checks.
1163 if (base->isConstant()) {
1164 // We only care about the low bits, so overflow is OK, as is chopping off
1165 // the high bits of an i64 pointer.
1166 uint32_t ptr = 0;
1167 if (isMem64(access->memoryIndex())) {
1168 ptr = uint32_t(base->toConstant()->toInt64());
1169 } else {
1170 ptr = base->toConstant()->toInt32();
1172 if (((ptr + access->offset64()) & (access->byteSize() - 1)) == 0) {
1173 return false;
1177 // If the offset is aligned then the EA is just the pointer, for
1178 // the purposes of this check.
1179 *mustAdd = (access->offset64() & (access->byteSize() - 1)) != 0;
1180 return true;
1183 // Fold a constant base into the offset and make the base 0, provided the
1184 // offset stays below the guard limit. The reason for folding the base into
1185 // the offset rather than vice versa is that a small offset can be ignored
1186 // by both explicit bounds checking and bounds check elimination.
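// For example, a constant base of 64 with an access offset of 16 (both below
// the guard limit) becomes a zero base with offset 80, which later checks can
// treat as an ordinary small offset.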
1187 void foldConstantPointer(MemoryAccessDesc* access, MDefinition** base) {
1188 uint32_t offsetGuardLimit = GetMaxOffsetGuardLimit(
1189 moduleEnv_.hugeMemoryEnabled(access->memoryIndex()));
1191 if ((*base)->isConstant()) {
1192 uint64_t basePtr = 0;
1193 if (isMem64(access->memoryIndex())) {
1194 basePtr = uint64_t((*base)->toConstant()->toInt64());
1195 } else {
1196 basePtr = uint64_t(int64_t((*base)->toConstant()->toInt32()));
1199 uint64_t offset = access->offset64();
1201 if (offset < offsetGuardLimit && basePtr < offsetGuardLimit - offset) {
1202 offset += uint32_t(basePtr);
1203 access->setOffset32(uint32_t(offset));
1204 *base = isMem64(access->memoryIndex()) ? constantI64(int64_t(0))
1205 : constantI32(0);
1210 // If the offset must be added because it is large or because the true EA must
1211 // be checked, compute the effective address, trapping on overflow.
1212 void maybeComputeEffectiveAddress(MemoryAccessDesc* access,
1213 MDefinition** base, bool mustAddOffset) {
1214 uint32_t offsetGuardLimit = GetMaxOffsetGuardLimit(
1215 moduleEnv_.hugeMemoryEnabled(access->memoryIndex()));
1217 if (access->offset64() >= offsetGuardLimit ||
1218 access->offset64() > UINT32_MAX || mustAddOffset ||
1219 !JitOptions.wasmFoldOffsets) {
1220 *base = computeEffectiveAddress(*base, access);
1224 MWasmLoadInstance* needBoundsCheck(uint32_t memoryIndex) {
1225 #ifdef JS_64BIT
1226 // For 32-bit base pointers:
1228 // If the bounds check uses the full 64 bits of the bounds check limit, then
1229 // the base pointer must be zero-extended to 64 bits before checking and
1230 // wrapped back to 32-bits after Spectre masking. (And it's important that
1231 // the value we end up with has flowed through the Spectre mask.)
1233 // If the memory's max size is known to be smaller than 64K pages exactly,
1234 // we can use a 32-bit check and avoid extension and wrapping.
1235 static_assert(0x100000000 % PageSize == 0);
1236 bool mem32LimitIs64Bits =
1237 isMem32(memoryIndex) &&
1238 !moduleEnv_.memories[memoryIndex].boundsCheckLimitIs32Bits() &&
1239 MaxMemoryPages(moduleEnv_.memories[memoryIndex].indexType()) >=
1240 Pages(0x100000000 / PageSize);
1241 #else
1242 // On 32-bit platforms we have no more than 2GB memory and the limit for a
1243 // 32-bit base pointer is never a 64-bit value.
1244 bool mem32LimitIs64Bits = false;
1245 #endif
1246 return maybeLoadBoundsCheckLimit(memoryIndex,
1247 mem32LimitIs64Bits || isMem64(memoryIndex)
1248 ? MIRType::Int64
1249 : MIRType::Int32);
1252 void performBoundsCheck(uint32_t memoryIndex, MDefinition** base,
1253 MWasmLoadInstance* boundsCheckLimit) {
1254 // At the outset, actualBase could be the result of pretty much any integer
1255 // operation, or it could be the load of an integer constant. If its type
1256 // is i32, we may assume the value has a canonical representation for the
1257 // platform, see doc block in MacroAssembler.h.
1258 MDefinition* actualBase = *base;
1260 // Extend an i32 index value to perform a 64-bit bounds check if the memory
1261 // can be 4GB or larger.
1262 bool extendAndWrapIndex =
1263 isMem32(memoryIndex) && boundsCheckLimit->type() == MIRType::Int64;
1264 if (extendAndWrapIndex) {
1265 auto* extended = MWasmExtendU32Index::New(alloc(), actualBase);
1266 curBlock_->add(extended);
1267 actualBase = extended;
1270 auto target = memoryIndex == 0 ? MWasmBoundsCheck::Memory0
1271 : MWasmBoundsCheck::Unknown;
1272 auto* ins = MWasmBoundsCheck::New(alloc(), actualBase, boundsCheckLimit,
1273 bytecodeOffset(), target);
1274 curBlock_->add(ins);
1275 actualBase = ins;
1277 // If we're masking, then we update *base to create a dependency chain
1278 // through the masked index. But we will first need to wrap the index
1279 // value if it was extended above.
1280 if (JitOptions.spectreIndexMasking) {
1281 if (extendAndWrapIndex) {
1282 auto* wrapped = MWasmWrapU32Index::New(alloc(), actualBase);
1283 curBlock_->add(wrapped);
1284 actualBase = wrapped;
1286 *base = actualBase;
1290 // Perform all necessary checking before a wasm heap access, based on the
1291 // attributes of the access and base pointer.
1293 // For 64-bit indices on platforms that are limited to indices that fit into
1294 // 32 bits (all 32-bit platforms and mips64), this returns a bounds-checked
1295 // `base` that has type Int32. Lowering code depends on this and will assert
1296 // that the base has this type. See the end of this function.
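//
// In outline, this: folds a constant base into the offset, decides whether an
// alignment check (and hence an explicit EA) is needed, computes the EA if
// required, emits the alignment and bounds checks, and finally narrows a
// 64-bit index to 32 bits on 32-bit targets.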
1298 void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc* access,
1299 MDefinition** base) {
1300 MOZ_ASSERT(!inDeadCode());
1301 MOZ_ASSERT(!moduleEnv_.isAsmJS());
1303 // Attempt to fold an offset into a constant base pointer so as to simplify
1304 // the addressing expression. This may update *base.
1305 foldConstantPointer(access, base);
1307 // Determine whether an alignment check is needed and whether the offset
1308 // must be checked too.
1309 bool mustAddOffsetForAlignmentCheck = false;
1310 bool alignmentCheck =
1311 needAlignmentCheck(access, *base, &mustAddOffsetForAlignmentCheck);
1313 // If bounds checking or alignment checking requires it, compute the
1314 // effective address: add the offset into the pointer and trap on overflow.
1315 // This may update *base.
1316 maybeComputeEffectiveAddress(access, base, mustAddOffsetForAlignmentCheck);
1318 // Emit the alignment check if necessary; it traps if it fails.
1319 if (alignmentCheck) {
1320 curBlock_->add(MWasmAlignmentCheck::New(
1321 alloc(), *base, access->byteSize(), bytecodeOffset()));
1324 // Emit the bounds check if necessary; it traps if it fails. This may
1325 // update *base.
1326 MWasmLoadInstance* boundsCheckLimit =
1327 needBoundsCheck(access->memoryIndex());
1328 if (boundsCheckLimit) {
1329 performBoundsCheck(access->memoryIndex(), base, boundsCheckLimit);
1332 #ifndef JS_64BIT
1333 if (isMem64(access->memoryIndex())) {
1334 // We must have had an explicit bounds check (or one was elided if it was
1335 // proved redundant), and on 32-bit systems the index will for sure fit in
1336 // 32 bits: the max memory is 2GB. So chop the index down to 32-bit to
1337 // simplify the back-end.
1338 MOZ_ASSERT((*base)->type() == MIRType::Int64);
1339 MOZ_ASSERT(!moduleEnv_.hugeMemoryEnabled(access->memoryIndex()));
1340 auto* chopped = MWasmWrapU32Index::New(alloc(), *base);
1341 MOZ_ASSERT(chopped->type() == MIRType::Int32);
1342 curBlock_->add(chopped);
1343 *base = chopped;
1345 #endif
1348 bool isSmallerAccessForI64(ValType result, const MemoryAccessDesc* access) {
1349 if (result == ValType::I64 && access->byteSize() <= 4) {
1350 // These smaller accesses should all be zero-extending.
1351 MOZ_ASSERT(!isSignedIntType(access->type()));
1352 return true;
1354 return false;
1357 public:
1358 bool isMem32(uint32_t memoryIndex) {
1359 return moduleEnv_.memories[memoryIndex].indexType() == IndexType::I32;
1361 bool isMem64(uint32_t memoryIndex) {
1362 return moduleEnv_.memories[memoryIndex].indexType() == IndexType::I64;
1364 bool hugeMemoryEnabled(uint32_t memoryIndex) {
1365 return moduleEnv_.hugeMemoryEnabled(memoryIndex);
1368 // Add the offset into the pointer to yield the EA; trap on overflow.
1369 MDefinition* computeEffectiveAddress(MDefinition* base,
1370 MemoryAccessDesc* access) {
1371 if (inDeadCode()) {
1372 return nullptr;
1374 uint64_t offset = access->offset64();
1375 if (offset == 0) {
1376 return base;
1378 auto* ins = MWasmAddOffset::New(alloc(), base, offset, bytecodeOffset());
1379 curBlock_->add(ins);
1380 access->clearOffset();
1381 return ins;
1384 MDefinition* load(MDefinition* base, MemoryAccessDesc* access,
1385 ValType result) {
1386 if (inDeadCode()) {
1387 return nullptr;
1390 MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
1391 MInstruction* load = nullptr;
1392 if (moduleEnv_.isAsmJS()) {
1393 MOZ_ASSERT(access->offset64() == 0);
1394 MWasmLoadInstance* boundsCheckLimit =
1395 maybeLoadBoundsCheckLimit(access->memoryIndex(), MIRType::Int32);
1396 load = MAsmJSLoadHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
1397 access->type());
1398 } else {
1399 checkOffsetAndAlignmentAndBounds(access, &base);
1400 #ifndef JS_64BIT
1401 MOZ_ASSERT(base->type() == MIRType::Int32);
1402 #endif
1403 load = MWasmLoad::New(alloc(), memoryBase, base, *access,
1404 result.toMIRType());
1406 if (!load) {
1407 return nullptr;
1409 curBlock_->add(load);
1410 return load;
1413 void store(MDefinition* base, MemoryAccessDesc* access, MDefinition* v) {
1414 if (inDeadCode()) {
1415 return;
1418 MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
1419 MInstruction* store = nullptr;
1420 if (moduleEnv_.isAsmJS()) {
1421 MOZ_ASSERT(access->offset64() == 0);
1422 MWasmLoadInstance* boundsCheckLimit =
1423 maybeLoadBoundsCheckLimit(access->memoryIndex(), MIRType::Int32);
1424 store = MAsmJSStoreHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
1425 access->type(), v);
1426 } else {
1427 checkOffsetAndAlignmentAndBounds(access, &base);
1428 #ifndef JS_64BIT
1429 MOZ_ASSERT(base->type() == MIRType::Int32);
1430 #endif
1431 store = MWasmStore::New(alloc(), memoryBase, base, *access, v);
1433 if (!store) {
1434 return;
1436 curBlock_->add(store);
1439 MDefinition* atomicCompareExchangeHeap(MDefinition* base,
1440 MemoryAccessDesc* access,
1441 ValType result, MDefinition* oldv,
1442 MDefinition* newv) {
1443 if (inDeadCode()) {
1444 return nullptr;
1447 checkOffsetAndAlignmentAndBounds(access, &base);
1448 #ifndef JS_64BIT
1449 MOZ_ASSERT(base->type() == MIRType::Int32);
1450 #endif
1452 if (isSmallerAccessForI64(result, access)) {
1453 auto* cvtOldv =
1454 MWrapInt64ToInt32::New(alloc(), oldv, /*bottomHalf=*/true);
1455 curBlock_->add(cvtOldv);
1456 oldv = cvtOldv;
1458 auto* cvtNewv =
1459 MWrapInt64ToInt32::New(alloc(), newv, /*bottomHalf=*/true);
1460 curBlock_->add(cvtNewv);
1461 newv = cvtNewv;
1464 MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
1465 MInstruction* cas = MWasmCompareExchangeHeap::New(
1466 alloc(), bytecodeOffset(), memoryBase, base, *access, oldv, newv,
1467 instancePointer_);
1468 if (!cas) {
1469 return nullptr;
1471 curBlock_->add(cas);
1473 if (isSmallerAccessForI64(result, access)) {
1474 cas = MExtendInt32ToInt64::New(alloc(), cas, true);
1475 curBlock_->add(cas);
1478 return cas;
1481 MDefinition* atomicExchangeHeap(MDefinition* base, MemoryAccessDesc* access,
1482 ValType result, MDefinition* value) {
1483 if (inDeadCode()) {
1484 return nullptr;
1487 checkOffsetAndAlignmentAndBounds(access, &base);
1488 #ifndef JS_64BIT
1489 MOZ_ASSERT(base->type() == MIRType::Int32);
1490 #endif
1492 if (isSmallerAccessForI64(result, access)) {
1493 auto* cvtValue =
1494 MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
1495 curBlock_->add(cvtValue);
1496 value = cvtValue;
1499 MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
1500 MInstruction* xchg =
1501 MWasmAtomicExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
1502 base, *access, value, instancePointer_);
1503 if (!xchg) {
1504 return nullptr;
1506 curBlock_->add(xchg);
1508 if (isSmallerAccessForI64(result, access)) {
1509 xchg = MExtendInt32ToInt64::New(alloc(), xchg, true);
1510 curBlock_->add(xchg);
1513 return xchg;
1516 MDefinition* atomicBinopHeap(AtomicOp op, MDefinition* base,
1517 MemoryAccessDesc* access, ValType result,
1518 MDefinition* value) {
1519 if (inDeadCode()) {
1520 return nullptr;
1523 checkOffsetAndAlignmentAndBounds(access, &base);
1524 #ifndef JS_64BIT
1525 MOZ_ASSERT(base->type() == MIRType::Int32);
1526 #endif
1528 if (isSmallerAccessForI64(result, access)) {
1529 auto* cvtValue =
1530 MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
1531 curBlock_->add(cvtValue);
1532 value = cvtValue;
1535 MDefinition* memoryBase = maybeLoadMemoryBase(access->memoryIndex());
1536 MInstruction* binop =
1537 MWasmAtomicBinopHeap::New(alloc(), bytecodeOffset(), op, memoryBase,
1538 base, *access, value, instancePointer_);
1539 if (!binop) {
1540 return nullptr;
1542 curBlock_->add(binop);
1544 if (isSmallerAccessForI64(result, access)) {
1545 binop = MExtendInt32ToInt64::New(alloc(), binop, true);
1546 curBlock_->add(binop);
1549 return binop;
1552 #ifdef ENABLE_WASM_SIMD
1553 MDefinition* loadSplatSimd128(Scalar::Type viewType,
1554 const LinearMemoryAddress<MDefinition*>& addr,
1555 wasm::SimdOp splatOp) {
1556 if (inDeadCode()) {
1557 return nullptr;
1560 MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
1561 bytecodeIfNotAsmJS(),
1562 hugeMemoryEnabled(addr.memoryIndex));
1564 // Generate better code (on x86)
1565 // If AVX2 is enabled, more broadcast operators are available.
1566 if (viewType == Scalar::Float64
1567 # if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
1568 || (js::jit::CPUInfo::IsAVX2Present() &&
1569 (viewType == Scalar::Uint8 || viewType == Scalar::Uint16 ||
1570 viewType == Scalar::Float32))
1571 # endif
1573 access.setSplatSimd128Load();
1574 return load(addr.base, &access, ValType::V128);
1577 ValType resultType = ValType::I32;
1578 if (viewType == Scalar::Float32) {
1579 resultType = ValType::F32;
1580 splatOp = wasm::SimdOp::F32x4Splat;
1582 auto* scalar = load(addr.base, &access, resultType);
1583 if (!inDeadCode() && !scalar) {
1584 return nullptr;
1586 return scalarToSimd128(scalar, splatOp);
1589 MDefinition* loadExtendSimd128(const LinearMemoryAddress<MDefinition*>& addr,
1590 wasm::SimdOp op) {
1591 if (inDeadCode()) {
1592 return nullptr;
1595 // Generate better code (on x86) by loading as a double with an
1596 // operation that sign extends directly.
1597 MemoryAccessDesc access(addr.memoryIndex, Scalar::Float64, addr.align,
1598 addr.offset, bytecodeIfNotAsmJS(),
1599 hugeMemoryEnabled(addr.memoryIndex));
1600 access.setWidenSimd128Load(op);
1601 return load(addr.base, &access, ValType::V128);
1604 MDefinition* loadZeroSimd128(Scalar::Type viewType, size_t numBytes,
1605 const LinearMemoryAddress<MDefinition*>& addr) {
1606 if (inDeadCode()) {
1607 return nullptr;
1610 MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
1611 bytecodeIfNotAsmJS(),
1612 hugeMemoryEnabled(addr.memoryIndex));
1613 access.setZeroExtendSimd128Load();
1614 return load(addr.base, &access, ValType::V128);
1617 MDefinition* loadLaneSimd128(uint32_t laneSize,
1618 const LinearMemoryAddress<MDefinition*>& addr,
1619 uint32_t laneIndex, MDefinition* src) {
1620 if (inDeadCode()) {
1621 return nullptr;
1624 MemoryAccessDesc access(addr.memoryIndex, Scalar::Simd128, addr.align,
1625 addr.offset, bytecodeIfNotAsmJS(),
1626 hugeMemoryEnabled(addr.memoryIndex));
1627 MDefinition* memoryBase = maybeLoadMemoryBase(access.memoryIndex());
1628 MDefinition* base = addr.base;
1629 MOZ_ASSERT(!moduleEnv_.isAsmJS());
1630 checkOffsetAndAlignmentAndBounds(&access, &base);
1631 # ifndef JS_64BIT
1632 MOZ_ASSERT(base->type() == MIRType::Int32);
1633 # endif
1634 MInstruction* load = MWasmLoadLaneSimd128::New(
1635 alloc(), memoryBase, base, access, laneSize, laneIndex, src);
1636 if (!load) {
1637 return nullptr;
1639 curBlock_->add(load);
1640 return load;
1643 void storeLaneSimd128(uint32_t laneSize,
1644 const LinearMemoryAddress<MDefinition*>& addr,
1645 uint32_t laneIndex, MDefinition* src) {
1646 if (inDeadCode()) {
1647 return;
1649 MemoryAccessDesc access(addr.memoryIndex, Scalar::Simd128, addr.align,
1650 addr.offset, bytecodeIfNotAsmJS(),
1651 hugeMemoryEnabled(addr.memoryIndex));
1652 MDefinition* memoryBase = maybeLoadMemoryBase(access.memoryIndex());
1653 MDefinition* base = addr.base;
1654 MOZ_ASSERT(!moduleEnv_.isAsmJS());
1655 checkOffsetAndAlignmentAndBounds(&access, &base);
1656 # ifndef JS_64BIT
1657 MOZ_ASSERT(base->type() == MIRType::Int32);
1658 # endif
1659 MInstruction* store = MWasmStoreLaneSimd128::New(
1660 alloc(), memoryBase, base, access, laneSize, laneIndex, src);
1661 if (!store) {
1662 return;
1664 curBlock_->add(store);
1666 #endif // ENABLE_WASM_SIMD
1668 /************************************************ Global variable accesses */
1670 MDefinition* loadGlobalVar(unsigned instanceDataOffset, bool isConst,
1671 bool isIndirect, MIRType type) {
1672 if (inDeadCode()) {
1673 return nullptr;
1676 MInstruction* load;
1677 if (isIndirect) {
1678 // Pull a pointer to the value out of Instance::globalArea, then
1679 // load from that pointer. Note that the pointer is immutable
1680 // even though the value it points at may change, hence the use of
1681 // |true| for the first node's |isConst| value, irrespective of
1682 // the |isConst| formal parameter to this method. The latter
1683 // applies to the denoted value as a whole.
1684 auto* cellPtr = MWasmLoadInstanceDataField::New(
1685 alloc(), MIRType::Pointer, instanceDataOffset,
1686 /*isConst=*/true, instancePointer_);
1687 curBlock_->add(cellPtr);
1688 load = MWasmLoadGlobalCell::New(alloc(), type, cellPtr);
1689 } else {
1690 // Pull the value directly out of Instance::globalArea.
1691 load = MWasmLoadInstanceDataField::New(alloc(), type, instanceDataOffset,
1692 isConst, instancePointer_);
1694 curBlock_->add(load);
1695 return load;
1698 [[nodiscard]] bool storeGlobalVar(uint32_t lineOrBytecode,
1699 uint32_t instanceDataOffset,
1700 bool isIndirect, MDefinition* v) {
1701 if (inDeadCode()) {
1702 return true;
1705 if (isIndirect) {
1706 // Pull a pointer to the value out of Instance::globalArea, then
1707 // store through that pointer.
1708 auto* valueAddr = MWasmLoadInstanceDataField::New(
1709 alloc(), MIRType::Pointer, instanceDataOffset,
1710 /*isConst=*/true, instancePointer_);
1711 curBlock_->add(valueAddr);
1713 // Handle a store to a ref-typed field specially
1714 if (v->type() == MIRType::WasmAnyRef) {
1715 // Load the previous value for the post-write barrier
1716 auto* prevValue =
1717 MWasmLoadGlobalCell::New(alloc(), MIRType::WasmAnyRef, valueAddr);
1718 curBlock_->add(prevValue);
1720 // Store the new value
1721 auto* store =
1722 MWasmStoreRef::New(alloc(), instancePointer_, valueAddr,
1723 /*valueOffset=*/0, v, AliasSet::WasmGlobalCell,
1724 WasmPreBarrierKind::Normal);
1725 curBlock_->add(store);
1727 // Call the post-write barrier
1728 return postBarrierPrecise(lineOrBytecode, valueAddr, prevValue);
1731 auto* store = MWasmStoreGlobalCell::New(alloc(), v, valueAddr);
1732 curBlock_->add(store);
1733 return true;
1735 // Or else store the value directly in Instance::globalArea.
1737 // Handle a store to a ref-typed field specially
1738 if (v->type() == MIRType::WasmAnyRef) {
1739 // Compute the address of the ref-typed global
1740 auto* valueAddr = MWasmDerivedPointer::New(
1741 alloc(), instancePointer_,
1742 wasm::Instance::offsetInData(instanceDataOffset));
1743 curBlock_->add(valueAddr);
1745 // Load the previous value for the post-write barrier
1746 auto* prevValue =
1747 MWasmLoadGlobalCell::New(alloc(), MIRType::WasmAnyRef, valueAddr);
1748 curBlock_->add(prevValue);
1750 // Store the new value
1751 auto* store =
1752 MWasmStoreRef::New(alloc(), instancePointer_, valueAddr,
1753 /*valueOffset=*/0, v, AliasSet::WasmInstanceData,
1754 WasmPreBarrierKind::Normal);
1755 curBlock_->add(store);
1757 // Call the post-write barrier
1758 return postBarrierPrecise(lineOrBytecode, valueAddr, prevValue);
1761 auto* store = MWasmStoreInstanceDataField::New(alloc(), instanceDataOffset,
1762 v, instancePointer_);
1763 curBlock_->add(store);
1764 return true;
1767 MDefinition* loadTableField(uint32_t tableIndex, unsigned fieldOffset,
1768 MIRType type) {
1769 uint32_t instanceDataOffset = wasm::Instance::offsetInData(
1770 moduleEnv_.offsetOfTableInstanceData(tableIndex) + fieldOffset);
1771 auto* load =
1772 MWasmLoadInstance::New(alloc(), instancePointer_, instanceDataOffset,
1773 type, AliasSet::Load(AliasSet::WasmTableMeta));
1774 curBlock_->add(load);
1775 return load;
1778 MDefinition* loadTableLength(uint32_t tableIndex) {
1779 return loadTableField(tableIndex, offsetof(TableInstanceData, length),
1780 MIRType::Int32);
1783 MDefinition* loadTableElements(uint32_t tableIndex) {
1784 return loadTableField(tableIndex, offsetof(TableInstanceData, elements),
1785 MIRType::Pointer);
1788 MDefinition* tableGetAnyRef(uint32_t tableIndex, MDefinition* index) {
1789 // Load the table length and perform a bounds check with spectre index
1790 // masking
1791 auto* length = loadTableLength(tableIndex);
1792 auto* check = MWasmBoundsCheck::New(
1793 alloc(), index, length, bytecodeOffset(), MWasmBoundsCheck::Unknown);
1794 curBlock_->add(check);
1795 if (JitOptions.spectreIndexMasking) {
1796 index = check;
1799 // Load the table elements and load the element
1800 auto* elements = loadTableElements(tableIndex);
1801 auto* element = MWasmLoadTableElement::New(alloc(), elements, index);
1802 curBlock_->add(element);
1803 return element;
1806 [[nodiscard]] bool tableSetAnyRef(uint32_t tableIndex, MDefinition* index,
1807 MDefinition* value,
1808 uint32_t lineOrBytecode) {
1809 // Load the table length and perform a bounds check with spectre index
1810 // masking
1811 auto* length = loadTableLength(tableIndex);
1812 auto* check = MWasmBoundsCheck::New(
1813 alloc(), index, length, bytecodeOffset(), MWasmBoundsCheck::Unknown);
1814 curBlock_->add(check);
1815 if (JitOptions.spectreIndexMasking) {
1816 index = check;
1819 // Load the table elements
1820 auto* elements = loadTableElements(tableIndex);
1822 // Load the previous value
1823 auto* prevValue = MWasmLoadTableElement::New(alloc(), elements, index);
1824 curBlock_->add(prevValue);
1826 // Compute the value's location for the post barrier
1827 auto* loc =
1828 MWasmDerivedIndexPointer::New(alloc(), elements, index, ScalePointer);
1829 curBlock_->add(loc);
1831 // Store the new value
1832 auto* store = MWasmStoreRef::New(
1833 alloc(), instancePointer_, loc, /*valueOffset=*/0, value,
1834 AliasSet::WasmTableElement, WasmPreBarrierKind::Normal);
1835 curBlock_->add(store);
1837 // Perform the post barrier
1838 return postBarrierPrecise(lineOrBytecode, loc, prevValue);
1841 void addInterruptCheck() {
1842 if (inDeadCode()) {
1843 return;
1845 curBlock_->add(
1846 MWasmInterruptCheck::New(alloc(), instancePointer_, bytecodeOffset()));
1849 // Perform a post-write barrier to update the generational store buffer. This
1850 // version will remove a previous store buffer entry if it is no longer
1851 // needed.
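// Illustrative use, mirroring the ref-typed global and table stores above:
// after an MWasmStoreRef of the new value `v` to `valueAddr`, the old cell
// contents `prevValue` are handed to the barrier with
//   postBarrierPrecise(lineOrBytecode, valueAddr, prevValue);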
1852 [[nodiscard]] bool postBarrierPrecise(uint32_t lineOrBytecode,
1853 MDefinition* valueAddr,
1854 MDefinition* value) {
1855 return emitInstanceCall2(lineOrBytecode, SASigPostBarrierPrecise, valueAddr,
1856 value);
1859 // Perform a post-write barrier to update the generational store buffer. This
1860 // version will remove a previous store buffer entry if it is no longer
1861 // needed.
1862 [[nodiscard]] bool postBarrierPreciseWithOffset(uint32_t lineOrBytecode,
1863 MDefinition* valueBase,
1864 uint32_t valueOffset,
1865 MDefinition* value) {
1866 MDefinition* valueOffsetDef = constantI32(int32_t(valueOffset));
1867 if (!valueOffsetDef) {
1868 return false;
1870 return emitInstanceCall3(lineOrBytecode, SASigPostBarrierPreciseWithOffset,
1871 valueBase, valueOffsetDef, value);
1874 // Perform a post-write barrier to update the generational store buffer. This
1875 // version is the most efficient and only requires the address at which the
1876 // value is stored and the new value itself. It does not remove a previous
1877 // store buffer entry if it is no longer needed; use a precise post-write
1878 // barrier for that.
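// Illustrative use, as in writeGcValueAtBasePlusOffset further below: a ref
// store to `base + offset` that is kept alive by `keepAlive` is followed by
//   postBarrier(lineOrBytecode, keepAlive, base, offset, value);
// Callers that already hold the old value (the global and table stores
// above) use the precise variants instead.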
1879 [[nodiscard]] bool postBarrier(uint32_t lineOrBytecode, MDefinition* object,
1880 MDefinition* valueBase, uint32_t valueOffset,
1881 MDefinition* newValue) {
1882 auto* barrier = MWasmPostWriteBarrier::New(
1883 alloc(), instancePointer_, object, valueBase, valueOffset, newValue);
1884 if (!barrier) {
1885 return false;
1887 curBlock_->add(barrier);
1888 return true;
1891 /***************************************************************** Calls */
1893 // The IonMonkey backend maintains a single stack offset (from the stack
1894 // pointer to the base of the frame) by adding the total amount of spill
1895 // space required plus the maximum stack required for argument passing.
1896 // Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
1897 // manually accumulate, for the entire function, the maximum required stack
1898 // space for argument passing. (This is passed to the CodeGenerator via
1899 // MIRGenerator::maxWasmStackArgBytes.) This is just the maximum of the
1900 // stack space required for each individual call (as determined by the call
1901 // ABI).
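// For illustration (arbitrary sizes): if one call site needs 16 bytes of
// outgoing stack-argument space and another needs 32, finishCall() records
// each via maxStackArgBytes_ = std::max(maxStackArgBytes_, stackBytes), so
// the function-wide value handed to the CodeGenerator is 32.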
1903 // Operations that modify a CallCompileState.
1905 [[nodiscard]] bool passInstance(MIRType instanceType,
1906 CallCompileState* args) {
1907 if (inDeadCode()) {
1908 return true;
1911 // Should only pass an instance once. And it must be a non-GC pointer.
1912 MOZ_ASSERT(args->instanceArg_ == ABIArg());
1913 MOZ_ASSERT(instanceType == MIRType::Pointer);
1914 args->instanceArg_ = args->abi_.next(MIRType::Pointer);
1915 return true;
1918 // Do not call this directly. Call one of the passArg() variants instead.
1919 [[nodiscard]] bool passArgWorker(MDefinition* argDef, MIRType type,
1920 CallCompileState* call) {
1921 ABIArg arg = call->abi_.next(type);
1922 switch (arg.kind()) {
1923 #ifdef JS_CODEGEN_REGISTER_PAIR
1924 case ABIArg::GPR_PAIR: {
1925 auto mirLow =
1926 MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ true);
1927 curBlock_->add(mirLow);
1928 auto mirHigh =
1929 MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ false);
1930 curBlock_->add(mirHigh);
1931 return call->regArgs_.append(
1932 MWasmCallBase::Arg(AnyRegister(arg.gpr64().low), mirLow)) &&
1933 call->regArgs_.append(
1934 MWasmCallBase::Arg(AnyRegister(arg.gpr64().high), mirHigh));
1936 #endif
1937 case ABIArg::GPR:
1938 case ABIArg::FPU:
1939 return call->regArgs_.append(MWasmCallBase::Arg(arg.reg(), argDef));
1940 case ABIArg::Stack: {
1941 auto* mir =
1942 MWasmStackArg::New(alloc(), arg.offsetFromArgBase(), argDef);
1943 curBlock_->add(mir);
1944 return true;
1946 case ABIArg::Uninitialized:
1947 MOZ_ASSERT_UNREACHABLE("Uninitialized ABIArg kind");
1949 MOZ_CRASH("Unknown ABIArg kind.");
1952 template <typename SpanT>
1953 [[nodiscard]] bool passArgs(const DefVector& argDefs, SpanT types,
1954 CallCompileState* call) {
1955 MOZ_ASSERT(argDefs.length() == types.size());
1956 for (uint32_t i = 0; i < argDefs.length(); i++) {
1957 MDefinition* def = argDefs[i];
1958 ValType type = types[i];
1959 if (!passArg(def, type, call)) {
1960 return false;
1963 return true;
1966 [[nodiscard]] bool passArg(MDefinition* argDef, MIRType type,
1967 CallCompileState* call) {
1968 if (inDeadCode()) {
1969 return true;
1971 return passArgWorker(argDef, type, call);
1974 [[nodiscard]] bool passArg(MDefinition* argDef, ValType type,
1975 CallCompileState* call) {
1976 if (inDeadCode()) {
1977 return true;
1979 return passArgWorker(argDef, type.toMIRType(), call);
1982 void markReturnCall(CallCompileState* call) { call->returnCall = true; }
1984 // If the call returns results on the stack, prepare a stack area to receive
1985 // them, and pass the address of the stack area to the callee as an additional
1986 // argument.
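// For illustration: with a callee returning more results than fit in
// registers, the ABIResultIter below first skips the in-register results;
// the remaining (stack) results get an MWasmStackResultArea whose address
// is passed to the callee as one extra MIRType::Pointer argument.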
1987 [[nodiscard]] bool passStackResultAreaCallArg(const ResultType& resultType,
1988 CallCompileState* call) {
1989 if (inDeadCode()) {
1990 return true;
1992 ABIResultIter iter(resultType);
1993 while (!iter.done() && iter.cur().inRegister()) {
1994 iter.next();
1996 if (iter.done()) {
1997 // No stack results.
1998 return true;
2001 auto* stackResultArea = MWasmStackResultArea::New(alloc());
2002 if (!stackResultArea) {
2003 return false;
2005 if (!stackResultArea->init(alloc(), iter.remaining())) {
2006 return false;
2008 for (uint32_t base = iter.index(); !iter.done(); iter.next()) {
2009 MWasmStackResultArea::StackResult loc(iter.cur().stackOffset(),
2010 iter.cur().type().toMIRType());
2011 stackResultArea->initResult(iter.index() - base, loc);
2013 curBlock_->add(stackResultArea);
2014 MDefinition* def = call->returnCall ? (MDefinition*)stackResultPointer_
2015 : (MDefinition*)stackResultArea;
2016 if (!passArg(def, MIRType::Pointer, call)) {
2017 return false;
2019 call->stackResultArea_ = stackResultArea;
2020 return true;
2023 [[nodiscard]] bool finishCall(CallCompileState* call) {
2024 if (inDeadCode()) {
2025 return true;
2028 if (!call->regArgs_.append(
2029 MWasmCallBase::Arg(AnyRegister(InstanceReg), instancePointer_))) {
2030 return false;
2033 uint32_t stackBytes = call->abi_.stackBytesConsumedSoFar();
2035 maxStackArgBytes_ = std::max(maxStackArgBytes_, stackBytes);
2036 return true;
2039 // Wrappers for creating various kinds of calls.
2041 [[nodiscard]] bool collectUnaryCallResult(MIRType type,
2042 MDefinition** result) {
2043 MInstruction* def;
2044 switch (type) {
2045 case MIRType::Int32:
2046 def = MWasmRegisterResult::New(alloc(), MIRType::Int32, ReturnReg);
2047 break;
2048 case MIRType::Int64:
2049 def = MWasmRegister64Result::New(alloc(), ReturnReg64);
2050 break;
2051 case MIRType::Float32:
2052 def = MWasmFloatRegisterResult::New(alloc(), type, ReturnFloat32Reg);
2053 break;
2054 case MIRType::Double:
2055 def = MWasmFloatRegisterResult::New(alloc(), type, ReturnDoubleReg);
2056 break;
2057 #ifdef ENABLE_WASM_SIMD
2058 case MIRType::Simd128:
2059 def = MWasmFloatRegisterResult::New(alloc(), type, ReturnSimd128Reg);
2060 break;
2061 #endif
2062 case MIRType::WasmAnyRef:
2063 def = MWasmRegisterResult::New(alloc(), MIRType::WasmAnyRef, ReturnReg);
2064 break;
2065 default:
2066 MOZ_CRASH("unexpected MIRType result for builtin call");
2069 if (!def) {
2070 return false;
2073 curBlock_->add(def);
2074 *result = def;
2076 return true;
2079 [[nodiscard]] bool collectCallResults(const ResultType& type,
2080 MWasmStackResultArea* stackResultArea,
2081 DefVector* results) {
2082 if (!results->reserve(type.length())) {
2083 return false;
2086 // The result iterator goes in the order in which results would be popped
2087 // off; we want the order in which they would be pushed.
2088 ABIResultIter iter(type);
2089 uint32_t stackResultCount = 0;
2090 while (!iter.done()) {
2091 if (iter.cur().onStack()) {
2092 stackResultCount++;
2094 iter.next();
2097 for (iter.switchToPrev(); !iter.done(); iter.prev()) {
2098 if (!mirGen().ensureBallast()) {
2099 return false;
2101 const ABIResult& result = iter.cur();
2102 MInstruction* def;
2103 if (result.inRegister()) {
2104 switch (result.type().kind()) {
2105 case wasm::ValType::I32:
2106 def =
2107 MWasmRegisterResult::New(alloc(), MIRType::Int32, result.gpr());
2108 break;
2109 case wasm::ValType::I64:
2110 def = MWasmRegister64Result::New(alloc(), result.gpr64());
2111 break;
2112 case wasm::ValType::F32:
2113 def = MWasmFloatRegisterResult::New(alloc(), MIRType::Float32,
2114 result.fpr());
2115 break;
2116 case wasm::ValType::F64:
2117 def = MWasmFloatRegisterResult::New(alloc(), MIRType::Double,
2118 result.fpr());
2119 break;
2120 case wasm::ValType::Ref:
2121 def = MWasmRegisterResult::New(alloc(), MIRType::WasmAnyRef,
2122 result.gpr());
2123 break;
2124 case wasm::ValType::V128:
2125 #ifdef ENABLE_WASM_SIMD
2126 def = MWasmFloatRegisterResult::New(alloc(), MIRType::Simd128,
2127 result.fpr());
2128 #else
2129 return this->iter().fail("Ion has no SIMD support yet");
2130 #endif
2132 } else {
2133 MOZ_ASSERT(stackResultArea);
2134 MOZ_ASSERT(stackResultCount);
2135 uint32_t idx = --stackResultCount;
2136 def = MWasmStackResult::New(alloc(), stackResultArea, idx);
2139 if (!def) {
2140 return false;
2142 curBlock_->add(def);
2143 results->infallibleAppend(def);
2146 MOZ_ASSERT(results->length() == type.length());
2148 return true;
2151 [[nodiscard]] bool catchableCall(const CallSiteDesc& desc,
2152 const CalleeDesc& callee,
2153 const MWasmCallBase::Args& args,
2154 const ArgTypeVector& argTypes,
2155 MDefinition* indexOrRef = nullptr) {
2156 MWasmCallTryDesc tryDesc;
2157 if (!beginTryCall(&tryDesc)) {
2158 return false;
2161 MInstruction* ins;
2162 if (tryDesc.inTry) {
2163 ins = MWasmCallCatchable::New(alloc(), desc, callee, args,
2164 StackArgAreaSizeUnaligned(argTypes),
2165 tryDesc, indexOrRef);
2166 } else {
2167 ins = MWasmCallUncatchable::New(alloc(), desc, callee, args,
2168 StackArgAreaSizeUnaligned(argTypes),
2169 indexOrRef);
2171 if (!ins) {
2172 return false;
2174 curBlock_->add(ins);
2176 return finishTryCall(&tryDesc);
2179 [[nodiscard]] bool callDirect(const FuncType& funcType, uint32_t funcIndex,
2180 uint32_t lineOrBytecode,
2181 const CallCompileState& call,
2182 DefVector* results) {
2183 MOZ_ASSERT(!inDeadCode());
2185 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Func);
2186 ResultType resultType = ResultType::Vector(funcType.results());
2187 auto callee = CalleeDesc::function(funcIndex);
2188 ArgTypeVector args(funcType);
2190 if (!catchableCall(desc, callee, call.regArgs_, args)) {
2191 return false;
2193 return collectCallResults(resultType, call.stackResultArea_, results);
2196 [[nodiscard]] bool returnCallDirect(const FuncType& funcType,
2197 uint32_t funcIndex,
2198 uint32_t lineOrBytecode,
2199 const CallCompileState& call,
2200 DefVector* results) {
2201 MOZ_ASSERT(!inDeadCode());
2203 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::ReturnFunc);
2204 auto callee = CalleeDesc::function(funcIndex);
2205 ArgTypeVector args(funcType);
2207 auto ins = MWasmReturnCall::New(alloc(), desc, callee, call.regArgs_,
2208 StackArgAreaSizeUnaligned(args), nullptr);
2209 if (!ins) {
2210 return false;
2212 curBlock_->end(ins);
2213 curBlock_ = nullptr;
2214 return true;
2217 [[nodiscard]] bool returnCallImport(unsigned globalDataOffset,
2218 uint32_t lineOrBytecode,
2219 const CallCompileState& call,
2220 const FuncType& funcType,
2221 DefVector* results) {
2222 MOZ_ASSERT(!inDeadCode());
2224 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Import);
2225 auto callee = CalleeDesc::import(globalDataOffset);
2226 ArgTypeVector args(funcType);
2228 auto* ins = MWasmReturnCall::New(alloc(), desc, callee, call.regArgs_,
2229 StackArgAreaSizeUnaligned(args), nullptr);
2230 if (!ins) {
2231 return false;
2233 curBlock_->end(ins);
2234 curBlock_ = nullptr;
2235 return true;
2238 [[nodiscard]] bool returnCallIndirect(uint32_t funcTypeIndex,
2239 uint32_t tableIndex, MDefinition* index,
2240 uint32_t lineOrBytecode,
2241 const CallCompileState& call,
2242 DefVector* results) {
2243 MOZ_ASSERT(!inDeadCode());
2245 const FuncType& funcType = (*moduleEnv_.types)[funcTypeIndex].funcType();
2246 CallIndirectId callIndirectId =
2247 CallIndirectId::forFuncType(moduleEnv_, funcTypeIndex);
2249 CalleeDesc callee;
2250 MOZ_ASSERT(callIndirectId.kind() != CallIndirectIdKind::AsmJS);
2251 const TableDesc& table = moduleEnv_.tables[tableIndex];
2252 callee =
2253 CalleeDesc::wasmTable(moduleEnv_, table, tableIndex, callIndirectId);
2255 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Indirect);
2256 ArgTypeVector args(funcType);
2258 auto* ins = MWasmReturnCall::New(alloc(), desc, callee, call.regArgs_,
2259 StackArgAreaSizeUnaligned(args), index);
2260 if (!ins) {
2261 return false;
2263 curBlock_->end(ins);
2264 curBlock_ = nullptr;
2265 return true;
2268 [[nodiscard]] bool callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
2269 MDefinition* index, uint32_t lineOrBytecode,
2270 const CallCompileState& call,
2271 DefVector* results) {
2272 MOZ_ASSERT(!inDeadCode());
2274 const FuncType& funcType = (*moduleEnv_.types)[funcTypeIndex].funcType();
2275 CallIndirectId callIndirectId =
2276 CallIndirectId::forFuncType(moduleEnv_, funcTypeIndex);
2278 CalleeDesc callee;
2279 if (moduleEnv_.isAsmJS()) {
2280 MOZ_ASSERT(tableIndex == 0);
2281 MOZ_ASSERT(callIndirectId.kind() == CallIndirectIdKind::AsmJS);
2282 uint32_t tableIndex = moduleEnv_.asmJSSigToTableIndex[funcTypeIndex];
2283 const TableDesc& table = moduleEnv_.tables[tableIndex];
2284 MOZ_ASSERT(IsPowerOfTwo(table.initialLength));
2286 MDefinition* mask = constantI32(int32_t(table.initialLength - 1));
2287 MBitAnd* maskedIndex = MBitAnd::New(alloc(), index, mask, MIRType::Int32);
2288 curBlock_->add(maskedIndex);
2290 index = maskedIndex;
2291 callee = CalleeDesc::asmJSTable(moduleEnv_, tableIndex);
2292 } else {
2293 MOZ_ASSERT(callIndirectId.kind() != CallIndirectIdKind::AsmJS);
2294 const TableDesc& table = moduleEnv_.tables[tableIndex];
2295 callee =
2296 CalleeDesc::wasmTable(moduleEnv_, table, tableIndex, callIndirectId);
2299 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Indirect);
2300 ArgTypeVector args(funcType);
2301 ResultType resultType = ResultType::Vector(funcType.results());
2303 if (!catchableCall(desc, callee, call.regArgs_, args, index)) {
2304 return false;
2306 return collectCallResults(resultType, call.stackResultArea_, results);
2309 [[nodiscard]] bool callImport(unsigned instanceDataOffset,
2310 uint32_t lineOrBytecode,
2311 const CallCompileState& call,
2312 const FuncType& funcType, DefVector* results) {
2313 MOZ_ASSERT(!inDeadCode());
2315 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Import);
2316 auto callee = CalleeDesc::import(instanceDataOffset);
2317 ArgTypeVector args(funcType);
2318 ResultType resultType = ResultType::Vector(funcType.results());
2320 if (!catchableCall(desc, callee, call.regArgs_, args)) {
2321 return false;
2323 return collectCallResults(resultType, call.stackResultArea_, results);
2326 [[nodiscard]] bool builtinCall(const SymbolicAddressSignature& builtin,
2327 uint32_t lineOrBytecode,
2328 const CallCompileState& call,
2329 MDefinition** def) {
2330 if (inDeadCode()) {
2331 *def = nullptr;
2332 return true;
2335 MOZ_ASSERT(builtin.failureMode == FailureMode::Infallible);
2337 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Symbolic);
2338 auto callee = CalleeDesc::builtin(builtin.identity);
2339 auto* ins = MWasmCallUncatchable::New(alloc(), desc, callee, call.regArgs_,
2340 StackArgAreaSizeUnaligned(builtin));
2341 if (!ins) {
2342 return false;
2345 curBlock_->add(ins);
2347 return collectUnaryCallResult(builtin.retType, def);
2350 [[nodiscard]] bool builtinInstanceMethodCall(
2351 const SymbolicAddressSignature& builtin, uint32_t lineOrBytecode,
2352 const CallCompileState& call, MDefinition** def = nullptr) {
2353 MOZ_ASSERT_IF(!def, builtin.retType == MIRType::None);
2354 if (inDeadCode()) {
2355 if (def) {
2356 *def = nullptr;
2358 return true;
2361 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Symbolic);
2362 auto* ins = MWasmCallUncatchable::NewBuiltinInstanceMethodCall(
2363 alloc(), desc, builtin.identity, builtin.failureMode, call.instanceArg_,
2364 call.regArgs_, StackArgAreaSizeUnaligned(builtin));
2365 if (!ins) {
2366 return false;
2369 curBlock_->add(ins);
2371 return def ? collectUnaryCallResult(builtin.retType, def) : true;
2374 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
2375 [[nodiscard]] bool callRef(const FuncType& funcType, MDefinition* ref,
2376 uint32_t lineOrBytecode,
2377 const CallCompileState& call, DefVector* results) {
2378 MOZ_ASSERT(!inDeadCode());
2380 CalleeDesc callee = CalleeDesc::wasmFuncRef();
2382 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::FuncRef);
2383 ArgTypeVector args(funcType);
2384 ResultType resultType = ResultType::Vector(funcType.results());
2386 if (!catchableCall(desc, callee, call.regArgs_, args, ref)) {
2387 return false;
2389 return collectCallResults(resultType, call.stackResultArea_, results);
2392 # ifdef ENABLE_WASM_TAIL_CALLS
2393 [[nodiscard]] bool returnCallRef(const FuncType& funcType, MDefinition* ref,
2394 uint32_t lineOrBytecode,
2395 const CallCompileState& call,
2396 DefVector* results) {
2397 MOZ_ASSERT(!inDeadCode());
2399 CalleeDesc callee = CalleeDesc::wasmFuncRef();
2401 CallSiteDesc desc(lineOrBytecode, CallSiteDesc::FuncRef);
2402 ArgTypeVector args(funcType);
2404 auto* ins = MWasmReturnCall::New(alloc(), desc, callee, call.regArgs_,
2405 StackArgAreaSizeUnaligned(args), ref);
2406 if (!ins) {
2407 return false;
2409 curBlock_->end(ins);
2410 curBlock_ = nullptr;
2411 return true;
2414 # endif // ENABLE_WASM_TAIL_CALLS
2416 #endif // ENABLE_WASM_FUNCTION_REFERENCES
2418 /*********************************************** Control flow generation */
2420 inline bool inDeadCode() const { return curBlock_ == nullptr; }
2422 [[nodiscard]] bool returnValues(const DefVector& values) {
2423 if (inDeadCode()) {
2424 return true;
2427 if (values.empty()) {
2428 curBlock_->end(MWasmReturnVoid::New(alloc(), instancePointer_));
2429 } else {
2430 ResultType resultType = ResultType::Vector(funcType().results());
2431 ABIResultIter iter(resultType);
2432 // Switch to iterate in FIFO order instead of the default LIFO.
2433 while (!iter.done()) {
2434 iter.next();
2436 iter.switchToPrev();
2437 for (uint32_t i = 0; !iter.done(); iter.prev(), i++) {
2438 if (!mirGen().ensureBallast()) {
2439 return false;
2441 const ABIResult& result = iter.cur();
2442 if (result.onStack()) {
2443 MOZ_ASSERT(iter.remaining() > 1);
2444 if (result.type().isRefRepr()) {
2445 auto* store = MWasmStoreRef::New(
2446 alloc(), instancePointer_, stackResultPointer_,
2447 result.stackOffset(), values[i], AliasSet::WasmStackResult,
2448 WasmPreBarrierKind::None);
2449 curBlock_->add(store);
2450 } else {
2451 auto* store = MWasmStoreStackResult::New(
2452 alloc(), stackResultPointer_, result.stackOffset(), values[i]);
2453 curBlock_->add(store);
2455 } else {
2456 MOZ_ASSERT(iter.remaining() == 1);
2457 MOZ_ASSERT(i + 1 == values.length());
2458 curBlock_->end(
2459 MWasmReturn::New(alloc(), values[i], instancePointer_));
2463 curBlock_ = nullptr;
2464 return true;
2467 void unreachableTrap() {
2468 if (inDeadCode()) {
2469 return;
2472 auto* ins =
2473 MWasmTrap::New(alloc(), wasm::Trap::Unreachable, bytecodeOffset());
2474 curBlock_->end(ins);
2475 curBlock_ = nullptr;
2478 private:
2479 static uint32_t numPushed(MBasicBlock* block) {
2480 return block->stackDepth() - block->info().firstStackSlot();
2483 public:
2484 [[nodiscard]] bool pushDefs(const DefVector& defs) {
2485 if (inDeadCode()) {
2486 return true;
2488 MOZ_ASSERT(numPushed(curBlock_) == 0);
2489 if (!curBlock_->ensureHasSlots(defs.length())) {
2490 return false;
2492 for (MDefinition* def : defs) {
2493 MOZ_ASSERT(def->type() != MIRType::None);
2494 curBlock_->push(def);
2496 return true;
2499 [[nodiscard]] bool popPushedDefs(DefVector* defs) {
2500 size_t n = numPushed(curBlock_);
2501 if (!defs->resizeUninitialized(n)) {
2502 return false;
2504 for (; n > 0; n--) {
2505 MDefinition* def = curBlock_->pop();
2506 MOZ_ASSERT(def->type() != MIRType::Value);
2507 (*defs)[n - 1] = def;
2509 return true;
2512 private:
2513 [[nodiscard]] bool addJoinPredecessor(const DefVector& defs,
2514 MBasicBlock** joinPred) {
2515 *joinPred = curBlock_;
2516 if (inDeadCode()) {
2517 return true;
2519 return pushDefs(defs);
2522 public:
2523 [[nodiscard]] bool branchAndStartThen(MDefinition* cond,
2524 MBasicBlock** elseBlock) {
2525 if (inDeadCode()) {
2526 *elseBlock = nullptr;
2527 } else {
2528 MBasicBlock* thenBlock;
2529 if (!newBlock(curBlock_, &thenBlock)) {
2530 return false;
2532 if (!newBlock(curBlock_, elseBlock)) {
2533 return false;
2536 curBlock_->end(MTest::New(alloc(), cond, thenBlock, *elseBlock));
2538 curBlock_ = thenBlock;
2539 mirGraph().moveBlockToEnd(curBlock_);
2542 return startBlock();
2545 [[nodiscard]] bool switchToElse(MBasicBlock* elseBlock,
2546 MBasicBlock** thenJoinPred) {
2547 DefVector values;
2548 if (!finishBlock(&values)) {
2549 return false;
2552 if (!elseBlock) {
2553 *thenJoinPred = nullptr;
2554 } else {
2555 if (!addJoinPredecessor(values, thenJoinPred)) {
2556 return false;
2559 curBlock_ = elseBlock;
2560 mirGraph().moveBlockToEnd(curBlock_);
2563 return startBlock();
2566 [[nodiscard]] bool joinIfElse(MBasicBlock* thenJoinPred, DefVector* defs) {
2567 DefVector values;
2568 if (!finishBlock(&values)) {
2569 return false;
2572 if (!thenJoinPred && inDeadCode()) {
2573 return true;
2576 MBasicBlock* elseJoinPred;
2577 if (!addJoinPredecessor(values, &elseJoinPred)) {
2578 return false;
2581 mozilla::Array<MBasicBlock*, 2> blocks;
2582 size_t numJoinPreds = 0;
2583 if (thenJoinPred) {
2584 blocks[numJoinPreds++] = thenJoinPred;
2586 if (elseJoinPred) {
2587 blocks[numJoinPreds++] = elseJoinPred;
2590 if (numJoinPreds == 0) {
2591 return true;
2594 MBasicBlock* join;
2595 if (!goToNewBlock(blocks[0], &join)) {
2596 return false;
2598 for (size_t i = 1; i < numJoinPreds; ++i) {
2599 if (!goToExistingBlock(blocks[i], join)) {
2600 return false;
2604 curBlock_ = join;
2605 return popPushedDefs(defs);
2608 [[nodiscard]] bool startBlock() {
2609 MOZ_ASSERT_IF(blockDepth_ < blockPatches_.length(),
2610 blockPatches_[blockDepth_].empty());
2611 blockDepth_++;
2612 return true;
2615 [[nodiscard]] bool finishBlock(DefVector* defs) {
2616 MOZ_ASSERT(blockDepth_);
2617 uint32_t topLabel = --blockDepth_;
2618 return bindBranches(topLabel, defs);
2621 [[nodiscard]] bool startLoop(MBasicBlock** loopHeader, size_t paramCount) {
2622 *loopHeader = nullptr;
2624 blockDepth_++;
2625 loopDepth_++;
2627 if (inDeadCode()) {
2628 return true;
2631 // Create the loop header.
2632 MOZ_ASSERT(curBlock_->loopDepth() == loopDepth_ - 1);
2633 *loopHeader = MBasicBlock::New(mirGraph(), info(), curBlock_,
2634 MBasicBlock::PENDING_LOOP_HEADER);
2635 if (!*loopHeader) {
2636 return false;
2639 (*loopHeader)->setLoopDepth(loopDepth_);
2640 mirGraph().addBlock(*loopHeader);
2641 curBlock_->end(MGoto::New(alloc(), *loopHeader));
2643 DefVector loopParams;
2644 if (!iter().getResults(paramCount, &loopParams)) {
2645 return false;
2647 for (size_t i = 0; i < paramCount; i++) {
2648 MPhi* phi = MPhi::New(alloc(), loopParams[i]->type());
2649 if (!phi) {
2650 return false;
2652 if (!phi->reserveLength(2)) {
2653 return false;
2655 (*loopHeader)->addPhi(phi);
2656 phi->addInput(loopParams[i]);
2657 loopParams[i] = phi;
2659 iter().setResults(paramCount, loopParams);
2661 MBasicBlock* body;
2662 if (!goToNewBlock(*loopHeader, &body)) {
2663 return false;
2665 curBlock_ = body;
2666 return true;
2669 private:
2670 void fixupRedundantPhis(MBasicBlock* b) {
2671 for (size_t i = 0, depth = b->stackDepth(); i < depth; i++) {
2672 MDefinition* def = b->getSlot(i);
2673 if (def->isUnused()) {
2674 b->setSlot(i, def->toPhi()->getOperand(0));
2679 [[nodiscard]] bool setLoopBackedge(MBasicBlock* loopEntry,
2680 MBasicBlock* loopBody,
2681 MBasicBlock* backedge, size_t paramCount) {
2682 if (!loopEntry->setBackedgeWasm(backedge, paramCount)) {
2683 return false;
2686 // Flag all redundant phis as unused.
2687 for (MPhiIterator phi = loopEntry->phisBegin(); phi != loopEntry->phisEnd();
2688 phi++) {
2689 MOZ_ASSERT(phi->numOperands() == 2);
2690 if (phi->getOperand(0) == phi->getOperand(1)) {
2691 phi->setUnused();
2695 // Fix up phis stored in the slots Vector of pending blocks.
2696 for (ControlFlowPatchVector& patches : blockPatches_) {
2697 for (ControlFlowPatch& p : patches) {
2698 MBasicBlock* block = p.ins->block();
2699 if (block->loopDepth() >= loopEntry->loopDepth()) {
2700 fixupRedundantPhis(block);
2705 // The loop body, if any, might be referencing recycled phis too.
2706 if (loopBody) {
2707 fixupRedundantPhis(loopBody);
2710 // Pending jumps to an enclosing try-catch may reference the recycled phis.
2711 // We have to search above all enclosing try blocks, as a delegate may move
2712 // patches around.
2713 for (uint32_t depth = 0; depth < iter().controlStackDepth(); depth++) {
2714 LabelKind kind = iter().controlKind(depth);
2715 if (kind != LabelKind::Try && kind != LabelKind::Body) {
2716 continue;
2718 Control& control = iter().controlItem(depth);
2719 for (MControlInstruction* patch : control.tryPadPatches) {
2720 MBasicBlock* block = patch->block();
2721 if (block->loopDepth() >= loopEntry->loopDepth()) {
2722 fixupRedundantPhis(block);
2727 // Discard redundant phis and add to the free list.
2728 for (MPhiIterator phi = loopEntry->phisBegin();
2729 phi != loopEntry->phisEnd();) {
2730 MPhi* entryDef = *phi++;
2731 if (!entryDef->isUnused()) {
2732 continue;
2735 entryDef->justReplaceAllUsesWith(entryDef->getOperand(0));
2736 loopEntry->discardPhi(entryDef);
2737 mirGraph().addPhiToFreeList(entryDef);
2740 return true;
2743 public:
2744 [[nodiscard]] bool closeLoop(MBasicBlock* loopHeader,
2745 DefVector* loopResults) {
2746 MOZ_ASSERT(blockDepth_ >= 1);
2747 MOZ_ASSERT(loopDepth_);
2749 uint32_t headerLabel = blockDepth_ - 1;
2751 if (!loopHeader) {
2752 MOZ_ASSERT(inDeadCode());
2753 MOZ_ASSERT(headerLabel >= blockPatches_.length() ||
2754 blockPatches_[headerLabel].empty());
2755 blockDepth_--;
2756 loopDepth_--;
2757 return true;
2760 // Op::Loop doesn't have an implicit backedge so temporarily set
2761 // aside the end of the loop body to bind backedges.
2762 MBasicBlock* loopBody = curBlock_;
2763 curBlock_ = nullptr;
2765 // As explained in bug 1253544, Ion apparently has an invariant that
2766 // there is only one backedge to loop headers. To handle wasm's ability
2767 // to have multiple backedges to the same loop header, we bind all those
2768 // branches as forward jumps to a single backward jump. This is
2769 // unfortunate but the optimizer is able to fold these into single jumps
2770 // to backedges.
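// For illustration (hypothetical wasm): in
//   (loop $l ... (br_if $l ...) ... (br $l))
// both branches to $l are first bound by bindBranches below as forward
// jumps into one shared block, and only that block ends with the single
// MGoto backedge to the loop header.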
2771 DefVector backedgeValues;
2772 if (!bindBranches(headerLabel, &backedgeValues)) {
2773 return false;
2776 MOZ_ASSERT(loopHeader->loopDepth() == loopDepth_);
2778 if (curBlock_) {
2779 // We're on the loop backedge block, created by bindBranches.
2780 for (size_t i = 0, n = numPushed(curBlock_); i != n; i++) {
2781 curBlock_->pop();
2784 if (!pushDefs(backedgeValues)) {
2785 return false;
2788 MOZ_ASSERT(curBlock_->loopDepth() == loopDepth_);
2789 curBlock_->end(MGoto::New(alloc(), loopHeader));
2790 if (!setLoopBackedge(loopHeader, loopBody, curBlock_,
2791 backedgeValues.length())) {
2792 return false;
2796 curBlock_ = loopBody;
2798 loopDepth_--;
2800 // If the loop depth is still at the inner loop body, correct it.
2801 if (curBlock_ && curBlock_->loopDepth() != loopDepth_) {
2802 MBasicBlock* out;
2803 if (!goToNewBlock(curBlock_, &out)) {
2804 return false;
2806 curBlock_ = out;
2809 blockDepth_ -= 1;
2810 return inDeadCode() || popPushedDefs(loopResults);
2813 [[nodiscard]] bool addControlFlowPatch(MControlInstruction* ins,
2814 uint32_t relative, uint32_t index) {
2815 MOZ_ASSERT(relative < blockDepth_);
2816 uint32_t absolute = blockDepth_ - 1 - relative;
2818 if (absolute >= blockPatches_.length() &&
2819 !blockPatches_.resize(absolute + 1)) {
2820 return false;
2823 return blockPatches_[absolute].append(ControlFlowPatch(ins, index));
2826 [[nodiscard]] bool br(uint32_t relativeDepth, const DefVector& values) {
2827 if (inDeadCode()) {
2828 return true;
2831 MGoto* jump = MGoto::New(alloc());
2832 if (!addControlFlowPatch(jump, relativeDepth, MGoto::TargetIndex)) {
2833 return false;
2836 if (!pushDefs(values)) {
2837 return false;
2840 curBlock_->end(jump);
2841 curBlock_ = nullptr;
2842 return true;
2845 [[nodiscard]] bool brIf(uint32_t relativeDepth, const DefVector& values,
2846 MDefinition* condition) {
2847 if (inDeadCode()) {
2848 return true;
2851 MBasicBlock* joinBlock = nullptr;
2852 if (!newBlock(curBlock_, &joinBlock)) {
2853 return false;
2856 MTest* test = MTest::New(alloc(), condition, nullptr, joinBlock);
2857 if (!addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
2858 return false;
2861 if (!pushDefs(values)) {
2862 return false;
2865 curBlock_->end(test);
2866 curBlock_ = joinBlock;
2867 return true;
2870 [[nodiscard]] bool brTable(MDefinition* operand, uint32_t defaultDepth,
2871 const Uint32Vector& depths,
2872 const DefVector& values) {
2873 if (inDeadCode()) {
2874 return true;
2877 size_t numCases = depths.length();
2878 MOZ_ASSERT(numCases <= INT32_MAX);
2879 MOZ_ASSERT(numCases);
2881 MTableSwitch* table =
2882 MTableSwitch::New(alloc(), operand, 0, int32_t(numCases - 1));
2884 size_t defaultIndex;
2885 if (!table->addDefault(nullptr, &defaultIndex)) {
2886 return false;
2888 if (!addControlFlowPatch(table, defaultDepth, defaultIndex)) {
2889 return false;
2892 using IndexToCaseMap =
2893 HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>;
2895 IndexToCaseMap indexToCase;
2896 if (!indexToCase.put(defaultDepth, defaultIndex)) {
2897 return false;
2900 for (size_t i = 0; i < numCases; i++) {
2901 if (!mirGen_.ensureBallast()) {
2902 return false;
2905 uint32_t depth = depths[i];
2907 size_t caseIndex;
2908 IndexToCaseMap::AddPtr p = indexToCase.lookupForAdd(depth);
2909 if (!p) {
2910 if (!table->addSuccessor(nullptr, &caseIndex)) {
2911 return false;
2913 if (!addControlFlowPatch(table, depth, caseIndex)) {
2914 return false;
2916 if (!indexToCase.add(p, depth, caseIndex)) {
2917 return false;
2919 } else {
2920 caseIndex = p->value();
2923 if (!table->addCase(caseIndex)) {
2924 return false;
2928 if (!pushDefs(values)) {
2929 return false;
2932 curBlock_->end(table);
2933 curBlock_ = nullptr;
2935 return true;
2938 /********************************************************** Exceptions ***/
2940 bool inTryBlock(uint32_t* relativeDepth) {
2941 return iter().controlFindInnermost(LabelKind::Try, relativeDepth);
2944 bool inTryCode() {
2945 uint32_t relativeDepth;
2946 return inTryBlock(&relativeDepth);
2949 MDefinition* loadTag(uint32_t tagIndex) {
2950 MWasmLoadInstanceDataField* tag = MWasmLoadInstanceDataField::New(
2951 alloc(), MIRType::WasmAnyRef,
2952 moduleEnv_.offsetOfTagInstanceData(tagIndex), true, instancePointer_);
2953 curBlock_->add(tag);
2954 return tag;
2957 void loadPendingExceptionState(MInstruction** exception, MInstruction** tag) {
2958 *exception = MWasmLoadInstance::New(
2959 alloc(), instancePointer_, wasm::Instance::offsetOfPendingException(),
2960 MIRType::WasmAnyRef, AliasSet::Load(AliasSet::WasmPendingException));
2961 curBlock_->add(*exception);
2963 *tag = MWasmLoadInstance::New(
2964 alloc(), instancePointer_,
2965 wasm::Instance::offsetOfPendingExceptionTag(), MIRType::WasmAnyRef,
2966 AliasSet::Load(AliasSet::WasmPendingException));
2967 curBlock_->add(*tag);
2970 [[nodiscard]] bool setPendingExceptionState(MDefinition* exception,
2971 MDefinition* tag) {
2972 // Set the pending exception object
2973 auto* exceptionAddr = MWasmDerivedPointer::New(
2974 alloc(), instancePointer_, Instance::offsetOfPendingException());
2975 curBlock_->add(exceptionAddr);
2976 auto* setException = MWasmStoreRef::New(
2977 alloc(), instancePointer_, exceptionAddr, /*valueOffset=*/0, exception,
2978 AliasSet::WasmPendingException, WasmPreBarrierKind::Normal);
2979 curBlock_->add(setException);
2980 if (!postBarrierPrecise(/*lineOrBytecode=*/0, exceptionAddr, exception)) {
2981 return false;
2984 // Set the pending exception tag object
2985 auto* exceptionTagAddr = MWasmDerivedPointer::New(
2986 alloc(), instancePointer_, Instance::offsetOfPendingExceptionTag());
2987 curBlock_->add(exceptionTagAddr);
2988 auto* setExceptionTag = MWasmStoreRef::New(
2989 alloc(), instancePointer_, exceptionTagAddr, /*valueOffset=*/0, tag,
2990 AliasSet::WasmPendingException, WasmPreBarrierKind::Normal);
2991 curBlock_->add(setExceptionTag);
2992 return postBarrierPrecise(/*lineOrBytecode=*/0, exceptionTagAddr, tag);
2995 [[nodiscard]] bool addPadPatch(MControlInstruction* ins,
2996 size_t relativeTryDepth) {
2997 Control& tryControl = iter().controlItem(relativeTryDepth);
2998 ControlInstructionVector& padPatches = tryControl.tryPadPatches;
2999 return padPatches.emplaceBack(ins);
3002 [[nodiscard]] bool endWithPadPatch(uint32_t relativeTryDepth) {
3003 MGoto* jumpToLandingPad = MGoto::New(alloc());
3004 curBlock_->end(jumpToLandingPad);
3005 return addPadPatch(jumpToLandingPad, relativeTryDepth);
3008 [[nodiscard]] bool delegatePadPatches(const ControlInstructionVector& patches,
3009 uint32_t relativeDepth) {
3010 if (patches.empty()) {
3011 return true;
3014 // Find where we are delegating the pad patches to.
3015 uint32_t targetRelativeDepth;
3016 if (!iter().controlFindInnermostFrom(LabelKind::Try, relativeDepth,
3017 &targetRelativeDepth)) {
3018 MOZ_ASSERT(relativeDepth <= blockDepth_ - 1);
3019 targetRelativeDepth = blockDepth_ - 1;
3021 // Append the delegate's pad patches to the target's.
3022 for (MControlInstruction* ins : patches) {
3023 if (!addPadPatch(ins, targetRelativeDepth)) {
3024 return false;
3027 return true;
3030 [[nodiscard]] bool beginTryCall(MWasmCallTryDesc* call) {
3031 call->inTry = inTryBlock(&call->relativeTryDepth);
3032 if (!call->inTry) {
3033 return true;
3035 // Allocate a try note
3036 if (!tryNotes_.append(wasm::TryNote())) {
3037 return false;
3039 call->tryNoteIndex = tryNotes_.length() - 1;
3040 // Allocate blocks for fallthrough and exceptions
3041 return newBlock(curBlock_, &call->fallthroughBlock) &&
3042 newBlock(curBlock_, &call->prePadBlock);
3045 [[nodiscard]] bool finishTryCall(MWasmCallTryDesc* call) {
3046 if (!call->inTry) {
3047 return true;
3050 // Switch to the prePadBlock
3051 MBasicBlock* callBlock = curBlock_;
3052 curBlock_ = call->prePadBlock;
3054 // Mark this as the landing pad for the call
3055 curBlock_->add(
3056 MWasmCallLandingPrePad::New(alloc(), callBlock, call->tryNoteIndex));
3058 // End with a pending jump to the landing pad
3059 if (!endWithPadPatch(call->relativeTryDepth)) {
3060 return false;
3063 // Compilation continues in the fallthroughBlock.
3064 curBlock_ = call->fallthroughBlock;
3065 return true;
3068 // Create a landing pad for a try block if there are any throwing
3069 // instructions.
3070 [[nodiscard]] bool createTryLandingPadIfNeeded(Control& control,
3071 MBasicBlock** landingPad) {
3072 // If there are no pad-patches for this try control, it means there are no
3073 // instructions in the try code that could throw an exception. In this
3074 // case, all the catches are dead code, and the try code ends up equivalent
3075 // to a plain wasm block.
3076 ControlInstructionVector& patches = control.tryPadPatches;
3077 if (patches.empty()) {
3078 *landingPad = nullptr;
3079 return true;
3082 // Otherwise, if there are (pad-) branches from places in the try code that
3083 // may throw an exception, bind these branches to a new landing pad
3084 // block. This is done similarly to what is done in bindBranches.
3085 MControlInstruction* ins = patches[0];
3086 MBasicBlock* pred = ins->block();
3087 if (!newBlock(pred, landingPad)) {
3088 return false;
3090 ins->replaceSuccessor(0, *landingPad);
3091 for (size_t i = 1; i < patches.length(); i++) {
3092 ins = patches[i];
3093 pred = ins->block();
3094 if (!(*landingPad)->addPredecessor(alloc(), pred)) {
3095 return false;
3097 ins->replaceSuccessor(0, *landingPad);
3100 // Set up the slots in the landing pad block.
3101 if (!setupLandingPadSlots(*landingPad)) {
3102 return false;
3105 // Clear the now bound pad patches.
3106 patches.clear();
3107 return true;
3110 // Consume the pending exception state from instance, and set up the slots
3111 // of the landing pad with the exception state.
3112 [[nodiscard]] bool setupLandingPadSlots(MBasicBlock* landingPad) {
3113 MBasicBlock* prevBlock = curBlock_;
3114 curBlock_ = landingPad;
3116 // Load the pending exception and tag
3117 MInstruction* exception;
3118 MInstruction* tag;
3119 loadPendingExceptionState(&exception, &tag);
3121 // Clear the pending exception and tag
3122 auto* null = constantNullRef();
3123 if (!setPendingExceptionState(null, null)) {
3124 return false;
3127 // Push the exception and its tag on the stack to make them available
3128 // to the landing pad blocks.
3129 if (!landingPad->ensureHasSlots(2)) {
3130 return false;
3132 landingPad->push(exception);
3133 landingPad->push(tag);
3135 curBlock_ = prevBlock;
3136 return true;
3139 [[nodiscard]] bool startTry(MBasicBlock** curBlock) {
3140 *curBlock = curBlock_;
3141 return startBlock();
3144 [[nodiscard]] bool joinTryOrCatchBlock(Control& control) {
3145 // If the try or catch block ended with dead code, there is no need to
3146 // do any control flow join.
3147 if (inDeadCode()) {
3148 return true;
3151 // This is a split path which we'll need to join later, using a control
3152 // flow patch.
3153 MOZ_ASSERT(!curBlock_->hasLastIns());
3154 MGoto* jump = MGoto::New(alloc());
3155 if (!addControlFlowPatch(jump, 0, MGoto::TargetIndex)) {
3156 return false;
3159 // Finish the current block with the control flow patch instruction.
3160 curBlock_->end(jump);
3161 return true;
3164 // Finish the previous block (either a try or catch block) and then setup a
3165 // new catch block.
3166 [[nodiscard]] bool switchToCatch(Control& control, const LabelKind& fromKind,
3167 uint32_t tagIndex) {
3168 // If there is no control block, then either:
3169 // - the entry of the try block is dead code, or
3170 // - there is no landing pad for the try-catch.
3171 // In either case, any catch will be dead code.
3172 if (!control.block) {
3173 MOZ_ASSERT(inDeadCode());
3174 return true;
3177 // Join the previous try or catch block with a patch to the future join of
3178 // the whole try-catch block.
3179 if (!joinTryOrCatchBlock(control)) {
3180 return false;
3183 // If we are switching from the try block, create the landing pad. This is
3184 // guaranteed to happen once and only once before processing catch blocks.
3185 if (fromKind == LabelKind::Try) {
3186 MBasicBlock* padBlock = nullptr;
3187 if (!createTryLandingPadIfNeeded(control, &padBlock)) {
3188 return false;
3190 // Set the control block for this try-catch to the landing pad.
3191 control.block = padBlock;
3194 // If there is no landing pad, then this and following catches are dead
3195 // code.
3196 if (!control.block) {
3197 curBlock_ = nullptr;
3198 return true;
3201 // Switch to the landing pad.
3202 curBlock_ = control.block;
3204 // Handle a catch_all by immediately jumping to a new block. We require a
3205 // new block (as opposed to just emitting the catch_all code in the current
3206 // block) because rethrow requires the exception/tag to be present in the
3207 // landing pad's slots, while the catch_all block must not have the
3208 // exception/tag in slots.
3209 if (tagIndex == CatchAllIndex) {
3210 MBasicBlock* catchAllBlock = nullptr;
3211 if (!goToNewBlock(curBlock_, &catchAllBlock)) {
3212 return false;
3214 // Compilation will continue in the catch_all block.
3215 curBlock_ = catchAllBlock;
3216 // Remove the tag and exception slots from the block; they are no
3217 // longer necessary.
3218 curBlock_->pop();
3219 curBlock_->pop();
3220 return true;
3223 // Handle a tagged catch by doing a compare and branch on the tag index,
3224 // jumping to a catch block if they match, or else to a fallthrough block
3225 // to continue the landing pad.
3226 MBasicBlock* catchBlock = nullptr;
3227 MBasicBlock* fallthroughBlock = nullptr;
3228 if (!newBlock(curBlock_, &catchBlock) ||
3229 !newBlock(curBlock_, &fallthroughBlock)) {
3230 return false;
3233 // Get the exception and its tag from the slots we pushed when adding
3234 // control flow patches.
3235 MDefinition* exceptionTag = curBlock_->pop();
3236 MDefinition* exception = curBlock_->pop();
3238 // Branch to the catch block if the exception's tag matches this catch
3239 // block's tag.
3240 MDefinition* catchTag = loadTag(tagIndex);
3241 MDefinition* matchesCatchTag =
3242 compare(exceptionTag, catchTag, JSOp::Eq, MCompare::Compare_WasmAnyRef);
3243 curBlock_->end(
3244 MTest::New(alloc(), matchesCatchTag, catchBlock, fallthroughBlock));
3246 // The landing pad will continue in the fallthrough block
3247 control.block = fallthroughBlock;
3249 // Set up the catch block by extracting the values from the exception
3250 // object.
3251 curBlock_ = catchBlock;
3253 // Remove the tag and exception slots from the block; they are no
3254 // longer necessary.
3255 curBlock_->pop();
3256 curBlock_->pop();
3258 // Extract the exception values for the catch block
3259 DefVector values;
3260 if (!loadExceptionValues(exception, tagIndex, &values)) {
3261 return false;
3263 iter().setResults(values.length(), values);
3264 return true;
3267 [[nodiscard]] bool loadExceptionValues(MDefinition* exception,
3268 uint32_t tagIndex, DefVector* values) {
3269 SharedTagType tagType = moduleEnv().tags[tagIndex].type;
3270 const ValTypeVector& params = tagType->argTypes();
3271 const TagOffsetVector& offsets = tagType->argOffsets();
3273 // Get the data pointer from the exception object
3274 auto* data = MWasmLoadField::New(
3275 alloc(), exception, WasmExceptionObject::offsetOfData(),
3276 MIRType::Pointer, MWideningOp::None, AliasSet::Load(AliasSet::Any));
3277 if (!data) {
3278 return false;
3280 curBlock_->add(data);
3282 // Presize the values vector to the number of params
3283 if (!values->reserve(params.length())) {
3284 return false;
3287 // Load each value from the data pointer
3288 for (size_t i = 0; i < params.length(); i++) {
3289 if (!mirGen_.ensureBallast()) {
3290 return false;
3292 auto* load = MWasmLoadFieldKA::New(
3293 alloc(), exception, data, offsets[i], params[i].toMIRType(),
3294 MWideningOp::None, AliasSet::Load(AliasSet::Any));
3295 if (!load || !values->append(load)) {
3296 return false;
3298 curBlock_->add(load);
3300 return true;
3303 [[nodiscard]] bool finishTryCatch(LabelKind kind, Control& control,
3304 DefVector* defs) {
3305 switch (kind) {
3306 case LabelKind::Try: {
3307 // This is a catchless try; we must delegate all throwing instructions
3308 // to the nearest enclosing try block if one exists, or else to the
3309 // body block which will handle it in emitBodyDelegateThrowPad. We
3310 // specify a relativeDepth of '1' to delegate outside of the still
3311 // active try block.
3312 uint32_t relativeDepth = 1;
3313 if (!delegatePadPatches(control.tryPadPatches, relativeDepth)) {
3314 return false;
3316 break;
3318 case LabelKind::Catch: {
3319 // This is a try without a catch_all; we must have a rethrow at the end
3320 // of the landing pad (if any).
3321 MBasicBlock* padBlock = control.block;
3322 if (padBlock) {
3323 MBasicBlock* prevBlock = curBlock_;
3324 curBlock_ = padBlock;
3325 MDefinition* tag = curBlock_->pop();
3326 MDefinition* exception = curBlock_->pop();
3327 if (!throwFrom(exception, tag)) {
3328 return false;
3330 curBlock_ = prevBlock;
3332 break;
3334 case LabelKind::CatchAll:
3335 // This is a try with a catch_all, and requires no special handling.
3336 break;
3337 default:
3338 MOZ_CRASH();
3341 // Finish the block, joining the try and catch blocks
3342 return finishBlock(defs);
3345 [[nodiscard]] bool emitBodyDelegateThrowPad(Control& control) {
3346 // Create a landing pad for any throwing instructions
3347 MBasicBlock* padBlock;
3348 if (!createTryLandingPadIfNeeded(control, &padBlock)) {
3349 return false;
3352 // If no landing pad was necessary, then we don't need to do anything here
3353 if (!padBlock) {
3354 return true;
3357 // Switch to the landing pad and rethrow the exception
3358 MBasicBlock* prevBlock = curBlock_;
3359 curBlock_ = padBlock;
3360 MDefinition* tag = curBlock_->pop();
3361 MDefinition* exception = curBlock_->pop();
3362 if (!throwFrom(exception, tag)) {
3363 return false;
3365 curBlock_ = prevBlock;
3366 return true;
3369 [[nodiscard]] bool emitNewException(MDefinition* tag,
3370 MDefinition** exception) {
3371 return emitInstanceCall1(readBytecodeOffset(), SASigExceptionNew, tag,
3372 exception);
3375 [[nodiscard]] bool emitThrow(uint32_t tagIndex, const DefVector& argValues) {
3376 if (inDeadCode()) {
3377 return true;
3379 uint32_t bytecodeOffset = readBytecodeOffset();
3381 // Load the tag
3382 MDefinition* tag = loadTag(tagIndex);
3383 if (!tag) {
3384 return false;
3387 // Allocate an exception object
3388 MDefinition* exception;
3389 if (!emitNewException(tag, &exception)) {
3390 return false;
3393 // Load the data pointer from the object
3394 auto* data = MWasmLoadField::New(
3395 alloc(), exception, WasmExceptionObject::offsetOfData(),
3396 MIRType::Pointer, MWideningOp::None, AliasSet::Load(AliasSet::Any));
3397 if (!data) {
3398 return false;
3400 curBlock_->add(data);
3402 // Store the params into the data pointer
3403 SharedTagType tagType = moduleEnv_.tags[tagIndex].type;
3404 for (size_t i = 0; i < tagType->argOffsets().length(); i++) {
3405 if (!mirGen_.ensureBallast()) {
3406 return false;
3408 ValType type = tagType->argTypes()[i];
3409 uint32_t offset = tagType->argOffsets()[i];
3411 if (!type.isRefRepr()) {
3412 auto* store = MWasmStoreFieldKA::New(alloc(), exception, data, offset,
3413 argValues[i], MNarrowingOp::None,
3414 AliasSet::Store(AliasSet::Any));
3415 if (!store) {
3416 return false;
3418 curBlock_->add(store);
3419 continue;
3422 // Store the new value
3423 auto* store = MWasmStoreFieldRefKA::New(
3424 alloc(), instancePointer_, exception, data, offset, argValues[i],
3425 AliasSet::Store(AliasSet::Any), Nothing(), WasmPreBarrierKind::None);
3426 if (!store) {
3427 return false;
3429 curBlock_->add(store);
3431 // Call the post-write barrier
3432 if (!postBarrier(bytecodeOffset, exception, data, offset, argValues[i])) {
3433 return false;
3437 // Throw the exception
3438 return throwFrom(exception, tag);
3441 [[nodiscard]] bool throwFrom(MDefinition* exn, MDefinition* tag) {
3442 if (inDeadCode()) {
3443 return true;
3446 // Check if there is a local catching try control, and if so, then add a
3447 // pad-patch to its tryPadPatches.
3448 uint32_t relativeTryDepth;
3449 if (inTryBlock(&relativeTryDepth)) {
3450 // Set the pending exception state, the landing pad will read from this
3451 if (!setPendingExceptionState(exn, tag)) {
3452 return false;
3455 // End with a pending jump to the landing pad
3456 if (!endWithPadPatch(relativeTryDepth)) {
3457 return false;
3459 curBlock_ = nullptr;
3460 return true;
3463 // If there is no surrounding catching block, call an instance method to
3464 // throw the exception.
3465 if (!emitInstanceCall1(readBytecodeOffset(), SASigThrowException, exn)) {
3466 return false;
3468 unreachableTrap();
3470 curBlock_ = nullptr;
3471 return true;
3474 [[nodiscard]] bool emitRethrow(uint32_t relativeDepth) {
3475 if (inDeadCode()) {
3476 return true;
3479 Control& control = iter().controlItem(relativeDepth);
3480 MBasicBlock* pad = control.block;
3481 MOZ_ASSERT(pad);
3482 MOZ_ASSERT(pad->nslots() > 1);
3483 MOZ_ASSERT(iter().controlKind(relativeDepth) == LabelKind::Catch ||
3484 iter().controlKind(relativeDepth) == LabelKind::CatchAll);
3486 // The exception and its tag will always be the last two slots in the pad.
3487 size_t exnSlotPosition = pad->nslots() - 2;
3488 MDefinition* tag = pad->getSlot(exnSlotPosition + 1);
3489 MDefinition* exception = pad->getSlot(exnSlotPosition);
3490 MOZ_ASSERT(exception->type() == MIRType::WasmAnyRef &&
3491 tag->type() == MIRType::WasmAnyRef);
3492 return throwFrom(exception, tag);
3495 /*********************************************** Instance call helpers ***/
3497 // Do not call this function directly -- it offers no protection against
3498 // mis-counting of arguments. Instead call one of
3499 // ::emitInstanceCall{0,1,2,3,4,5,6}.
3501 // Emits a call to the Instance function indicated by `callee`. This is
3502 // assumed to take an Instance pointer as its first argument. The remaining
3503 // args are taken from `args`, which is assumed to hold `numArgs` entries.
3504 // If `result` is non-null, the MDefinition* holding the return value is
3505 // written to `*result`.
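// Illustrative use, taken from postBarrierPrecise above:
//   emitInstanceCall2(lineOrBytecode, SASigPostBarrierPrecise, valueAddr,
//                     value);
// No `result` out-param is supplied there, which (per the assertions below)
// means the callee's retType must be MIRType::None.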
3506 [[nodiscard]] bool emitInstanceCallN(uint32_t lineOrBytecode,
3507 const SymbolicAddressSignature& callee,
3508 MDefinition** args, size_t numArgs,
3509 MDefinition** result = nullptr) {
3510 // Check that the first formal parameter is plausibly an Instance pointer.
3511 MOZ_ASSERT(callee.numArgs > 0);
3512 MOZ_ASSERT(callee.argTypes[0] == MIRType::Pointer);
3513 // Check we agree on the number of args.
3514 MOZ_ASSERT(numArgs + 1 /* the instance pointer */ == callee.numArgs);
3515 // Check we agree on whether a value is returned.
3516 MOZ_ASSERT((result == nullptr) == (callee.retType == MIRType::None));
3518 // If we are in dead code, it can happen that some of the `args` entries
3519 // are nullptr, which will look like an OOM to the logic below. So exit
3520 // at this point. `passInstance`, `passArg`, `finishCall` and
3521 // `builtinInstanceMethodCall` all do nothing in dead code, so it's valid
3522 // to exit here.
3523 if (inDeadCode()) {
3524 if (result) {
3525 *result = nullptr;
3527 return true;
3530 // Check all args for signs of OOMness before attempting to allocate any
3531 // more memory.
3532 for (size_t i = 0; i < numArgs; i++) {
3533 if (!args[i]) {
3534 if (result) {
3535 *result = nullptr;
3537 return false;
3541 // Finally, construct the call.
3542 CallCompileState ccsArgs;
3543 if (!passInstance(callee.argTypes[0], &ccsArgs)) {
3544 return false;
3546 for (size_t i = 0; i < numArgs; i++) {
3547 if (!passArg(args[i], callee.argTypes[i + 1], &ccsArgs)) {
3548 return false;
3551 if (!finishCall(&ccsArgs)) {
3552 return false;
3554 return builtinInstanceMethodCall(callee, lineOrBytecode, ccsArgs, result);
3557 [[nodiscard]] bool emitInstanceCall0(uint32_t lineOrBytecode,
3558 const SymbolicAddressSignature& callee,
3559 MDefinition** result = nullptr) {
3560 MDefinition* args[0] = {};
3561 return emitInstanceCallN(lineOrBytecode, callee, args, 0, result);
3563 [[nodiscard]] bool emitInstanceCall1(uint32_t lineOrBytecode,
3564 const SymbolicAddressSignature& callee,
3565 MDefinition* arg1,
3566 MDefinition** result = nullptr) {
3567 MDefinition* args[1] = {arg1};
3568 return emitInstanceCallN(lineOrBytecode, callee, args, 1, result);
3570 [[nodiscard]] bool emitInstanceCall2(uint32_t lineOrBytecode,
3571 const SymbolicAddressSignature& callee,
3572 MDefinition* arg1, MDefinition* arg2,
3573 MDefinition** result = nullptr) {
3574 MDefinition* args[2] = {arg1, arg2};
3575 return emitInstanceCallN(lineOrBytecode, callee, args, 2, result);
3577 [[nodiscard]] bool emitInstanceCall3(uint32_t lineOrBytecode,
3578 const SymbolicAddressSignature& callee,
3579 MDefinition* arg1, MDefinition* arg2,
3580 MDefinition* arg3,
3581 MDefinition** result = nullptr) {
3582 MDefinition* args[3] = {arg1, arg2, arg3};
3583 return emitInstanceCallN(lineOrBytecode, callee, args, 3, result);
3585 [[nodiscard]] bool emitInstanceCall4(uint32_t lineOrBytecode,
3586 const SymbolicAddressSignature& callee,
3587 MDefinition* arg1, MDefinition* arg2,
3588 MDefinition* arg3, MDefinition* arg4,
3589 MDefinition** result = nullptr) {
3590 MDefinition* args[4] = {arg1, arg2, arg3, arg4};
3591 return emitInstanceCallN(lineOrBytecode, callee, args, 4, result);
3593 [[nodiscard]] bool emitInstanceCall5(uint32_t lineOrBytecode,
3594 const SymbolicAddressSignature& callee,
3595 MDefinition* arg1, MDefinition* arg2,
3596 MDefinition* arg3, MDefinition* arg4,
3597 MDefinition* arg5,
3598 MDefinition** result = nullptr) {
3599 MDefinition* args[5] = {arg1, arg2, arg3, arg4, arg5};
3600 return emitInstanceCallN(lineOrBytecode, callee, args, 5, result);
3602 [[nodiscard]] bool emitInstanceCall6(uint32_t lineOrBytecode,
3603 const SymbolicAddressSignature& callee,
3604 MDefinition* arg1, MDefinition* arg2,
3605 MDefinition* arg3, MDefinition* arg4,
3606 MDefinition* arg5, MDefinition* arg6,
3607 MDefinition** result = nullptr) {
3608 MDefinition* args[6] = {arg1, arg2, arg3, arg4, arg5, arg6};
3609 return emitInstanceCallN(lineOrBytecode, callee, args, 6, result);
3612 /******************************** WasmGC: low level load/store helpers ***/
3614 // Given a (FieldType, FieldWideningOp) pair, produce the (MIRType,
3615 // MWideningOp) pair that will give the correct operation for reading the
3616 // value from memory.
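// For example, per the switch below: an I8 field read with
// FieldWideningOp::Signed maps to (MIRType::Int32, MWideningOp::FromS8),
// an I16 read with FieldWideningOp::Unsigned maps to
// (MIRType::Int32, MWideningOp::FromU16), and wider fields use
// FieldWideningOp::None together with their natural MIRType.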
3617 static void fieldLoadInfoToMIR(FieldType type, FieldWideningOp wideningOp,
3618 MIRType* mirType, MWideningOp* mirWideningOp) {
3619 switch (type.kind()) {
3620 case FieldType::I8: {
3621 switch (wideningOp) {
3622 case FieldWideningOp::Signed:
3623 *mirType = MIRType::Int32;
3624 *mirWideningOp = MWideningOp::FromS8;
3625 return;
3626 case FieldWideningOp::Unsigned:
3627 *mirType = MIRType::Int32;
3628 *mirWideningOp = MWideningOp::FromU8;
3629 return;
3630 default:
3631 MOZ_CRASH();
3634 case FieldType::I16: {
3635 switch (wideningOp) {
3636 case FieldWideningOp::Signed:
3637 *mirType = MIRType::Int32;
3638 *mirWideningOp = MWideningOp::FromS16;
3639 return;
3640 case FieldWideningOp::Unsigned:
3641 *mirType = MIRType::Int32;
3642 *mirWideningOp = MWideningOp::FromU16;
3643 return;
3644 default:
3645 MOZ_CRASH();
3648 default: {
3649 switch (wideningOp) {
3650 case FieldWideningOp::None:
3651 *mirType = type.toMIRType();
3652 *mirWideningOp = MWideningOp::None;
3653 return;
3654 default:
3655 MOZ_CRASH();
3661 // Given a FieldType, produce the MNarrowingOp required for writing the
3662 // value to memory.
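// For example, per the switch below: I8 stores narrow with
// MNarrowingOp::To8, I16 stores with MNarrowingOp::To16, and all other
// field types store unmodified with MNarrowingOp::None.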
3663 static MNarrowingOp fieldStoreInfoToMIR(FieldType type) {
3664 switch (type.kind()) {
3665 case FieldType::I8:
3666 return MNarrowingOp::To8;
3667 case FieldType::I16:
3668 return MNarrowingOp::To16;
3669 default:
3670 return MNarrowingOp::None;
3674 // Generate a write of `value` at address `base + offset`, where `offset` is
3675 // known at JIT time. If the written value is a reftype, the previous value
3676 // at `base + offset` will be retrieved and handed off to the post-write
3677 // barrier. `keepAlive` will be referenced by the instruction so as to hold
3678 // it live (from the GC's point of view).
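// Illustrative use, as in writeGcValueAtBasePlusScaledIndex below: once a
// final address has been computed into `finalAddr`, a field write looks
// like
//   writeGcValueAtBasePlusOffset(lineOrBytecode, fieldType, keepAlive,
//                                aliasBitset, value, finalAddr,
//                                /*offset=*/0, /*needsTrapInfo=*/false,
//                                preBarrierKind);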
3679 [[nodiscard]] bool writeGcValueAtBasePlusOffset(
3680 uint32_t lineOrBytecode, FieldType fieldType, MDefinition* keepAlive,
3681 AliasSet::Flag aliasBitset, MDefinition* value, MDefinition* base,
3682 uint32_t offset, bool needsTrapInfo, WasmPreBarrierKind preBarrierKind) {
3683 MOZ_ASSERT(aliasBitset != 0);
3684 MOZ_ASSERT(keepAlive->type() == MIRType::WasmAnyRef);
3685 MOZ_ASSERT(fieldType.widenToValType().toMIRType() == value->type());
3686 MNarrowingOp narrowingOp = fieldStoreInfoToMIR(fieldType);
3688 if (!fieldType.isRefRepr()) {
3689 MaybeTrapSiteInfo maybeTrap;
3690 if (needsTrapInfo) {
3691 maybeTrap.emplace(getTrapSiteInfo());
3693 auto* store = MWasmStoreFieldKA::New(
3694 alloc(), keepAlive, base, offset, value, narrowingOp,
3695 AliasSet::Store(aliasBitset), maybeTrap);
3696 if (!store) {
3697 return false;
3699 curBlock_->add(store);
3700 return true;
3703 // Otherwise it's a ref store. Load the previous value so we can show it
3704 // to the post-write barrier.
3706 // Optimisation opportunity: for the case where this field write results
3707 // from struct.new, the old value is always zero. So we should synthesise
3708 // a suitable zero constant rather than reading it from the object. See
3709 // also bug 1799999.
3710 MOZ_ASSERT(narrowingOp == MNarrowingOp::None);
3711 MOZ_ASSERT(fieldType.widenToValType() == fieldType.valType());
3713 // Store the new value
3714 auto* store = MWasmStoreFieldRefKA::New(
3715 alloc(), instancePointer_, keepAlive, base, offset, value,
3716 AliasSet::Store(aliasBitset), mozilla::Some(getTrapSiteInfo()),
3717 preBarrierKind);
3718 if (!store) {
3719 return false;
3721 curBlock_->add(store);
3723 // Call the post-write barrier
3724 return postBarrier(lineOrBytecode, keepAlive, base, offset, value);
3727 // Generate a write of `value` at address `base + index * scale`, where
3728 // `scale` is known at JIT-time. If the written value is a reftype, the
3729 // previous value at `base + index * scale` will be retrieved and handed off
3730 // to the post-write barrier. `keepAlive` will be referenced by the
3731 // instruction so as to hold it live (from the GC's point of view).
3732 [[nodiscard]] bool writeGcValueAtBasePlusScaledIndex(
3733 uint32_t lineOrBytecode, FieldType fieldType, MDefinition* keepAlive,
3734 AliasSet::Flag aliasBitset, MDefinition* value, MDefinition* base,
3735 uint32_t scale, MDefinition* index, WasmPreBarrierKind preBarrierKind) {
3736 MOZ_ASSERT(aliasBitset != 0);
3737 MOZ_ASSERT(keepAlive->type() == MIRType::WasmAnyRef);
3738 MOZ_ASSERT(fieldType.widenToValType().toMIRType() == value->type());
3739 MOZ_ASSERT(scale == 1 || scale == 2 || scale == 4 || scale == 8 ||
3740 scale == 16);
3742 // Currently there's no single MIR node that this can be translated into.
3743 // So compute the final address "manually", then store directly to that
3744 // address. See bug 1802287.
3745 MDefinition* scaleDef = constantTargetWord(intptr_t(scale));
3746 if (!scaleDef) {
3747 return false;
3749 MDefinition* finalAddr = computeBasePlusScaledIndex(base, scaleDef, index);
3750 if (!finalAddr) {
3751 return false;
3754 return writeGcValueAtBasePlusOffset(
3755 lineOrBytecode, fieldType, keepAlive, aliasBitset, value, finalAddr,
3756 /*offset=*/0,
3757 /*needsTrapInfo=*/false, preBarrierKind);
3760 // Generate a read from address `base + offset`, where `offset` is known at
3761 // JIT time. The loaded value will be widened as described by `fieldType`
3762 // and `fieldWideningOp`. `keepAlive` will be referenced by the instruction
3763 // so as to hold it live (from the GC's point of view).
3764 [[nodiscard]] MDefinition* readGcValueAtBasePlusOffset(
3765 FieldType fieldType, FieldWideningOp fieldWideningOp,
3766 MDefinition* keepAlive, AliasSet::Flag aliasBitset, MDefinition* base,
3767 uint32_t offset, bool needsTrapInfo) {
3768 MOZ_ASSERT(aliasBitset != 0);
3769 MOZ_ASSERT(keepAlive->type() == MIRType::WasmAnyRef);
3770 MIRType mirType;
3771 MWideningOp mirWideningOp;
3772 fieldLoadInfoToMIR(fieldType, fieldWideningOp, &mirType, &mirWideningOp);
3773 MaybeTrapSiteInfo maybeTrap;
3774 if (needsTrapInfo) {
3775 maybeTrap.emplace(getTrapSiteInfo());
3777 auto* load = MWasmLoadFieldKA::New(alloc(), keepAlive, base, offset,
3778 mirType, mirWideningOp,
3779 AliasSet::Load(aliasBitset), maybeTrap);
3780 if (!load) {
3781 return nullptr;
3783 curBlock_->add(load);
3784 return load;
3787 // Generate a read from address `base + index * scale`, where `scale` is
3788 // known at JIT-time. The loaded value will be widened as described by
3789 // `fieldType` and `fieldWideningOp`. `keepAlive` will be referenced by the
3790 // instruction so as to hold it live (from the GC's point of view).
3791 [[nodiscard]] MDefinition* readGcValueAtBasePlusScaledIndex(
3792 FieldType fieldType, FieldWideningOp fieldWideningOp,
3793 MDefinition* keepAlive, AliasSet::Flag aliasBitset, MDefinition* base,
3794 uint32_t scale, MDefinition* index) {
3795 MOZ_ASSERT(aliasBitset != 0);
3796 MOZ_ASSERT(keepAlive->type() == MIRType::WasmAnyRef);
3797 MOZ_ASSERT(scale == 1 || scale == 2 || scale == 4 || scale == 8 ||
3798 scale == 16);
3800 // Currently there's no single MIR node that this can be translated into.
 3801   // So compute the final address "manually", then load directly from that
3802 // address. See bug 1802287.
3803 MDefinition* scaleDef = constantTargetWord(intptr_t(scale));
3804 if (!scaleDef) {
3805 return nullptr;
3807 MDefinition* finalAddr = computeBasePlusScaledIndex(base, scaleDef, index);
3808 if (!finalAddr) {
3809 return nullptr;
3812 MIRType mirType;
3813 MWideningOp mirWideningOp;
3814 fieldLoadInfoToMIR(fieldType, fieldWideningOp, &mirType, &mirWideningOp);
3815 auto* load = MWasmLoadFieldKA::New(alloc(), keepAlive, finalAddr,
3816 /*offset=*/0, mirType, mirWideningOp,
3817 AliasSet::Load(aliasBitset),
3818 mozilla::Some(getTrapSiteInfo()));
3819 if (!load) {
3820 return nullptr;
3822 curBlock_->add(load);
3823 return load;
3826 /************************************************ WasmGC: type helpers ***/
3828 // Returns an MDefinition holding the supertype vector for `typeIndex`.
3829 [[nodiscard]] MDefinition* loadSuperTypeVector(uint32_t typeIndex) {
3830 uint32_t stvOffset = moduleEnv().offsetOfSuperTypeVector(typeIndex);
3832 auto* load =
3833 MWasmLoadInstanceDataField::New(alloc(), MIRType::Pointer, stvOffset,
3834 /*isConst=*/true, instancePointer_);
3835 if (!load) {
3836 return nullptr;
3838 curBlock_->add(load);
3839 return load;
3842 [[nodiscard]] MDefinition* loadTypeDefInstanceData(uint32_t typeIndex) {
3843 size_t offset = Instance::offsetInData(
3844 moduleEnv_.offsetOfTypeDefInstanceData(typeIndex));
3845 auto* result = MWasmDerivedPointer::New(alloc(), instancePointer_, offset);
3846 if (!result) {
3847 return nullptr;
3849 curBlock_->add(result);
3850 return result;
3853 /********************************************** WasmGC: struct helpers ***/
3855 [[nodiscard]] MDefinition* createStructObject(uint32_t typeIndex,
3856 bool zeroFields) {
3857 const TypeDef& typeDef = (*moduleEnv().types)[typeIndex];
3858 gc::AllocKind allocKind = WasmStructObject::allocKindForTypeDef(&typeDef);
3859 bool isOutline =
3860 WasmStructObject::requiresOutlineBytes(typeDef.structType().size_);
3862 // Allocate an uninitialized struct. This requires the type definition
3863 // for the struct.
3864 MDefinition* typeDefData = loadTypeDefInstanceData(typeIndex);
3865 if (!typeDefData) {
3866 return nullptr;
3869 auto* structObject =
3870 MWasmNewStructObject::New(alloc(), instancePointer_, typeDefData,
3871 isOutline, zeroFields, allocKind);
3872 if (!structObject) {
3873 return nullptr;
3875 curBlock_->add(structObject);
3877 return structObject;
3880 // Helper function for EmitStruct{New,Set}: given a MIR pointer to a
3881 // WasmStructObject, a MIR pointer to a value, and a field descriptor,
3882 // generate MIR to write the value to the relevant field in the object.
3883 [[nodiscard]] bool writeValueToStructField(
3884 uint32_t lineOrBytecode, const StructField& field,
3885 MDefinition* structObject, MDefinition* value,
3886 WasmPreBarrierKind preBarrierKind) {
3887 FieldType fieldType = field.type;
3888 uint32_t fieldOffset = field.offset;
3890 bool areaIsOutline;
3891 uint32_t areaOffset;
3892 WasmStructObject::fieldOffsetToAreaAndOffset(fieldType, fieldOffset,
3893 &areaIsOutline, &areaOffset);
3895 // Make `base` point at the first byte of either the struct object as a
3896 // whole or of the out-of-line data area. And adjust `areaOffset`
3897 // accordingly.
3898 MDefinition* base;
3899 bool needsTrapInfo;
3900 if (areaIsOutline) {
3901 auto* load = MWasmLoadField::New(
3902 alloc(), structObject, WasmStructObject::offsetOfOutlineData(),
3903 MIRType::Pointer, MWideningOp::None,
3904 AliasSet::Load(AliasSet::WasmStructOutlineDataPointer),
3905 mozilla::Some(getTrapSiteInfo()));
3906 if (!load) {
3907 return false;
3909 curBlock_->add(load);
3910 base = load;
3911 needsTrapInfo = false;
3912 } else {
3913 base = structObject;
3914 needsTrapInfo = true;
3915 areaOffset += WasmStructObject::offsetOfInlineData();
3917 // The transaction is to happen at `base + areaOffset`, so to speak.
3918 // After this point we must ignore `fieldOffset`.
3920 // The alias set denoting the field's location, although lacking a
3921 // Load-vs-Store indication at this point.
3922 AliasSet::Flag fieldAliasSet = areaIsOutline
3923 ? AliasSet::WasmStructOutlineDataArea
3924 : AliasSet::WasmStructInlineDataArea;
3926 return writeGcValueAtBasePlusOffset(lineOrBytecode, fieldType, structObject,
3927 fieldAliasSet, value, base, areaOffset,
3928 needsTrapInfo, preBarrierKind);
3931 // Helper function for EmitStructGet: given a MIR pointer to a
3932 // WasmStructObject, a field descriptor and a field widening operation,
3933 // generate MIR to read the value from the relevant field in the object.
3934 [[nodiscard]] MDefinition* readValueFromStructField(
3935 const StructField& field, FieldWideningOp wideningOp,
3936 MDefinition* structObject) {
3937 FieldType fieldType = field.type;
3938 uint32_t fieldOffset = field.offset;
3940 bool areaIsOutline;
3941 uint32_t areaOffset;
3942 WasmStructObject::fieldOffsetToAreaAndOffset(fieldType, fieldOffset,
3943 &areaIsOutline, &areaOffset);
3945 // Make `base` point at the first byte of either the struct object as a
3946 // whole or of the out-of-line data area. And adjust `areaOffset`
3947 // accordingly.
3948 MDefinition* base;
3949 bool needsTrapInfo;
3950 if (areaIsOutline) {
3951 auto* loadOOLptr = MWasmLoadField::New(
3952 alloc(), structObject, WasmStructObject::offsetOfOutlineData(),
3953 MIRType::Pointer, MWideningOp::None,
3954 AliasSet::Load(AliasSet::WasmStructOutlineDataPointer),
3955 mozilla::Some(getTrapSiteInfo()));
3956 if (!loadOOLptr) {
3957 return nullptr;
3959 curBlock_->add(loadOOLptr);
3960 base = loadOOLptr;
3961 needsTrapInfo = false;
3962 } else {
3963 base = structObject;
3964 needsTrapInfo = true;
3965 areaOffset += WasmStructObject::offsetOfInlineData();
3967 // The transaction is to happen at `base + areaOffset`, so to speak.
3968 // After this point we must ignore `fieldOffset`.
3970 // The alias set denoting the field's location, although lacking a
3971 // Load-vs-Store indication at this point.
3972 AliasSet::Flag fieldAliasSet = areaIsOutline
3973 ? AliasSet::WasmStructOutlineDataArea
3974 : AliasSet::WasmStructInlineDataArea;
3976 return readGcValueAtBasePlusOffset(fieldType, wideningOp, structObject,
3977 fieldAliasSet, base, areaOffset,
3978 needsTrapInfo);
3981 /********************************* WasmGC: address-arithmetic helpers ***/
3983 inline bool targetIs64Bit() const {
3984 #ifdef JS_64BIT
3985 return true;
3986 #else
3987 return false;
3988 #endif
3991 // Generate MIR to unsigned widen `val` out to the target word size. If
3992 // `val` is already at the target word size, this is a no-op. The only
3993 // other allowed case is where `val` is Int32 and we're compiling for a
3994 // 64-bit target, in which case a widen is generated.
3995 [[nodiscard]] MDefinition* unsignedWidenToTargetWord(MDefinition* val) {
3996 if (targetIs64Bit()) {
3997 if (val->type() == MIRType::Int32) {
3998 auto* ext = MExtendInt32ToInt64::New(alloc(), val, /*isUnsigned=*/true);
3999 if (!ext) {
4000 return nullptr;
4002 curBlock_->add(ext);
4003 return ext;
4005 MOZ_ASSERT(val->type() == MIRType::Int64);
4006 return val;
4008 MOZ_ASSERT(val->type() == MIRType::Int32);
4009 return val;
4012 // Compute `base + index * scale`, for both 32- and 64-bit targets. For the
4013 // convenience of callers, on a 64-bit target, `index` and `scale` can
4014 // (independently) be either Int32 or Int64; in the former case they will be
4015 // zero-extended before the multiplication, so that both the multiplication
4016 // and addition are done at the target word size.
4017 [[nodiscard]] MDefinition* computeBasePlusScaledIndex(MDefinition* base,
4018 MDefinition* scale,
4019 MDefinition* index) {
4020 // On a 32-bit target, require:
4021 // base : Int32 (== TargetWordMIRType())
4022 // index, scale : Int32
4023 // Calculate base +32 (index *32 scale)
4025 // On a 64-bit target, require:
4026 // base : Int64 (== TargetWordMIRType())
4027 // index, scale: either Int32 or Int64 (any combination is OK)
 4028   //   Calculate base +64 ((u-widen to 64(index)) *64 (u-widen to 64(scale)))
4030 // Final result type is the same as that of `base`.
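  // For example, with base = 0x1000 (Int64), index = 3 (Int32) and scale = 8,
  // both index and scale are zero-extended and the result is the Int64 value
  // 0x1018.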
4032 MOZ_ASSERT(base->type() == TargetWordMIRType());
4034 // Widen `index` if necessary, producing `indexW`.
4035 MDefinition* indexW = unsignedWidenToTargetWord(index);
4036 if (!indexW) {
4037 return nullptr;
4039 // Widen `scale` if necessary, producing `scaleW`.
4040 MDefinition* scaleW = unsignedWidenToTargetWord(scale);
4041 if (!scaleW) {
4042 return nullptr;
4044 // Compute `scaledIndex = indexW * scaleW`.
4045 MIRType targetWordType = TargetWordMIRType();
4046 bool targetIs64 = targetWordType == MIRType::Int64;
4047 MMul* scaledIndex =
4048 MMul::NewWasm(alloc(), indexW, scaleW, targetWordType,
4049 targetIs64 ? MMul::Mode::Normal : MMul::Mode::Integer,
4050 /*mustPreserveNan=*/false);
4051 if (!scaledIndex) {
4052 return nullptr;
4054 // Compute `result = base + scaledIndex`.
4055 curBlock_->add(scaledIndex);
4056 MAdd* result = MAdd::NewWasm(alloc(), base, scaledIndex, targetWordType);
4057 if (!result) {
4058 return nullptr;
4060 curBlock_->add(result);
4061 return result;
4064 /********************************************** WasmGC: array helpers ***/
4066 // Given `arrayObject`, the address of a WasmArrayObject, generate MIR to
4067 // return the contents of the WasmArrayObject::numElements_ field.
4068 // Adds trap site info for the null check.
4069 [[nodiscard]] MDefinition* getWasmArrayObjectNumElements(
4070 MDefinition* arrayObject) {
4071 MOZ_ASSERT(arrayObject->type() == MIRType::WasmAnyRef);
4073 auto* numElements = MWasmLoadField::New(
4074 alloc(), arrayObject, WasmArrayObject::offsetOfNumElements(),
4075 MIRType::Int32, MWideningOp::None,
4076 AliasSet::Load(AliasSet::WasmArrayNumElements),
4077 mozilla::Some(getTrapSiteInfo()));
4078 if (!numElements) {
4079 return nullptr;
4081 curBlock_->add(numElements);
4083 return numElements;
4086 // Given `arrayObject`, the address of a WasmArrayObject, generate MIR to
4087 // return the contents of the WasmArrayObject::data_ field.
4088 [[nodiscard]] MDefinition* getWasmArrayObjectData(MDefinition* arrayObject) {
4089 MOZ_ASSERT(arrayObject->type() == MIRType::WasmAnyRef);
4091 auto* data = MWasmLoadField::New(
4092 alloc(), arrayObject, WasmArrayObject::offsetOfData(),
4093 TargetWordMIRType(), MWideningOp::None,
4094 AliasSet::Load(AliasSet::WasmArrayDataPointer),
4095 mozilla::Some(getTrapSiteInfo()));
4096 if (!data) {
4097 return nullptr;
4099 curBlock_->add(data);
4101 return data;
4104 // Given a JIT-time-known type index `typeIndex` and a run-time known number
4105 // of elements `numElements`, create MIR to call `Instance::arrayNew<true>`,
4106 // producing an array with the relevant type and size and initialized with
 4107   // `typeIndex`'s default value.
4108 [[nodiscard]] MDefinition* createDefaultInitializedArrayObject(
4109 uint32_t lineOrBytecode, uint32_t typeIndex, MDefinition* numElements) {
4110 // Get the type definition for the array as a whole.
4111 MDefinition* typeDefData = loadTypeDefInstanceData(typeIndex);
4112 if (!typeDefData) {
4113 return nullptr;
4116 // Create call:
4117 // arrayObject = Instance::arrayNew<true>(numElements, typeDefData)
4118 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated
4119 // by this call will trap.
4120 MDefinition* arrayObject;
4121 if (!emitInstanceCall2(lineOrBytecode, SASigArrayNew_true, numElements,
4122 typeDefData, &arrayObject)) {
4123 return nullptr;
4126 return arrayObject;
4129 [[nodiscard]] MDefinition* createUninitializedArrayObject(
4130 uint32_t lineOrBytecode, uint32_t typeIndex, MDefinition* numElements) {
4131 // Get the type definition for the array as a whole.
4132 MDefinition* typeDefData = loadTypeDefInstanceData(typeIndex);
4133 if (!typeDefData) {
4134 return nullptr;
4137 // Create call:
4138 // arrayObject = Instance::arrayNew<false>(numElements, typeDefData)
4139 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated
4140 // by this call will trap.
4141 MDefinition* arrayObject;
4142 if (!emitInstanceCall2(lineOrBytecode, SASigArrayNew_false, numElements,
4143 typeDefData, &arrayObject)) {
4144 return nullptr;
4147 return arrayObject;
4150 // This emits MIR to perform several actions common to array loads and
 4152   // stores. Given `arrayObject`, which points to a WasmArrayObject, and an
4152 // index value `index`, it:
4154 // * Generates a trap if the array pointer is null
4155 // * Gets the size of the array
4156 // * Emits a bounds check of `index` against the array size
4157 // * Retrieves the OOL object pointer from the array
 4158   // * Includes a check for null via the signal handler.
4160 // The returned value is for the OOL object pointer.
4161 [[nodiscard]] MDefinition* setupForArrayAccess(MDefinition* arrayObject,
4162 MDefinition* index) {
4163 MOZ_ASSERT(arrayObject->type() == MIRType::WasmAnyRef);
4164 MOZ_ASSERT(index->type() == MIRType::Int32);
 4166   // The null check is done in getWasmArrayObjectNumElements.
4168 // Get the size value for the array.
4169 MDefinition* numElements = getWasmArrayObjectNumElements(arrayObject);
4170 if (!numElements) {
4171 return nullptr;
4174 // Create a bounds check.
4175 auto* boundsCheck =
4176 MWasmBoundsCheck::New(alloc(), index, numElements, bytecodeOffset(),
4177 MWasmBoundsCheck::Target::Unknown);
4178 if (!boundsCheck) {
4179 return nullptr;
4181 curBlock_->add(boundsCheck);
4183 // Get the address of the first byte of the (OOL) data area.
4184 return getWasmArrayObjectData(arrayObject);
4187 [[nodiscard]] bool fillArray(uint32_t lineOrBytecode,
4188 const ArrayType& arrayType,
4189 MDefinition* arrayObject, MDefinition* index,
4190 MDefinition* numElements, MDefinition* val) {
4191 mozilla::DebugOnly<MIRType> valMIRType = val->type();
4192 FieldType valFieldType = arrayType.elementType_;
4193 MOZ_ASSERT(valFieldType.widenToValType().toMIRType() == valMIRType);
4195 uint32_t elemSize = valFieldType.size();
4196 MOZ_ASSERT(elemSize >= 1 && elemSize <= 16);
4198 // Make `arrayBase` point at the first byte of the (OOL) data area.
4199 MDefinition* arrayBase = getWasmArrayObjectData(arrayObject);
4200 if (!arrayBase) {
4201 return false;
4204 // We have:
4205 // arrayBase : TargetWord
4206 // index : Int32
4207 // numElements : Int32
4208 // val : <any FieldType>
4209 // $elemSize = arrayType.elementType_.size(); 1, 2, 4, 8 or 16
4211 // Generate MIR:
4212 // <in current block>
 4213   //     fillBase : TargetWord = arrayBase + index * elemSize
4214 // limit : TargetWord = fillBase + numElements * elemSize
4215 // if (limit == fillBase) goto after; // skip loop if trip count == 0
4216 // loop:
4217 // ptrPhi = phi(fillBase, ptrNext)
4218 // *ptrPhi = val
4219 // ptrNext = ptrPhi + $elemSize
4220 // if (ptrNext <u limit) goto loop;
4221 // after:
4223 // We construct the loop "manually" rather than using
4224 // FunctionCompiler::{startLoop,closeLoop} as the latter have awareness of
4225 // the wasm view of loops, whereas the loop we're building here is not a
4226 // wasm-level loop.
4227 // ==== Create the "loop" and "after" blocks ====
4228 MBasicBlock* loopBlock;
4229 if (!newBlock(curBlock_, &loopBlock, MBasicBlock::LOOP_HEADER)) {
4230 return false;
4232 MBasicBlock* afterBlock;
4233 if (!newBlock(loopBlock, &afterBlock)) {
4234 return false;
4237 // ==== Fill in the remainder of the block preceding the loop ====
4238 MDefinition* elemSizeDef = constantTargetWord(intptr_t(elemSize));
4239 if (!elemSizeDef) {
4240 return false;
4243 MDefinition* fillBase =
4244 computeBasePlusScaledIndex(arrayBase, elemSizeDef, index);
4245 if (!fillBase) {
4246 return false;
4248 MDefinition* limit =
4249 computeBasePlusScaledIndex(fillBase, elemSizeDef, numElements);
4250 if (!limit) {
4251 return false;
4254 // Use JSOp::StrictEq, not ::Eq, so that the comparison (and eventually
4255 // the entire initialisation loop) will be folded out in the case where
4256 // the number of elements is zero. See MCompare::tryFoldEqualOperands.
4257 MDefinition* limitEqualsBase = compare(
4258 limit, fillBase, JSOp::StrictEq,
4259 targetIs64Bit() ? MCompare::Compare_UInt64 : MCompare::Compare_UInt32);
4260 if (!limitEqualsBase) {
4261 return false;
4263 MTest* skipIfLimitEqualsBase =
4264 MTest::New(alloc(), limitEqualsBase, afterBlock, loopBlock);
4265 if (!skipIfLimitEqualsBase) {
4266 return false;
4268 curBlock_->end(skipIfLimitEqualsBase);
4269 if (!afterBlock->addPredecessor(alloc(), curBlock_)) {
4270 return false;
4273 // ==== Fill in the loop block as best we can ====
4274 curBlock_ = loopBlock;
4275 MPhi* ptrPhi = MPhi::New(alloc(), TargetWordMIRType());
4276 if (!ptrPhi) {
4277 return false;
4279 if (!ptrPhi->reserveLength(2)) {
4280 return false;
4282 ptrPhi->addInput(fillBase);
4283 curBlock_->addPhi(ptrPhi);
4284 curBlock_->setLoopDepth(loopDepth_ + 1);
4286 // Because we have the exact address to hand, use
4287 // `writeGcValueAtBasePlusOffset` rather than
4288 // `writeGcValueAtBasePlusScaledIndex` to do the store.
4289 if (!writeGcValueAtBasePlusOffset(
4290 lineOrBytecode, valFieldType, arrayObject,
4291 AliasSet::WasmArrayDataArea, val, ptrPhi, /*offset=*/0,
4292 /*needsTrapInfo=*/false, WasmPreBarrierKind::None)) {
4293 return false;
4296 auto* ptrNext =
4297 MAdd::NewWasm(alloc(), ptrPhi, elemSizeDef, TargetWordMIRType());
4298 if (!ptrNext) {
4299 return false;
4301 curBlock_->add(ptrNext);
4302 ptrPhi->addInput(ptrNext);
4304 MDefinition* ptrNextLtuLimit = compare(
4305 ptrNext, limit, JSOp::Lt,
4306 targetIs64Bit() ? MCompare::Compare_UInt64 : MCompare::Compare_UInt32);
4307 if (!ptrNextLtuLimit) {
4308 return false;
4310 auto* continueIfPtrNextLtuLimit =
4311 MTest::New(alloc(), ptrNextLtuLimit, loopBlock, afterBlock);
4312 if (!continueIfPtrNextLtuLimit) {
4313 return false;
4315 curBlock_->end(continueIfPtrNextLtuLimit);
4316 if (!loopBlock->addPredecessor(alloc(), loopBlock)) {
4317 return false;
4319 // ==== Loop block completed ====
4321 curBlock_ = afterBlock;
4322 return true;
4325 // This routine generates all MIR required for `array.new`. The returned
4326 // value is for the newly created array.
4327 [[nodiscard]] MDefinition* createArrayNewCallAndLoop(uint32_t lineOrBytecode,
4328 uint32_t typeIndex,
4329 MDefinition* numElements,
4330 MDefinition* fillValue) {
4331 const ArrayType& arrayType = (*moduleEnv_.types)[typeIndex].arrayType();
4333 // Create the array object, uninitialized.
4334 MDefinition* arrayObject =
4335 createUninitializedArrayObject(lineOrBytecode, typeIndex, numElements);
4336 if (!arrayObject) {
4337 return nullptr;
4340 // Optimisation opportunity: if the fill value is zero, maybe we should
4341 // likewise skip over the initialisation loop entirely (and, if the zero
4342 // value is visible at JIT time, the loop will be removed). For the
4343 // reftyped case, that would be a big win since each iteration requires a
4344 // call to the post-write barrier routine.
4346 if (!fillArray(lineOrBytecode, arrayType, arrayObject, constantI32(0),
4347 numElements, fillValue)) {
4348 return nullptr;
4351 return arrayObject;
4354 [[nodiscard]] bool createArrayFill(uint32_t lineOrBytecode,
4355 uint32_t typeIndex,
4356 MDefinition* arrayObject,
4357 MDefinition* index, MDefinition* val,
4358 MDefinition* numElements) {
4359 MOZ_ASSERT(arrayObject->type() == MIRType::WasmAnyRef);
4360 MOZ_ASSERT(index->type() == MIRType::Int32);
4361 MOZ_ASSERT(numElements->type() == MIRType::Int32);
4363 const ArrayType& arrayType = (*moduleEnv_.types)[typeIndex].arrayType();
 4365   // The null check is done in getWasmArrayObjectNumElements.
4367 // Get the array's actual size.
4368 MDefinition* actualNumElements = getWasmArrayObjectNumElements(arrayObject);
4369 if (!actualNumElements) {
4370 return false;
4373 // Create a bounds check.
4374 auto* boundsCheck = MWasmBoundsCheckRange32::New(
4375 alloc(), index, numElements, actualNumElements, bytecodeOffset());
4376 if (!boundsCheck) {
4377 return false;
4379 curBlock_->add(boundsCheck);
4381 return fillArray(lineOrBytecode, arrayType, arrayObject, index, numElements,
4382 val);
4385 /*********************************************** WasmGC: other helpers ***/
4387 // Generate MIR that causes a trap of kind `trapKind` if `arg` is zero.
4388 // Currently `arg` may only be a MIRType::Int32, but that requirement could
4389 // be relaxed if needed in future.
4390 [[nodiscard]] bool trapIfZero(wasm::Trap trapKind, MDefinition* arg) {
4391 MOZ_ASSERT(arg->type() == MIRType::Int32);
4393 MBasicBlock* trapBlock = nullptr;
4394 if (!newBlock(curBlock_, &trapBlock)) {
4395 return false;
4398 auto* trap = MWasmTrap::New(alloc(), trapKind, bytecodeOffset());
4399 if (!trap) {
4400 return false;
4402 trapBlock->end(trap);
4404 MBasicBlock* joinBlock = nullptr;
4405 if (!newBlock(curBlock_, &joinBlock)) {
4406 return false;
4409 auto* test = MTest::New(alloc(), arg, joinBlock, trapBlock);
4410 if (!test) {
4411 return false;
4413 curBlock_->end(test);
4414 curBlock_ = joinBlock;
4415 return true;
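  // Generate MIR that evaluates whether `ref` (of static type `sourceType`)
  // is a subtype of `destType`. Concrete destination types are checked
  // against the destination's supertype vector; abstract destination types
  // use the abstract-subtype check.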
4418 [[nodiscard]] MDefinition* isRefSubtypeOf(MDefinition* ref,
4419 RefType sourceType,
4420 RefType destType) {
4421 MInstruction* isSubTypeOf = nullptr;
4422 if (destType.isTypeRef()) {
4423 uint32_t typeIndex = moduleEnv_.types->indexOf(*destType.typeDef());
4424 MDefinition* superSTV = loadSuperTypeVector(typeIndex);
4425 isSubTypeOf = MWasmRefIsSubtypeOfConcrete::New(alloc(), ref, superSTV,
4426 sourceType, destType);
4427 } else {
4428 isSubTypeOf =
4429 MWasmRefIsSubtypeOfAbstract::New(alloc(), ref, sourceType, destType);
4431 MOZ_ASSERT(isSubTypeOf);
4433 curBlock_->add(isSubTypeOf);
4434 return isSubTypeOf;
4437 // Generate MIR that attempts to downcast `ref` to `castToTypeDef`. If the
4438 // downcast fails, we trap. If it succeeds, then `ref` can be assumed to
4439 // have a type that is a subtype of (or the same as) `castToTypeDef` after
4440 // this point.
4441 [[nodiscard]] bool refCast(MDefinition* ref, RefType sourceType,
4442 RefType destType) {
4443 MDefinition* success = isRefSubtypeOf(ref, sourceType, destType);
4444 if (!success) {
4445 return false;
4448 // Trap if `success` is zero. If it's nonzero, we have established that
4449 // `ref <: castToTypeDef`.
4450 return trapIfZero(wasm::Trap::BadCast, success);
4453 // Generate MIR that computes a boolean value indicating whether or not it
4454 // is possible to downcast `ref` to `destType`.
4455 [[nodiscard]] MDefinition* refTest(MDefinition* ref, RefType sourceType,
4456 RefType destType) {
4457 return isRefSubtypeOf(ref, sourceType, destType);
4460 // Generates MIR for br_on_cast and br_on_cast_fail.
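  // When `onSuccess` is true (br_on_cast) the branch is taken if `ref` is a
  // subtype of `destType`; when it is false (br_on_cast_fail) the branch is
  // taken if the cast would fail.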
4461 [[nodiscard]] bool brOnCastCommon(bool onSuccess, uint32_t labelRelativeDepth,
4462 RefType sourceType, RefType destType,
4463 const ResultType& labelType,
4464 const DefVector& values) {
4465 if (inDeadCode()) {
4466 return true;
4469 MBasicBlock* fallthroughBlock = nullptr;
4470 if (!newBlock(curBlock_, &fallthroughBlock)) {
4471 return false;
4474 // `values` are the values in the top block-value on the stack. Since the
4475 // argument to `br_on_cast{_fail}` is at the top of the stack, it is the
4476 // last element in `values`.
4478 // For both br_on_cast and br_on_cast_fail, the OpIter validation routines
4479 // ensure that `values` is non-empty (by rejecting the case
4480 // `labelType->length() < 1`) and that the last value in `values` is
4481 // reftyped.
4482 MOZ_RELEASE_ASSERT(values.length() > 0);
4483 MDefinition* ref = values.back();
4484 MOZ_ASSERT(ref->type() == MIRType::WasmAnyRef);
4486 MDefinition* success = isRefSubtypeOf(ref, sourceType, destType);
4487 if (!success) {
4488 return false;
4491 MTest* test;
4492 if (onSuccess) {
4493 test = MTest::New(alloc(), success, nullptr, fallthroughBlock);
4494 if (!test || !addControlFlowPatch(test, labelRelativeDepth,
4495 MTest::TrueBranchIndex)) {
4496 return false;
4498 } else {
4499 test = MTest::New(alloc(), success, fallthroughBlock, nullptr);
4500 if (!test || !addControlFlowPatch(test, labelRelativeDepth,
4501 MTest::FalseBranchIndex)) {
4502 return false;
4506 if (!pushDefs(values)) {
4507 return false;
4510 curBlock_->end(test);
4511 curBlock_ = fallthroughBlock;
4512 return true;
4515 [[nodiscard]] bool brOnNonStruct(const DefVector& values) {
4516 if (inDeadCode()) {
4517 return true;
4520 MBasicBlock* fallthroughBlock = nullptr;
4521 if (!newBlock(curBlock_, &fallthroughBlock)) {
4522 return false;
4525 MOZ_ASSERT(values.length() > 0);
4526 MOZ_ASSERT(values.back()->type() == MIRType::WasmAnyRef);
4528 MGoto* jump = MGoto::New(alloc(), fallthroughBlock);
4529 if (!jump) {
4530 return false;
4532 if (!pushDefs(values)) {
4533 return false;
4536 curBlock_->end(jump);
4537 curBlock_ = fallthroughBlock;
4538 return true;
4541 /************************************************************ DECODING ***/
4543 // AsmJS adds a line number to `callSiteLineNums` for certain operations that
4544 // are represented by a JS call, such as math builtins. We use these line
4545 // numbers when calling builtins. This method will read from
4546 // `callSiteLineNums` when we are using AsmJS, or else return the current
4547 // bytecode offset.
4549 // This method MUST be called from opcodes that AsmJS will emit a call site
4550 // line number for, or else the arrays will get out of sync. Other opcodes
4551 // must use `readBytecodeOffset` below.
4552 uint32_t readCallSiteLineOrBytecode() {
4553 if (!func_.callSiteLineNums.empty()) {
4554 return func_.callSiteLineNums[lastReadCallSite_++];
4556 return iter_.lastOpcodeOffset();
4559 // Return the current bytecode offset.
4560 uint32_t readBytecodeOffset() { return iter_.lastOpcodeOffset(); }
4562 TrapSiteInfo getTrapSiteInfo() {
4563 return TrapSiteInfo(wasm::BytecodeOffset(readBytecodeOffset()));
4566 #if DEBUG
4567 bool done() const { return iter_.done(); }
4568 #endif
4570 /*************************************************************************/
4571 private:
4572 [[nodiscard]] bool newBlock(MBasicBlock* pred, MBasicBlock** block,
4573 MBasicBlock::Kind kind = MBasicBlock::NORMAL) {
4574 *block = MBasicBlock::New(mirGraph(), info(), pred, kind);
4575 if (!*block) {
4576 return false;
4578 mirGraph().addBlock(*block);
4579 (*block)->setLoopDepth(loopDepth_);
4580 return true;
4583 [[nodiscard]] bool goToNewBlock(MBasicBlock* pred, MBasicBlock** block) {
4584 if (!newBlock(pred, block)) {
4585 return false;
4587 pred->end(MGoto::New(alloc(), *block));
4588 return true;
4591 [[nodiscard]] bool goToExistingBlock(MBasicBlock* prev, MBasicBlock* next) {
4592 MOZ_ASSERT(prev);
4593 MOZ_ASSERT(next);
4594 prev->end(MGoto::New(alloc(), next));
4595 return next->addPredecessor(alloc(), prev);
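  // Bind every control-flow patch recorded for the branch target at depth
  // `absolute`: create a join block, retarget each patched instruction to it,
  // fall through from the current block if it is still live, and continue
  // compilation in the join block with the joined values in `defs`.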
4598 [[nodiscard]] bool bindBranches(uint32_t absolute, DefVector* defs) {
4599 if (absolute >= blockPatches_.length() || blockPatches_[absolute].empty()) {
4600 return inDeadCode() || popPushedDefs(defs);
4603 ControlFlowPatchVector& patches = blockPatches_[absolute];
4604 MControlInstruction* ins = patches[0].ins;
4605 MBasicBlock* pred = ins->block();
4607 MBasicBlock* join = nullptr;
4608 if (!newBlock(pred, &join)) {
4609 return false;
4612 pred->mark();
4613 ins->replaceSuccessor(patches[0].index, join);
4615 for (size_t i = 1; i < patches.length(); i++) {
4616 ins = patches[i].ins;
4618 pred = ins->block();
4619 if (!pred->isMarked()) {
4620 if (!join->addPredecessor(alloc(), pred)) {
4621 return false;
4623 pred->mark();
4626 ins->replaceSuccessor(patches[i].index, join);
4629 MOZ_ASSERT_IF(curBlock_, !curBlock_->isMarked());
4630 for (uint32_t i = 0; i < join->numPredecessors(); i++) {
4631 join->getPredecessor(i)->unmark();
4634 if (curBlock_ && !goToExistingBlock(curBlock_, join)) {
4635 return false;
4638 curBlock_ = join;
4640 if (!popPushedDefs(defs)) {
4641 return false;
4644 patches.clear();
4645 return true;
4649 template <>
4650 MDefinition* FunctionCompiler::unary<MToFloat32>(MDefinition* op) {
4651 if (inDeadCode()) {
4652 return nullptr;
4654 auto* ins = MToFloat32::New(alloc(), op, mustPreserveNaN(op->type()));
4655 curBlock_->add(ins);
4656 return ins;
4659 template <>
4660 MDefinition* FunctionCompiler::unary<MWasmBuiltinTruncateToInt32>(
4661 MDefinition* op) {
4662 if (inDeadCode()) {
4663 return nullptr;
4665 auto* ins = MWasmBuiltinTruncateToInt32::New(alloc(), op, instancePointer_,
4666 bytecodeOffset());
4667 curBlock_->add(ins);
4668 return ins;
4671 template <>
4672 MDefinition* FunctionCompiler::unary<MNot>(MDefinition* op) {
4673 if (inDeadCode()) {
4674 return nullptr;
4676 auto* ins = MNot::NewInt32(alloc(), op);
4677 curBlock_->add(ins);
4678 return ins;
4681 template <>
4682 MDefinition* FunctionCompiler::unary<MAbs>(MDefinition* op, MIRType type) {
4683 if (inDeadCode()) {
4684 return nullptr;
4686 auto* ins = MAbs::NewWasm(alloc(), op, type);
4687 curBlock_->add(ins);
4688 return ins;
4691 } // end anonymous namespace
4693 static bool EmitI32Const(FunctionCompiler& f) {
4694 int32_t i32;
4695 if (!f.iter().readI32Const(&i32)) {
4696 return false;
4699 f.iter().setResult(f.constantI32(i32));
4700 return true;
4703 static bool EmitI64Const(FunctionCompiler& f) {
4704 int64_t i64;
4705 if (!f.iter().readI64Const(&i64)) {
4706 return false;
4709 f.iter().setResult(f.constantI64(i64));
4710 return true;
4713 static bool EmitF32Const(FunctionCompiler& f) {
4714 float f32;
4715 if (!f.iter().readF32Const(&f32)) {
4716 return false;
4719 f.iter().setResult(f.constantF32(f32));
4720 return true;
4723 static bool EmitF64Const(FunctionCompiler& f) {
4724 double f64;
4725 if (!f.iter().readF64Const(&f64)) {
4726 return false;
4729 f.iter().setResult(f.constantF64(f64));
4730 return true;
4733 static bool EmitBlock(FunctionCompiler& f) {
4734 ResultType params;
4735 return f.iter().readBlock(&params) && f.startBlock();
4738 static bool EmitLoop(FunctionCompiler& f) {
4739 ResultType params;
4740 if (!f.iter().readLoop(&params)) {
4741 return false;
4744 MBasicBlock* loopHeader;
4745 if (!f.startLoop(&loopHeader, params.length())) {
4746 return false;
4749 f.addInterruptCheck();
4751 f.iter().controlItem().setBlock(loopHeader);
4752 return true;
4755 static bool EmitIf(FunctionCompiler& f) {
4756 ResultType params;
4757 MDefinition* condition = nullptr;
4758 if (!f.iter().readIf(&params, &condition)) {
4759 return false;
4762 MBasicBlock* elseBlock;
4763 if (!f.branchAndStartThen(condition, &elseBlock)) {
4764 return false;
4767 f.iter().controlItem().setBlock(elseBlock);
4768 return true;
4771 static bool EmitElse(FunctionCompiler& f) {
4772 ResultType paramType;
4773 ResultType resultType;
4774 DefVector thenValues;
4775 if (!f.iter().readElse(&paramType, &resultType, &thenValues)) {
4776 return false;
4779 if (!f.pushDefs(thenValues)) {
4780 return false;
4783 Control& control = f.iter().controlItem();
4784 return f.switchToElse(control.block, &control.block);
4787 static bool EmitEnd(FunctionCompiler& f) {
4788 LabelKind kind;
4789 ResultType type;
4790 DefVector preJoinDefs;
4791 DefVector resultsForEmptyElse;
4792 if (!f.iter().readEnd(&kind, &type, &preJoinDefs, &resultsForEmptyElse)) {
4793 return false;
4796 Control& control = f.iter().controlItem();
4797 MBasicBlock* block = control.block;
4799 if (!f.pushDefs(preJoinDefs)) {
4800 return false;
 4803   // Every label case is responsible for popping the control item at the
 4804   // appropriate time for that label case.
4805 DefVector postJoinDefs;
4806 switch (kind) {
4807 case LabelKind::Body:
4808 if (!f.emitBodyDelegateThrowPad(control)) {
4809 return false;
4811 if (!f.finishBlock(&postJoinDefs)) {
4812 return false;
4814 if (!f.returnValues(postJoinDefs)) {
4815 return false;
4817 f.iter().popEnd();
4818 MOZ_ASSERT(f.iter().controlStackEmpty());
4819 return f.iter().endFunction(f.iter().end());
4820 case LabelKind::Block:
4821 if (!f.finishBlock(&postJoinDefs)) {
4822 return false;
4824 f.iter().popEnd();
4825 break;
4826 case LabelKind::Loop:
4827 if (!f.closeLoop(block, &postJoinDefs)) {
4828 return false;
4830 f.iter().popEnd();
4831 break;
4832 case LabelKind::Then: {
4833 // If we didn't see an Else, create a trivial else block so that we create
4834 // a diamond anyway, to preserve Ion invariants.
4835 if (!f.switchToElse(block, &block)) {
4836 return false;
4839 if (!f.pushDefs(resultsForEmptyElse)) {
4840 return false;
4843 if (!f.joinIfElse(block, &postJoinDefs)) {
4844 return false;
4846 f.iter().popEnd();
4847 break;
4849 case LabelKind::Else:
4850 if (!f.joinIfElse(block, &postJoinDefs)) {
4851 return false;
4853 f.iter().popEnd();
4854 break;
4855 case LabelKind::Try:
4856 case LabelKind::Catch:
4857 case LabelKind::CatchAll:
4858 if (!f.finishTryCatch(kind, control, &postJoinDefs)) {
4859 return false;
4861 f.iter().popEnd();
4862 break;
4865 MOZ_ASSERT_IF(!f.inDeadCode(), postJoinDefs.length() == type.length());
4866 f.iter().setResults(postJoinDefs.length(), postJoinDefs);
4868 return true;
4871 static bool EmitBr(FunctionCompiler& f) {
4872 uint32_t relativeDepth;
4873 ResultType type;
4874 DefVector values;
4875 if (!f.iter().readBr(&relativeDepth, &type, &values)) {
4876 return false;
4879 return f.br(relativeDepth, values);
4882 static bool EmitBrIf(FunctionCompiler& f) {
4883 uint32_t relativeDepth;
4884 ResultType type;
4885 DefVector values;
4886 MDefinition* condition;
4887 if (!f.iter().readBrIf(&relativeDepth, &type, &values, &condition)) {
4888 return false;
4891 return f.brIf(relativeDepth, values, condition);
4894 static bool EmitBrTable(FunctionCompiler& f) {
4895 Uint32Vector depths;
4896 uint32_t defaultDepth;
4897 ResultType branchValueType;
4898 DefVector branchValues;
4899 MDefinition* index;
4900 if (!f.iter().readBrTable(&depths, &defaultDepth, &branchValueType,
4901 &branchValues, &index)) {
4902 return false;
4905 // If all the targets are the same, or there are no targets, we can just
4906 // use a goto. This is not just an optimization: MaybeFoldConditionBlock
4907 // assumes that tables have more than one successor.
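  // For example, a br_table whose entries all equal the default depth is
  // lowered to the equivalent unconditional br.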
4908 bool allSameDepth = true;
4909 for (uint32_t depth : depths) {
4910 if (depth != defaultDepth) {
4911 allSameDepth = false;
4912 break;
4916 if (allSameDepth) {
4917 return f.br(defaultDepth, branchValues);
4920 return f.brTable(index, defaultDepth, depths, branchValues);
4923 static bool EmitReturn(FunctionCompiler& f) {
4924 DefVector values;
4925 if (!f.iter().readReturn(&values)) {
4926 return false;
4929 return f.returnValues(values);
4932 static bool EmitUnreachable(FunctionCompiler& f) {
4933 if (!f.iter().readUnreachable()) {
4934 return false;
4937 f.unreachableTrap();
4938 return true;
4941 static bool EmitTry(FunctionCompiler& f) {
4942 ResultType params;
4943 if (!f.iter().readTry(&params)) {
4944 return false;
4947 MBasicBlock* curBlock = nullptr;
4948 if (!f.startTry(&curBlock)) {
4949 return false;
4952 f.iter().controlItem().setBlock(curBlock);
4953 return true;
4956 static bool EmitCatch(FunctionCompiler& f) {
4957 LabelKind kind;
4958 uint32_t tagIndex;
4959 ResultType paramType, resultType;
4960 DefVector tryValues;
4961 if (!f.iter().readCatch(&kind, &tagIndex, &paramType, &resultType,
4962 &tryValues)) {
4963 return false;
 4966   // Push the results of the previous block, to properly join control flow
 4967   // after the try and after each handler, as well as potential control-flow
 4968   // patches from other instructions. This is similar to what is done for
 4969   // if-then-else control flow and for most other control-flow joins.
4970 if (!f.pushDefs(tryValues)) {
4971 return false;
4974 return f.switchToCatch(f.iter().controlItem(), kind, tagIndex);
4977 static bool EmitCatchAll(FunctionCompiler& f) {
4978 LabelKind kind;
4979 ResultType paramType, resultType;
4980 DefVector tryValues;
4981 if (!f.iter().readCatchAll(&kind, &paramType, &resultType, &tryValues)) {
4982 return false;
 4985   // Push the results of the previous block, to properly join control flow
 4986   // after the try and after each handler, as well as potential control-flow
 4987   // patches from other instructions.
4988 if (!f.pushDefs(tryValues)) {
4989 return false;
4992 return f.switchToCatch(f.iter().controlItem(), kind, CatchAllIndex);
4995 static bool EmitDelegate(FunctionCompiler& f) {
4996 uint32_t relativeDepth;
4997 ResultType resultType;
4998 DefVector tryValues;
4999 if (!f.iter().readDelegate(&relativeDepth, &resultType, &tryValues)) {
5000 return false;
5003 Control& control = f.iter().controlItem();
5004 MBasicBlock* block = control.block;
5006 // Unless the entire try-delegate is dead code, delegate any pad-patches from
5007 // this try to the next try-block above relativeDepth.
5008 if (block) {
5009 ControlInstructionVector& delegatePadPatches = control.tryPadPatches;
5010 if (!f.delegatePadPatches(delegatePadPatches, relativeDepth)) {
5011 return false;
5014 f.iter().popDelegate();
5016 // Push the results of the previous block, and join control flow with
 5017   // potential control-flow patches from other instructions in the try code.
5018 // This is similar to what is done for EmitEnd.
5019 if (!f.pushDefs(tryValues)) {
5020 return false;
5022 DefVector postJoinDefs;
5023 if (!f.finishBlock(&postJoinDefs)) {
5024 return false;
5026 MOZ_ASSERT_IF(!f.inDeadCode(), postJoinDefs.length() == resultType.length());
5027 f.iter().setResults(postJoinDefs.length(), postJoinDefs);
5029 return true;
5032 static bool EmitThrow(FunctionCompiler& f) {
5033 uint32_t tagIndex;
5034 DefVector argValues;
5035 if (!f.iter().readThrow(&tagIndex, &argValues)) {
5036 return false;
5039 return f.emitThrow(tagIndex, argValues);
5042 static bool EmitRethrow(FunctionCompiler& f) {
5043 uint32_t relativeDepth;
5044 if (!f.iter().readRethrow(&relativeDepth)) {
5045 return false;
5048 return f.emitRethrow(relativeDepth);
5051 static bool EmitCallArgs(FunctionCompiler& f, const FuncType& funcType,
5052 const DefVector& args, CallCompileState* call) {
5053 for (size_t i = 0, n = funcType.args().length(); i < n; ++i) {
5054 if (!f.mirGen().ensureBallast()) {
5055 return false;
5057 if (!f.passArg(args[i], funcType.args()[i], call)) {
5058 return false;
5062 ResultType resultType = ResultType::Vector(funcType.results());
5063 if (!f.passStackResultAreaCallArg(resultType, call)) {
5064 return false;
5067 return f.finishCall(call);
5070 static bool EmitCall(FunctionCompiler& f, bool asmJSFuncDef) {
5071 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
5073 uint32_t funcIndex;
5074 DefVector args;
5075 if (asmJSFuncDef) {
5076 if (!f.iter().readOldCallDirect(f.moduleEnv().numFuncImports, &funcIndex,
5077 &args)) {
5078 return false;
5080 } else {
5081 if (!f.iter().readCall(&funcIndex, &args)) {
5082 return false;
5086 if (f.inDeadCode()) {
5087 return true;
5090 const FuncType& funcType = *f.moduleEnv().funcs[funcIndex].type;
5092 CallCompileState call;
5093 if (!EmitCallArgs(f, funcType, args, &call)) {
5094 return false;
5097 DefVector results;
5098 if (f.moduleEnv().funcIsImport(funcIndex)) {
5099 uint32_t instanceDataOffset =
5100 f.moduleEnv().offsetOfFuncImportInstanceData(funcIndex);
5101 if (!f.callImport(instanceDataOffset, lineOrBytecode, call, funcType,
5102 &results)) {
5103 return false;
5105 } else {
5106 if (!f.callDirect(funcType, funcIndex, lineOrBytecode, call, &results)) {
5107 return false;
5111 f.iter().setResults(results.length(), results);
5112 return true;
5115 static bool EmitCallIndirect(FunctionCompiler& f, bool oldStyle) {
5116 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
5118 uint32_t funcTypeIndex;
5119 uint32_t tableIndex;
5120 MDefinition* callee;
5121 DefVector args;
5122 if (oldStyle) {
5123 tableIndex = 0;
5124 if (!f.iter().readOldCallIndirect(&funcTypeIndex, &callee, &args)) {
5125 return false;
5127 } else {
5128 if (!f.iter().readCallIndirect(&funcTypeIndex, &tableIndex, &callee,
5129 &args)) {
5130 return false;
5134 if (f.inDeadCode()) {
5135 return true;
5138 const FuncType& funcType = (*f.moduleEnv().types)[funcTypeIndex].funcType();
5140 CallCompileState call;
5141 if (!EmitCallArgs(f, funcType, args, &call)) {
5142 return false;
5145 DefVector results;
5146 if (!f.callIndirect(funcTypeIndex, tableIndex, callee, lineOrBytecode, call,
5147 &results)) {
5148 return false;
5151 f.iter().setResults(results.length(), results);
5152 return true;
5155 #ifdef ENABLE_WASM_TAIL_CALLS
5156 static bool EmitReturnCall(FunctionCompiler& f) {
5157 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
5159 uint32_t funcIndex;
5160 DefVector args;
5161 if (!f.iter().readReturnCall(&funcIndex, &args)) {
5162 return false;
5165 if (f.inDeadCode()) {
5166 return true;
5169 const FuncType& funcType = *f.moduleEnv().funcs[funcIndex].type;
5171 CallCompileState call;
5172 f.markReturnCall(&call);
5173 if (!EmitCallArgs(f, funcType, args, &call)) {
5174 return false;
5177 DefVector results;
5178 if (f.moduleEnv().funcIsImport(funcIndex)) {
5179 uint32_t globalDataOffset =
5180 f.moduleEnv().offsetOfFuncImportInstanceData(funcIndex);
5181 if (!f.returnCallImport(globalDataOffset, lineOrBytecode, call, funcType,
5182 &results)) {
5183 return false;
5185 } else {
5186 if (!f.returnCallDirect(funcType, funcIndex, lineOrBytecode, call,
5187 &results)) {
5188 return false;
5191 return true;
5194 static bool EmitReturnCallIndirect(FunctionCompiler& f) {
5195 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
5197 uint32_t funcTypeIndex;
5198 uint32_t tableIndex;
5199 MDefinition* callee;
5200 DefVector args;
5201 if (!f.iter().readReturnCallIndirect(&funcTypeIndex, &tableIndex, &callee,
5202 &args)) {
5203 return false;
5206 if (f.inDeadCode()) {
5207 return true;
5210 const FuncType& funcType = (*f.moduleEnv().types)[funcTypeIndex].funcType();
5212 CallCompileState call;
5213 f.markReturnCall(&call);
5214 if (!EmitCallArgs(f, funcType, args, &call)) {
5215 return false;
5218 DefVector results;
5219 return f.returnCallIndirect(funcTypeIndex, tableIndex, callee, lineOrBytecode,
5220 call, &results);
5222 #endif
5224 #if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_FUNCTION_REFERENCES)
5225 static bool EmitReturnCallRef(FunctionCompiler& f) {
5226 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
5228 const FuncType* funcType;
5229 MDefinition* callee;
5230 DefVector args;
5232 if (!f.iter().readReturnCallRef(&funcType, &callee, &args)) {
5233 return false;
5236 if (f.inDeadCode()) {
5237 return true;
5240 CallCompileState call;
5241 f.markReturnCall(&call);
5242 if (!EmitCallArgs(f, *funcType, args, &call)) {
5243 return false;
5246 DefVector results;
5247 return f.returnCallRef(*funcType, callee, lineOrBytecode, call, &results);
5249 #endif
5251 static bool EmitGetLocal(FunctionCompiler& f) {
5252 uint32_t id;
5253 if (!f.iter().readGetLocal(f.locals(), &id)) {
5254 return false;
5257 f.iter().setResult(f.getLocalDef(id));
5258 return true;
5261 static bool EmitSetLocal(FunctionCompiler& f) {
5262 uint32_t id;
5263 MDefinition* value;
5264 if (!f.iter().readSetLocal(f.locals(), &id, &value)) {
5265 return false;
5268 f.assign(id, value);
5269 return true;
5272 static bool EmitTeeLocal(FunctionCompiler& f) {
5273 uint32_t id;
5274 MDefinition* value;
5275 if (!f.iter().readTeeLocal(f.locals(), &id, &value)) {
5276 return false;
5279 f.assign(id, value);
5280 return true;
5283 static bool EmitGetGlobal(FunctionCompiler& f) {
5284 uint32_t id;
5285 if (!f.iter().readGetGlobal(&id)) {
5286 return false;
5289 const GlobalDesc& global = f.moduleEnv().globals[id];
5290 if (!global.isConstant()) {
5291 f.iter().setResult(f.loadGlobalVar(global.offset(), !global.isMutable(),
5292 global.isIndirect(),
5293 global.type().toMIRType()));
5294 return true;
5297 LitVal value = global.constantValue();
5299 MDefinition* result;
5300 switch (value.type().kind()) {
5301 case ValType::I32:
5302 result = f.constantI32(int32_t(value.i32()));
5303 break;
5304 case ValType::I64:
5305 result = f.constantI64(int64_t(value.i64()));
5306 break;
5307 case ValType::F32:
5308 result = f.constantF32(value.f32());
5309 break;
5310 case ValType::F64:
5311 result = f.constantF64(value.f64());
5312 break;
5313 case ValType::V128:
5314 #ifdef ENABLE_WASM_SIMD
5315 result = f.constantV128(value.v128());
5316 break;
5317 #else
5318 return f.iter().fail("Ion has no SIMD support yet");
5319 #endif
5320 case ValType::Ref:
5321 MOZ_ASSERT(value.ref().isNull());
5322 result = f.constantNullRef();
5323 break;
5324 default:
5325 MOZ_CRASH("unexpected type in EmitGetGlobal");
5328 f.iter().setResult(result);
5329 return true;
5332 static bool EmitSetGlobal(FunctionCompiler& f) {
5333 uint32_t bytecodeOffset = f.readBytecodeOffset();
5335 uint32_t id;
5336 MDefinition* value;
5337 if (!f.iter().readSetGlobal(&id, &value)) {
5338 return false;
5341 const GlobalDesc& global = f.moduleEnv().globals[id];
5342 MOZ_ASSERT(global.isMutable());
5343 return f.storeGlobalVar(bytecodeOffset, global.offset(), global.isIndirect(),
5344 value);
5347 static bool EmitTeeGlobal(FunctionCompiler& f) {
5348 uint32_t bytecodeOffset = f.readBytecodeOffset();
5350 uint32_t id;
5351 MDefinition* value;
5352 if (!f.iter().readTeeGlobal(&id, &value)) {
5353 return false;
5356 const GlobalDesc& global = f.moduleEnv().globals[id];
5357 MOZ_ASSERT(global.isMutable());
5359 return f.storeGlobalVar(bytecodeOffset, global.offset(), global.isIndirect(),
5360 value);
5363 template <typename MIRClass>
5364 static bool EmitUnary(FunctionCompiler& f, ValType operandType) {
5365 MDefinition* input;
5366 if (!f.iter().readUnary(operandType, &input)) {
5367 return false;
5370 f.iter().setResult(f.unary<MIRClass>(input));
5371 return true;
5374 template <typename MIRClass>
5375 static bool EmitConversion(FunctionCompiler& f, ValType operandType,
5376 ValType resultType) {
5377 MDefinition* input;
5378 if (!f.iter().readConversion(operandType, resultType, &input)) {
5379 return false;
5382 f.iter().setResult(f.unary<MIRClass>(input));
5383 return true;
5386 template <typename MIRClass>
5387 static bool EmitUnaryWithType(FunctionCompiler& f, ValType operandType,
5388 MIRType mirType) {
5389 MDefinition* input;
5390 if (!f.iter().readUnary(operandType, &input)) {
5391 return false;
5394 f.iter().setResult(f.unary<MIRClass>(input, mirType));
5395 return true;
5398 template <typename MIRClass>
5399 static bool EmitConversionWithType(FunctionCompiler& f, ValType operandType,
5400 ValType resultType, MIRType mirType) {
5401 MDefinition* input;
5402 if (!f.iter().readConversion(operandType, resultType, &input)) {
5403 return false;
5406 f.iter().setResult(f.unary<MIRClass>(input, mirType));
5407 return true;
5410 static bool EmitTruncate(FunctionCompiler& f, ValType operandType,
5411 ValType resultType, bool isUnsigned,
5412 bool isSaturating) {
5413 MDefinition* input = nullptr;
5414 if (!f.iter().readConversion(operandType, resultType, &input)) {
5415 return false;
5418 TruncFlags flags = 0;
5419 if (isUnsigned) {
5420 flags |= TRUNC_UNSIGNED;
5422 if (isSaturating) {
5423 flags |= TRUNC_SATURATING;
5425 if (resultType == ValType::I32) {
5426 if (f.moduleEnv().isAsmJS()) {
5427 if (input && (input->type() == MIRType::Double ||
5428 input->type() == MIRType::Float32)) {
5429 f.iter().setResult(f.unary<MWasmBuiltinTruncateToInt32>(input));
5430 } else {
5431 f.iter().setResult(f.unary<MTruncateToInt32>(input));
5433 } else {
5434 f.iter().setResult(f.truncate<MWasmTruncateToInt32>(input, flags));
5436 } else {
5437 MOZ_ASSERT(resultType == ValType::I64);
5438 MOZ_ASSERT(!f.moduleEnv().isAsmJS());
5439 #if defined(JS_CODEGEN_ARM)
5440 f.iter().setResult(f.truncateWithInstance(input, flags));
5441 #else
5442 f.iter().setResult(f.truncate<MWasmTruncateToInt64>(input, flags));
5443 #endif
5445 return true;
5448 static bool EmitSignExtend(FunctionCompiler& f, uint32_t srcSize,
5449 uint32_t targetSize) {
5450 MDefinition* input;
5451 ValType type = targetSize == 4 ? ValType::I32 : ValType::I64;
5452 if (!f.iter().readConversion(type, type, &input)) {
5453 return false;
5456 f.iter().setResult(f.signExtend(input, srcSize, targetSize));
5457 return true;
5460 static bool EmitExtendI32(FunctionCompiler& f, bool isUnsigned) {
5461 MDefinition* input;
5462 if (!f.iter().readConversion(ValType::I32, ValType::I64, &input)) {
5463 return false;
5466 f.iter().setResult(f.extendI32(input, isUnsigned));
5467 return true;
5470 static bool EmitConvertI64ToFloatingPoint(FunctionCompiler& f,
5471 ValType resultType, MIRType mirType,
5472 bool isUnsigned) {
5473 MDefinition* input;
5474 if (!f.iter().readConversion(ValType::I64, resultType, &input)) {
5475 return false;
5478 f.iter().setResult(f.convertI64ToFloatingPoint(input, mirType, isUnsigned));
5479 return true;
5482 static bool EmitReinterpret(FunctionCompiler& f, ValType resultType,
5483 ValType operandType, MIRType mirType) {
5484 MDefinition* input;
5485 if (!f.iter().readConversion(operandType, resultType, &input)) {
5486 return false;
5489 f.iter().setResult(f.unary<MWasmReinterpret>(input, mirType));
5490 return true;
5493 static bool EmitAdd(FunctionCompiler& f, ValType type, MIRType mirType) {
5494 MDefinition* lhs;
5495 MDefinition* rhs;
5496 if (!f.iter().readBinary(type, &lhs, &rhs)) {
5497 return false;
5500 f.iter().setResult(f.add(lhs, rhs, mirType));
5501 return true;
5504 static bool EmitSub(FunctionCompiler& f, ValType type, MIRType mirType) {
5505 MDefinition* lhs;
5506 MDefinition* rhs;
5507 if (!f.iter().readBinary(type, &lhs, &rhs)) {
5508 return false;
5511 f.iter().setResult(f.sub(lhs, rhs, mirType));
5512 return true;
5515 static bool EmitRotate(FunctionCompiler& f, ValType type, bool isLeftRotation) {
5516 MDefinition* lhs;
5517 MDefinition* rhs;
5518 if (!f.iter().readBinary(type, &lhs, &rhs)) {
5519 return false;
5522 MDefinition* result = f.rotate(lhs, rhs, type.toMIRType(), isLeftRotation);
5523 f.iter().setResult(result);
5524 return true;
5527 static bool EmitBitNot(FunctionCompiler& f, ValType operandType) {
5528 MDefinition* input;
5529 if (!f.iter().readUnary(operandType, &input)) {
5530 return false;
5533 f.iter().setResult(f.bitnot(input));
5534 return true;
5537 static bool EmitBitwiseAndOrXor(FunctionCompiler& f, ValType operandType,
5538 MIRType mirType,
5539 MWasmBinaryBitwise::SubOpcode subOpc) {
5540 MDefinition* lhs;
5541 MDefinition* rhs;
5542 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5543 return false;
5546 f.iter().setResult(f.binary<MWasmBinaryBitwise>(lhs, rhs, mirType, subOpc));
5547 return true;
5550 template <typename MIRClass>
5551 static bool EmitShift(FunctionCompiler& f, ValType operandType,
5552 MIRType mirType) {
5553 MDefinition* lhs;
5554 MDefinition* rhs;
5555 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5556 return false;
5559 f.iter().setResult(f.binary<MIRClass>(lhs, rhs, mirType));
5560 return true;
5563 static bool EmitUrsh(FunctionCompiler& f, ValType operandType,
5564 MIRType mirType) {
5565 MDefinition* lhs;
5566 MDefinition* rhs;
5567 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5568 return false;
5571 f.iter().setResult(f.ursh(lhs, rhs, mirType));
5572 return true;
5575 static bool EmitMul(FunctionCompiler& f, ValType operandType, MIRType mirType) {
5576 MDefinition* lhs;
5577 MDefinition* rhs;
5578 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5579 return false;
5582 f.iter().setResult(
5583 f.mul(lhs, rhs, mirType,
5584 mirType == MIRType::Int32 ? MMul::Integer : MMul::Normal));
5585 return true;
5588 static bool EmitDiv(FunctionCompiler& f, ValType operandType, MIRType mirType,
5589 bool isUnsigned) {
5590 MDefinition* lhs;
5591 MDefinition* rhs;
5592 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5593 return false;
5596 f.iter().setResult(f.div(lhs, rhs, mirType, isUnsigned));
5597 return true;
5600 static bool EmitRem(FunctionCompiler& f, ValType operandType, MIRType mirType,
5601 bool isUnsigned) {
5602 MDefinition* lhs;
5603 MDefinition* rhs;
5604 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5605 return false;
5608 f.iter().setResult(f.mod(lhs, rhs, mirType, isUnsigned));
5609 return true;
5612 static bool EmitMinMax(FunctionCompiler& f, ValType operandType,
5613 MIRType mirType, bool isMax) {
5614 MDefinition* lhs;
5615 MDefinition* rhs;
5616 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5617 return false;
5620 f.iter().setResult(f.minMax(lhs, rhs, mirType, isMax));
5621 return true;
5624 static bool EmitCopySign(FunctionCompiler& f, ValType operandType) {
5625 MDefinition* lhs;
5626 MDefinition* rhs;
5627 if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
5628 return false;
5631 f.iter().setResult(f.binary<MCopySign>(lhs, rhs, operandType.toMIRType()));
5632 return true;
5635 static bool EmitComparison(FunctionCompiler& f, ValType operandType,
5636 JSOp compareOp, MCompare::CompareType compareType) {
5637 MDefinition* lhs;
5638 MDefinition* rhs;
5639 if (!f.iter().readComparison(operandType, &lhs, &rhs)) {
5640 return false;
5643 f.iter().setResult(f.compare(lhs, rhs, compareOp, compareType));
5644 return true;
5647 static bool EmitSelect(FunctionCompiler& f, bool typed) {
5648 StackType type;
5649 MDefinition* trueValue;
5650 MDefinition* falseValue;
5651 MDefinition* condition;
5652 if (!f.iter().readSelect(typed, &type, &trueValue, &falseValue, &condition)) {
5653 return false;
5656 f.iter().setResult(f.select(trueValue, falseValue, condition));
5657 return true;
5660 static bool EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType) {
5661 LinearMemoryAddress<MDefinition*> addr;
5662 if (!f.iter().readLoad(type, Scalar::byteSize(viewType), &addr)) {
5663 return false;
5666 MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
5667 f.bytecodeIfNotAsmJS(),
5668 f.hugeMemoryEnabled(addr.memoryIndex));
5669 auto* ins = f.load(addr.base, &access, type);
5670 if (!f.inDeadCode() && !ins) {
5671 return false;
5674 f.iter().setResult(ins);
5675 return true;
5678 static bool EmitStore(FunctionCompiler& f, ValType resultType,
5679 Scalar::Type viewType) {
5680 LinearMemoryAddress<MDefinition*> addr;
5681 MDefinition* value;
5682 if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr,
5683 &value)) {
5684 return false;
5687 MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
5688 f.bytecodeIfNotAsmJS(),
5689 f.hugeMemoryEnabled(addr.memoryIndex));
5691 f.store(addr.base, &access, value);
5692 return true;
5695 static bool EmitTeeStore(FunctionCompiler& f, ValType resultType,
5696 Scalar::Type viewType) {
5697 LinearMemoryAddress<MDefinition*> addr;
5698 MDefinition* value;
5699 if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr,
5700 &value)) {
5701 return false;
5704 MOZ_ASSERT(f.isMem32(addr.memoryIndex)); // asm.js opcode
5705 MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
5706 f.bytecodeIfNotAsmJS(),
5707 f.hugeMemoryEnabled(addr.memoryIndex));
5709 f.store(addr.base, &access, value);
5710 return true;
5713 static bool EmitTeeStoreWithCoercion(FunctionCompiler& f, ValType resultType,
5714 Scalar::Type viewType) {
5715 LinearMemoryAddress<MDefinition*> addr;
5716 MDefinition* value;
5717 if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr,
5718 &value)) {
5719 return false;
5722 if (resultType == ValType::F32 && viewType == Scalar::Float64) {
5723 value = f.unary<MToDouble>(value);
5724 } else if (resultType == ValType::F64 && viewType == Scalar::Float32) {
5725 value = f.unary<MToFloat32>(value);
5726 } else {
5727 MOZ_CRASH("unexpected coerced store");
5730 MOZ_ASSERT(f.isMem32(addr.memoryIndex)); // asm.js opcode
5731 MemoryAccessDesc access(addr.memoryIndex, viewType, addr.align, addr.offset,
5732 f.bytecodeIfNotAsmJS(),
5733 f.hugeMemoryEnabled(addr.memoryIndex));
5735 f.store(addr.base, &access, value);
5736 return true;
5739 static bool TryInlineUnaryBuiltin(FunctionCompiler& f, SymbolicAddress callee,
5740 MDefinition* input) {
5741 if (!input) {
5742 return false;
5745 MOZ_ASSERT(IsFloatingPointType(input->type()));
5747 RoundingMode mode;
5748 if (!IsRoundingFunction(callee, &mode)) {
5749 return false;
5752 if (!MNearbyInt::HasAssemblerSupport(mode)) {
5753 return false;
5756 f.iter().setResult(f.nearbyInt(input, mode));
5757 return true;
5760 static bool EmitUnaryMathBuiltinCall(FunctionCompiler& f,
5761 const SymbolicAddressSignature& callee) {
5762 MOZ_ASSERT(callee.numArgs == 1);
5764 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
5766 MDefinition* input;
5767 if (!f.iter().readUnary(ValType::fromMIRType(callee.argTypes[0]), &input)) {
5768 return false;
5771 if (TryInlineUnaryBuiltin(f, callee.identity, input)) {
5772 return true;
5775 CallCompileState call;
5776 if (!f.passArg(input, callee.argTypes[0], &call)) {
5777 return false;
5780 if (!f.finishCall(&call)) {
5781 return false;
5784 MDefinition* def;
5785 if (!f.builtinCall(callee, lineOrBytecode, call, &def)) {
5786 return false;
5789 f.iter().setResult(def);
5790 return true;
5793 static bool EmitBinaryMathBuiltinCall(FunctionCompiler& f,
5794 const SymbolicAddressSignature& callee) {
5795 MOZ_ASSERT(callee.numArgs == 2);
5796 MOZ_ASSERT(callee.argTypes[0] == callee.argTypes[1]);
5798 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
5800 CallCompileState call;
5801 MDefinition* lhs;
5802 MDefinition* rhs;
5803 // This call to readBinary assumes both operands have the same type.
5804 if (!f.iter().readBinary(ValType::fromMIRType(callee.argTypes[0]), &lhs,
5805 &rhs)) {
5806 return false;
5809 if (!f.passArg(lhs, callee.argTypes[0], &call)) {
5810 return false;
5813 if (!f.passArg(rhs, callee.argTypes[1], &call)) {
5814 return false;
5817 if (!f.finishCall(&call)) {
5818 return false;
5821 MDefinition* def;
5822 if (!f.builtinCall(callee, lineOrBytecode, call, &def)) {
5823 return false;
5826 f.iter().setResult(def);
5827 return true;
5830 static bool EmitMemoryGrow(FunctionCompiler& f) {
5831 uint32_t bytecodeOffset = f.readBytecodeOffset();
5833 MDefinition* delta;
5834 uint32_t memoryIndex;
5835 if (!f.iter().readMemoryGrow(&memoryIndex, &delta)) {
5836 return false;
5839 if (f.inDeadCode()) {
5840 return true;
5843 MDefinition* memoryIndexValue = f.constantI32(int32_t(memoryIndex));
5844 if (!memoryIndexValue) {
5845 return false;
5848 const SymbolicAddressSignature& callee =
5849 f.isMem32(memoryIndex) ? SASigMemoryGrowM32 : SASigMemoryGrowM64;
5851 MDefinition* ret;
5852 if (!f.emitInstanceCall2(bytecodeOffset, callee, delta, memoryIndexValue,
5853 &ret)) {
5854 return false;
5857 f.iter().setResult(ret);
5858 return true;
5861 static bool EmitMemorySize(FunctionCompiler& f) {
5862 uint32_t bytecodeOffset = f.readBytecodeOffset();
5864 uint32_t memoryIndex;
5865 if (!f.iter().readMemorySize(&memoryIndex)) {
5866 return false;
5869 if (f.inDeadCode()) {
5870 return true;
5873 MDefinition* memoryIndexValue = f.constantI32(int32_t(memoryIndex));
5874 if (!memoryIndexValue) {
5875 return false;
5878 const SymbolicAddressSignature& callee =
5879 f.isMem32(memoryIndex) ? SASigMemorySizeM32 : SASigMemorySizeM64;
5881 MDefinition* ret;
5882 if (!f.emitInstanceCall1(bytecodeOffset, callee, memoryIndexValue, &ret)) {
5883 return false;
5886 f.iter().setResult(ret);
5887 return true;
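// The atomic accesses below mirror the plain load/store paths, but attach the
// required synchronization to the MemoryAccessDesc: Full for cmpxchg, RMW and
// xchg, Load/Store for atomic loads and stores.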
5890 static bool EmitAtomicCmpXchg(FunctionCompiler& f, ValType type,
5891 Scalar::Type viewType) {
5892 LinearMemoryAddress<MDefinition*> addr;
5893 MDefinition* oldValue;
5894 MDefinition* newValue;
5895 if (!f.iter().readAtomicCmpXchg(&addr, type, byteSize(viewType), &oldValue,
5896 &newValue)) {
5897 return false;
5900 MemoryAccessDesc access(
5901 addr.memoryIndex, viewType, addr.align, addr.offset, f.bytecodeOffset(),
5902 f.hugeMemoryEnabled(addr.memoryIndex), Synchronization::Full());
5903 auto* ins =
5904 f.atomicCompareExchangeHeap(addr.base, &access, type, oldValue, newValue);
5905 if (!f.inDeadCode() && !ins) {
5906 return false;
5909 f.iter().setResult(ins);
5910 return true;
5913 static bool EmitAtomicLoad(FunctionCompiler& f, ValType type,
5914 Scalar::Type viewType) {
5915 LinearMemoryAddress<MDefinition*> addr;
5916 if (!f.iter().readAtomicLoad(&addr, type, byteSize(viewType))) {
5917 return false;
5920 MemoryAccessDesc access(
5921 addr.memoryIndex, viewType, addr.align, addr.offset, f.bytecodeOffset(),
5922 f.hugeMemoryEnabled(addr.memoryIndex), Synchronization::Load());
5923 auto* ins = f.load(addr.base, &access, type);
5924 if (!f.inDeadCode() && !ins) {
5925 return false;
5928 f.iter().setResult(ins);
5929 return true;
5932 static bool EmitAtomicRMW(FunctionCompiler& f, ValType type,
5933 Scalar::Type viewType, jit::AtomicOp op) {
5934 LinearMemoryAddress<MDefinition*> addr;
5935 MDefinition* value;
5936 if (!f.iter().readAtomicRMW(&addr, type, byteSize(viewType), &value)) {
5937 return false;
5940 MemoryAccessDesc access(
5941 addr.memoryIndex, viewType, addr.align, addr.offset, f.bytecodeOffset(),
5942 f.hugeMemoryEnabled(addr.memoryIndex), Synchronization::Full());
5943 auto* ins = f.atomicBinopHeap(op, addr.base, &access, type, value);
5944 if (!f.inDeadCode() && !ins) {
5945 return false;
5948 f.iter().setResult(ins);
5949 return true;
5952 static bool EmitAtomicStore(FunctionCompiler& f, ValType type,
5953 Scalar::Type viewType) {
5954 LinearMemoryAddress<MDefinition*> addr;
5955 MDefinition* value;
5956 if (!f.iter().readAtomicStore(&addr, type, byteSize(viewType), &value)) {
5957 return false;
5960 MemoryAccessDesc access(
5961 addr.memoryIndex, viewType, addr.align, addr.offset, f.bytecodeOffset(),
5962 f.hugeMemoryEnabled(addr.memoryIndex), Synchronization::Store());
5963 f.store(addr.base, &access, value);
5964 return true;
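// memory.atomic.wait{32,64}: the effective address is computed up front from
// the linear-memory address, then the wait is performed by an instance call;
// the callee is chosen by value type (i32/i64) and by whether the memory uses
// a 32- or 64-bit index.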
5967 static bool EmitWait(FunctionCompiler& f, ValType type, uint32_t byteSize) {
5968 MOZ_ASSERT(type == ValType::I32 || type == ValType::I64);
5969 MOZ_ASSERT(type.size() == byteSize);
5971 uint32_t bytecodeOffset = f.readBytecodeOffset();
5973 LinearMemoryAddress<MDefinition*> addr;
5974 MDefinition* expected;
5975 MDefinition* timeout;
5976 if (!f.iter().readWait(&addr, type, byteSize, &expected, &timeout)) {
5977 return false;
5980 if (f.inDeadCode()) {
5981 return true;
5984 MemoryAccessDesc access(addr.memoryIndex,
5985 type == ValType::I32 ? Scalar::Int32 : Scalar::Int64,
5986 addr.align, addr.offset, f.bytecodeOffset(),
5987 f.hugeMemoryEnabled(addr.memoryIndex));
5988 MDefinition* ptr = f.computeEffectiveAddress(addr.base, &access);
5989 if (!ptr) {
5990 return false;
5993 MDefinition* memoryIndex = f.constantI32(int32_t(addr.memoryIndex));
5994 if (!memoryIndex) {
5995 return false;
5998 const SymbolicAddressSignature& callee =
5999 f.isMem32(addr.memoryIndex)
6000 ? (type == ValType::I32 ? SASigWaitI32M32 : SASigWaitI64M32)
6001 : (type == ValType::I32 ? SASigWaitI32M64 : SASigWaitI64M64);
6003 MDefinition* ret;
6004 if (!f.emitInstanceCall4(bytecodeOffset, callee, ptr, expected, timeout,
6005 memoryIndex, &ret)) {
6006 return false;
6009 f.iter().setResult(ret);
6010 return true;
6013 static bool EmitFence(FunctionCompiler& f) {
6014 if (!f.iter().readFence()) {
6015 return false;
6018 f.fence();
6019 return true;
6022 static bool EmitWake(FunctionCompiler& f) {
6023 uint32_t bytecodeOffset = f.readBytecodeOffset();
6025 LinearMemoryAddress<MDefinition*> addr;
6026 MDefinition* count;
6027 if (!f.iter().readWake(&addr, &count)) {
6028 return false;
6031 if (f.inDeadCode()) {
6032 return true;
6035 MemoryAccessDesc access(addr.memoryIndex, Scalar::Int32, addr.align,
6036 addr.offset, f.bytecodeOffset(),
6037 f.hugeMemoryEnabled(addr.memoryIndex));
6038 MDefinition* ptr = f.computeEffectiveAddress(addr.base, &access);
6039 if (!ptr) {
6040 return false;
6043 MDefinition* memoryIndex = f.constantI32(int32_t(addr.memoryIndex));
6044 if (!memoryIndex) {
6045 return false;
6048 const SymbolicAddressSignature& callee =
6049 f.isMem32(addr.memoryIndex) ? SASigWakeM32 : SASigWakeM64;
6051 MDefinition* ret;
6052 if (!f.emitInstanceCall3(bytecodeOffset, callee, ptr, count, memoryIndex,
6053 &ret)) {
6054 return false;
6057 f.iter().setResult(ret);
6058 return true;
6061 static bool EmitAtomicXchg(FunctionCompiler& f, ValType type,
6062 Scalar::Type viewType) {
6063 LinearMemoryAddress<MDefinition*> addr;
6064 MDefinition* value;
6065 if (!f.iter().readAtomicRMW(&addr, type, byteSize(viewType), &value)) {
6066 return false;
6069 MemoryAccessDesc access(
6070 addr.memoryIndex, viewType, addr.align, addr.offset, f.bytecodeOffset(),
6071 f.hugeMemoryEnabled(addr.memoryIndex), Synchronization::Full());
6072 MDefinition* ins = f.atomicExchangeHeap(addr.base, &access, type, value);
6073 if (!f.inDeadCode() && !ins) {
6074 return false;
6077 f.iter().setResult(ins);
6078 return true;
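// memory.copy via an instance call: same-memory copies select a callee by
// sharedness and index size, while cross-memory copies use the generic
// SASigMemCopyAny entry point, with 32-bit addresses zero-extended to 64 bits.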
6081 static bool EmitMemCopyCall(FunctionCompiler& f, uint32_t dstMemIndex,
6082 uint32_t srcMemIndex, MDefinition* dst,
6083 MDefinition* src, MDefinition* len) {
6084 uint32_t bytecodeOffset = f.readBytecodeOffset();
6086 if (dstMemIndex == srcMemIndex) {
6087 const SymbolicAddressSignature& callee =
6088 (f.moduleEnv().usesSharedMemory(dstMemIndex)
6089 ? (f.isMem32(dstMemIndex) ? SASigMemCopySharedM32
6090 : SASigMemCopySharedM64)
6091 : (f.isMem32(dstMemIndex) ? SASigMemCopyM32 : SASigMemCopyM64));
6092 MDefinition* memoryBase = f.memoryBase(dstMemIndex);
6093 if (!memoryBase) {
6094 return false;
6096 return f.emitInstanceCall4(bytecodeOffset, callee, dst, src, len,
6097 memoryBase);
6100 IndexType dstIndexType = f.moduleEnv().memories[dstMemIndex].indexType();
6101 IndexType srcIndexType = f.moduleEnv().memories[srcMemIndex].indexType();
6103 if (dstIndexType == IndexType::I32) {
6104 dst = f.extendI32(dst, /*isUnsigned=*/true);
6105 if (!dst) {
6106 return false;
6109 if (srcIndexType == IndexType::I32) {
6110 src = f.extendI32(src, /*isUnsigned=*/true);
6111 if (!src) {
6112 return false;
6115 if (dstIndexType == IndexType::I32 || srcIndexType == IndexType::I32) {
6116 len = f.extendI32(len, /*isUnsigned=*/true);
6117 if (!len) {
6118 return false;
6122 MDefinition* dstMemIndexValue = f.constantI32(int32_t(dstMemIndex));
6123 if (!dstMemIndexValue) {
6124 return false;
6127 MDefinition* srcMemIndexValue = f.constantI32(int32_t(srcMemIndex));
6128 if (!srcMemIndexValue) {
6129 return false;
6132 return f.emitInstanceCall5(bytecodeOffset, SASigMemCopyAny, dst, src, len,
6133 dstMemIndexValue, srcMemIndexValue);
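// Fully unrolled memory.copy for small constant lengths: every source byte is
// loaded first, widest transfers first, and then stored back starting from the
// high end, so an out-of-bounds access traps before the destination is
// modified.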
6136 static bool EmitMemCopyInline(FunctionCompiler& f, uint32_t memoryIndex,
6137 MDefinition* dst, MDefinition* src,
6138 uint32_t length) {
6139 MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryCopyLength);
6142 // Compute the number of copies we will need to do at each transfer width.
6142 size_t remainder = length;
6143 #ifdef ENABLE_WASM_SIMD
6144 size_t numCopies16 = 0;
6145 if (MacroAssembler::SupportsFastUnalignedFPAccesses()) {
6146 numCopies16 = remainder / sizeof(V128);
6147 remainder %= sizeof(V128);
6149 #endif
6150 #ifdef JS_64BIT
6151 size_t numCopies8 = remainder / sizeof(uint64_t);
6152 remainder %= sizeof(uint64_t);
6153 #endif
6154 size_t numCopies4 = remainder / sizeof(uint32_t);
6155 remainder %= sizeof(uint32_t);
6156 size_t numCopies2 = remainder / sizeof(uint16_t);
6157 remainder %= sizeof(uint16_t);
6158 size_t numCopies1 = remainder;
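// Illustrative example: on a 64-bit target, a 13-byte copy decomposes into
// numCopies8 = 1, numCopies4 = 1, numCopies2 = 0 and numCopies1 = 1, i.e. one
// 8-byte, one 4-byte and one 1-byte transfer.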
6160 // Load all source bytes from low to high using the widest transfer width we
6161 // can for the system. We will trap without writing anything if any source
6162 // byte is out-of-bounds.
6163 size_t offset = 0;
6164 DefVector loadedValues;
6166 #ifdef ENABLE_WASM_SIMD
6167 for (uint32_t i = 0; i < numCopies16; i++) {
6168 MemoryAccessDesc access(memoryIndex, Scalar::Simd128, 1, offset,
6169 f.bytecodeOffset(),
6170 f.hugeMemoryEnabled(memoryIndex));
6171 auto* load = f.load(src, &access, ValType::V128);
6172 if (!load || !loadedValues.append(load)) {
6173 return false;
6176 offset += sizeof(V128);
6178 #endif
6180 #ifdef JS_64BIT
6181 for (uint32_t i = 0; i < numCopies8; i++) {
6182 MemoryAccessDesc access(memoryIndex, Scalar::Int64, 1, offset,
6183 f.bytecodeOffset(),
6184 f.hugeMemoryEnabled(memoryIndex));
6185 auto* load = f.load(src, &access, ValType::I64);
6186 if (!load || !loadedValues.append(load)) {
6187 return false;
6190 offset += sizeof(uint64_t);
6192 #endif
6194 for (uint32_t i = 0; i < numCopies4; i++) {
6195 MemoryAccessDesc access(memoryIndex, Scalar::Uint32, 1, offset,
6196 f.bytecodeOffset(),
6197 f.hugeMemoryEnabled(memoryIndex));
6198 auto* load = f.load(src, &access, ValType::I32);
6199 if (!load || !loadedValues.append(load)) {
6200 return false;
6203 offset += sizeof(uint32_t);
6206 if (numCopies2) {
6207 MemoryAccessDesc access(memoryIndex, Scalar::Uint16, 1, offset,
6208 f.bytecodeOffset(),
6209 f.hugeMemoryEnabled(memoryIndex));
6210 auto* load = f.load(src, &access, ValType::I32);
6211 if (!load || !loadedValues.append(load)) {
6212 return false;
6215 offset += sizeof(uint16_t);
6218 if (numCopies1) {
6219 MemoryAccessDesc access(memoryIndex, Scalar::Uint8, 1, offset,
6220 f.bytecodeOffset(),
6221 f.hugeMemoryEnabled(memoryIndex));
6222 auto* load = f.load(src, &access, ValType::I32);
6223 if (!load || !loadedValues.append(load)) {
6224 return false;
6228 // Store all source bytes to the destination from high to low. We will trap
6229 // without writing anything on the first store if any dest byte is
6230 // out-of-bounds.
6231 offset = length;
6233 if (numCopies1) {
6234 offset -= sizeof(uint8_t);
6236 MemoryAccessDesc access(memoryIndex, Scalar::Uint8, 1, offset,
6237 f.bytecodeOffset(),
6238 f.hugeMemoryEnabled(memoryIndex));
6239 auto* value = loadedValues.popCopy();
6240 f.store(dst, &access, value);
6243 if (numCopies2) {
6244 offset -= sizeof(uint16_t);
6246 MemoryAccessDesc access(memoryIndex, Scalar::Uint16, 1, offset,
6247 f.bytecodeOffset(),
6248 f.hugeMemoryEnabled(memoryIndex));
6249 auto* value = loadedValues.popCopy();
6250 f.store(dst, &access, value);
6253 for (uint32_t i = 0; i < numCopies4; i++) {
6254 offset -= sizeof(uint32_t);
6256 MemoryAccessDesc access(memoryIndex, Scalar::Uint32, 1, offset,
6257 f.bytecodeOffset(),
6258 f.hugeMemoryEnabled(memoryIndex));
6259 auto* value = loadedValues.popCopy();
6260 f.store(dst, &access, value);
6263 #ifdef JS_64BIT
6264 for (uint32_t i = 0; i < numCopies8; i++) {
6265 offset -= sizeof(uint64_t);
6267 MemoryAccessDesc access(memoryIndex, Scalar::Int64, 1, offset,
6268 f.bytecodeOffset(),
6269 f.hugeMemoryEnabled(memoryIndex));
6270 auto* value = loadedValues.popCopy();
6271 f.store(dst, &access, value);
6273 #endif
6275 #ifdef ENABLE_WASM_SIMD
6276 for (uint32_t i = 0; i < numCopies16; i++) {
6277 offset -= sizeof(V128);
6279 MemoryAccessDesc access(memoryIndex, Scalar::Simd128, 1, offset,
6280 f.bytecodeOffset(),
6281 f.hugeMemoryEnabled(memoryIndex));
6282 auto* value = loadedValues.popCopy();
6283 f.store(dst, &access, value);
6285 #endif
6287 return true;
6290 static bool EmitMemCopy(FunctionCompiler& f) {
6291 MDefinition *dst, *src, *len;
6292 uint32_t dstMemIndex;
6293 uint32_t srcMemIndex;
6294 if (!f.iter().readMemOrTableCopy(true, &dstMemIndex, &dst, &srcMemIndex, &src,
6295 &len)) {
6296 return false;
6299 if (f.inDeadCode()) {
6300 return true;
6303 if (dstMemIndex == srcMemIndex && len->isConstant()) {
6304 uint64_t length = f.isMem32(dstMemIndex) ? len->toConstant()->toInt32()
6305 : len->toConstant()->toInt64();
6306 static_assert(MaxInlineMemoryCopyLength <= UINT32_MAX);
6307 if (length != 0 && length <= MaxInlineMemoryCopyLength) {
6308 return EmitMemCopyInline(f, dstMemIndex, dst, src, uint32_t(length));
6312 return EmitMemCopyCall(f, dstMemIndex, srcMemIndex, dst, src, len);
6315 static bool EmitTableCopy(FunctionCompiler& f) {
6316 MDefinition *dst, *src, *len;
6317 uint32_t dstTableIndex;
6318 uint32_t srcTableIndex;
6319 if (!f.iter().readMemOrTableCopy(false, &dstTableIndex, &dst, &srcTableIndex,
6320 &src, &len)) {
6321 return false;
6324 if (f.inDeadCode()) {
6325 return true;
6328 uint32_t bytecodeOffset = f.readBytecodeOffset();
6329 MDefinition* dti = f.constantI32(int32_t(dstTableIndex));
6330 MDefinition* sti = f.constantI32(int32_t(srcTableIndex));
6332 return f.emitInstanceCall5(bytecodeOffset, SASigTableCopy, dst, src, len, dti,
6333 sti);
6336 static bool EmitDataOrElemDrop(FunctionCompiler& f, bool isData) {
6337 uint32_t segIndexVal = 0;
6338 if (!f.iter().readDataOrElemDrop(isData, &segIndexVal)) {
6339 return false;
6342 if (f.inDeadCode()) {
6343 return true;
6346 uint32_t bytecodeOffset = f.readBytecodeOffset();
6348 MDefinition* segIndex = f.constantI32(int32_t(segIndexVal));
6350 const SymbolicAddressSignature& callee =
6351 isData ? SASigDataDrop : SASigElemDrop;
6352 return f.emitInstanceCall1(bytecodeOffset, callee, segIndex);
6355 static bool EmitMemFillCall(FunctionCompiler& f, uint32_t memoryIndex,
6356 MDefinition* start, MDefinition* val,
6357 MDefinition* len) {
6358 MDefinition* memoryBase = f.memoryBase(memoryIndex);
6360 uint32_t bytecodeOffset = f.readBytecodeOffset();
6361 const SymbolicAddressSignature& callee =
6362 (f.moduleEnv().usesSharedMemory(memoryIndex)
6363 ? (f.isMem32(memoryIndex) ? SASigMemFillSharedM32
6364 : SASigMemFillSharedM64)
6365 : (f.isMem32(memoryIndex) ? SASigMemFillM32 : SASigMemFillM64));
6366 return f.emitInstanceCall4(bytecodeOffset, callee, start, val, len,
6367 memoryBase);
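// Fully unrolled memory.fill for small constant lengths and a constant fill
// value: the byte is splatted to wider constants as needed and stored from the
// high end of the region down to the low end.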
6370 static bool EmitMemFillInline(FunctionCompiler& f, uint32_t memoryIndex,
6371 MDefinition* start, MDefinition* val,
6372 uint32_t length) {
6373 MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryFillLength);
6374 uint32_t value = val->toConstant()->toInt32();
6376 // Compute the number of stores we will need to do at each transfer width.
6377 size_t remainder = length;
6378 #ifdef ENABLE_WASM_SIMD
6379 size_t numCopies16 = 0;
6380 if (MacroAssembler::SupportsFastUnalignedFPAccesses()) {
6381 numCopies16 = remainder / sizeof(V128);
6382 remainder %= sizeof(V128);
6384 #endif
6385 #ifdef JS_64BIT
6386 size_t numCopies8 = remainder / sizeof(uint64_t);
6387 remainder %= sizeof(uint64_t);
6388 #endif
6389 size_t numCopies4 = remainder / sizeof(uint32_t);
6390 remainder %= sizeof(uint32_t);
6391 size_t numCopies2 = remainder / sizeof(uint16_t);
6392 remainder %= sizeof(uint16_t);
6393 size_t numCopies1 = remainder;
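// Illustrative example, assuming SplatByteToUInt replicates the low byte
// across the wider integer (so SplatByteToUInt<uint32_t>(0xAB, 4) is
// 0xABABABAB): filling 7 bytes with 0xAB on a 64-bit target uses one 4-byte
// store of 0xABABABAB, one 2-byte store of 0xABAB and one 1-byte store.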
6395 // Generate splatted definitions for wider fills as needed
6396 #ifdef ENABLE_WASM_SIMD
6397 MDefinition* val16 = numCopies16 ? f.constantV128(V128(value)) : nullptr;
6398 #endif
6399 #ifdef JS_64BIT
6400 MDefinition* val8 =
6401 numCopies8 ? f.constantI64(int64_t(SplatByteToUInt<uint64_t>(value, 8)))
6402 : nullptr;
6403 #endif
6404 MDefinition* val4 =
6405 numCopies4 ? f.constantI32(int32_t(SplatByteToUInt<uint32_t>(value, 4)))
6406 : nullptr;
6407 MDefinition* val2 =
6408 numCopies2 ? f.constantI32(int32_t(SplatByteToUInt<uint32_t>(value, 2)))
6409 : nullptr;
6411 // Store the fill value to the destination from high to low. We will trap
6412 // without writing anything on the first store if any dest byte is
6413 // out-of-bounds.
6414 size_t offset = length;
6416 if (numCopies1) {
6417 offset -= sizeof(uint8_t);
6419 MemoryAccessDesc access(memoryIndex, Scalar::Uint8, 1, offset,
6420 f.bytecodeOffset(),
6421 f.hugeMemoryEnabled(memoryIndex));
6422 f.store(start, &access, val);
6425 if (numCopies2) {
6426 offset -= sizeof(uint16_t);
6428 MemoryAccessDesc access(memoryIndex, Scalar::Uint16, 1, offset,
6429 f.bytecodeOffset(),
6430 f.hugeMemoryEnabled(memoryIndex));
6431 f.store(start, &access, val2);
6434 for (uint32_t i = 0; i < numCopies4; i++) {
6435 offset -= sizeof(uint32_t);
6437 MemoryAccessDesc access(memoryIndex, Scalar::Uint32, 1, offset,
6438 f.bytecodeOffset(),
6439 f.hugeMemoryEnabled(memoryIndex));
6440 f.store(start, &access, val4);
6443 #ifdef JS_64BIT
6444 for (uint32_t i = 0; i < numCopies8; i++) {
6445 offset -= sizeof(uint64_t);
6447 MemoryAccessDesc access(memoryIndex, Scalar::Int64, 1, offset,
6448 f.bytecodeOffset(),
6449 f.hugeMemoryEnabled(memoryIndex));
6450 f.store(start, &access, val8);
6452 #endif
6454 #ifdef ENABLE_WASM_SIMD
6455 for (uint32_t i = 0; i < numCopies16; i++) {
6456 offset -= sizeof(V128);
6458 MemoryAccessDesc access(memoryIndex, Scalar::Simd128, 1, offset,
6459 f.bytecodeOffset(),
6460 f.hugeMemoryEnabled(memoryIndex));
6461 f.store(start, &access, val16);
6463 #endif
6465 return true;
6468 static bool EmitMemFill(FunctionCompiler& f) {
6469 uint32_t memoryIndex;
6470 MDefinition *start, *val, *len;
6471 if (!f.iter().readMemFill(&memoryIndex, &start, &val, &len)) {
6472 return false;
6475 if (f.inDeadCode()) {
6476 return true;
6479 if (len->isConstant() && val->isConstant()) {
6480 uint64_t length = f.isMem32(memoryIndex) ? len->toConstant()->toInt32()
6481 : len->toConstant()->toInt64();
6482 static_assert(MaxInlineMemoryFillLength <= UINT32_MAX);
6483 if (length != 0 && length <= MaxInlineMemoryFillLength) {
6484 return EmitMemFillInline(f, memoryIndex, start, val, uint32_t(length));
6488 return EmitMemFillCall(f, memoryIndex, start, val, len);
6491 static bool EmitMemOrTableInit(FunctionCompiler& f, bool isMem) {
6492 uint32_t segIndexVal = 0, dstMemOrTableIndex = 0;
6493 MDefinition *dstOff, *srcOff, *len;
6494 if (!f.iter().readMemOrTableInit(isMem, &segIndexVal, &dstMemOrTableIndex,
6495 &dstOff, &srcOff, &len)) {
6496 return false;
6499 if (f.inDeadCode()) {
6500 return true;
6503 uint32_t bytecodeOffset = f.readBytecodeOffset();
6504 const SymbolicAddressSignature& callee =
6505 isMem
6506 ? (f.isMem32(dstMemOrTableIndex) ? SASigMemInitM32 : SASigMemInitM64)
6507 : SASigTableInit;
6509 MDefinition* segIndex = f.constantI32(int32_t(segIndexVal));
6510 if (!segIndex) {
6511 return false;
6514 MDefinition* dti = f.constantI32(int32_t(dstMemOrTableIndex));
6515 if (!dti) {
6516 return false;
6519 return f.emitInstanceCall5(bytecodeOffset, callee, dstOff, srcOff, len,
6520 segIndex, dti);
6523 // Note, table.{get,grow,set} on table(funcref) are currently rejected by the
6524 // verifier.
6526 static bool EmitTableFill(FunctionCompiler& f) {
6527 uint32_t tableIndex;
6528 MDefinition *start, *val, *len;
6529 if (!f.iter().readTableFill(&tableIndex, &start, &val, &len)) {
6530 return false;
6533 if (f.inDeadCode()) {
6534 return true;
6537 uint32_t bytecodeOffset = f.readBytecodeOffset();
6539 MDefinition* tableIndexArg = f.constantI32(int32_t(tableIndex));
6540 if (!tableIndexArg) {
6541 return false;
6544 return f.emitInstanceCall4(bytecodeOffset, SASigTableFill, start, val, len,
6545 tableIndexArg);
6548 #if ENABLE_WASM_MEMORY_CONTROL
6549 static bool EmitMemDiscard(FunctionCompiler& f) {
6550 uint32_t memoryIndex;
6551 MDefinition *start, *len;
6552 if (!f.iter().readMemDiscard(&memoryIndex, &start, &len)) {
6553 return false;
6556 if (f.inDeadCode()) {
6557 return true;
6560 uint32_t bytecodeOffset = f.readBytecodeOffset();
6562 MDefinition* memoryBase = f.memoryBase(memoryIndex);
6563 bool isMem32 = f.isMem32(memoryIndex);
6565 const SymbolicAddressSignature& callee =
6566 (f.moduleEnv().usesSharedMemory(memoryIndex)
6567 ? (isMem32 ? SASigMemDiscardSharedM32 : SASigMemDiscardSharedM64)
6568 : (isMem32 ? SASigMemDiscardM32 : SASigMemDiscardM64));
6569 return f.emitInstanceCall3(bytecodeOffset, callee, start, len, memoryBase);
6571 #endif
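// table.get: tables whose elements are represented as anyref are read inline
// via tableGetAnyRef; other representations go through the SASigTableGet
// instance call.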
6573 static bool EmitTableGet(FunctionCompiler& f) {
6574 uint32_t tableIndex;
6575 MDefinition* index;
6576 if (!f.iter().readTableGet(&tableIndex, &index)) {
6577 return false;
6580 if (f.inDeadCode()) {
6581 return true;
6584 const TableDesc& table = f.moduleEnv().tables[tableIndex];
6585 if (table.elemType.tableRepr() == TableRepr::Ref) {
6586 MDefinition* ret = f.tableGetAnyRef(tableIndex, index);
6587 if (!ret) {
6588 return false;
6590 f.iter().setResult(ret);
6591 return true;
6594 uint32_t bytecodeOffset = f.readBytecodeOffset();
6596 MDefinition* tableIndexArg = f.constantI32(int32_t(tableIndex));
6597 if (!tableIndexArg) {
6598 return false;
6601 // The return value here is either null, denoting an error, or a short-lived
6602 // pointer to a location containing a possibly-null ref.
6603 MDefinition* ret;
6604 if (!f.emitInstanceCall2(bytecodeOffset, SASigTableGet, index, tableIndexArg,
6605 &ret)) {
6606 return false;
6609 f.iter().setResult(ret);
6610 return true;
6613 static bool EmitTableGrow(FunctionCompiler& f) {
6614 uint32_t tableIndex;
6615 MDefinition* initValue;
6616 MDefinition* delta;
6617 if (!f.iter().readTableGrow(&tableIndex, &initValue, &delta)) {
6618 return false;
6621 if (f.inDeadCode()) {
6622 return true;
6625 uint32_t bytecodeOffset = f.readBytecodeOffset();
6627 MDefinition* tableIndexArg = f.constantI32(int32_t(tableIndex));
6628 if (!tableIndexArg) {
6629 return false;
6632 MDefinition* ret;
6633 if (!f.emitInstanceCall3(bytecodeOffset, SASigTableGrow, initValue, delta,
6634 tableIndexArg, &ret)) {
6635 return false;
6638 f.iter().setResult(ret);
6639 return true;
6642 static bool EmitTableSet(FunctionCompiler& f) {
6643 uint32_t tableIndex;
6644 MDefinition* index;
6645 MDefinition* value;
6646 if (!f.iter().readTableSet(&tableIndex, &index, &value)) {
6647 return false;
6650 if (f.inDeadCode()) {
6651 return true;
6654 uint32_t bytecodeOffset = f.readBytecodeOffset();
6656 const TableDesc& table = f.moduleEnv().tables[tableIndex];
6657 if (table.elemType.tableRepr() == TableRepr::Ref) {
6658 return f.tableSetAnyRef(tableIndex, index, value, bytecodeOffset);
6661 MDefinition* tableIndexArg = f.constantI32(int32_t(tableIndex));
6662 if (!tableIndexArg) {
6663 return false;
6666 return f.emitInstanceCall3(bytecodeOffset, SASigTableSet, index, value,
6667 tableIndexArg);
6670 static bool EmitTableSize(FunctionCompiler& f) {
6671 uint32_t tableIndex;
6672 if (!f.iter().readTableSize(&tableIndex)) {
6673 return false;
6676 if (f.inDeadCode()) {
6677 return true;
6680 MDefinition* length = f.loadTableLength(tableIndex);
6681 if (!length) {
6682 return false;
6685 f.iter().setResult(length);
6686 return true;
6689 static bool EmitRefFunc(FunctionCompiler& f) {
6690 uint32_t funcIndex;
6691 if (!f.iter().readRefFunc(&funcIndex)) {
6692 return false;
6695 if (f.inDeadCode()) {
6696 return true;
6699 uint32_t bytecodeOffset = f.readBytecodeOffset();
6701 MDefinition* funcIndexArg = f.constantI32(int32_t(funcIndex));
6702 if (!funcIndexArg) {
6703 return false;
6706 // The return value here is either null, denoting an error, or a short-lived
6707 // pointer to a location containing a possibly-null ref.
6708 MDefinition* ret;
6709 if (!f.emitInstanceCall1(bytecodeOffset, SASigRefFunc, funcIndexArg, &ret)) {
6710 return false;
6713 f.iter().setResult(ret);
6714 return true;
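// ref.null and ref.is_null: the null reference is a constant, and ref.is_null
// lowers to a WasmAnyRef equality compare against that constant.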
6717 static bool EmitRefNull(FunctionCompiler& f) {
6718 RefType type;
6719 if (!f.iter().readRefNull(&type)) {
6720 return false;
6723 if (f.inDeadCode()) {
6724 return true;
6727 MDefinition* nullVal = f.constantNullRef();
6728 if (!nullVal) {
6729 return false;
6731 f.iter().setResult(nullVal);
6732 return true;
6735 static bool EmitRefIsNull(FunctionCompiler& f) {
6736 MDefinition* input;
6737 if (!f.iter().readRefIsNull(&input)) {
6738 return false;
6741 if (f.inDeadCode()) {
6742 return true;
6745 MDefinition* nullVal = f.constantNullRef();
6746 if (!nullVal) {
6747 return false;
6749 f.iter().setResult(
6750 f.compare(input, nullVal, JSOp::Eq, MCompare::Compare_WasmAnyRef));
6751 return true;
6754 #ifdef ENABLE_WASM_SIMD
6755 static bool EmitConstSimd128(FunctionCompiler& f) {
6756 V128 v128;
6757 if (!f.iter().readV128Const(&v128)) {
6758 return false;
6761 f.iter().setResult(f.constantV128(v128));
6762 return true;
6765 static bool EmitBinarySimd128(FunctionCompiler& f, bool commutative,
6766 SimdOp op) {
6767 MDefinition* lhs;
6768 MDefinition* rhs;
6769 if (!f.iter().readBinary(ValType::V128, &lhs, &rhs)) {
6770 return false;
6773 f.iter().setResult(f.binarySimd128(lhs, rhs, commutative, op));
6774 return true;
6777 static bool EmitTernarySimd128(FunctionCompiler& f, wasm::SimdOp op) {
6778 MDefinition* v0;
6779 MDefinition* v1;
6780 MDefinition* v2;
6781 if (!f.iter().readTernary(ValType::V128, &v0, &v1, &v2)) {
6782 return false;
6785 f.iter().setResult(f.ternarySimd128(v0, v1, v2, op));
6786 return true;
6789 static bool EmitShiftSimd128(FunctionCompiler& f, SimdOp op) {
6790 MDefinition* lhs;
6791 MDefinition* rhs;
6792 if (!f.iter().readVectorShift(&lhs, &rhs)) {
6793 return false;
6796 f.iter().setResult(f.shiftSimd128(lhs, rhs, op));
6797 return true;
6800 static bool EmitSplatSimd128(FunctionCompiler& f, ValType inType, SimdOp op) {
6801 MDefinition* src;
6802 if (!f.iter().readConversion(inType, ValType::V128, &src)) {
6803 return false;
6806 f.iter().setResult(f.scalarToSimd128(src, op));
6807 return true;
6810 static bool EmitUnarySimd128(FunctionCompiler& f, SimdOp op) {
6811 MDefinition* src;
6812 if (!f.iter().readUnary(ValType::V128, &src)) {
6813 return false;
6816 f.iter().setResult(f.unarySimd128(src, op));
6817 return true;
6820 static bool EmitReduceSimd128(FunctionCompiler& f, SimdOp op) {
6821 MDefinition* src;
6822 if (!f.iter().readConversion(ValType::V128, ValType::I32, &src)) {
6823 return false;
6826 f.iter().setResult(f.reduceSimd128(src, op, ValType::I32));
6827 return true;
6830 static bool EmitExtractLaneSimd128(FunctionCompiler& f, ValType outType,
6831 uint32_t laneLimit, SimdOp op) {
6832 uint32_t laneIndex;
6833 MDefinition* src;
6834 if (!f.iter().readExtractLane(outType, laneLimit, &laneIndex, &src)) {
6835 return false;
6838 f.iter().setResult(f.reduceSimd128(src, op, outType, laneIndex));
6839 return true;
6842 static bool EmitReplaceLaneSimd128(FunctionCompiler& f, ValType laneType,
6843 uint32_t laneLimit, SimdOp op) {
6844 uint32_t laneIndex;
6845 MDefinition* lhs;
6846 MDefinition* rhs;
6847 if (!f.iter().readReplaceLane(laneType, laneLimit, &laneIndex, &lhs, &rhs)) {
6848 return false;
6851 f.iter().setResult(f.replaceLaneSimd128(lhs, rhs, laneIndex, op));
6852 return true;
6855 static bool EmitShuffleSimd128(FunctionCompiler& f) {
6856 MDefinition* v1;
6857 MDefinition* v2;
6858 V128 control;
6859 if (!f.iter().readVectorShuffle(&v1, &v2, &control)) {
6860 return false;
6863 f.iter().setResult(f.shuffleSimd128(v1, v2, control));
6864 return true;
6867 static bool EmitLoadSplatSimd128(FunctionCompiler& f, Scalar::Type viewType,
6868 wasm::SimdOp splatOp) {
6869 LinearMemoryAddress<MDefinition*> addr;
6870 if (!f.iter().readLoadSplat(Scalar::byteSize(viewType), &addr)) {
6871 return false;
6874 f.iter().setResult(f.loadSplatSimd128(viewType, addr, splatOp));
6875 return true;
6878 static bool EmitLoadExtendSimd128(FunctionCompiler& f, wasm::SimdOp op) {
6879 LinearMemoryAddress<MDefinition*> addr;
6880 if (!f.iter().readLoadExtend(&addr)) {
6881 return false;
6884 f.iter().setResult(f.loadExtendSimd128(addr, op));
6885 return true;
6888 static bool EmitLoadZeroSimd128(FunctionCompiler& f, Scalar::Type viewType,
6889 size_t numBytes) {
6890 LinearMemoryAddress<MDefinition*> addr;
6891 if (!f.iter().readLoadSplat(numBytes, &addr)) {
6892 return false;
6895 f.iter().setResult(f.loadZeroSimd128(viewType, numBytes, addr));
6896 return true;
6899 static bool EmitLoadLaneSimd128(FunctionCompiler& f, uint32_t laneSize) {
6900 uint32_t laneIndex;
6901 MDefinition* src;
6902 LinearMemoryAddress<MDefinition*> addr;
6903 if (!f.iter().readLoadLane(laneSize, &addr, &laneIndex, &src)) {
6904 return false;
6907 f.iter().setResult(f.loadLaneSimd128(laneSize, addr, laneIndex, src));
6908 return true;
6911 static bool EmitStoreLaneSimd128(FunctionCompiler& f, uint32_t laneSize) {
6912 uint32_t laneIndex;
6913 MDefinition* src;
6914 LinearMemoryAddress<MDefinition*> addr;
6915 if (!f.iter().readStoreLane(laneSize, &addr, &laneIndex, &src)) {
6916 return false;
6919 f.storeLaneSimd128(laneSize, addr, laneIndex, src);
6920 return true;
6923 #endif // ENABLE_WASM_SIMD
6925 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
6926 static bool EmitRefAsNonNull(FunctionCompiler& f) {
6927 MDefinition* ref;
6928 if (!f.iter().readRefAsNonNull(&ref)) {
6929 return false;
6932 return f.refAsNonNull(ref);
6935 static bool EmitBrOnNull(FunctionCompiler& f) {
6936 uint32_t relativeDepth;
6937 ResultType type;
6938 DefVector values;
6939 MDefinition* condition;
6940 if (!f.iter().readBrOnNull(&relativeDepth, &type, &values, &condition)) {
6941 return false;
6944 return f.brOnNull(relativeDepth, values, type, condition);
6947 static bool EmitBrOnNonNull(FunctionCompiler& f) {
6948 uint32_t relativeDepth;
6949 ResultType type;
6950 DefVector values;
6951 MDefinition* condition;
6952 if (!f.iter().readBrOnNonNull(&relativeDepth, &type, &values, &condition)) {
6953 return false;
6956 return f.brOnNonNull(relativeDepth, values, type, condition);
6959 static bool EmitCallRef(FunctionCompiler& f) {
6960 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
6962 const FuncType* funcType;
6963 MDefinition* callee;
6964 DefVector args;
6966 if (!f.iter().readCallRef(&funcType, &callee, &args)) {
6967 return false;
6970 if (f.inDeadCode()) {
6971 return true;
6974 CallCompileState call;
6975 if (!EmitCallArgs(f, *funcType, args, &call)) {
6976 return false;
6979 DefVector results;
6980 if (!f.callRef(*funcType, callee, lineOrBytecode, call, &results)) {
6981 return false;
6984 f.iter().setResults(results.length(), results);
6985 return true;
6988 #endif // ENABLE_WASM_FUNCTION_REFERENCES
6990 #ifdef ENABLE_WASM_GC
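// struct.new: allocate the struct object, then initialize each field from the
// operands; no pre-barrier is used (WasmPreBarrierKind::None) since the object
// is freshly allocated.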
6992 static bool EmitStructNew(FunctionCompiler& f) {
6993 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
6995 uint32_t typeIndex;
6996 DefVector args;
6997 if (!f.iter().readStructNew(&typeIndex, &args)) {
6998 return false;
7001 if (f.inDeadCode()) {
7002 return true;
7005 const TypeDef& typeDef = (*f.moduleEnv().types)[typeIndex];
7006 const StructType& structType = typeDef.structType();
7007 MOZ_ASSERT(args.length() == structType.fields_.length());
7009 MDefinition* structObject = f.createStructObject(typeIndex, false);
7010 if (!structObject) {
7011 return false;
7014 // And fill in the fields.
7015 for (uint32_t fieldIndex = 0; fieldIndex < structType.fields_.length();
7016 fieldIndex++) {
7017 if (!f.mirGen().ensureBallast()) {
7018 return false;
7020 const StructField& field = structType.fields_[fieldIndex];
7021 if (!f.writeValueToStructField(lineOrBytecode, field, structObject,
7022 args[fieldIndex],
7023 WasmPreBarrierKind::None)) {
7024 return false;
7028 f.iter().setResult(structObject);
7029 return true;
7032 static bool EmitStructNewDefault(FunctionCompiler& f) {
7033 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7035 uint32_t typeIndex;
7036 if (!f.iter().readStructNewDefault(&typeIndex)) {
7037 return false;
7040 if (f.inDeadCode()) {
7041 return true;
7044 const StructType& structType = (*f.moduleEnv().types)[typeIndex].structType();
7046 // Allocate a default-initialized struct. This requires the type definition
7047 // for the struct.
7048 MDefinition* typeDefData = f.loadTypeDefInstanceData(typeIndex);
7049 if (!typeDefData) {
7050 return false;
7053 // Figure out whether we need an OOL storage area, and hence which routine
7054 // to call.
7055 SymbolicAddressSignature calleeSASig =
7056 WasmStructObject::requiresOutlineBytes(structType.size_)
7057 ? SASigStructNewOOL_true
7058 : SASigStructNewIL_true;
7060 // Create call: structObject = Instance::structNew{IL,OOL}<true>(typeDefData)
7061 MDefinition* structObject;
7062 if (!f.emitInstanceCall1(lineOrBytecode, calleeSASig, typeDefData,
7063 &structObject)) {
7064 return false;
7067 f.iter().setResult(structObject);
7068 return true;
7071 static bool EmitStructSet(FunctionCompiler& f) {
7072 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7074 uint32_t typeIndex;
7075 uint32_t fieldIndex;
7076 MDefinition* structObject;
7077 MDefinition* value;
7078 if (!f.iter().readStructSet(&typeIndex, &fieldIndex, &structObject, &value)) {
7079 return false;
7082 if (f.inDeadCode()) {
7083 return true;
7086 // The null check is done in writeValueToStructField.
7088 // And fill in the field.
7089 const StructType& structType = (*f.moduleEnv().types)[typeIndex].structType();
7090 const StructField& field = structType.fields_[fieldIndex];
7091 return f.writeValueToStructField(lineOrBytecode, field, structObject, value,
7092 WasmPreBarrierKind::Normal);
7095 static bool EmitStructGet(FunctionCompiler& f, FieldWideningOp wideningOp) {
7096 uint32_t typeIndex;
7097 uint32_t fieldIndex;
7098 MDefinition* structObject;
7099 if (!f.iter().readStructGet(&typeIndex, &fieldIndex, wideningOp,
7100 &structObject)) {
7101 return false;
7104 if (f.inDeadCode()) {
7105 return true;
7108 // The null check is done in readValueFromStructField.
7110 // And fetch the data.
7111 const StructType& structType = (*f.moduleEnv().types)[typeIndex].structType();
7112 const StructField& field = structType.fields_[fieldIndex];
7113 MDefinition* load =
7114 f.readValueFromStructField(field, wideningOp, structObject);
7115 if (!load) {
7116 return false;
7119 f.iter().setResult(load);
7120 return true;
7123 static bool EmitArrayNew(FunctionCompiler& f) {
7124 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7126 uint32_t typeIndex;
7127 MDefinition* numElements;
7128 MDefinition* fillValue;
7129 if (!f.iter().readArrayNew(&typeIndex, &numElements, &fillValue)) {
7130 return false;
7133 if (f.inDeadCode()) {
7134 return true;
7137 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by
7138 // this helper will trap.
7139 MDefinition* arrayObject = f.createArrayNewCallAndLoop(
7140 lineOrBytecode, typeIndex, numElements, fillValue);
7141 if (!arrayObject) {
7142 return false;
7145 f.iter().setResult(arrayObject);
7146 return true;
7149 static bool EmitArrayNewDefault(FunctionCompiler& f) {
7150 // This is almost identical to EmitArrayNew, except we skip the
7151 // initialisation loop.
7152 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7154 uint32_t typeIndex;
7155 MDefinition* numElements;
7156 if (!f.iter().readArrayNewDefault(&typeIndex, &numElements)) {
7157 return false;
7160 if (f.inDeadCode()) {
7161 return true;
7164 // Create the array object, default-initialized.
7165 MDefinition* arrayObject = f.createDefaultInitializedArrayObject(
7166 lineOrBytecode, typeIndex, numElements);
7167 if (!arrayObject) {
7168 return false;
7171 f.iter().setResult(arrayObject);
7172 return true;
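// array.new_fixed: allocate a default-initialized array, then write each
// operand into the out-of-line data area at its element offset.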
7175 static bool EmitArrayNewFixed(FunctionCompiler& f) {
7176 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7178 uint32_t typeIndex, numElements;
7179 DefVector values;
7181 if (!f.iter().readArrayNewFixed(&typeIndex, &numElements, &values)) {
7182 return false;
7184 MOZ_ASSERT(values.length() == numElements);
7186 if (f.inDeadCode()) {
7187 return true;
7190 MDefinition* numElementsDef = f.constantI32(int32_t(numElements));
7191 if (!numElementsDef) {
7192 return false;
7195 // Create the array object, default-initialized.
7196 MDefinition* arrayObject = f.createDefaultInitializedArrayObject(
7197 lineOrBytecode, typeIndex, numElementsDef);
7198 if (!arrayObject) {
7199 return false;
7202 // Make `base` point at the first byte of the (OOL) data area.
7203 MDefinition* base = f.getWasmArrayObjectData(arrayObject);
7204 if (!base) {
7205 return false;
7208 // Write each element in turn.
7209 const ArrayType& arrayType = (*f.moduleEnv().types)[typeIndex].arrayType();
7210 FieldType elemFieldType = arrayType.elementType_;
7211 uint32_t elemSize = elemFieldType.size();
7213 // How do we know that the offset expression `i * elemSize` below remains
7214 // within 2^31 (signed-i32) range? In the worst case we will have 16-byte
7215 // values, and there can be at most MaxFunctionBytes expressions, if it were
7216 // theoretically possible to generate one expression per instruction byte.
7217 // Hence the max offset we can be expected to generate is
7218 // `16 * MaxFunctionBytes`.
7219 static_assert(16 /* sizeof v128 */ * MaxFunctionBytes <=
7220 MaxArrayPayloadBytes);
7221 MOZ_RELEASE_ASSERT(numElements <= MaxFunctionBytes);
7223 for (uint32_t i = 0; i < numElements; i++) {
7224 if (!f.mirGen().ensureBallast()) {
7225 return false;
7227 // `i * elemSize` is made safe by the assertions above.
7228 if (!f.writeGcValueAtBasePlusOffset(
7229 lineOrBytecode, elemFieldType, arrayObject,
7230 AliasSet::WasmArrayDataArea, values[numElements - 1 - i], base,
7231 i * elemSize, false, WasmPreBarrierKind::None)) {
7232 return false;
7236 f.iter().setResult(arrayObject);
7237 return true;
7240 static bool EmitArrayNewData(FunctionCompiler& f) {
7241 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7243 uint32_t typeIndex, segIndex;
7244 MDefinition* segByteOffset;
7245 MDefinition* numElements;
7246 if (!f.iter().readArrayNewData(&typeIndex, &segIndex, &segByteOffset,
7247 &numElements)) {
7248 return false;
7251 if (f.inDeadCode()) {
7252 return true;
7255 // Get the type definition data for the array as a whole.
7256 MDefinition* typeDefData = f.loadTypeDefInstanceData(typeIndex);
7257 if (!typeDefData) {
7258 return false;
7261 // Other values we need to pass to the instance call:
7262 MDefinition* segIndexM = f.constantI32(int32_t(segIndex));
7263 if (!segIndexM) {
7264 return false;
7267 // Create call:
7268 // arrayObject = Instance::arrayNewData(segByteOffset:u32, numElements:u32,
7269 // typeDefData:word, segIndex:u32)
7270 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by
7271 // this call will trap.
7272 MDefinition* arrayObject;
7273 if (!f.emitInstanceCall4(lineOrBytecode, SASigArrayNewData, segByteOffset,
7274 numElements, typeDefData, segIndexM, &arrayObject)) {
7275 return false;
7278 f.iter().setResult(arrayObject);
7279 return true;
7282 static bool EmitArrayNewElem(FunctionCompiler& f) {
7283 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7285 uint32_t typeIndex, segIndex;
7286 MDefinition* segElemIndex;
7287 MDefinition* numElements;
7288 if (!f.iter().readArrayNewElem(&typeIndex, &segIndex, &segElemIndex,
7289 &numElements)) {
7290 return false;
7293 if (f.inDeadCode()) {
7294 return true;
7298 // Get the type definition data for the array as a whole.
7299 MDefinition* typeDefData = f.loadTypeDefInstanceData(typeIndex);
7300 if (!typeDefData) {
7301 return false;
7304 // Other values we need to pass to the instance call:
7305 MDefinition* segIndexM = f.constantI32(int32_t(segIndex));
7306 if (!segIndexM) {
7307 return false;
7310 // Create call:
7311 // arrayObject = Instance::arrayNewElem(segElemIndex:u32, numElements:u32,
7312 // typeDefData:word, segIndex:u32)
7313 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by
7314 // this call will trap.
7315 MDefinition* arrayObject;
7316 if (!f.emitInstanceCall4(lineOrBytecode, SASigArrayNewElem, segElemIndex,
7317 numElements, typeDefData, segIndexM, &arrayObject)) {
7318 return false;
7321 f.iter().setResult(arrayObject);
7322 return true;
7325 static bool EmitArrayInitData(FunctionCompiler& f) {
7326 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7328 uint32_t typeIndex, segIndex;
7329 MDefinition* array;
7330 MDefinition* arrayIndex;
7331 MDefinition* segOffset;
7332 MDefinition* length;
7333 if (!f.iter().readArrayInitData(&typeIndex, &segIndex, &array, &arrayIndex,
7334 &segOffset, &length)) {
7335 return false;
7338 if (f.inDeadCode()) {
7339 return true;
7342 // Get the type definition data for the array as a whole.
7343 MDefinition* typeDefData = f.loadTypeDefInstanceData(typeIndex);
7344 if (!typeDefData) {
7345 return false;
7348 // Other values we need to pass to the instance call:
7349 MDefinition* segIndexM = f.constantI32(int32_t(segIndex));
7350 if (!segIndexM) {
7351 return false;
7354 // Create call:
7355 // Instance::arrayInitData(array:word, index:u32, segByteOffset:u32,
7356 //   numElements:u32, typeDefData:word, segIndex:u32)
7357 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by this call will trap.
7358 return f.emitInstanceCall6(lineOrBytecode, SASigArrayInitData, array,
7359 arrayIndex, segOffset, length, typeDefData,
7360 segIndexM);
7363 static bool EmitArrayInitElem(FunctionCompiler& f) {
7364 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7366 uint32_t typeIndex, segIndex;
7367 MDefinition* array;
7368 MDefinition* arrayIndex;
7369 MDefinition* segOffset;
7370 MDefinition* length;
7371 if (!f.iter().readArrayInitElem(&typeIndex, &segIndex, &array, &arrayIndex,
7372 &segOffset, &length)) {
7373 return false;
7376 if (f.inDeadCode()) {
7377 return true;
7380 // Get the type definition data for the array as a whole.
7381 MDefinition* typeDefData = f.loadTypeDefInstanceData(typeIndex);
7382 if (!typeDefData) {
7383 return false;
7386 // Other values we need to pass to the instance call:
7387 MDefinition* segIndexM = f.constantI32(int32_t(segIndex));
7388 if (!segIndexM) {
7389 return false;
7392 // Create call:
7393 // Instance::arrayInitElem(array:word, index:u32, segByteOffset:u32,
7394 //   numElements:u32, typeDefData:word, segIndex:u32)
7395 // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by this call will trap.
7396 return f.emitInstanceCall6(lineOrBytecode, SASigArrayInitElem, array,
7397 arrayIndex, segOffset, length, typeDefData,
7398 segIndexM);
7401 static bool EmitArraySet(FunctionCompiler& f) {
7402 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7404 uint32_t typeIndex;
7405 MDefinition* value;
7406 MDefinition* index;
7407 MDefinition* arrayObject;
7408 if (!f.iter().readArraySet(&typeIndex, &value, &index, &arrayObject)) {
7409 return false;
7412 if (f.inDeadCode()) {
7413 return true;
7416 // The null check is done in setupForArrayAccess.
7418 // Create the object null check and the array bounds check and get the OOL
7419 // data pointer.
7420 MDefinition* base = f.setupForArrayAccess(arrayObject, index);
7421 if (!base) {
7422 return false;
7425 // And do the store.
7426 const ArrayType& arrayType = (*f.moduleEnv().types)[typeIndex].arrayType();
7427 FieldType elemFieldType = arrayType.elementType_;
7428 uint32_t elemSize = elemFieldType.size();
7429 MOZ_ASSERT(elemSize >= 1 && elemSize <= 16);
7431 return f.writeGcValueAtBasePlusScaledIndex(
7432 lineOrBytecode, elemFieldType, arrayObject, AliasSet::WasmArrayDataArea,
7433 value, base, elemSize, index, WasmPreBarrierKind::Normal);
7436 static bool EmitArrayGet(FunctionCompiler& f, FieldWideningOp wideningOp) {
7437 uint32_t typeIndex;
7438 MDefinition* index;
7439 MDefinition* arrayObject;
7440 if (!f.iter().readArrayGet(&typeIndex, wideningOp, &index, &arrayObject)) {
7441 return false;
7444 if (f.inDeadCode()) {
7445 return true;
7448 // The null check is done in setupForArrayAccess.
7450 // Create the object null check and the array bounds check and get the OOL
7451 // data pointer.
7452 MDefinition* base = f.setupForArrayAccess(arrayObject, index);
7453 if (!base) {
7454 return false;
7457 // And do the load.
7458 const ArrayType& arrayType = (*f.moduleEnv().types)[typeIndex].arrayType();
7459 FieldType elemFieldType = arrayType.elementType_;
7460 uint32_t elemSize = elemFieldType.size();
7461 MOZ_ASSERT(elemSize >= 1 && elemSize <= 16);
7463 MDefinition* load = f.readGcValueAtBasePlusScaledIndex(
7464 elemFieldType, wideningOp, arrayObject, AliasSet::WasmArrayDataArea, base,
7465 elemSize, index);
7466 if (!load) {
7467 return false;
7470 f.iter().setResult(load);
7471 return true;
7474 static bool EmitArrayLen(FunctionCompiler& f) {
7475 MDefinition* arrayObject;
7476 if (!f.iter().readArrayLen(&arrayObject)) {
7477 return false;
7480 if (f.inDeadCode()) {
7481 return true;
7484 // The null check is done in getWasmArrayObjectNumElements.
7486 // Get the number of elements in the array.
7487 MDefinition* numElements = f.getWasmArrayObjectNumElements(arrayObject);
7488 if (!numElements) {
7489 return false;
7492 f.iter().setResult(numElements);
7493 return true;
7496 static bool EmitArrayCopy(FunctionCompiler& f) {
7497 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7499 int32_t elemSize;
7500 bool elemsAreRefTyped;
7501 MDefinition* dstArrayObject;
7502 MDefinition* dstArrayIndex;
7503 MDefinition* srcArrayObject;
7504 MDefinition* srcArrayIndex;
7505 MDefinition* numElements;
7506 if (!f.iter().readArrayCopy(&elemSize, &elemsAreRefTyped, &dstArrayObject,
7507 &dstArrayIndex, &srcArrayObject, &srcArrayIndex,
7508 &numElements)) {
7509 return false;
7512 if (f.inDeadCode()) {
7513 return true;
7516 MOZ_ASSERT_IF(elemsAreRefTyped,
7517 size_t(elemSize) == MIRTypeToSize(TargetWordMIRType()));
7518 MOZ_ASSERT_IF(!elemsAreRefTyped, elemSize == 1 || elemSize == 2 ||
7519 elemSize == 4 || elemSize == 8 ||
7520 elemSize == 16);
7522 // A negative element size is used to inform Instance::arrayCopy that the
7523 // values are reftyped. This avoids having to pass it an extra boolean
7524 // argument.
7525 MDefinition* elemSizeDef =
7526 f.constantI32(elemsAreRefTyped ? -elemSize : elemSize);
7527 if (!elemSizeDef) {
7528 return false;
7531 // Create call:
7532 // Instance::arrayCopy(dstArrayObject:word, dstArrayIndex:u32,
7533 // srcArrayObject:word, srcArrayIndex:u32,
7534 // numElements:u32,
7535 // (elemsAreRefTyped ? -elemSize : elemSize):u32))
7536 return f.emitInstanceCall6(lineOrBytecode, SASigArrayCopy, dstArrayObject,
7537 dstArrayIndex, srcArrayObject, srcArrayIndex,
7538 numElements, elemSizeDef);
7541 static bool EmitArrayFill(FunctionCompiler& f) {
7542 uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
7544 uint32_t typeIndex;
7545 MDefinition* array;
7546 MDefinition* index;
7547 MDefinition* val;
7548 MDefinition* numElements;
7549 if (!f.iter().readArrayFill(&typeIndex, &array, &index, &val, &numElements)) {
7550 return false;
7553 if (f.inDeadCode()) {
7554 return true;
7557 return f.createArrayFill(lineOrBytecode, typeIndex, array, index, val,
7558 numElements);
7561 static bool EmitRefI31(FunctionCompiler& f) {
7562 MDefinition* input;
7563 if (!f.iter().readConversion(
7564 ValType::I32, ValType(RefType::i31().asNonNullable()), &input)) {
7565 return false;
7568 if (f.inDeadCode()) {
7569 return true;
7572 MDefinition* output = f.refI31(input);
7573 if (!output) {
7574 return false;
7576 f.iter().setResult(output);
7577 return true;
7580 static bool EmitI31Get(FunctionCompiler& f, FieldWideningOp wideningOp) {
7581 MOZ_ASSERT(wideningOp != FieldWideningOp::None);
7583 MDefinition* input;
7584 if (!f.iter().readConversion(ValType(RefType::i31()), ValType::I32, &input)) {
7585 return false;
7588 if (f.inDeadCode()) {
7589 return true;
7592 if (!f.refAsNonNull(input)) {
7593 return false;
7595 MDefinition* output = f.i31Get(input, wideningOp);
7596 if (!output) {
7597 return false;
7599 f.iter().setResult(output);
7600 return true;
7603 static bool EmitRefTest(FunctionCompiler& f, bool nullable) {
7604 MDefinition* ref;
7605 RefType sourceType;
7606 RefType destType;
7607 if (!f.iter().readRefTest(nullable, &sourceType, &destType, &ref)) {
7608 return false;
7611 if (f.inDeadCode()) {
7612 return true;
7615 MDefinition* success = f.refTest(ref, sourceType, destType);
7616 if (!success) {
7617 return false;
7620 f.iter().setResult(success);
7621 return true;
7624 static bool EmitRefCast(FunctionCompiler& f, bool nullable) {
7625 MDefinition* ref;
7626 RefType sourceType;
7627 RefType destType;
7628 if (!f.iter().readRefCast(nullable, &sourceType, &destType, &ref)) {
7629 return false;
7632 if (f.inDeadCode()) {
7633 return true;
7636 if (!f.refCast(ref, sourceType, destType)) {
7637 return false;
7640 f.iter().setResult(ref);
7641 return true;
7644 static bool EmitBrOnCast(FunctionCompiler& f, bool onSuccess) {
7645 uint32_t labelRelativeDepth;
7646 RefType sourceType;
7647 RefType destType;
7648 ResultType labelType;
7649 DefVector values;
7650 if (!f.iter().readBrOnCast(onSuccess, &labelRelativeDepth, &sourceType,
7651 &destType, &labelType, &values)) {
7652 return false;
7655 return f.brOnCastCommon(onSuccess, labelRelativeDepth, sourceType, destType,
7656 labelType, values);
7659 static bool EmitAnyConvertExtern(FunctionCompiler& f) {
7660 // any.convert_extern is a no-op because anyref and externref share the same
7661 // representation.
7662 MDefinition* ref;
7663 if (!f.iter().readRefConversion(RefType::extern_(), RefType::any(), &ref)) {
7664 return false;
7667 f.iter().setResult(ref);
7668 return true;
7671 static bool EmitExternConvertAny(FunctionCompiler& f) {
7672 // extern.convert_any is a no-op because anyref and externref share the same
7673 // representation.
7674 MDefinition* ref;
7675 if (!f.iter().readRefConversion(RefType::any(), RefType::extern_(), &ref)) {
7676 return false;
7679 f.iter().setResult(ref);
7680 return true;
7683 #endif // ENABLE_WASM_GC
7685 static bool EmitCallBuiltinModuleFunc(FunctionCompiler& f) {
7686 // It is almost possible to use FunctionCompiler::emitInstanceCallN here.
7687 // Unfortunately it is not currently possible, since ::emitInstanceCallN
7688 // expects an array of arguments along with a size, and that is not what is
7689 // available here. It would become possible if we were prepared to copy
7690 // `builtinModuleFunc->params` into a fixed-size array (16 elements, say),
7691 // add `memoryBase`, and make the call.
7692 const BuiltinModuleFunc* builtinModuleFunc;
7694 DefVector params;
7695 if (!f.iter().readCallBuiltinModuleFunc(&builtinModuleFunc, &params)) {
7696 return false;
7699 uint32_t bytecodeOffset = f.readBytecodeOffset();
7700 const SymbolicAddressSignature& callee = builtinModuleFunc->signature;
7702 CallCompileState args;
7703 if (!f.passInstance(callee.argTypes[0], &args)) {
7704 return false;
7707 if (!f.passArgs(params, builtinModuleFunc->params, &args)) {
7708 return false;
7711 if (builtinModuleFunc->usesMemory) {
7712 MDefinition* memoryBase = f.memoryBase(0);
7713 if (!f.passArg(memoryBase, MIRType::Pointer, &args)) {
7714 return false;
7718 if (!f.finishCall(&args)) {
7719 return false;
7722 bool hasResult = builtinModuleFunc->result.isSome();
7723 MDefinition* result = nullptr;
7724 MDefinition** resultOutParam = hasResult ? &result : nullptr;
7725 if (!f.builtinInstanceMethodCall(callee, bytecodeOffset, args,
7726 resultOutParam)) {
7727 return false;
7730 if (hasResult) {
7731 f.iter().setResult(result);
7733 return true;
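// Decode and compile the whole function body: read opcodes one at a time and
// dispatch each to its Emit* helper until the End that closes the function's
// outermost block is reached.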
7736 static bool EmitBodyExprs(FunctionCompiler& f) {
7737 if (!f.iter().startFunction(f.funcIndex(), f.locals())) {
7738 return false;
7741 #define CHECK(c) \
7742 if (!(c)) return false; \
7743 break
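// Every case below wraps its emitter in CHECK: a failed emission makes
// EmitBodyExprs return false, while a successful one falls through to the
// `break` supplied by the macro.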
7745 while (true) {
7746 if (!f.mirGen().ensureBallast()) {
7747 return false;
7750 OpBytes op;
7751 if (!f.iter().readOp(&op)) {
7752 return false;
7755 switch (op.b0) {
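// Op::End both closes the current control item and, once the control stack
// is empty, ends the function body (EmitBodyExprs returns success).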
7756 case uint16_t(Op::End):
7757 if (!EmitEnd(f)) {
7758 return false;
7760 if (f.iter().controlStackEmpty()) {
7761 return true;
7763 break;
7765 // Control opcodes
7766 case uint16_t(Op::Unreachable):
7767 CHECK(EmitUnreachable(f));
7768 case uint16_t(Op::Nop):
7769 CHECK(f.iter().readNop());
7770 case uint16_t(Op::Block):
7771 CHECK(EmitBlock(f));
7772 case uint16_t(Op::Loop):
7773 CHECK(EmitLoop(f));
7774 case uint16_t(Op::If):
7775 CHECK(EmitIf(f));
7776 case uint16_t(Op::Else):
7777 CHECK(EmitElse(f));
7778 case uint16_t(Op::Try):
7779 if (!f.moduleEnv().exceptionsEnabled()) {
7780 return f.iter().unrecognizedOpcode(&op);
7782 CHECK(EmitTry(f));
7783 case uint16_t(Op::Catch):
7784 if (!f.moduleEnv().exceptionsEnabled()) {
7785 return f.iter().unrecognizedOpcode(&op);
7787 CHECK(EmitCatch(f));
7788 case uint16_t(Op::CatchAll):
7789 if (!f.moduleEnv().exceptionsEnabled()) {
7790 return f.iter().unrecognizedOpcode(&op);
7792 CHECK(EmitCatchAll(f));
7793 case uint16_t(Op::Delegate):
7794 if (!f.moduleEnv().exceptionsEnabled()) {
7795 return f.iter().unrecognizedOpcode(&op);
7797 if (!EmitDelegate(f)) {
7798 return false;
7800 break;
7801 case uint16_t(Op::Throw):
7802 if (!f.moduleEnv().exceptionsEnabled()) {
7803 return f.iter().unrecognizedOpcode(&op);
7805 CHECK(EmitThrow(f));
7806 case uint16_t(Op::Rethrow):
7807 if (!f.moduleEnv().exceptionsEnabled()) {
7808 return f.iter().unrecognizedOpcode(&op);
7810 CHECK(EmitRethrow(f));
7811 case uint16_t(Op::Br):
7812 CHECK(EmitBr(f));
7813 case uint16_t(Op::BrIf):
7814 CHECK(EmitBrIf(f));
7815 case uint16_t(Op::BrTable):
7816 CHECK(EmitBrTable(f));
7817 case uint16_t(Op::Return):
7818 CHECK(EmitReturn(f));
7820 // Calls
7821 case uint16_t(Op::Call):
7822 CHECK(EmitCall(f, /* asmJSFuncDef = */ false));
7823 case uint16_t(Op::CallIndirect):
7824 CHECK(EmitCallIndirect(f, /* oldStyle = */ false));
7826 // Parametric operators
7827 case uint16_t(Op::Drop):
7828 CHECK(f.iter().readDrop());
7829 case uint16_t(Op::SelectNumeric):
7830 CHECK(EmitSelect(f, /*typed*/ false));
7831 case uint16_t(Op::SelectTyped):
7832 CHECK(EmitSelect(f, /*typed*/ true));
7834 // Locals and globals
7835 case uint16_t(Op::LocalGet):
7836 CHECK(EmitGetLocal(f));
7837 case uint16_t(Op::LocalSet):
7838 CHECK(EmitSetLocal(f));
7839 case uint16_t(Op::LocalTee):
7840 CHECK(EmitTeeLocal(f));
7841 case uint16_t(Op::GlobalGet):
7842 CHECK(EmitGetGlobal(f));
7843 case uint16_t(Op::GlobalSet):
7844 CHECK(EmitSetGlobal(f));
7845 case uint16_t(Op::TableGet):
7846 CHECK(EmitTableGet(f));
7847 case uint16_t(Op::TableSet):
7848 CHECK(EmitTableSet(f));
7850 // Memory-related operators
7851 case uint16_t(Op::I32Load):
7852 CHECK(EmitLoad(f, ValType::I32, Scalar::Int32));
7853 case uint16_t(Op::I64Load):
7854 CHECK(EmitLoad(f, ValType::I64, Scalar::Int64));
7855 case uint16_t(Op::F32Load):
7856 CHECK(EmitLoad(f, ValType::F32, Scalar::Float32));
7857 case uint16_t(Op::F64Load):
7858 CHECK(EmitLoad(f, ValType::F64, Scalar::Float64));
7859 case uint16_t(Op::I32Load8S):
7860 CHECK(EmitLoad(f, ValType::I32, Scalar::Int8));
7861 case uint16_t(Op::I32Load8U):
7862 CHECK(EmitLoad(f, ValType::I32, Scalar::Uint8));
7863 case uint16_t(Op::I32Load16S):
7864 CHECK(EmitLoad(f, ValType::I32, Scalar::Int16));
7865 case uint16_t(Op::I32Load16U):
7866 CHECK(EmitLoad(f, ValType::I32, Scalar::Uint16));
7867 case uint16_t(Op::I64Load8S):
7868 CHECK(EmitLoad(f, ValType::I64, Scalar::Int8));
7869 case uint16_t(Op::I64Load8U):
7870 CHECK(EmitLoad(f, ValType::I64, Scalar::Uint8));
7871 case uint16_t(Op::I64Load16S):
7872 CHECK(EmitLoad(f, ValType::I64, Scalar::Int16));
7873 case uint16_t(Op::I64Load16U):
7874 CHECK(EmitLoad(f, ValType::I64, Scalar::Uint16));
7875 case uint16_t(Op::I64Load32S):
7876 CHECK(EmitLoad(f, ValType::I64, Scalar::Int32));
7877 case uint16_t(Op::I64Load32U):
7878 CHECK(EmitLoad(f, ValType::I64, Scalar::Uint32));
7879 case uint16_t(Op::I32Store):
7880 CHECK(EmitStore(f, ValType::I32, Scalar::Int32));
7881 case uint16_t(Op::I64Store):
7882 CHECK(EmitStore(f, ValType::I64, Scalar::Int64));
7883 case uint16_t(Op::F32Store):
7884 CHECK(EmitStore(f, ValType::F32, Scalar::Float32));
7885 case uint16_t(Op::F64Store):
7886 CHECK(EmitStore(f, ValType::F64, Scalar::Float64));
7887 case uint16_t(Op::I32Store8):
7888 CHECK(EmitStore(f, ValType::I32, Scalar::Int8));
7889 case uint16_t(Op::I32Store16):
7890 CHECK(EmitStore(f, ValType::I32, Scalar::Int16));
7891 case uint16_t(Op::I64Store8):
7892 CHECK(EmitStore(f, ValType::I64, Scalar::Int8));
7893 case uint16_t(Op::I64Store16):
7894 CHECK(EmitStore(f, ValType::I64, Scalar::Int16));
7895 case uint16_t(Op::I64Store32):
7896 CHECK(EmitStore(f, ValType::I64, Scalar::Int32));
7897 case uint16_t(Op::MemorySize):
7898 CHECK(EmitMemorySize(f));
7899 case uint16_t(Op::MemoryGrow):
7900 CHECK(EmitMemoryGrow(f));
7902 // Constants
7903 case uint16_t(Op::I32Const):
7904 CHECK(EmitI32Const(f));
7905 case uint16_t(Op::I64Const):
7906 CHECK(EmitI64Const(f));
7907 case uint16_t(Op::F32Const):
7908 CHECK(EmitF32Const(f));
7909 case uint16_t(Op::F64Const):
7910 CHECK(EmitF64Const(f));
7912 // Comparison operators
7913 case uint16_t(Op::I32Eqz):
7914 CHECK(EmitConversion<MNot>(f, ValType::I32, ValType::I32));
7915 case uint16_t(Op::I32Eq):
7916 CHECK(
7917 EmitComparison(f, ValType::I32, JSOp::Eq, MCompare::Compare_Int32));
7918 case uint16_t(Op::I32Ne):
7919 CHECK(
7920 EmitComparison(f, ValType::I32, JSOp::Ne, MCompare::Compare_Int32));
7921 case uint16_t(Op::I32LtS):
7922 CHECK(
7923 EmitComparison(f, ValType::I32, JSOp::Lt, MCompare::Compare_Int32));
7924 case uint16_t(Op::I32LtU):
7925 CHECK(EmitComparison(f, ValType::I32, JSOp::Lt,
7926 MCompare::Compare_UInt32));
7927 case uint16_t(Op::I32GtS):
7928 CHECK(
7929 EmitComparison(f, ValType::I32, JSOp::Gt, MCompare::Compare_Int32));
7930 case uint16_t(Op::I32GtU):
7931 CHECK(EmitComparison(f, ValType::I32, JSOp::Gt,
7932 MCompare::Compare_UInt32));
7933 case uint16_t(Op::I32LeS):
7934 CHECK(
7935 EmitComparison(f, ValType::I32, JSOp::Le, MCompare::Compare_Int32));
7936 case uint16_t(Op::I32LeU):
7937 CHECK(EmitComparison(f, ValType::I32, JSOp::Le,
7938 MCompare::Compare_UInt32));
7939 case uint16_t(Op::I32GeS):
7940 CHECK(
7941 EmitComparison(f, ValType::I32, JSOp::Ge, MCompare::Compare_Int32));
7942 case uint16_t(Op::I32GeU):
7943 CHECK(EmitComparison(f, ValType::I32, JSOp::Ge,
7944 MCompare::Compare_UInt32));
7945 case uint16_t(Op::I64Eqz):
7946 CHECK(EmitConversion<MNot>(f, ValType::I64, ValType::I32));
7947 case uint16_t(Op::I64Eq):
7948 CHECK(
7949 EmitComparison(f, ValType::I64, JSOp::Eq, MCompare::Compare_Int64));
7950 case uint16_t(Op::I64Ne):
7951 CHECK(
7952 EmitComparison(f, ValType::I64, JSOp::Ne, MCompare::Compare_Int64));
7953 case uint16_t(Op::I64LtS):
7954 CHECK(
7955 EmitComparison(f, ValType::I64, JSOp::Lt, MCompare::Compare_Int64));
7956 case uint16_t(Op::I64LtU):
7957 CHECK(EmitComparison(f, ValType::I64, JSOp::Lt,
7958 MCompare::Compare_UInt64));
7959 case uint16_t(Op::I64GtS):
7960 CHECK(
7961 EmitComparison(f, ValType::I64, JSOp::Gt, MCompare::Compare_Int64));
7962 case uint16_t(Op::I64GtU):
7963 CHECK(EmitComparison(f, ValType::I64, JSOp::Gt,
7964 MCompare::Compare_UInt64));
7965 case uint16_t(Op::I64LeS):
7966 CHECK(
7967 EmitComparison(f, ValType::I64, JSOp::Le, MCompare::Compare_Int64));
7968 case uint16_t(Op::I64LeU):
7969 CHECK(EmitComparison(f, ValType::I64, JSOp::Le,
7970 MCompare::Compare_UInt64));
7971 case uint16_t(Op::I64GeS):
7972 CHECK(
7973 EmitComparison(f, ValType::I64, JSOp::Ge, MCompare::Compare_Int64));
7974 case uint16_t(Op::I64GeU):
7975 CHECK(EmitComparison(f, ValType::I64, JSOp::Ge,
7976 MCompare::Compare_UInt64));
7977 case uint16_t(Op::F32Eq):
7978 CHECK(EmitComparison(f, ValType::F32, JSOp::Eq,
7979 MCompare::Compare_Float32));
7980 case uint16_t(Op::F32Ne):
7981 CHECK(EmitComparison(f, ValType::F32, JSOp::Ne,
7982 MCompare::Compare_Float32));
7983 case uint16_t(Op::F32Lt):
7984 CHECK(EmitComparison(f, ValType::F32, JSOp::Lt,
7985 MCompare::Compare_Float32));
7986 case uint16_t(Op::F32Gt):
7987 CHECK(EmitComparison(f, ValType::F32, JSOp::Gt,
7988 MCompare::Compare_Float32));
7989 case uint16_t(Op::F32Le):
7990 CHECK(EmitComparison(f, ValType::F32, JSOp::Le,
7991 MCompare::Compare_Float32));
7992 case uint16_t(Op::F32Ge):
7993 CHECK(EmitComparison(f, ValType::F32, JSOp::Ge,
7994 MCompare::Compare_Float32));
7995 case uint16_t(Op::F64Eq):
7996 CHECK(EmitComparison(f, ValType::F64, JSOp::Eq,
7997 MCompare::Compare_Double));
7998 case uint16_t(Op::F64Ne):
7999 CHECK(EmitComparison(f, ValType::F64, JSOp::Ne,
8000 MCompare::Compare_Double));
8001 case uint16_t(Op::F64Lt):
8002 CHECK(EmitComparison(f, ValType::F64, JSOp::Lt,
8003 MCompare::Compare_Double));
8004 case uint16_t(Op::F64Gt):
8005 CHECK(EmitComparison(f, ValType::F64, JSOp::Gt,
8006 MCompare::Compare_Double));
8007 case uint16_t(Op::F64Le):
8008 CHECK(EmitComparison(f, ValType::F64, JSOp::Le,
8009 MCompare::Compare_Double));
8010 case uint16_t(Op::F64Ge):
8011 CHECK(EmitComparison(f, ValType::F64, JSOp::Ge,
8012 MCompare::Compare_Double));
8014 // Numeric operators
8015 case uint16_t(Op::I32Clz):
8016 CHECK(EmitUnaryWithType<MClz>(f, ValType::I32, MIRType::Int32));
8017 case uint16_t(Op::I32Ctz):
8018 CHECK(EmitUnaryWithType<MCtz>(f, ValType::I32, MIRType::Int32));
8019 case uint16_t(Op::I32Popcnt):
8020 CHECK(EmitUnaryWithType<MPopcnt>(f, ValType::I32, MIRType::Int32));
8021 case uint16_t(Op::I32Add):
8022 CHECK(EmitAdd(f, ValType::I32, MIRType::Int32));
8023 case uint16_t(Op::I32Sub):
8024 CHECK(EmitSub(f, ValType::I32, MIRType::Int32));
8025 case uint16_t(Op::I32Mul):
8026 CHECK(EmitMul(f, ValType::I32, MIRType::Int32));
8027 case uint16_t(Op::I32DivS):
8028 case uint16_t(Op::I32DivU):
8029 CHECK(
8030 EmitDiv(f, ValType::I32, MIRType::Int32, Op(op.b0) == Op::I32DivU));
8031 case uint16_t(Op::I32RemS):
8032 case uint16_t(Op::I32RemU):
8033 CHECK(
8034 EmitRem(f, ValType::I32, MIRType::Int32, Op(op.b0) == Op::I32RemU));
8035 case uint16_t(Op::I32And):
8036 CHECK(EmitBitwiseAndOrXor(f, ValType::I32, MIRType::Int32,
8037 MWasmBinaryBitwise::SubOpcode::And));
8038 case uint16_t(Op::I32Or):
8039 CHECK(EmitBitwiseAndOrXor(f, ValType::I32, MIRType::Int32,
8040 MWasmBinaryBitwise::SubOpcode::Or));
8041 case uint16_t(Op::I32Xor):
8042 CHECK(EmitBitwiseAndOrXor(f, ValType::I32, MIRType::Int32,
8043 MWasmBinaryBitwise::SubOpcode::Xor));
8044 case uint16_t(Op::I32Shl):
8045 CHECK(EmitShift<MLsh>(f, ValType::I32, MIRType::Int32));
8046 case uint16_t(Op::I32ShrS):
8047 CHECK(EmitShift<MRsh>(f, ValType::I32, MIRType::Int32));
8048 case uint16_t(Op::I32ShrU):
8049 CHECK(EmitUrsh(f, ValType::I32, MIRType::Int32));
8050 case uint16_t(Op::I32Rotl):
8051 case uint16_t(Op::I32Rotr):
8052 CHECK(EmitRotate(f, ValType::I32, Op(op.b0) == Op::I32Rotl));
8053 case uint16_t(Op::I64Clz):
8054 CHECK(EmitUnaryWithType<MClz>(f, ValType::I64, MIRType::Int64));
8055 case uint16_t(Op::I64Ctz):
8056 CHECK(EmitUnaryWithType<MCtz>(f, ValType::I64, MIRType::Int64));
8057 case uint16_t(Op::I64Popcnt):
8058 CHECK(EmitUnaryWithType<MPopcnt>(f, ValType::I64, MIRType::Int64));
8059 case uint16_t(Op::I64Add):
8060 CHECK(EmitAdd(f, ValType::I64, MIRType::Int64));
8061 case uint16_t(Op::I64Sub):
8062 CHECK(EmitSub(f, ValType::I64, MIRType::Int64));
8063 case uint16_t(Op::I64Mul):
8064 CHECK(EmitMul(f, ValType::I64, MIRType::Int64));
8065 case uint16_t(Op::I64DivS):
8066 case uint16_t(Op::I64DivU):
8067 CHECK(
8068 EmitDiv(f, ValType::I64, MIRType::Int64, Op(op.b0) == Op::I64DivU));
8069 case uint16_t(Op::I64RemS):
8070 case uint16_t(Op::I64RemU):
8071 CHECK(
8072 EmitRem(f, ValType::I64, MIRType::Int64, Op(op.b0) == Op::I64RemU));
8073 case uint16_t(Op::I64And):
8074 CHECK(EmitBitwiseAndOrXor(f, ValType::I64, MIRType::Int64,
8075 MWasmBinaryBitwise::SubOpcode::And));
8076 case uint16_t(Op::I64Or):
8077 CHECK(EmitBitwiseAndOrXor(f, ValType::I64, MIRType::Int64,
8078 MWasmBinaryBitwise::SubOpcode::Or));
8079 case uint16_t(Op::I64Xor):
8080 CHECK(EmitBitwiseAndOrXor(f, ValType::I64, MIRType::Int64,
8081 MWasmBinaryBitwise::SubOpcode::Xor));
8082 case uint16_t(Op::I64Shl):
8083 CHECK(EmitShift<MLsh>(f, ValType::I64, MIRType::Int64));
8084 case uint16_t(Op::I64ShrS):
8085 CHECK(EmitShift<MRsh>(f, ValType::I64, MIRType::Int64));
8086 case uint16_t(Op::I64ShrU):
8087 CHECK(EmitUrsh(f, ValType::I64, MIRType::Int64));
8088 case uint16_t(Op::I64Rotl):
8089 case uint16_t(Op::I64Rotr):
8090 CHECK(EmitRotate(f, ValType::I64, Op(op.b0) == Op::I64Rotl));
8091 case uint16_t(Op::F32Abs):
8092 CHECK(EmitUnaryWithType<MAbs>(f, ValType::F32, MIRType::Float32));
8093 case uint16_t(Op::F32Neg):
8094 CHECK(EmitUnaryWithType<MWasmNeg>(f, ValType::F32, MIRType::Float32));
8095 case uint16_t(Op::F32Ceil):
8096 CHECK(EmitUnaryMathBuiltinCall(f, SASigCeilF));
8097 case uint16_t(Op::F32Floor):
8098 CHECK(EmitUnaryMathBuiltinCall(f, SASigFloorF));
8099 case uint16_t(Op::F32Trunc):
8100 CHECK(EmitUnaryMathBuiltinCall(f, SASigTruncF));
8101 case uint16_t(Op::F32Nearest):
8102 CHECK(EmitUnaryMathBuiltinCall(f, SASigNearbyIntF));
8103 case uint16_t(Op::F32Sqrt):
8104 CHECK(EmitUnaryWithType<MSqrt>(f, ValType::F32, MIRType::Float32));
8105 case uint16_t(Op::F32Add):
8106 CHECK(EmitAdd(f, ValType::F32, MIRType::Float32));
8107 case uint16_t(Op::F32Sub):
8108 CHECK(EmitSub(f, ValType::F32, MIRType::Float32));
8109 case uint16_t(Op::F32Mul):
8110 CHECK(EmitMul(f, ValType::F32, MIRType::Float32));
8111 case uint16_t(Op::F32Div):
8112 CHECK(EmitDiv(f, ValType::F32, MIRType::Float32,
8113 /* isUnsigned = */ false));
8114 case uint16_t(Op::F32Min):
8115 case uint16_t(Op::F32Max):
8116 CHECK(EmitMinMax(f, ValType::F32, MIRType::Float32,
8117 Op(op.b0) == Op::F32Max));
8118 case uint16_t(Op::F32CopySign):
8119 CHECK(EmitCopySign(f, ValType::F32));
8120 case uint16_t(Op::F64Abs):
8121 CHECK(EmitUnaryWithType<MAbs>(f, ValType::F64, MIRType::Double));
8122 case uint16_t(Op::F64Neg):
8123 CHECK(EmitUnaryWithType<MWasmNeg>(f, ValType::F64, MIRType::Double));
8124 case uint16_t(Op::F64Ceil):
8125 CHECK(EmitUnaryMathBuiltinCall(f, SASigCeilD));
8126 case uint16_t(Op::F64Floor):
8127 CHECK(EmitUnaryMathBuiltinCall(f, SASigFloorD));
8128 case uint16_t(Op::F64Trunc):
8129 CHECK(EmitUnaryMathBuiltinCall(f, SASigTruncD));
8130 case uint16_t(Op::F64Nearest):
8131 CHECK(EmitUnaryMathBuiltinCall(f, SASigNearbyIntD));
8132 case uint16_t(Op::F64Sqrt):
8133 CHECK(EmitUnaryWithType<MSqrt>(f, ValType::F64, MIRType::Double));
8134 case uint16_t(Op::F64Add):
8135 CHECK(EmitAdd(f, ValType::F64, MIRType::Double));
8136 case uint16_t(Op::F64Sub):
8137 CHECK(EmitSub(f, ValType::F64, MIRType::Double));
8138 case uint16_t(Op::F64Mul):
8139 CHECK(EmitMul(f, ValType::F64, MIRType::Double));
8140 case uint16_t(Op::F64Div):
8141 CHECK(EmitDiv(f, ValType::F64, MIRType::Double,
8142 /* isUnsigned = */ false));
8143 case uint16_t(Op::F64Min):
8144 case uint16_t(Op::F64Max):
8145 CHECK(EmitMinMax(f, ValType::F64, MIRType::Double,
8146 Op(op.b0) == Op::F64Max));
8147 case uint16_t(Op::F64CopySign):
8148 CHECK(EmitCopySign(f, ValType::F64));
8150 // Conversions
8151 case uint16_t(Op::I32WrapI64):
8152 CHECK(EmitConversion<MWrapInt64ToInt32>(f, ValType::I64, ValType::I32));
8153 case uint16_t(Op::I32TruncF32S):
8154 case uint16_t(Op::I32TruncF32U):
8155 CHECK(EmitTruncate(f, ValType::F32, ValType::I32,
8156 Op(op.b0) == Op::I32TruncF32U, false));
8157 case uint16_t(Op::I32TruncF64S):
8158 case uint16_t(Op::I32TruncF64U):
8159 CHECK(EmitTruncate(f, ValType::F64, ValType::I32,
8160 Op(op.b0) == Op::I32TruncF64U, false));
8161 case uint16_t(Op::I64ExtendI32S):
8162 case uint16_t(Op::I64ExtendI32U):
8163 CHECK(EmitExtendI32(f, Op(op.b0) == Op::I64ExtendI32U));
8164 case uint16_t(Op::I64TruncF32S):
8165 case uint16_t(Op::I64TruncF32U):
8166 CHECK(EmitTruncate(f, ValType::F32, ValType::I64,
8167 Op(op.b0) == Op::I64TruncF32U, false));
8168 case uint16_t(Op::I64TruncF64S):
8169 case uint16_t(Op::I64TruncF64U):
8170 CHECK(EmitTruncate(f, ValType::F64, ValType::I64,
8171 Op(op.b0) == Op::I64TruncF64U, false));
8172 case uint16_t(Op::F32ConvertI32S):
8173 CHECK(EmitConversion<MToFloat32>(f, ValType::I32, ValType::F32));
8174 case uint16_t(Op::F32ConvertI32U):
8175 CHECK(EmitConversion<MWasmUnsignedToFloat32>(f, ValType::I32,
8176 ValType::F32));
8177 case uint16_t(Op::F32ConvertI64S):
8178 case uint16_t(Op::F32ConvertI64U):
8179 CHECK(EmitConvertI64ToFloatingPoint(f, ValType::F32, MIRType::Float32,
8180 Op(op.b0) == Op::F32ConvertI64U));
8181 case uint16_t(Op::F32DemoteF64):
8182 CHECK(EmitConversion<MToFloat32>(f, ValType::F64, ValType::F32));
8183 case uint16_t(Op::F64ConvertI32S):
8184 CHECK(EmitConversion<MToDouble>(f, ValType::I32, ValType::F64));
8185 case uint16_t(Op::F64ConvertI32U):
8186 CHECK(EmitConversion<MWasmUnsignedToDouble>(f, ValType::I32,
8187 ValType::F64));
8188 case uint16_t(Op::F64ConvertI64S):
8189 case uint16_t(Op::F64ConvertI64U):
8190 CHECK(EmitConvertI64ToFloatingPoint(f, ValType::F64, MIRType::Double,
8191 Op(op.b0) == Op::F64ConvertI64U));
8192 case uint16_t(Op::F64PromoteF32):
8193 CHECK(EmitConversion<MToDouble>(f, ValType::F32, ValType::F64));
8195 // Reinterpretations
8196 case uint16_t(Op::I32ReinterpretF32):
8197 CHECK(EmitReinterpret(f, ValType::I32, ValType::F32, MIRType::Int32));
8198 case uint16_t(Op::I64ReinterpretF64):
8199 CHECK(EmitReinterpret(f, ValType::I64, ValType::F64, MIRType::Int64));
8200 case uint16_t(Op::F32ReinterpretI32):
8201 CHECK(EmitReinterpret(f, ValType::F32, ValType::I32, MIRType::Float32));
8202 case uint16_t(Op::F64ReinterpretI64):
8203 CHECK(EmitReinterpret(f, ValType::F64, ValType::I64, MIRType::Double));
8205 #ifdef ENABLE_WASM_GC
8206 case uint16_t(Op::RefEq):
8207 if (!f.moduleEnv().gcEnabled()) {
8208 return f.iter().unrecognizedOpcode(&op);
8210 CHECK(EmitComparison(f, RefType::eq(), JSOp::Eq,
8211 MCompare::Compare_WasmAnyRef));
8212 #endif
8213 case uint16_t(Op::RefFunc):
8214 CHECK(EmitRefFunc(f));
8215 case uint16_t(Op::RefNull):
8216 CHECK(EmitRefNull(f));
8217 case uint16_t(Op::RefIsNull):
8218 CHECK(EmitRefIsNull(f));
8220 // Sign extensions
8221 case uint16_t(Op::I32Extend8S):
8222 CHECK(EmitSignExtend(f, 1, 4));
8223 case uint16_t(Op::I32Extend16S):
8224 CHECK(EmitSignExtend(f, 2, 4));
8225 case uint16_t(Op::I64Extend8S):
8226 CHECK(EmitSignExtend(f, 1, 8));
8227 case uint16_t(Op::I64Extend16S):
8228 CHECK(EmitSignExtend(f, 2, 8));
8229 case uint16_t(Op::I64Extend32S):
8230 CHECK(EmitSignExtend(f, 4, 8));
8232 #ifdef ENABLE_WASM_TAIL_CALLS
8233 case uint16_t(Op::ReturnCall): {
8234 if (!f.moduleEnv().tailCallsEnabled()) {
8235 return f.iter().unrecognizedOpcode(&op);
8237 CHECK(EmitReturnCall(f));
8239 case uint16_t(Op::ReturnCallIndirect): {
8240 if (!f.moduleEnv().tailCallsEnabled()) {
8241 return f.iter().unrecognizedOpcode(&op);
8243 CHECK(EmitReturnCallIndirect(f));
8245 #endif
8247 #ifdef ENABLE_WASM_FUNCTION_REFERENCES
8248 case uint16_t(Op::RefAsNonNull):
8249 if (!f.moduleEnv().functionReferencesEnabled()) {
8250 return f.iter().unrecognizedOpcode(&op);
8252 CHECK(EmitRefAsNonNull(f));
8253 case uint16_t(Op::BrOnNull): {
8254 if (!f.moduleEnv().functionReferencesEnabled()) {
8255 return f.iter().unrecognizedOpcode(&op);
8257 CHECK(EmitBrOnNull(f));
8259 case uint16_t(Op::BrOnNonNull): {
8260 if (!f.moduleEnv().functionReferencesEnabled()) {
8261 return f.iter().unrecognizedOpcode(&op);
8263 CHECK(EmitBrOnNonNull(f));
8265 case uint16_t(Op::CallRef): {
8266 if (!f.moduleEnv().functionReferencesEnabled()) {
8267 return f.iter().unrecognizedOpcode(&op);
8269 CHECK(EmitCallRef(f));
8271 #endif
8273 #if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_FUNCTION_REFERENCES)
8274 case uint16_t(Op::ReturnCallRef): {
8275 if (!f.moduleEnv().functionReferencesEnabled() ||
8276 !f.moduleEnv().tailCallsEnabled()) {
8277 return f.iter().unrecognizedOpcode(&op);
8279 CHECK(EmitReturnCallRef(f));
8281 #endif
8283 // Gc operations
8284 #ifdef ENABLE_WASM_GC
8285 case uint16_t(Op::GcPrefix): {
8286 if (!f.moduleEnv().gcEnabled()) {
8287 return f.iter().unrecognizedOpcode(&op);
8289 switch (op.b1) {
8290 case uint32_t(GcOp::StructNew):
8291 CHECK(EmitStructNew(f));
8292 case uint32_t(GcOp::StructNewDefault):
8293 CHECK(EmitStructNewDefault(f));
8294 case uint32_t(GcOp::StructSet):
8295 CHECK(EmitStructSet(f));
8296 case uint32_t(GcOp::StructGet):
8297 CHECK(EmitStructGet(f, FieldWideningOp::None));
8298 case uint32_t(GcOp::StructGetS):
8299 CHECK(EmitStructGet(f, FieldWideningOp::Signed));
8300 case uint32_t(GcOp::StructGetU):
8301 CHECK(EmitStructGet(f, FieldWideningOp::Unsigned));
8302 case uint32_t(GcOp::ArrayNew):
8303 CHECK(EmitArrayNew(f));
8304 case uint32_t(GcOp::ArrayNewDefault):
8305 CHECK(EmitArrayNewDefault(f));
8306 case uint32_t(GcOp::ArrayNewFixed):
8307 CHECK(EmitArrayNewFixed(f));
8308 case uint32_t(GcOp::ArrayNewData):
8309 CHECK(EmitArrayNewData(f));
8310 case uint32_t(GcOp::ArrayNewElem):
8311 CHECK(EmitArrayNewElem(f));
8312 case uint32_t(GcOp::ArrayInitData):
8313 CHECK(EmitArrayInitData(f));
8314 case uint32_t(GcOp::ArrayInitElem):
8315 CHECK(EmitArrayInitElem(f));
8316 case uint32_t(GcOp::ArraySet):
8317 CHECK(EmitArraySet(f));
8318 case uint32_t(GcOp::ArrayGet):
8319 CHECK(EmitArrayGet(f, FieldWideningOp::None));
8320 case uint32_t(GcOp::ArrayGetS):
8321 CHECK(EmitArrayGet(f, FieldWideningOp::Signed));
8322 case uint32_t(GcOp::ArrayGetU):
8323 CHECK(EmitArrayGet(f, FieldWideningOp::Unsigned));
8324 case uint32_t(GcOp::ArrayLen):
8325 CHECK(EmitArrayLen(f));
8326 case uint32_t(GcOp::ArrayCopy):
8327 CHECK(EmitArrayCopy(f));
8328 case uint32_t(GcOp::ArrayFill):
8329 CHECK(EmitArrayFill(f));
8330 case uint32_t(GcOp::RefI31):
8331 CHECK(EmitRefI31(f));
8332 case uint32_t(GcOp::I31GetS):
8333 CHECK(EmitI31Get(f, FieldWideningOp::Signed));
8334 case uint32_t(GcOp::I31GetU):
8335 CHECK(EmitI31Get(f, FieldWideningOp::Unsigned));
8336 case uint32_t(GcOp::BrOnCast):
8337 CHECK(EmitBrOnCast(f, /*onSuccess=*/true));
8338 case uint32_t(GcOp::BrOnCastFail):
8339 CHECK(EmitBrOnCast(f, /*onSuccess=*/false));
8340 case uint32_t(GcOp::RefTest):
8341 CHECK(EmitRefTest(f, /*nullable=*/false));
8342 case uint32_t(GcOp::RefTestNull):
8343 CHECK(EmitRefTest(f, /*nullable=*/true));
8344 case uint32_t(GcOp::RefCast):
8345 CHECK(EmitRefCast(f, /*nullable=*/false));
8346 case uint32_t(GcOp::RefCastNull):
8347 CHECK(EmitRefCast(f, /*nullable=*/true));
8348 case uint32_t(GcOp::AnyConvertExtern):
8349 CHECK(EmitAnyConvertExtern(f));
8350 case uint32_t(GcOp::ExternConvertAny):
8351 CHECK(EmitExternConvertAny(f));
8352 default:
8353 return f.iter().unrecognizedOpcode(&op);
8354 } // switch (op.b1)
8355 break;
8357 #endif
8359 // SIMD operations
8360 #ifdef ENABLE_WASM_SIMD
8361 case uint16_t(Op::SimdPrefix): {
8362 if (!f.moduleEnv().simdAvailable()) {
8363 return f.iter().unrecognizedOpcode(&op);
8365 switch (op.b1) {
8366 case uint32_t(SimdOp::V128Const):
8367 CHECK(EmitConstSimd128(f));
8368 case uint32_t(SimdOp::V128Load):
8369 CHECK(EmitLoad(f, ValType::V128, Scalar::Simd128));
8370 case uint32_t(SimdOp::V128Store):
8371 CHECK(EmitStore(f, ValType::V128, Scalar::Simd128));
8372 case uint32_t(SimdOp::V128And):
8373 case uint32_t(SimdOp::V128Or):
8374 case uint32_t(SimdOp::V128Xor):
8375 case uint32_t(SimdOp::I8x16AvgrU):
8376 case uint32_t(SimdOp::I16x8AvgrU):
8377 case uint32_t(SimdOp::I8x16Add):
8378 case uint32_t(SimdOp::I8x16AddSatS):
8379 case uint32_t(SimdOp::I8x16AddSatU):
8380 case uint32_t(SimdOp::I8x16MinS):
8381 case uint32_t(SimdOp::I8x16MinU):
8382 case uint32_t(SimdOp::I8x16MaxS):
8383 case uint32_t(SimdOp::I8x16MaxU):
8384 case uint32_t(SimdOp::I16x8Add):
8385 case uint32_t(SimdOp::I16x8AddSatS):
8386 case uint32_t(SimdOp::I16x8AddSatU):
8387 case uint32_t(SimdOp::I16x8Mul):
8388 case uint32_t(SimdOp::I16x8MinS):
8389 case uint32_t(SimdOp::I16x8MinU):
8390 case uint32_t(SimdOp::I16x8MaxS):
8391 case uint32_t(SimdOp::I16x8MaxU):
8392 case uint32_t(SimdOp::I32x4Add):
8393 case uint32_t(SimdOp::I32x4Mul):
8394 case uint32_t(SimdOp::I32x4MinS):
8395 case uint32_t(SimdOp::I32x4MinU):
8396 case uint32_t(SimdOp::I32x4MaxS):
8397 case uint32_t(SimdOp::I32x4MaxU):
8398 case uint32_t(SimdOp::I64x2Add):
8399 case uint32_t(SimdOp::I64x2Mul):
8400 case uint32_t(SimdOp::F32x4Add):
8401 case uint32_t(SimdOp::F32x4Mul):
8402 case uint32_t(SimdOp::F32x4Min):
8403 case uint32_t(SimdOp::F32x4Max):
8404 case uint32_t(SimdOp::F64x2Add):
8405 case uint32_t(SimdOp::F64x2Mul):
8406 case uint32_t(SimdOp::F64x2Min):
8407 case uint32_t(SimdOp::F64x2Max):
8408 case uint32_t(SimdOp::I8x16Eq):
8409 case uint32_t(SimdOp::I8x16Ne):
8410 case uint32_t(SimdOp::I16x8Eq):
8411 case uint32_t(SimdOp::I16x8Ne):
8412 case uint32_t(SimdOp::I32x4Eq):
8413 case uint32_t(SimdOp::I32x4Ne):
8414 case uint32_t(SimdOp::I64x2Eq):
8415 case uint32_t(SimdOp::I64x2Ne):
8416 case uint32_t(SimdOp::F32x4Eq):
8417 case uint32_t(SimdOp::F32x4Ne):
8418 case uint32_t(SimdOp::F64x2Eq):
8419 case uint32_t(SimdOp::F64x2Ne):
8420 case uint32_t(SimdOp::I32x4DotI16x8S):
8421 case uint32_t(SimdOp::I16x8ExtmulLowI8x16S):
8422 case uint32_t(SimdOp::I16x8ExtmulHighI8x16S):
8423 case uint32_t(SimdOp::I16x8ExtmulLowI8x16U):
8424 case uint32_t(SimdOp::I16x8ExtmulHighI8x16U):
8425 case uint32_t(SimdOp::I32x4ExtmulLowI16x8S):
8426 case uint32_t(SimdOp::I32x4ExtmulHighI16x8S):
8427 case uint32_t(SimdOp::I32x4ExtmulLowI16x8U):
8428 case uint32_t(SimdOp::I32x4ExtmulHighI16x8U):
8429 case uint32_t(SimdOp::I64x2ExtmulLowI32x4S):
8430 case uint32_t(SimdOp::I64x2ExtmulHighI32x4S):
8431 case uint32_t(SimdOp::I64x2ExtmulLowI32x4U):
8432 case uint32_t(SimdOp::I64x2ExtmulHighI32x4U):
8433 case uint32_t(SimdOp::I16x8Q15MulrSatS):
8434 CHECK(EmitBinarySimd128(f, /* commutative= */ true, SimdOp(op.b1)));
8435 case uint32_t(SimdOp::V128AndNot):
8436 case uint32_t(SimdOp::I8x16Sub):
8437 case uint32_t(SimdOp::I8x16SubSatS):
8438 case uint32_t(SimdOp::I8x16SubSatU):
8439 case uint32_t(SimdOp::I16x8Sub):
8440 case uint32_t(SimdOp::I16x8SubSatS):
8441 case uint32_t(SimdOp::I16x8SubSatU):
8442 case uint32_t(SimdOp::I32x4Sub):
8443 case uint32_t(SimdOp::I64x2Sub):
8444 case uint32_t(SimdOp::F32x4Sub):
8445 case uint32_t(SimdOp::F32x4Div):
8446 case uint32_t(SimdOp::F64x2Sub):
8447 case uint32_t(SimdOp::F64x2Div):
8448 case uint32_t(SimdOp::I8x16NarrowI16x8S):
8449 case uint32_t(SimdOp::I8x16NarrowI16x8U):
8450 case uint32_t(SimdOp::I16x8NarrowI32x4S):
8451 case uint32_t(SimdOp::I16x8NarrowI32x4U):
8452 case uint32_t(SimdOp::I8x16LtS):
8453 case uint32_t(SimdOp::I8x16LtU):
8454 case uint32_t(SimdOp::I8x16GtS):
8455 case uint32_t(SimdOp::I8x16GtU):
8456 case uint32_t(SimdOp::I8x16LeS):
8457 case uint32_t(SimdOp::I8x16LeU):
8458 case uint32_t(SimdOp::I8x16GeS):
8459 case uint32_t(SimdOp::I8x16GeU):
8460 case uint32_t(SimdOp::I16x8LtS):
8461 case uint32_t(SimdOp::I16x8LtU):
8462 case uint32_t(SimdOp::I16x8GtS):
8463 case uint32_t(SimdOp::I16x8GtU):
8464 case uint32_t(SimdOp::I16x8LeS):
8465 case uint32_t(SimdOp::I16x8LeU):
8466 case uint32_t(SimdOp::I16x8GeS):
8467 case uint32_t(SimdOp::I16x8GeU):
8468 case uint32_t(SimdOp::I32x4LtS):
8469 case uint32_t(SimdOp::I32x4LtU):
8470 case uint32_t(SimdOp::I32x4GtS):
8471 case uint32_t(SimdOp::I32x4GtU):
8472 case uint32_t(SimdOp::I32x4LeS):
8473 case uint32_t(SimdOp::I32x4LeU):
8474 case uint32_t(SimdOp::I32x4GeS):
8475 case uint32_t(SimdOp::I32x4GeU):
8476 case uint32_t(SimdOp::I64x2LtS):
8477 case uint32_t(SimdOp::I64x2GtS):
8478 case uint32_t(SimdOp::I64x2LeS):
8479 case uint32_t(SimdOp::I64x2GeS):
8480 case uint32_t(SimdOp::F32x4Lt):
8481 case uint32_t(SimdOp::F32x4Gt):
8482 case uint32_t(SimdOp::F32x4Le):
8483 case uint32_t(SimdOp::F32x4Ge):
8484 case uint32_t(SimdOp::F64x2Lt):
8485 case uint32_t(SimdOp::F64x2Gt):
8486 case uint32_t(SimdOp::F64x2Le):
8487 case uint32_t(SimdOp::F64x2Ge):
8488 case uint32_t(SimdOp::I8x16Swizzle):
8489 case uint32_t(SimdOp::F32x4PMax):
8490 case uint32_t(SimdOp::F32x4PMin):
8491 case uint32_t(SimdOp::F64x2PMax):
8492 case uint32_t(SimdOp::F64x2PMin):
8493 CHECK(
8494 EmitBinarySimd128(f, /* commutative= */ false, SimdOp(op.b1)));
8495 case uint32_t(SimdOp::I8x16Splat):
8496 case uint32_t(SimdOp::I16x8Splat):
8497 case uint32_t(SimdOp::I32x4Splat):
8498 CHECK(EmitSplatSimd128(f, ValType::I32, SimdOp(op.b1)));
8499 case uint32_t(SimdOp::I64x2Splat):
8500 CHECK(EmitSplatSimd128(f, ValType::I64, SimdOp(op.b1)));
8501 case uint32_t(SimdOp::F32x4Splat):
8502 CHECK(EmitSplatSimd128(f, ValType::F32, SimdOp(op.b1)));
8503 case uint32_t(SimdOp::F64x2Splat):
8504 CHECK(EmitSplatSimd128(f, ValType::F64, SimdOp(op.b1)));
8505 case uint32_t(SimdOp::I8x16Neg):
8506 case uint32_t(SimdOp::I16x8Neg):
8507 case uint32_t(SimdOp::I16x8ExtendLowI8x16S):
8508 case uint32_t(SimdOp::I16x8ExtendHighI8x16S):
8509 case uint32_t(SimdOp::I16x8ExtendLowI8x16U):
8510 case uint32_t(SimdOp::I16x8ExtendHighI8x16U):
8511 case uint32_t(SimdOp::I32x4Neg):
8512 case uint32_t(SimdOp::I32x4ExtendLowI16x8S):
8513 case uint32_t(SimdOp::I32x4ExtendHighI16x8S):
8514 case uint32_t(SimdOp::I32x4ExtendLowI16x8U):
8515 case uint32_t(SimdOp::I32x4ExtendHighI16x8U):
8516 case uint32_t(SimdOp::I32x4TruncSatF32x4S):
8517 case uint32_t(SimdOp::I32x4TruncSatF32x4U):
8518 case uint32_t(SimdOp::I64x2Neg):
8519 case uint32_t(SimdOp::I64x2ExtendLowI32x4S):
8520 case uint32_t(SimdOp::I64x2ExtendHighI32x4S):
8521 case uint32_t(SimdOp::I64x2ExtendLowI32x4U):
8522 case uint32_t(SimdOp::I64x2ExtendHighI32x4U):
8523 case uint32_t(SimdOp::F32x4Abs):
8524 case uint32_t(SimdOp::F32x4Neg):
8525 case uint32_t(SimdOp::F32x4Sqrt):
8526 case uint32_t(SimdOp::F32x4ConvertI32x4S):
8527 case uint32_t(SimdOp::F32x4ConvertI32x4U):
8528 case uint32_t(SimdOp::F64x2Abs):
8529 case uint32_t(SimdOp::F64x2Neg):
8530 case uint32_t(SimdOp::F64x2Sqrt):
8531 case uint32_t(SimdOp::V128Not):
8532 case uint32_t(SimdOp::I8x16Popcnt):
8533 case uint32_t(SimdOp::I8x16Abs):
8534 case uint32_t(SimdOp::I16x8Abs):
8535 case uint32_t(SimdOp::I32x4Abs):
8536 case uint32_t(SimdOp::I64x2Abs):
8537 case uint32_t(SimdOp::F32x4Ceil):
8538 case uint32_t(SimdOp::F32x4Floor):
8539 case uint32_t(SimdOp::F32x4Trunc):
8540 case uint32_t(SimdOp::F32x4Nearest):
8541 case uint32_t(SimdOp::F64x2Ceil):
8542 case uint32_t(SimdOp::F64x2Floor):
8543 case uint32_t(SimdOp::F64x2Trunc):
8544 case uint32_t(SimdOp::F64x2Nearest):
8545 case uint32_t(SimdOp::F32x4DemoteF64x2Zero):
8546 case uint32_t(SimdOp::F64x2PromoteLowF32x4):
8547 case uint32_t(SimdOp::F64x2ConvertLowI32x4S):
8548 case uint32_t(SimdOp::F64x2ConvertLowI32x4U):
8549 case uint32_t(SimdOp::I32x4TruncSatF64x2SZero):
8550 case uint32_t(SimdOp::I32x4TruncSatF64x2UZero):
8551 case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16S):
8552 case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16U):
8553 case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8S):
8554 case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8U):
8555 CHECK(EmitUnarySimd128(f, SimdOp(op.b1)));
8556 case uint32_t(SimdOp::V128AnyTrue):
8557 case uint32_t(SimdOp::I8x16AllTrue):
8558 case uint32_t(SimdOp::I16x8AllTrue):
8559 case uint32_t(SimdOp::I32x4AllTrue):
8560 case uint32_t(SimdOp::I64x2AllTrue):
8561 case uint32_t(SimdOp::I8x16Bitmask):
8562 case uint32_t(SimdOp::I16x8Bitmask):
8563 case uint32_t(SimdOp::I32x4Bitmask):
8564 case uint32_t(SimdOp::I64x2Bitmask):
8565 CHECK(EmitReduceSimd128(f, SimdOp(op.b1)));
8566 case uint32_t(SimdOp::I8x16Shl):
8567 case uint32_t(SimdOp::I8x16ShrS):
8568 case uint32_t(SimdOp::I8x16ShrU):
8569 case uint32_t(SimdOp::I16x8Shl):
8570 case uint32_t(SimdOp::I16x8ShrS):
8571 case uint32_t(SimdOp::I16x8ShrU):
8572 case uint32_t(SimdOp::I32x4Shl):
8573 case uint32_t(SimdOp::I32x4ShrS):
8574 case uint32_t(SimdOp::I32x4ShrU):
8575 case uint32_t(SimdOp::I64x2Shl):
8576 case uint32_t(SimdOp::I64x2ShrS):
8577 case uint32_t(SimdOp::I64x2ShrU):
8578 CHECK(EmitShiftSimd128(f, SimdOp(op.b1)));
8579 case uint32_t(SimdOp::I8x16ExtractLaneS):
8580 case uint32_t(SimdOp::I8x16ExtractLaneU):
8581 CHECK(EmitExtractLaneSimd128(f, ValType::I32, 16, SimdOp(op.b1)));
8582 case uint32_t(SimdOp::I16x8ExtractLaneS):
8583 case uint32_t(SimdOp::I16x8ExtractLaneU):
8584 CHECK(EmitExtractLaneSimd128(f, ValType::I32, 8, SimdOp(op.b1)));
8585 case uint32_t(SimdOp::I32x4ExtractLane):
8586 CHECK(EmitExtractLaneSimd128(f, ValType::I32, 4, SimdOp(op.b1)));
8587 case uint32_t(SimdOp::I64x2ExtractLane):
8588 CHECK(EmitExtractLaneSimd128(f, ValType::I64, 2, SimdOp(op.b1)));
8589 case uint32_t(SimdOp::F32x4ExtractLane):
8590 CHECK(EmitExtractLaneSimd128(f, ValType::F32, 4, SimdOp(op.b1)));
8591 case uint32_t(SimdOp::F64x2ExtractLane):
8592 CHECK(EmitExtractLaneSimd128(f, ValType::F64, 2, SimdOp(op.b1)));
8593 case uint32_t(SimdOp::I8x16ReplaceLane):
8594 CHECK(EmitReplaceLaneSimd128(f, ValType::I32, 16, SimdOp(op.b1)));
8595 case uint32_t(SimdOp::I16x8ReplaceLane):
8596 CHECK(EmitReplaceLaneSimd128(f, ValType::I32, 8, SimdOp(op.b1)));
8597 case uint32_t(SimdOp::I32x4ReplaceLane):
8598 CHECK(EmitReplaceLaneSimd128(f, ValType::I32, 4, SimdOp(op.b1)));
8599 case uint32_t(SimdOp::I64x2ReplaceLane):
8600 CHECK(EmitReplaceLaneSimd128(f, ValType::I64, 2, SimdOp(op.b1)));
8601 case uint32_t(SimdOp::F32x4ReplaceLane):
8602 CHECK(EmitReplaceLaneSimd128(f, ValType::F32, 4, SimdOp(op.b1)));
8603 case uint32_t(SimdOp::F64x2ReplaceLane):
8604 CHECK(EmitReplaceLaneSimd128(f, ValType::F64, 2, SimdOp(op.b1)));
8605 case uint32_t(SimdOp::V128Bitselect):
8606 CHECK(EmitTernarySimd128(f, SimdOp(op.b1)));
8607 case uint32_t(SimdOp::I8x16Shuffle):
8608 CHECK(EmitShuffleSimd128(f));
8609 case uint32_t(SimdOp::V128Load8Splat):
8610 CHECK(EmitLoadSplatSimd128(f, Scalar::Uint8, SimdOp::I8x16Splat));
8611 case uint32_t(SimdOp::V128Load16Splat):
8612 CHECK(EmitLoadSplatSimd128(f, Scalar::Uint16, SimdOp::I16x8Splat));
8613 case uint32_t(SimdOp::V128Load32Splat):
8614 CHECK(EmitLoadSplatSimd128(f, Scalar::Float32, SimdOp::I32x4Splat));
8615 case uint32_t(SimdOp::V128Load64Splat):
8616 CHECK(EmitLoadSplatSimd128(f, Scalar::Float64, SimdOp::I64x2Splat));
8617 case uint32_t(SimdOp::V128Load8x8S):
8618 case uint32_t(SimdOp::V128Load8x8U):
8619 case uint32_t(SimdOp::V128Load16x4S):
8620 case uint32_t(SimdOp::V128Load16x4U):
8621 case uint32_t(SimdOp::V128Load32x2S):
8622 case uint32_t(SimdOp::V128Load32x2U):
8623 CHECK(EmitLoadExtendSimd128(f, SimdOp(op.b1)));
8624 case uint32_t(SimdOp::V128Load32Zero):
8625 CHECK(EmitLoadZeroSimd128(f, Scalar::Float32, 4));
8626 case uint32_t(SimdOp::V128Load64Zero):
8627 CHECK(EmitLoadZeroSimd128(f, Scalar::Float64, 8));
8628 case uint32_t(SimdOp::V128Load8Lane):
8629 CHECK(EmitLoadLaneSimd128(f, 1));
8630 case uint32_t(SimdOp::V128Load16Lane):
8631 CHECK(EmitLoadLaneSimd128(f, 2));
8632 case uint32_t(SimdOp::V128Load32Lane):
8633 CHECK(EmitLoadLaneSimd128(f, 4));
8634 case uint32_t(SimdOp::V128Load64Lane):
8635 CHECK(EmitLoadLaneSimd128(f, 8));
8636 case uint32_t(SimdOp::V128Store8Lane):
8637 CHECK(EmitStoreLaneSimd128(f, 1));
8638 case uint32_t(SimdOp::V128Store16Lane):
8639 CHECK(EmitStoreLaneSimd128(f, 2));
8640 case uint32_t(SimdOp::V128Store32Lane):
8641 CHECK(EmitStoreLaneSimd128(f, 4));
8642 case uint32_t(SimdOp::V128Store64Lane):
8643 CHECK(EmitStoreLaneSimd128(f, 8));
8644 # ifdef ENABLE_WASM_RELAXED_SIMD
8645 case uint32_t(SimdOp::F32x4RelaxedMadd):
8646 case uint32_t(SimdOp::F32x4RelaxedNmadd):
8647 case uint32_t(SimdOp::F64x2RelaxedMadd):
8648 case uint32_t(SimdOp::F64x2RelaxedNmadd):
8649 case uint32_t(SimdOp::I8x16RelaxedLaneSelect):
8650 case uint32_t(SimdOp::I16x8RelaxedLaneSelect):
8651 case uint32_t(SimdOp::I32x4RelaxedLaneSelect):
8652 case uint32_t(SimdOp::I64x2RelaxedLaneSelect):
8653 case uint32_t(SimdOp::I32x4DotI8x16I7x16AddS): {
8654 if (!f.moduleEnv().v128RelaxedEnabled()) {
8655 return f.iter().unrecognizedOpcode(&op);
8657 CHECK(EmitTernarySimd128(f, SimdOp(op.b1)));
8659 case uint32_t(SimdOp::F32x4RelaxedMin):
8660 case uint32_t(SimdOp::F32x4RelaxedMax):
8661 case uint32_t(SimdOp::F64x2RelaxedMin):
8662 case uint32_t(SimdOp::F64x2RelaxedMax):
8663 case uint32_t(SimdOp::I16x8RelaxedQ15MulrS): {
8664 if (!f.moduleEnv().v128RelaxedEnabled()) {
8665 return f.iter().unrecognizedOpcode(&op);
8667 CHECK(EmitBinarySimd128(f, /* commutative= */ true, SimdOp(op.b1)));
8669 case uint32_t(SimdOp::I32x4RelaxedTruncF32x4S):
8670 case uint32_t(SimdOp::I32x4RelaxedTruncF32x4U):
8671 case uint32_t(SimdOp::I32x4RelaxedTruncF64x2SZero):
8672 case uint32_t(SimdOp::I32x4RelaxedTruncF64x2UZero): {
8673 if (!f.moduleEnv().v128RelaxedEnabled()) {
8674 return f.iter().unrecognizedOpcode(&op);
8676 CHECK(EmitUnarySimd128(f, SimdOp(op.b1)));
8678 case uint32_t(SimdOp::I8x16RelaxedSwizzle):
8679 case uint32_t(SimdOp::I16x8DotI8x16I7x16S): {
8680 if (!f.moduleEnv().v128RelaxedEnabled()) {
8681 return f.iter().unrecognizedOpcode(&op);
8683 CHECK(
8684 EmitBinarySimd128(f, /* commutative= */ false, SimdOp(op.b1)));
8686 # endif
8688 default:
8689 return f.iter().unrecognizedOpcode(&op);
8690 } // switch (op.b1)
8691 break;
8693 #endif
8695 // Miscellaneous operations
8696 case uint16_t(Op::MiscPrefix): {
8697 switch (op.b1) {
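// These are the saturating (non-trapping) truncations; the trailing `true`
// passed to EmitTruncate distinguishes them from the plain trapping forms in
// the main opcode switch, which pass `false`.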
8698 case uint32_t(MiscOp::I32TruncSatF32S):
8699 case uint32_t(MiscOp::I32TruncSatF32U):
8700 CHECK(EmitTruncate(f, ValType::F32, ValType::I32,
8701 MiscOp(op.b1) == MiscOp::I32TruncSatF32U, true));
8702 case uint32_t(MiscOp::I32TruncSatF64S):
8703 case uint32_t(MiscOp::I32TruncSatF64U):
8704 CHECK(EmitTruncate(f, ValType::F64, ValType::I32,
8705 MiscOp(op.b1) == MiscOp::I32TruncSatF64U, true));
8706 case uint32_t(MiscOp::I64TruncSatF32S):
8707 case uint32_t(MiscOp::I64TruncSatF32U):
8708 CHECK(EmitTruncate(f, ValType::F32, ValType::I64,
8709 MiscOp(op.b1) == MiscOp::I64TruncSatF32U, true));
8710 case uint32_t(MiscOp::I64TruncSatF64S):
8711 case uint32_t(MiscOp::I64TruncSatF64U):
8712 CHECK(EmitTruncate(f, ValType::F64, ValType::I64,
8713 MiscOp(op.b1) == MiscOp::I64TruncSatF64U, true));
8714 case uint32_t(MiscOp::MemoryCopy):
8715 CHECK(EmitMemCopy(f));
8716 case uint32_t(MiscOp::DataDrop):
8717 CHECK(EmitDataOrElemDrop(f, /*isData=*/true));
8718 case uint32_t(MiscOp::MemoryFill):
8719 CHECK(EmitMemFill(f));
8720 case uint32_t(MiscOp::MemoryInit):
8721 CHECK(EmitMemOrTableInit(f, /*isMem=*/true));
8722 case uint32_t(MiscOp::TableCopy):
8723 CHECK(EmitTableCopy(f));
8724 case uint32_t(MiscOp::ElemDrop):
8725 CHECK(EmitDataOrElemDrop(f, /*isData=*/false));
8726 case uint32_t(MiscOp::TableInit):
8727 CHECK(EmitMemOrTableInit(f, /*isMem=*/false));
8728 case uint32_t(MiscOp::TableFill):
8729 CHECK(EmitTableFill(f));
8730 #ifdef ENABLE_WASM_MEMORY_CONTROL
8731 case uint32_t(MiscOp::MemoryDiscard): {
8732 if (!f.moduleEnv().memoryControlEnabled()) {
8733 return f.iter().unrecognizedOpcode(&op);
8735 CHECK(EmitMemDiscard(f));
8737 #endif
8738 case uint32_t(MiscOp::TableGrow):
8739 CHECK(EmitTableGrow(f));
8740 case uint32_t(MiscOp::TableSize):
8741 CHECK(EmitTableSize(f));
8742 default:
8743 return f.iter().unrecognizedOpcode(&op);
8745 break;
8748 // Thread operations
8749 case uint16_t(Op::ThreadPrefix): {
8750 // Though thread ops can be used on nonshared memories, we make them
8751 // unavailable if shared memory has been disabled in the prefs, for
8752 // maximum predictability, safety, and consistency with JS.
8753 if (f.moduleEnv().sharedMemoryEnabled() == Shareable::False) {
8754 return f.iter().unrecognizedOpcode(&op);
8756 switch (op.b1) {
8757 case uint32_t(ThreadOp::Wake):
8758 CHECK(EmitWake(f));
8760 case uint32_t(ThreadOp::I32Wait):
8761 CHECK(EmitWait(f, ValType::I32, 4));
8762 case uint32_t(ThreadOp::I64Wait):
8763 CHECK(EmitWait(f, ValType::I64, 8));
8764 case uint32_t(ThreadOp::Fence):
8765 CHECK(EmitFence(f));
8767 case uint32_t(ThreadOp::I32AtomicLoad):
8768 CHECK(EmitAtomicLoad(f, ValType::I32, Scalar::Int32));
8769 case uint32_t(ThreadOp::I64AtomicLoad):
8770 CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Int64));
8771 case uint32_t(ThreadOp::I32AtomicLoad8U):
8772 CHECK(EmitAtomicLoad(f, ValType::I32, Scalar::Uint8));
8773 case uint32_t(ThreadOp::I32AtomicLoad16U):
8774 CHECK(EmitAtomicLoad(f, ValType::I32, Scalar::Uint16));
8775 case uint32_t(ThreadOp::I64AtomicLoad8U):
8776 CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Uint8));
8777 case uint32_t(ThreadOp::I64AtomicLoad16U):
8778 CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Uint16));
8779 case uint32_t(ThreadOp::I64AtomicLoad32U):
8780 CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Uint32));
8782 case uint32_t(ThreadOp::I32AtomicStore):
8783 CHECK(EmitAtomicStore(f, ValType::I32, Scalar::Int32));
8784 case uint32_t(ThreadOp::I64AtomicStore):
8785 CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Int64));
8786 case uint32_t(ThreadOp::I32AtomicStore8U):
8787 CHECK(EmitAtomicStore(f, ValType::I32, Scalar::Uint8));
8788 case uint32_t(ThreadOp::I32AtomicStore16U):
8789 CHECK(EmitAtomicStore(f, ValType::I32, Scalar::Uint16));
8790 case uint32_t(ThreadOp::I64AtomicStore8U):
8791 CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint8));
8792 case uint32_t(ThreadOp::I64AtomicStore16U):
8793 CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint16));
8794 case uint32_t(ThreadOp::I64AtomicStore32U):
8795 CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint32));
8797 case uint32_t(ThreadOp::I32AtomicAdd):
8798 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
8799 AtomicFetchAddOp));
8800 case uint32_t(ThreadOp::I64AtomicAdd):
8801 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
8802 AtomicFetchAddOp));
8803 case uint32_t(ThreadOp::I32AtomicAdd8U):
8804 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
8805 AtomicFetchAddOp));
8806 case uint32_t(ThreadOp::I32AtomicAdd16U):
8807 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
8808 AtomicFetchAddOp));
8809 case uint32_t(ThreadOp::I64AtomicAdd8U):
8810 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
8811 AtomicFetchAddOp));
8812 case uint32_t(ThreadOp::I64AtomicAdd16U):
8813 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
8814 AtomicFetchAddOp));
8815 case uint32_t(ThreadOp::I64AtomicAdd32U):
8816 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
8817 AtomicFetchAddOp));
8819 case uint32_t(ThreadOp::I32AtomicSub):
8820 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
8821 AtomicFetchSubOp));
8822 case uint32_t(ThreadOp::I64AtomicSub):
8823 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
8824 AtomicFetchSubOp));
8825 case uint32_t(ThreadOp::I32AtomicSub8U):
8826 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
8827 AtomicFetchSubOp));
8828 case uint32_t(ThreadOp::I32AtomicSub16U):
8829 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
8830 AtomicFetchSubOp));
8831 case uint32_t(ThreadOp::I64AtomicSub8U):
8832 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
8833 AtomicFetchSubOp));
8834 case uint32_t(ThreadOp::I64AtomicSub16U):
8835 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
8836 AtomicFetchSubOp));
8837 case uint32_t(ThreadOp::I64AtomicSub32U):
8838 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
8839 AtomicFetchSubOp));
8841 case uint32_t(ThreadOp::I32AtomicAnd):
8842 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
8843 AtomicFetchAndOp));
8844 case uint32_t(ThreadOp::I64AtomicAnd):
8845 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
8846 AtomicFetchAndOp));
8847 case uint32_t(ThreadOp::I32AtomicAnd8U):
8848 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
8849 AtomicFetchAndOp));
8850 case uint32_t(ThreadOp::I32AtomicAnd16U):
8851 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
8852 AtomicFetchAndOp));
8853 case uint32_t(ThreadOp::I64AtomicAnd8U):
8854 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
8855 AtomicFetchAndOp));
8856 case uint32_t(ThreadOp::I64AtomicAnd16U):
8857 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
8858 AtomicFetchAndOp));
8859 case uint32_t(ThreadOp::I64AtomicAnd32U):
8860 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
8861 AtomicFetchAndOp));
8863 case uint32_t(ThreadOp::I32AtomicOr):
8864 CHECK(
8865 EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicFetchOrOp));
8866 case uint32_t(ThreadOp::I64AtomicOr):
8867 CHECK(
8868 EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicFetchOrOp));
8869 case uint32_t(ThreadOp::I32AtomicOr8U):
8870 CHECK(
8871 EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicFetchOrOp));
8872 case uint32_t(ThreadOp::I32AtomicOr16U):
8873 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
8874 AtomicFetchOrOp));
8875 case uint32_t(ThreadOp::I64AtomicOr8U):
8876 CHECK(
8877 EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicFetchOrOp));
8878 case uint32_t(ThreadOp::I64AtomicOr16U):
8879 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
8880 AtomicFetchOrOp));
8881 case uint32_t(ThreadOp::I64AtomicOr32U):
8882 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
8883 AtomicFetchOrOp));
8885 case uint32_t(ThreadOp::I32AtomicXor):
8886 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
8887 AtomicFetchXorOp));
8888 case uint32_t(ThreadOp::I64AtomicXor):
8889 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
8890 AtomicFetchXorOp));
8891 case uint32_t(ThreadOp::I32AtomicXor8U):
8892 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
8893 AtomicFetchXorOp));
8894 case uint32_t(ThreadOp::I32AtomicXor16U):
8895 CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
8896 AtomicFetchXorOp));
8897 case uint32_t(ThreadOp::I64AtomicXor8U):
8898 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
8899 AtomicFetchXorOp));
8900 case uint32_t(ThreadOp::I64AtomicXor16U):
8901 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
8902 AtomicFetchXorOp));
8903 case uint32_t(ThreadOp::I64AtomicXor32U):
8904 CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
8905 AtomicFetchXorOp));
8907 case uint32_t(ThreadOp::I32AtomicXchg):
8908 CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Int32));
8909 case uint32_t(ThreadOp::I64AtomicXchg):
8910 CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Int64));
8911 case uint32_t(ThreadOp::I32AtomicXchg8U):
8912 CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Uint8));
8913 case uint32_t(ThreadOp::I32AtomicXchg16U):
8914 CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Uint16));
8915 case uint32_t(ThreadOp::I64AtomicXchg8U):
8916 CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Uint8));
8917 case uint32_t(ThreadOp::I64AtomicXchg16U):
8918 CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Uint16));
8919 case uint32_t(ThreadOp::I64AtomicXchg32U):
8920 CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Uint32));
8922 case uint32_t(ThreadOp::I32AtomicCmpXchg):
8923 CHECK(EmitAtomicCmpXchg(f, ValType::I32, Scalar::Int32));
8924 case uint32_t(ThreadOp::I64AtomicCmpXchg):
8925 CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Int64));
8926 case uint32_t(ThreadOp::I32AtomicCmpXchg8U):
8927 CHECK(EmitAtomicCmpXchg(f, ValType::I32, Scalar::Uint8));
8928 case uint32_t(ThreadOp::I32AtomicCmpXchg16U):
8929 CHECK(EmitAtomicCmpXchg(f, ValType::I32, Scalar::Uint16));
8930 case uint32_t(ThreadOp::I64AtomicCmpXchg8U):
8931 CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Uint8));
8932 case uint32_t(ThreadOp::I64AtomicCmpXchg16U):
8933 CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Uint16));
8934 case uint32_t(ThreadOp::I64AtomicCmpXchg32U):
8935 CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Uint32));
8937 default:
8938 return f.iter().unrecognizedOpcode(&op);
8940 break;
8943 // asm.js-specific operators
8944 case uint16_t(Op::MozPrefix): {
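// MozOp::CallBuiltinModuleFunc is handled first because it is the one MozOp
// that is valid outside asm.js (only inside builtin/intrinsic modules); all
// remaining MozOps require asm.js.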
8945 if (op.b1 == uint32_t(MozOp::CallBuiltinModuleFunc)) {
8946 if (!f.moduleEnv().isBuiltinModule()) {
8947 return f.iter().unrecognizedOpcode(&op);
8949 CHECK(EmitCallBuiltinModuleFunc(f));
8952 if (!f.moduleEnv().isAsmJS()) {
8953 return f.iter().unrecognizedOpcode(&op);
8955 switch (op.b1) {
8956 case uint32_t(MozOp::TeeGlobal):
8957 CHECK(EmitTeeGlobal(f));
8958 case uint32_t(MozOp::I32Min):
8959 case uint32_t(MozOp::I32Max):
8960 CHECK(EmitMinMax(f, ValType::I32, MIRType::Int32,
8961 MozOp(op.b1) == MozOp::I32Max));
8962 case uint32_t(MozOp::I32Neg):
8963 CHECK(EmitUnaryWithType<MWasmNeg>(f, ValType::I32, MIRType::Int32));
8964 case uint32_t(MozOp::I32BitNot):
8965 CHECK(EmitBitNot(f, ValType::I32));
8966 case uint32_t(MozOp::I32Abs):
8967 CHECK(EmitUnaryWithType<MAbs>(f, ValType::I32, MIRType::Int32));
8968 case uint32_t(MozOp::F32TeeStoreF64):
8969 CHECK(EmitTeeStoreWithCoercion(f, ValType::F32, Scalar::Float64));
8970 case uint32_t(MozOp::F64TeeStoreF32):
8971 CHECK(EmitTeeStoreWithCoercion(f, ValType::F64, Scalar::Float32));
8972 case uint32_t(MozOp::I32TeeStore8):
8973 CHECK(EmitTeeStore(f, ValType::I32, Scalar::Int8));
8974 case uint32_t(MozOp::I32TeeStore16):
8975 CHECK(EmitTeeStore(f, ValType::I32, Scalar::Int16));
8976 case uint32_t(MozOp::I64TeeStore8):
8977 CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int8));
8978 case uint32_t(MozOp::I64TeeStore16):
8979 CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int16));
8980 case uint32_t(MozOp::I64TeeStore32):
8981 CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int32));
8982 case uint32_t(MozOp::I32TeeStore):
8983 CHECK(EmitTeeStore(f, ValType::I32, Scalar::Int32));
8984 case uint32_t(MozOp::I64TeeStore):
8985 CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int64));
8986 case uint32_t(MozOp::F32TeeStore):
8987 CHECK(EmitTeeStore(f, ValType::F32, Scalar::Float32));
8988 case uint32_t(MozOp::F64TeeStore):
8989 CHECK(EmitTeeStore(f, ValType::F64, Scalar::Float64));
8990 case uint32_t(MozOp::F64Mod):
8991 CHECK(EmitRem(f, ValType::F64, MIRType::Double,
8992 /* isUnsigned = */ false));
8993 case uint32_t(MozOp::F64SinNative):
8994 CHECK(EmitUnaryMathBuiltinCall(f, SASigSinNativeD));
8995 case uint32_t(MozOp::F64SinFdlibm):
8996 CHECK(EmitUnaryMathBuiltinCall(f, SASigSinFdlibmD));
8997 case uint32_t(MozOp::F64CosNative):
8998 CHECK(EmitUnaryMathBuiltinCall(f, SASigCosNativeD));
8999 case uint32_t(MozOp::F64CosFdlibm):
9000 CHECK(EmitUnaryMathBuiltinCall(f, SASigCosFdlibmD));
9001 case uint32_t(MozOp::F64TanNative):
9002 CHECK(EmitUnaryMathBuiltinCall(f, SASigTanNativeD));
9003 case uint32_t(MozOp::F64TanFdlibm):
9004 CHECK(EmitUnaryMathBuiltinCall(f, SASigTanFdlibmD));
9005 case uint32_t(MozOp::F64Asin):
9006 CHECK(EmitUnaryMathBuiltinCall(f, SASigASinD));
9007 case uint32_t(MozOp::F64Acos):
9008 CHECK(EmitUnaryMathBuiltinCall(f, SASigACosD));
9009 case uint32_t(MozOp::F64Atan):
9010 CHECK(EmitUnaryMathBuiltinCall(f, SASigATanD));
9011 case uint32_t(MozOp::F64Exp):
9012 CHECK(EmitUnaryMathBuiltinCall(f, SASigExpD));
9013 case uint32_t(MozOp::F64Log):
9014 CHECK(EmitUnaryMathBuiltinCall(f, SASigLogD));
9015 case uint32_t(MozOp::F64Pow):
9016 CHECK(EmitBinaryMathBuiltinCall(f, SASigPowD));
9017 case uint32_t(MozOp::F64Atan2):
9018 CHECK(EmitBinaryMathBuiltinCall(f, SASigATan2D));
9019 case uint32_t(MozOp::OldCallDirect):
9020 CHECK(EmitCall(f, /* asmJSFuncDef = */ true));
9021 case uint32_t(MozOp::OldCallIndirect):
9022 CHECK(EmitCallIndirect(f, /* oldStyle = */ true));
9024 default:
9025 return f.iter().unrecognizedOpcode(&op);
9027 break;
9030 default:
9031 return f.iter().unrecognizedOpcode(&op);
9035 MOZ_CRASH("unreachable");
9037 #undef CHECK
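// Ion-tier compilation entry point: for each function in `inputs`, decode its
// locals, build and optimize a MIR graph, generate LIR and machine code into a
// shared WasmMacroAssembler, and record the resulting code range (plus stack
// maps and unwind info) into `code`.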
9040 bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
9041 const CompilerEnvironment& compilerEnv,
9042 LifoAlloc& lifo,
9043 const FuncCompileInputVector& inputs,
9044 CompiledCode* code, UniqueChars* error) {
9045 MOZ_ASSERT(compilerEnv.tier() == Tier::Optimized);
9046 MOZ_ASSERT(compilerEnv.debug() == DebugEnabled::False);
9048 TempAllocator alloc(&lifo);
9049 JitContext jitContext;
9050 MOZ_ASSERT(IsCompilingWasm());
9051 WasmMacroAssembler masm(alloc, moduleEnv);
9052 #if defined(JS_CODEGEN_ARM64)
9053 masm.SetStackPointer64(PseudoStackPointer64);
9054 #endif
9056 // Swap in already-allocated empty vectors to avoid malloc/free.
9057 MOZ_ASSERT(code->empty());
9058 if (!code->swap(masm)) {
9059 return false;
9062 // Create a description of the stack layout created by GenerateTrapExit().
9063 RegisterOffsets trapExitLayout;
9064 size_t trapExitLayoutNumWords;
9065 GenerateTrapExitRegisterOffsets(&trapExitLayout, &trapExitLayoutNumWords);
9067 for (const FuncCompileInput& func : inputs) {
9068 JitSpewCont(JitSpew_Codegen, "\n");
9069 JitSpew(JitSpew_Codegen,
9070 "# ================================"
9071 "==================================");
9072 JitSpew(JitSpew_Codegen, "# ==");
9073 JitSpew(JitSpew_Codegen,
9074 "# wasm::IonCompileFunctions: starting on function index %d",
9075 (int)func.index);
9077 Decoder d(func.begin, func.end, func.lineOrBytecode, error);
9079 // Build the local types vector.
9081 const FuncType& funcType = *moduleEnv.funcs[func.index].type;
9082 ValTypeVector locals;
9083 if (!locals.appendAll(funcType.args())) {
9084 return false;
9086 if (!DecodeLocalEntries(d, *moduleEnv.types, moduleEnv.features, &locals)) {
9087 return false;
9090 // Set up for Ion compilation.
9092 const JitCompileOptions options;
9093 MIRGraph graph(&alloc);
9094 CompileInfo compileInfo(locals.length());
9095 MIRGenerator mir(nullptr, options, &alloc, &graph, &compileInfo,
9096 IonOptimizations.get(OptimizationLevel::Wasm));
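// Tell the MIR generator the declared minimum length of memory 0, giving
// later passes a static lower bound on that memory's size.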
9097 if (moduleEnv.numMemories() > 0) {
9098 if (moduleEnv.memories[0].indexType() == IndexType::I32) {
9099 mir.initMinWasmMemory0Length(moduleEnv.memories[0].initialLength32());
9100 } else {
9101 mir.initMinWasmMemory0Length(moduleEnv.memories[0].initialLength64());
9105 // Build MIR graph
9107 FunctionCompiler f(moduleEnv, d, func, locals, mir, masm.tryNotes());
9108 if (!f.init()) {
9109 return false;
9112 if (!f.startBlock()) {
9113 return false;
9116 if (!EmitBodyExprs(f)) {
9117 return false;
9120 f.finish();
9123 // Compile MIR graph
9125 jit::SpewBeginWasmFunction(&mir, func.index);
9126 jit::AutoSpewEndFunction spewEndFunction(&mir);
9128 if (!OptimizeMIR(&mir)) {
9129 return false;
9132 LIRGraph* lir = GenerateLIR(&mir);
9133 if (!lir) {
9134 return false;
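// Record how many unwind-info entries the assembler holds before codegen, so
// we can tell afterwards whether this function added any.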
9137 size_t unwindInfoBefore = masm.codeRangeUnwindInfos().length();
9139 CodeGenerator codegen(&mir, lir, &masm);
9141 BytecodeOffset prologueTrapOffset(func.lineOrBytecode);
9142 FuncOffsets offsets;
9143 ArgTypeVector args(funcType);
9144 if (!codegen.generateWasm(CallIndirectId::forFunc(moduleEnv, func.index),
9145 prologueTrapOffset, args, trapExitLayout,
9146 trapExitLayoutNumWords, &offsets,
9147 &code->stackMaps, &d)) {
9148 return false;
9151 bool hasUnwindInfo =
9152 unwindInfoBefore != masm.codeRangeUnwindInfos().length();
9153 if (!code->codeRanges.emplaceBack(func.index, func.lineOrBytecode,
9154 offsets, hasUnwindInfo)) {
9155 return false;
9159 JitSpew(JitSpew_Codegen,
9160 "# wasm::IonCompileFunctions: completed function index %d",
9161 (int)func.index);
9162 JitSpew(JitSpew_Codegen, "# ==");
9163 JitSpew(JitSpew_Codegen,
9164 "# ================================"
9165 "==================================");
9166 JitSpewCont(JitSpew_Codegen, "\n");
9169 masm.finish();
9170 if (masm.oom()) {
9171 return false;
9174 return code->swap(masm);
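// Reports whether the optimizing (Ion) wasm tier is available at all on the
// current target architecture.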
9177 bool js::wasm::IonPlatformSupport() {
9178 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \
9179 defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS64) || \
9180 defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_LOONG64) || \
9181 defined(JS_CODEGEN_RISCV64)
9182 return true;
9183 #else
9184 return false;
9185 #endif